Upgrade V8 to 3.9.2
parent 23514fc946
commit 8be699494e
deps/v8/AUTHORS (vendored, 1 change)
@@ -8,6 +8,7 @@ Sigma Designs Inc.
ARM Ltd.
Hewlett-Packard Development Company, LP
Igalia, S.L.
Joyent, Inc.

Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
deps/v8/ChangeLog (vendored, 29 changes)
@@ -1,3 +1,32 @@
2012-02-06: Version 3.9.2

        Add timestamp to --trace-gc output. (issue 1932)

        Heap profiler reports implicit references.

        Optionally export metadata with libv8 to enable debuggers to inspect V8
        state.


2012-02-02: Version 3.9.1

        Fixed memory leak in NativeObjectsExplorer::FindOrAddGroupInfo
        (Chromium issue 112315).

        Fixed a crash in dev tools (Chromium issue 107996).

        Added 'dependencies_traverse': 1 to v8 GYP target.

        Performance and stability improvements on all platforms.


2012-02-01: Version 3.9.0

        Reduce memory use immediately after starting V8.

        Stability fixes and performance improvements on all platforms.


2012-01-26: Version 3.8.9

        Flush number string cache on GC (issue 1605).
deps/v8/build/common.gypi (vendored, 6 changes)
@@ -85,6 +85,11 @@
    'v8_use_liveobjectlist%': 'false',
    'werror%': '-Werror',

    # With post mortem support enabled, metadata is embedded into libv8 that
    # describes various parameters of the VM for use by debuggers. See
    # tools/gen-postmortem-metadata.py for details.
    'v8_postmortem_support%': 'false',

    # For a shared library build, results in "libv8-<(soname_version).so".
    'soname_version%': '',
  },
@@ -322,6 +327,7 @@
      }],  # OS=="mac"
      ['OS=="win"', {
        'msvs_configuration_attributes': {
          'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
          'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
          'CharacterSet': '1',
        },
deps/v8/include/v8-profiler.h (vendored, 13 changes)
@@ -476,11 +476,22 @@ class V8EXPORT RetainedObjectInfo { // NOLINT
  virtual intptr_t GetHash() = 0;

  /**
   * Returns human-readable label. It must be a NUL-terminated UTF-8
   * Returns human-readable label. It must be a null-terminated UTF-8
   * encoded string. V8 copies its contents during a call to GetLabel.
   */
  virtual const char* GetLabel() = 0;

  /**
   * Returns human-readable group label. It must be a null-terminated UTF-8
   * encoded string. V8 copies its contents during a call to GetGroupLabel.
   * Heap snapshot generator will collect all the group names, create
   * top level entries with these names and attach the objects to the
   * corresponding top level group objects. There is a default
   * implementation which is required because embedders don't have their
   * own implementation yet.
   */
  virtual const char* GetGroupLabel() { return GetLabel(); }

  /**
   * Returns element count in case if a global handle retains
   * a subgraph by holding one of its nodes.
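As an illustration of the GetGroupLabel() hook added above, here is a minimal embedder-side sketch. It is hypothetical and not part of this commit; it assumes Dispose() and IsEquivalent() are the interface's remaining pure virtual methods, since only GetHash(), GetLabel() and GetGroupLabel() appear in the hunk.

// Hypothetical sketch: group all native wrappers under one snapshot entry.
#include <v8-profiler.h>

class WrapperInfo : public v8::RetainedObjectInfo {
 public:
  explicit WrapperInfo(intptr_t hash) : hash_(hash) {}
  virtual void Dispose() { delete this; }                 // assumed pure virtual
  virtual bool IsEquivalent(RetainedObjectInfo* other) {  // assumed pure virtual
    return GetHash() == other->GetHash();
  }
  virtual intptr_t GetHash() { return hash_; }
  virtual const char* GetLabel() { return "NativeWrapper"; }
  // Without this override the default added in this hunk falls back to
  // GetLabel(); overriding it collects wrappers under one top-level group.
  virtual const char* GetGroupLabel() { return "Native objects"; }
 private:
  intptr_t hash_;
};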
deps/v8/include/v8.h (vendored, 6 changes)
@@ -3538,6 +3538,12 @@ class V8EXPORT Context {
   */
  void AllowCodeGenerationFromStrings(bool allow);

  /**
   * Returns true if code generation from strings is allowed for the context.
   * For more details see AllowCodeGenerationFromStrings(bool) documentation.
   */
  bool IsCodeGenerationFromStringsAllowed();

  /**
   * Stack-allocated class which sets the execution context for all
   * operations executed within a local scope.
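A short usage sketch for the new query (illustrative only, not part of this commit; it uses the V8 3.9-era default-isolate API):

// Minimal sketch: pair the existing setter with the new getter.
#include <v8.h>

int main() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  context->AllowCodeGenerationFromStrings(false);                // existing API
  bool allowed = context->IsCodeGenerationFromStringsAllowed();  // new in 3.9
  // allowed is now false: compiling code from strings (e.g. eval) is
  // disallowed in this context.
  context.Dispose();
  return allowed ? 1 : 0;
}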
deps/v8/src/accessors.cc (vendored, 10 changes)
@@ -487,16 +487,6 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
                                 NONE);
  }

  if (function->has_initial_map()) {
    // If the function has allocated the initial map
    // replace it with a copy containing the new prototype.
    Object* new_map;
    { MaybeObject* maybe_new_map =
          function->initial_map()->CopyDropTransitions();
      if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
    }
    function->set_initial_map(Map::cast(new_map));
  }
  Object* prototype;
  { MaybeObject* maybe_prototype = function->SetPrototype(value);
    if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
deps/v8/src/api.cc (vendored, 16 changes)
@@ -4077,7 +4077,7 @@ bool v8::V8::IdleNotification(int hint) {
void v8::V8::LowMemoryNotification() {
  i::Isolate* isolate = i::Isolate::Current();
  if (isolate == NULL || !isolate->IsInitialized()) return;
  isolate->heap()->CollectAllAvailableGarbage();
  isolate->heap()->CollectAllAvailableGarbage("low memory notification");
}


@@ -4313,6 +4313,20 @@ void Context::AllowCodeGenerationFromStrings(bool allow) {
}


bool Context::IsCodeGenerationFromStringsAllowed() {
  i::Isolate* isolate = i::Isolate::Current();
  if (IsDeadCheck(isolate,
                  "v8::Context::IsCodeGenerationFromStringsAllowed()")) {
    return false;
  }
  ENTER_V8(isolate);
  i::Object** ctx = reinterpret_cast<i::Object**>(this);
  i::Handle<i::Context> context =
      i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
  return !context->allow_code_gen_from_strings()->IsFalse();
}


void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
  i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
}
deps/v8/src/arm/builtins-arm.cc (vendored, 53 changes)
@ -114,9 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
|
||||
Label* gc_required) {
|
||||
const int initial_capacity = JSArray::kPreallocatedArrayElements;
|
||||
STATIC_ASSERT(initial_capacity >= 0);
|
||||
// Load the initial map from the array function.
|
||||
__ ldr(scratch1, FieldMemOperand(array_function,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
|
||||
|
||||
// Allocate the JSArray object together with space for a fixed array with the
|
||||
// requested elements.
|
||||
@ -210,9 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
|
||||
bool fill_with_hole,
|
||||
Label* gc_required) {
|
||||
// Load the initial map from the array function.
|
||||
__ ldr(elements_array_storage,
|
||||
FieldMemOperand(array_function,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
__ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
|
||||
|
||||
if (FLAG_debug_code) { // Assert that array size is not zero.
|
||||
__ tst(array_size, array_size);
|
||||
@ -667,7 +663,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
|
||||
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
bool is_api_function,
|
||||
bool count_constructions) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- r0 : number of arguments
|
||||
// -- r1 : constructor function
|
||||
@ -675,42 +673,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
|
||||
// -- sp[...]: constructor arguments
|
||||
// -----------------------------------
|
||||
|
||||
Label slow, non_function_call;
|
||||
// Check that the function is not a smi.
|
||||
__ JumpIfSmi(r1, &non_function_call);
|
||||
// Check that the function is a JSFunction.
|
||||
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
|
||||
__ b(ne, &slow);
|
||||
|
||||
// Jump to the function-specific construct stub.
|
||||
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
|
||||
__ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
|
||||
// r0: number of arguments
|
||||
// r1: called object
|
||||
// r2: object type
|
||||
Label do_call;
|
||||
__ bind(&slow);
|
||||
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
|
||||
__ b(ne, &non_function_call);
|
||||
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
|
||||
__ jmp(&do_call);
|
||||
|
||||
__ bind(&non_function_call);
|
||||
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
|
||||
__ bind(&do_call);
|
||||
// Set expected number of arguments to zero (not changing r0).
|
||||
__ mov(r2, Operand(0, RelocInfo::NONE));
|
||||
__ SetCallKind(r5, CALL_AS_METHOD);
|
||||
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
||||
RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
|
||||
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
bool is_api_function,
|
||||
bool count_constructions) {
|
||||
// Should never count constructions for api objects.
|
||||
ASSERT(!is_api_function || !count_constructions);
|
||||
|
||||
@ -1117,7 +1079,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
|
||||
// Invoke the code and pass argc as r0.
|
||||
__ mov(r0, Operand(r3));
|
||||
if (is_construct) {
|
||||
__ Call(masm->isolate()->builtins()->JSConstructCall());
|
||||
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
|
||||
__ CallStub(&stub);
|
||||
} else {
|
||||
ParameterCount actual(r0);
|
||||
__ InvokeFunction(r1, actual, CALL_FUNCTION,
|
||||
@ -1297,7 +1260,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
|
||||
// 1. Make sure we have at least one argument.
|
||||
// r0: actual number of arguments
|
||||
{ Label done;
|
||||
__ tst(r0, Operand(r0));
|
||||
__ cmp(r0, Operand(0));
|
||||
__ b(ne, &done);
|
||||
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
|
||||
__ push(r2);
|
||||
|
deps/v8/src/arm/code-stubs-arm.cc (vendored, 207 changes)
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -122,7 +122,6 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
|
||||
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
|
||||
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
|
||||
|
||||
|
||||
// Initialize the code pointer in the function to be the one
|
||||
// found in the shared function info object.
|
||||
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
|
||||
@ -157,20 +156,18 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
|
||||
__ ldr(r3, MemOperand(sp, 0));
|
||||
|
||||
// Set up the object header.
|
||||
__ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
|
||||
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
|
||||
__ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
|
||||
__ mov(r2, Operand(Smi::FromInt(length)));
|
||||
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
|
||||
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
|
||||
|
||||
// Set up the fixed slots.
|
||||
// Set up the fixed slots, copy the global object from the previous context.
|
||||
__ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
__ mov(r1, Operand(Smi::FromInt(0)));
|
||||
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
|
||||
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
|
||||
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
|
||||
|
||||
// Copy the global object from the previous context.
|
||||
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
__ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
|
||||
// Initialize the rest of the slots to undefined.
|
||||
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
|
||||
@ -229,14 +226,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
|
||||
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
|
||||
__ bind(&after_sentinel);
|
||||
|
||||
// Set up the fixed slots.
|
||||
// Set up the fixed slots, copy the global object from the previous context.
|
||||
__ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
|
||||
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
|
||||
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
|
||||
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
|
||||
|
||||
// Copy the global object from the previous context.
|
||||
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
|
||||
__ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
|
||||
__ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
|
||||
|
||||
// Initialize the rest of the slots to the hole value.
|
||||
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
|
||||
@ -326,8 +321,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
|
||||
Label double_elements, check_fast_elements;
|
||||
__ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
|
||||
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
|
||||
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
|
||||
__ cmp(r0, ip);
|
||||
__ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
|
||||
__ b(ne, &check_fast_elements);
|
||||
GenerateFastCloneShallowArrayCommon(masm, 0,
|
||||
COPY_ON_WRITE_ELEMENTS, &slow_case);
|
||||
@ -336,8 +330,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
|
||||
__ Ret();
|
||||
|
||||
__ bind(&check_fast_elements);
|
||||
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
|
||||
__ cmp(r0, ip);
|
||||
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
|
||||
__ b(ne, &double_elements);
|
||||
GenerateFastCloneShallowArrayCommon(masm, length_,
|
||||
CLONE_ELEMENTS, &slow_case);
|
||||
@ -590,7 +583,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
||||
|
||||
Label is_smi, done;
|
||||
|
||||
__ JumpIfSmi(object, &is_smi);
|
||||
// Smi-check
|
||||
__ UntagAndJumpIfSmi(scratch1, object, &is_smi);
|
||||
// Heap number check
|
||||
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
|
||||
|
||||
// Handle loading a double from a heap number.
|
||||
@ -612,7 +607,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
||||
if (CpuFeatures::IsSupported(VFP3)) {
|
||||
CpuFeatures::Scope scope(VFP3);
|
||||
// Convert smi to double using VFP instructions.
|
||||
__ SmiUntag(scratch1, object);
|
||||
__ vmov(dst.high(), scratch1);
|
||||
__ vcvt_f64_s32(dst, dst.high());
|
||||
if (destination == kCoreRegisters) {
|
||||
@ -647,11 +641,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
|
||||
Heap::kHeapNumberMapRootIndex,
|
||||
"HeapNumberMap register clobbered.");
|
||||
}
|
||||
Label is_smi;
|
||||
Label done;
|
||||
Label not_in_int32_range;
|
||||
|
||||
__ JumpIfSmi(object, &is_smi);
|
||||
__ UntagAndJumpIfSmi(dst, object, &done);
|
||||
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
|
||||
__ cmp(scratch1, heap_number_map);
|
||||
__ b(ne, not_number);
|
||||
@ -671,10 +664,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
|
||||
scratch1,
|
||||
scratch2,
|
||||
scratch3);
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&is_smi);
|
||||
__ SmiUntag(dst, object);
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
@ -847,10 +836,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
|
||||
|
||||
Label done;
|
||||
|
||||
// Untag the object into the destination register.
|
||||
__ SmiUntag(dst, object);
|
||||
// Just return if the object is a smi.
|
||||
__ JumpIfSmi(object, &done);
|
||||
__ UntagAndJumpIfSmi(dst, object, &done);
|
||||
|
||||
if (FLAG_debug_code) {
|
||||
__ AbortIfNotRootValue(heap_number_map,
|
||||
@ -2338,7 +2324,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
|
||||
__ cmp(ip, Operand(scratch2));
|
||||
__ b(ne, ¬_smi_result);
|
||||
// Go slow on zero result to handle -0.
|
||||
__ tst(scratch1, Operand(scratch1));
|
||||
__ cmp(scratch1, Operand(0));
|
||||
__ mov(right, Operand(scratch1), LeaveCC, ne);
|
||||
__ Ret(ne);
|
||||
// We need -0 if we were multiplying a negative number with 0 to get 0.
|
||||
@ -3310,8 +3296,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
||||
// Check if cache matches: Double value is stored in uint32_t[2] array.
|
||||
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
|
||||
__ cmp(r2, r4);
|
||||
__ b(ne, &calculate);
|
||||
__ cmp(r3, r5);
|
||||
__ cmp(r3, r5, eq);
|
||||
__ b(ne, &calculate);
|
||||
// Cache hit. Load result, cleanup and return.
|
||||
Counters* counters = masm->isolate()->counters();
|
||||
@ -3468,7 +3453,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
const Register scratch = r9;
|
||||
const Register scratch2 = r7;
|
||||
|
||||
Label call_runtime, done, exponent_not_smi, int_exponent;
|
||||
Label call_runtime, done, int_exponent;
|
||||
if (exponent_type_ == ON_STACK) {
|
||||
Label base_is_smi, unpack_exponent;
|
||||
// The exponent and base are supplied as arguments on the stack.
|
||||
@ -3479,7 +3464,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
|
||||
|
||||
__ JumpIfSmi(base, &base_is_smi);
|
||||
__ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
|
||||
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
|
||||
__ cmp(scratch, heapnumbermap);
|
||||
__ b(ne, &call_runtime);
|
||||
@ -3488,16 +3473,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
__ jmp(&unpack_exponent);
|
||||
|
||||
__ bind(&base_is_smi);
|
||||
__ SmiUntag(base);
|
||||
__ vmov(single_scratch, base);
|
||||
__ vmov(single_scratch, scratch);
|
||||
__ vcvt_f64_s32(double_base, single_scratch);
|
||||
__ bind(&unpack_exponent);
|
||||
|
||||
__ JumpIfNotSmi(exponent, &exponent_not_smi);
|
||||
__ SmiUntag(exponent);
|
||||
__ jmp(&int_exponent);
|
||||
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
||||
|
||||
__ bind(&exponent_not_smi);
|
||||
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
|
||||
__ cmp(scratch, heapnumbermap);
|
||||
__ b(ne, &call_runtime);
|
||||
@ -3505,11 +3486,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
||||
} else if (exponent_type_ == TAGGED) {
|
||||
// Base is already in double_base.
|
||||
__ JumpIfNotSmi(exponent, &exponent_not_smi);
|
||||
__ SmiUntag(exponent);
|
||||
__ jmp(&int_exponent);
|
||||
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
||||
|
||||
__ bind(&exponent_not_smi);
|
||||
__ vldr(double_exponent,
|
||||
FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
||||
}
|
||||
@ -3582,13 +3560,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
__ bind(&int_exponent_convert);
|
||||
__ vcvt_u32_f64(single_scratch, double_exponent);
|
||||
__ vmov(exponent, single_scratch);
|
||||
__ vmov(scratch, single_scratch);
|
||||
}
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
|
||||
__ mov(scratch, exponent); // Back up exponent.
|
||||
// Get two copies of exponent in the registers scratch and exponent.
|
||||
if (exponent_type_ == INTEGER) {
|
||||
__ mov(scratch, exponent);
|
||||
} else {
|
||||
// Exponent has previously been stored into scratch as untagged integer.
|
||||
__ mov(exponent, scratch);
|
||||
}
|
||||
__ vmov(double_scratch, double_base); // Back up base.
|
||||
__ vmov(double_result, 1.0);
|
||||
|
||||
@ -4098,11 +4082,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
|
||||
// real lookup and update the call site cache.
|
||||
if (!HasCallSiteInlineCheck()) {
|
||||
Label miss;
|
||||
__ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
|
||||
__ cmp(function, ip);
|
||||
__ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
|
||||
__ b(ne, &miss);
|
||||
__ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
|
||||
__ cmp(map, ip);
|
||||
__ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
|
||||
__ b(ne, &miss);
|
||||
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
|
||||
__ Ret(HasArgsInRegisters() ? 0 : 2);
|
||||
@ -4656,7 +4638,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
||||
ExternalReference::address_of_regexp_stack_memory_size(isolate);
|
||||
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
|
||||
__ ldr(r0, MemOperand(r0, 0));
|
||||
__ tst(r0, Operand(r0));
|
||||
__ cmp(r0, Operand(0));
|
||||
__ b(eq, &runtime);
|
||||
|
||||
// Check that the first argument is a JSRegExp object.
|
||||
@ -4727,8 +4709,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
|
||||
__ ldr(last_match_info_elements,
|
||||
FieldMemOperand(r0, JSArray::kElementsOffset));
|
||||
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
|
||||
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
|
||||
__ cmp(r0, ip);
|
||||
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
|
||||
__ b(ne, &runtime);
|
||||
// Check that the last match info has space for the capture registers and the
|
||||
// additional information.
|
||||
@ -5082,11 +5063,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
// Set input, index and length fields from arguments.
|
||||
__ ldr(r1, MemOperand(sp, kPointerSize * 0));
|
||||
__ ldr(r2, MemOperand(sp, kPointerSize * 1));
|
||||
__ ldr(r6, MemOperand(sp, kPointerSize * 2));
|
||||
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
|
||||
__ ldr(r1, MemOperand(sp, kPointerSize * 1));
|
||||
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
|
||||
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
|
||||
__ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
|
||||
__ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
|
||||
__ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
|
||||
|
||||
// Fill out the elements FixedArray.
|
||||
// r0: JSArray, tagged.
|
||||
@ -5108,9 +5089,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
||||
// r3: Start of elements in FixedArray.
|
||||
// r5: Number of elements to fill.
|
||||
Label loop;
|
||||
__ tst(r5, Operand(r5));
|
||||
__ cmp(r5, Operand(0));
|
||||
__ bind(&loop);
|
||||
__ b(le, &done); // Jump if r1 is negative or zero.
|
||||
__ b(le, &done); // Jump if r5 is negative or zero.
|
||||
__ sub(r5, r5, Operand(1), SetCC);
|
||||
__ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
|
||||
__ jmp(&loop);
|
||||
@ -5124,24 +5105,48 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void CallFunctionStub::FinishCode(Handle<Code> code) {
|
||||
code->set_has_function_cache(false);
|
||||
}
|
||||
static void GenerateRecordCallTarget(MacroAssembler* masm) {
|
||||
// Cache the called function in a global property cell. Cache states
|
||||
// are uninitialized, monomorphic (indicated by a JSFunction), and
|
||||
// megamorphic.
|
||||
// r1 : the function to call
|
||||
// r2 : cache cell for call target
|
||||
Label done;
|
||||
|
||||
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
|
||||
masm->isolate()->heap()->undefined_value());
|
||||
ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
|
||||
masm->isolate()->heap()->the_hole_value());
|
||||
|
||||
void CallFunctionStub::Clear(Heap* heap, Address address) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
// Load the cache state into r3.
|
||||
__ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
|
||||
|
||||
// A monomorphic cache hit or an already megamorphic state: invoke the
|
||||
// function without changing the state.
|
||||
__ cmp(r3, r1);
|
||||
__ b(eq, &done);
|
||||
__ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
|
||||
__ b(eq, &done);
|
||||
|
||||
Object* CallFunctionStub::GetCachedValue(Address address) {
|
||||
UNREACHABLE();
|
||||
return NULL;
|
||||
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
|
||||
// megamorphic.
|
||||
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
|
||||
// MegamorphicSentinel is an immortal immovable object (undefined) so no
|
||||
// write-barrier is needed.
|
||||
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
|
||||
__ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
|
||||
|
||||
// An uninitialized cache is patched with the function.
|
||||
__ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
|
||||
// No need for a write barrier here - cells are rescanned.
|
||||
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
// r1 : the function to call
|
||||
// r2 : cache cell for call target
|
||||
Label slow, non_function;
|
||||
|
||||
// The receiver might implicitly be the global object. This is
|
||||
@ -5219,6 +5224,48 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void CallConstructStub::Generate(MacroAssembler* masm) {
|
||||
// r0 : number of arguments
|
||||
// r1 : the function to call
|
||||
// r2 : cache cell for call target
|
||||
Label slow, non_function_call;
|
||||
|
||||
// Check that the function is not a smi.
|
||||
__ JumpIfSmi(r1, &non_function_call);
|
||||
// Check that the function is a JSFunction.
|
||||
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
|
||||
__ b(ne, &slow);
|
||||
|
||||
if (RecordCallTarget()) {
|
||||
GenerateRecordCallTarget(masm);
|
||||
}
|
||||
|
||||
// Jump to the function-specific construct stub.
|
||||
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
|
||||
__ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
|
||||
// r0: number of arguments
|
||||
// r1: called object
|
||||
// r3: object type
|
||||
Label do_call;
|
||||
__ bind(&slow);
|
||||
__ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
|
||||
__ b(ne, &non_function_call);
|
||||
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
|
||||
__ jmp(&do_call);
|
||||
|
||||
__ bind(&non_function_call);
|
||||
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
|
||||
__ bind(&do_call);
|
||||
// Set expected number of arguments to zero (not changing r0).
|
||||
__ mov(r2, Operand(0, RelocInfo::NONE));
|
||||
__ SetCallKind(r5, CALL_AS_METHOD);
|
||||
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
||||
RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
|
||||
// Unfortunately you have to run without snapshots to see most of these
|
||||
// names in the profile since most compare stubs end up in the snapshot.
|
||||
void CompareStub::PrintName(StringStream* stream) {
|
||||
@ -5370,8 +5417,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
|
||||
STATIC_ASSERT(kSmiTag == 0);
|
||||
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
|
||||
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
|
||||
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
||||
__ cmp(result_, Operand(ip));
|
||||
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
|
||||
__ b(eq, &slow_case_);
|
||||
__ bind(&exit_);
|
||||
}
|
||||
@ -5799,10 +5845,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
||||
__ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
|
||||
// If either to or from had the smi tag bit set, then carry is set now.
|
||||
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
|
||||
__ b(mi, &runtime); // From is negative.
|
||||
|
||||
// We want to bailout to runtime here if From is negative. In that case, the
|
||||
// next instruction is not executed and we fall through to bailing out to
|
||||
// runtime. pl is the opposite of mi.
|
||||
// Both r2 and r3 are untagged integers.
|
||||
__ sub(r2, r2, Operand(r3), SetCC);
|
||||
__ sub(r2, r2, Operand(r3), SetCC, pl);
|
||||
__ b(mi, &runtime); // Fail if from > to.
|
||||
|
||||
// Make sure first argument is a string.
|
||||
@ -5875,9 +5922,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
__ bind(&sliced_string);
|
||||
// Sliced string. Fetch parent and correct start index by offset.
|
||||
__ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
|
||||
__ add(r3, r3, Operand(r5, ASR, 1));
|
||||
__ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
|
||||
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
|
||||
__ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
|
||||
// Update instance type.
|
||||
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
|
||||
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
|
||||
@ -6020,7 +6067,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
|
||||
Label compare_chars;
|
||||
__ bind(&check_zero_length);
|
||||
STATIC_ASSERT(kSmiTag == 0);
|
||||
__ tst(length, Operand(length));
|
||||
__ cmp(length, Operand(0));
|
||||
__ b(ne, &compare_chars);
|
||||
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
|
||||
__ Ret();
|
||||
@ -6053,7 +6100,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
|
||||
__ mov(scratch1, scratch2, LeaveCC, gt);
|
||||
Register min_length = scratch1;
|
||||
STATIC_ASSERT(kSmiTag == 0);
|
||||
__ tst(min_length, Operand(min_length));
|
||||
__ cmp(min_length, Operand(0));
|
||||
__ b(eq, &compare_lengths);
|
||||
|
||||
// Compare loop.
|
||||
@ -6811,7 +6858,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
|
||||
__ mov(r1, Operand(Handle<String>(name)));
|
||||
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
|
||||
__ CallStub(&stub);
|
||||
__ tst(r0, Operand(r0));
|
||||
__ cmp(r0, Operand(0));
|
||||
__ ldm(ia_w, sp, spill_mask);
|
||||
|
||||
__ b(eq, done);
|
||||
@ -6888,7 +6935,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
|
||||
}
|
||||
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
|
||||
__ CallStub(&stub);
|
||||
__ tst(r0, Operand(r0));
|
||||
__ cmp(r0, Operand(0));
|
||||
__ mov(scratch2, Operand(r2));
|
||||
__ ldm(ia_w, sp, spill_mask);
|
||||
|
||||
|
deps/v8/src/arm/codegen-arm.cc (vendored, 24 changes)
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -104,10 +104,10 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
|
||||
__ add(lr, lr, Operand(r5, LSL, 2));
|
||||
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
|
||||
// r6: destination FixedDoubleArray, not tagged as heap object
|
||||
// Set destination FixedDoubleArray's length and map.
|
||||
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
|
||||
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
|
||||
// Set destination FixedDoubleArray's length.
|
||||
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
|
||||
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
|
||||
// Update receiver's map.
|
||||
|
||||
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
|
||||
@ -155,10 +155,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
|
||||
__ bind(&loop);
|
||||
__ ldr(r9, MemOperand(r3, 4, PostIndex));
|
||||
// r9: current element
|
||||
__ JumpIfNotSmi(r9, &convert_hole);
|
||||
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
|
||||
|
||||
// Normal smi, convert to double and store.
|
||||
__ SmiUntag(r9);
|
||||
if (vfp3_supported) {
|
||||
CpuFeatures::Scope scope(VFP3);
|
||||
__ vmov(s0, r9);
|
||||
@ -181,6 +180,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
|
||||
// Hole found, store the-hole NaN.
|
||||
__ bind(&convert_hole);
|
||||
if (FLAG_debug_code) {
|
||||
// Restore a "smi-untagged" heap object.
|
||||
__ SmiTag(r9);
|
||||
__ orr(r9, r9, Operand(1));
|
||||
__ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
|
||||
__ Assert(eq, "object found in smi-only array");
|
||||
}
|
||||
@ -208,9 +210,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
|
||||
Label entry, loop, convert_hole, gc_required;
|
||||
|
||||
__ push(lr);
|
||||
__ Push(r3, r2, r1, r0);
|
||||
|
||||
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
|
||||
__ Push(r3, r2, r1, r0);
|
||||
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
|
||||
// r4: source FixedDoubleArray
|
||||
// r5: number of elements (smi-tagged)
|
||||
@ -220,10 +221,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
|
||||
__ add(r0, r0, Operand(r5, LSL, 1));
|
||||
__ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
|
||||
// r6: destination FixedArray, not tagged as heap object
|
||||
// Set destination FixedDoubleArray's length and map.
|
||||
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
|
||||
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
|
||||
// Set destination FixedDoubleArray's length.
|
||||
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
|
||||
__ str(r9, MemOperand(r6, HeapObject::kMapOffset));
|
||||
|
||||
// Prepare for conversion loop.
|
||||
__ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
|
||||
@ -325,8 +326,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
|
||||
// Handle slices.
|
||||
Label indirect_string_loaded;
|
||||
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
|
||||
__ add(index, index, Operand(result, ASR, kSmiTagSize));
|
||||
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
|
||||
__ add(index, index, Operand(result, ASR, kSmiTagSize));
|
||||
__ jmp(&indirect_string_loaded);
|
||||
|
||||
// Handle cons strings.
|
||||
@ -336,8 +337,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
|
||||
// the string.
|
||||
__ bind(&cons_string);
|
||||
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
|
||||
__ LoadRoot(ip, Heap::kEmptyStringRootIndex);
|
||||
__ cmp(result, ip);
|
||||
__ CompareRoot(result, Heap::kEmptyStringRootIndex);
|
||||
__ b(ne, call_runtime);
|
||||
// Get the first of the two strings and load its instance type.
|
||||
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
|
||||
|
deps/v8/src/arm/debug-arm.cc (vendored, 40 changes)
@ -251,14 +251,6 @@ void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
|
||||
// Calling convention for construct call (from builtins-arm.cc)
|
||||
// -- r0 : number of arguments (not smi)
|
||||
// -- r1 : constructor function
|
||||
Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
|
||||
// In places other than IC call sites it is expected that r0 is TOS which
|
||||
// is an object - this is not generally the case so this should be used with
|
||||
@ -268,6 +260,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
|
||||
|
||||
|
||||
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
|
||||
// Register state for CallFunctionStub (from code-stubs-arm.cc).
|
||||
// ----------- S t a t e -------------
|
||||
// -- r1 : function
|
||||
// -----------------------------------
|
||||
@ -275,6 +268,37 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
|
||||
// Register state for CallFunctionStub (from code-stubs-arm.cc).
|
||||
// ----------- S t a t e -------------
|
||||
// -- r1 : function
|
||||
// -- r2 : cache cell for call target
|
||||
// -----------------------------------
|
||||
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
|
||||
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
|
||||
// ----------- S t a t e -------------
|
||||
// -- r0 : number of arguments (not smi)
|
||||
// -- r1 : constructor function
|
||||
// -----------------------------------
|
||||
Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
|
||||
// Calling convention for CallConstructStub (from code-stubs-arm.cc)
|
||||
// ----------- S t a t e -------------
|
||||
// -- r0 : number of arguments (not smi)
|
||||
// -- r1 : constructor function
|
||||
// -- r2 : cache cell for call target
|
||||
// -----------------------------------
|
||||
Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateSlot(MacroAssembler* masm) {
|
||||
// Generate enough nop's to make space for a call instruction. Avoid emitting
|
||||
// the constant pool in the debug break slot code.
|
||||
|
deps/v8/src/arm/full-codegen-arm.cc (vendored, 21 changes)
@ -1820,7 +1820,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
|
||||
__ mov(ip, Operand(scratch1, ASR, 31));
|
||||
__ cmp(ip, Operand(scratch2));
|
||||
__ b(ne, &stub_call);
|
||||
__ tst(scratch1, Operand(scratch1));
|
||||
__ cmp(scratch1, Operand(0));
|
||||
__ mov(right, Operand(scratch1), LeaveCC, ne);
|
||||
__ b(ne, &done);
|
||||
__ add(scratch2, right, Operand(left), SetCC);
|
||||
@ -2379,9 +2379,22 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
|
||||
__ mov(r0, Operand(arg_count));
|
||||
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
|
||||
|
||||
Handle<Code> construct_builtin =
|
||||
isolate()->builtins()->JSConstructCall();
|
||||
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
|
||||
// Record call targets in unoptimized code, but not in the snapshot.
|
||||
CallFunctionFlags flags;
|
||||
if (!Serializer::enabled()) {
|
||||
flags = RECORD_CALL_TARGET;
|
||||
Handle<Object> uninitialized =
|
||||
TypeFeedbackCells::UninitializedSentinel(isolate());
|
||||
Handle<JSGlobalPropertyCell> cell =
|
||||
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
|
||||
RecordTypeFeedbackCell(expr->id(), cell);
|
||||
__ mov(r2, Operand(cell));
|
||||
} else {
|
||||
flags = NO_CALL_FUNCTION_FLAGS;
|
||||
}
|
||||
|
||||
CallConstructStub stub(flags);
|
||||
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
|
||||
context()->Plug(r0);
|
||||
}
|
||||
|
||||
|
deps/v8/src/arm/ic-arm.cc (vendored, 62 changes)
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -1312,14 +1312,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
|
||||
Label slow, array, extra, check_if_double_array;
|
||||
Label fast_object_with_map_check, fast_object_without_map_check;
|
||||
Label fast_double_with_map_check, fast_double_without_map_check;
|
||||
Label transition_smi_elements, finish_object_store, non_double_value;
|
||||
Label transition_double_elements;
|
||||
|
||||
// Register usage.
|
||||
Register value = r0;
|
||||
Register key = r1;
|
||||
Register receiver = r2;
|
||||
Register elements = r3; // Elements array of the receiver.
|
||||
Register receiver_map = r3;
|
||||
Register elements_map = r6;
|
||||
Register receiver_map = r7;
|
||||
Register elements = r7; // Elements array of the receiver.
|
||||
// r4 and r5 are used as general scratch registers.
|
||||
|
||||
// Check that the key is a smi.
|
||||
@ -1417,9 +1419,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
|
||||
__ Ret();
|
||||
|
||||
__ bind(&non_smi_value);
|
||||
// Escape to slow case when writing non-smi into smi-only array.
|
||||
__ CheckFastObjectElements(receiver_map, scratch_value, &slow);
|
||||
// Escape to elements kind transition case.
|
||||
__ CheckFastObjectElements(receiver_map, scratch_value,
|
||||
&transition_smi_elements);
|
||||
// Fast elements array, store the value to the elements backing store.
|
||||
__ bind(&finish_object_store);
|
||||
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
||||
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
|
||||
__ str(value, MemOperand(address));
|
||||
@ -1445,12 +1449,56 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
|
||||
key,
|
||||
receiver,
|
||||
elements,
|
||||
r3,
|
||||
r4,
|
||||
r5,
|
||||
r6,
|
||||
r7,
|
||||
&slow);
|
||||
&transition_double_elements);
|
||||
__ Ret();
|
||||
|
||||
__ bind(&transition_smi_elements);
|
||||
// Transition the array appropriately depending on the value type.
|
||||
__ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
|
||||
__ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
|
||||
__ b(ne, &non_double_value);
|
||||
|
||||
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
|
||||
// FAST_DOUBLE_ELEMENTS and complete the store.
|
||||
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
|
||||
FAST_DOUBLE_ELEMENTS,
|
||||
receiver_map,
|
||||
r4,
|
||||
&slow);
|
||||
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
|
||||
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
|
||||
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ jmp(&fast_double_without_map_check);
|
||||
|
||||
__ bind(&non_double_value);
|
||||
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
|
||||
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
|
||||
FAST_ELEMENTS,
|
||||
receiver_map,
|
||||
r4,
|
||||
&slow);
|
||||
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
|
||||
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
|
||||
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ jmp(&finish_object_store);
|
||||
|
||||
__ bind(&transition_double_elements);
|
||||
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
|
||||
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
|
||||
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
|
||||
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
|
||||
FAST_ELEMENTS,
|
||||
receiver_map,
|
||||
r4,
|
||||
&slow);
|
||||
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
|
||||
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
|
||||
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
||||
__ jmp(&finish_object_store);
|
||||
}
|
||||
|
||||
|
||||
|
deps/v8/src/arm/lithium-arm.cc (vendored, 32 changes)
@ -581,11 +581,6 @@ void LChunkBuilder::Abort(const char* format, ...) {
|
||||
}
|
||||
|
||||
|
||||
LRegister* LChunkBuilder::ToOperand(Register reg) {
|
||||
return LRegister::Create(Register::ToAllocationIndex(reg));
|
||||
}
|
||||
|
||||
|
||||
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
|
||||
return new LUnallocated(LUnallocated::FIXED_REGISTER,
|
||||
Register::ToAllocationIndex(reg));
|
||||
@ -676,7 +671,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
|
||||
HInstruction* instr = HInstruction::cast(value);
|
||||
VisitInstruction(instr);
|
||||
}
|
||||
allocator_->RecordUse(value, operand);
|
||||
operand->set_virtual_register(value->id());
|
||||
return operand;
|
||||
}
|
||||
|
||||
@ -684,18 +679,12 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
|
||||
LUnallocated* result) {
|
||||
allocator_->RecordDefinition(current_instruction_, result);
|
||||
result->set_virtual_register(current_instruction_->id());
|
||||
instr->set_result(result);
|
||||
return instr;
|
||||
}
|
||||
|
||||
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
|
||||
return Define(instr, new LUnallocated(LUnallocated::NONE));
|
||||
}
|
||||
|
||||
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::DefineAsRegister(
|
||||
LTemplateInstruction<1, I, T>* instr) {
|
||||
@ -802,21 +791,22 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
|
||||
|
||||
LUnallocated* LChunkBuilder::TempRegister() {
|
||||
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
|
||||
allocator_->RecordTemporary(operand);
|
||||
operand->set_virtual_register(allocator_->GetVirtualRegister());
|
||||
if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(Register reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
allocator_->RecordTemporary(operand);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
allocator_->RecordTemporary(operand);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
@ -1631,11 +1621,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
return AssignEnvironment(DefineAsRegister(res));
|
||||
} else {
|
||||
ASSERT(to.IsInteger32());
|
||||
LOperand* value = UseRegister(instr->value());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
bool needs_check = !instr->value()->type().IsSmi();
|
||||
LInstruction* res = NULL;
|
||||
if (!needs_check) {
|
||||
res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
|
||||
res = DefineAsRegister(new LSmiUntag(value, needs_check));
|
||||
} else {
|
||||
LOperand* temp1 = TempRegister();
|
||||
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
|
||||
@ -1671,12 +1661,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
} else if (from.IsInteger32()) {
|
||||
if (to.IsTagged()) {
|
||||
HValue* val = instr->value();
|
||||
LOperand* value = UseRegister(val);
|
||||
LOperand* value = UseRegisterAtStart(val);
|
||||
if (val->HasRange() && val->range()->IsInSmiRange()) {
|
||||
return DefineSameAsFirst(new LSmiTag(value));
|
||||
return DefineAsRegister(new LSmiTag(value));
|
||||
} else {
|
||||
LNumberTagI* result = new LNumberTagI(value);
|
||||
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
|
||||
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
|
||||
}
|
||||
} else {
|
||||
ASSERT(to.IsDouble());
|
||||
|
deps/v8/src/arm/lithium-arm.h (vendored, 3 changes)
@@ -2161,7 +2161,6 @@ class LChunkBuilder BASE_EMBEDDED {
  void Abort(const char* format, ...);

  // Methods for getting operands for Use / Define / Temp.
  LRegister* ToOperand(Register reg);
  LUnallocated* ToUnallocated(Register reg);
  LUnallocated* ToUnallocated(DoubleRegister reg);

@@ -2211,8 +2210,6 @@
  template<int I, int T>
      LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
                           LUnallocated* result);
  template<int I, int T>
      LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
  template<int I, int T>
      LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
  template<int I, int T>
deps/v8/src/arm/lithium-codegen-arm.cc (vendored, 54 changes)
@ -3376,9 +3376,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
|
||||
ASSERT(ToRegister(instr->InputAt(0)).is(r1));
|
||||
ASSERT(ToRegister(instr->result()).is(r0));
|
||||
|
||||
Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
|
||||
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
|
||||
__ mov(r0, Operand(instr->arity()));
|
||||
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
|
||||
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
|
||||
}
|
||||
|
||||
|
||||
@ -3796,12 +3796,11 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
||||
LNumberTagI* instr_;
|
||||
};
|
||||
|
||||
LOperand* input = instr->InputAt(0);
|
||||
ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
||||
Register reg = ToRegister(input);
|
||||
Register src = ToRegister(instr->InputAt(0));
|
||||
Register dst = ToRegister(instr->result());
|
||||
|
||||
DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
|
||||
__ SmiTag(reg, SetCC);
|
||||
__ SmiTag(dst, src, SetCC);
|
||||
__ b(vs, deferred->entry());
|
||||
__ bind(deferred->exit());
|
||||
}
|
||||
@ -3809,7 +3808,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
||||
|
||||
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
||||
Label slow;
|
||||
Register reg = ToRegister(instr->InputAt(0));
|
||||
Register src = ToRegister(instr->InputAt(0));
|
||||
Register dst = ToRegister(instr->result());
|
||||
DoubleRegister dbl_scratch = double_scratch0();
|
||||
SwVfpRegister flt_scratch = dbl_scratch.low();
|
||||
|
||||
@ -3820,14 +3820,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
||||
// disagree. Try to allocate a heap number in new space and store
|
||||
// the value in there. If that fails, call the runtime system.
|
||||
Label done;
|
||||
__ SmiUntag(reg);
|
||||
__ eor(reg, reg, Operand(0x80000000));
|
||||
__ vmov(flt_scratch, reg);
|
||||
if (dst.is(src)) {
|
||||
__ SmiUntag(src, dst);
|
||||
__ eor(src, src, Operand(0x80000000));
|
||||
}
|
||||
__ vmov(flt_scratch, src);
|
||||
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
|
||||
if (FLAG_inline_new) {
|
||||
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
|
||||
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
|
||||
if (!reg.is(r5)) __ mov(reg, r5);
|
||||
__ Move(dst, r5);
|
||||
__ b(&done);
|
||||
}
|
||||
|
||||
@ -3838,16 +3840,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
||||
// register is stored, as this register is in the pointer map, but contains an
|
||||
// integer value.
|
||||
__ mov(ip, Operand(0));
|
||||
__ StoreToSafepointRegisterSlot(ip, reg);
|
||||
__ StoreToSafepointRegisterSlot(ip, dst);
|
||||
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
|
||||
if (!reg.is(r0)) __ mov(reg, r0);
|
||||
__ Move(dst, r0);
|
||||
|
||||
// Done. Put the value in dbl_scratch into the value of the allocated heap
|
||||
// number.
|
||||
__ bind(&done);
|
||||
__ sub(ip, reg, Operand(kHeapObjectTag));
|
||||
__ sub(ip, dst, Operand(kHeapObjectTag));
|
||||
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
|
||||
__ StoreToSafepointRegisterSlot(reg, reg);
|
||||
__ StoreToSafepointRegisterSlot(dst, dst);
|
||||
}
|
||||
|
||||
|
||||
@ -3895,23 +3897,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
|
||||
|
||||
|
||||
void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
||||
LOperand* input = instr->InputAt(0);
|
||||
ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
||||
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
|
||||
__ SmiTag(ToRegister(input));
|
||||
__ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
|
||||
}
|
||||
|
||||
|
||||
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
|
||||
LOperand* input = instr->InputAt(0);
|
||||
ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
||||
Register input = ToRegister(instr->InputAt(0));
|
||||
Register result = ToRegister(instr->result());
|
||||
if (instr->needs_check()) {
|
||||
STATIC_ASSERT(kHeapObjectTag == 1);
|
||||
// If the input is a HeapObject, SmiUntag will set the carry flag.
|
||||
__ SmiUntag(ToRegister(input), SetCC);
|
||||
__ SmiUntag(result, input, SetCC);
|
||||
DeoptimizeIf(cs, instr->environment());
|
||||
} else {
|
||||
__ SmiUntag(ToRegister(input));
|
||||
__ SmiUntag(result, input);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3928,7 +3928,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
|
||||
Label load_smi, heap_number, done;
|
||||
|
||||
// Smi check.
|
||||
__ JumpIfSmi(input_reg, &load_smi);
|
||||
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
|
||||
|
||||
// Heap number map check.
|
||||
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
||||
@ -3967,10 +3967,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
|
||||
|
||||
// Smi to double register conversion
|
||||
__ bind(&load_smi);
|
||||
__ SmiUntag(input_reg); // Untag smi before converting to float.
|
||||
__ vmov(flt_scratch, input_reg);
|
||||
// scratch: untagged value of input_reg
|
||||
__ vmov(flt_scratch, scratch);
|
||||
__ vcvt_f64_s32(result_reg, flt_scratch);
|
||||
__ SmiTag(input_reg); // Retag smi.
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
@ -4256,7 +4255,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
||||
Label is_smi, done, heap_number;
|
||||
|
||||
// Both smi and heap number cases are handled.
|
||||
__ JumpIfSmi(input_reg, &is_smi);
|
||||
__ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
|
||||
|
||||
// Check for heap number
|
||||
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
||||
@ -4279,7 +4278,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
||||
|
||||
// smi
|
||||
__ bind(&is_smi);
|
||||
__ SmiUntag(result_reg, input_reg);
|
||||
__ ClampUint8(result_reg, result_reg);
|
||||
|
||||
__ bind(&done);
|
||||
|
deps/v8/src/arm/macro-assembler-arm.cc (vendored, 57 changes)
@ -2879,6 +2879,47 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));

// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
cmp(map_in_out, ip);
b(ne, no_map_match);

// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
}


void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
ldr(map_out, FieldMemOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
}
bind(&done);
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@ -2939,6 +2980,22 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
}


void MacroAssembler::UntagAndJumpIfSmi(
Register dst, Register src, Label* smi_case) {
STATIC_ASSERT(kSmiTag == 0);
mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
b(cc, smi_case); // Shifter carry is not set for a smi.
}


void MacroAssembler::UntagAndJumpIfNotSmi(
Register dst, Register src, Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
b(cs, non_smi_case); // Shifter carry is set for a non-smi.
}


void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
24
deps/v8/src/arm/macro-assembler-arm.h
vendored
@ -491,6 +491,22 @@ class MacroAssembler: public Assembler {

void LoadContext(Register dst, int context_chain_length);

// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the global context if the map in register
// map_in_out is the cached Array map in the global context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);

// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);

void LoadGlobalFunction(int index, Register function);

// Load the initial map from the global function. The registers
@ -1144,6 +1160,14 @@ class MacroAssembler: public Assembler {
mov(dst, Operand(src, ASR, kSmiTagSize), s);
}

// Untag the source value into destination and jump if source is a smi.
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

// Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);

// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
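A minimal usage sketch follows (not part of this commit; the wrapper function, register names, and label are hypothetical, while UntagAndJumpIfNotSmi, JumpIfNotSmi, and SmiUntag come from the macro assembler above). Because kSmiTag is 0 and a smi carries its payload shifted left by kSmiTagSize, a single mov with ASR and SetCC both untags the value and shifts the tag bit into the carry flag, so the branch no longer needs a separate tst.

// Sketch only, assuming this sits inside V8's ARM code generator where
// MacroAssembler, Register, and Label are already available.
void LoadUntaggedOrBail(MacroAssembler* masm,
                        Register value,     // tagged input (may be a smi)
                        Register untagged,  // receives value >> kSmiTagSize
                        Label* not_smi) {
  // Old pattern: test and branch, then untag separately on the smi path:
  //   masm->JumpIfNotSmi(value, not_smi);
  //   masm->SmiUntag(untagged, value);
  // New helper: the untagging mov sets the flags, so one instruction feeds
  // both the branch and the untagged result.
  masm->UntagAndJumpIfNotSmi(untagged, value, not_smi);
}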
@ -571,7 +571,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
__ tst(r0, Operand(r0));
__ cmp(r0, Operand(0));
BranchOrBacktrack(eq, on_no_match);
return true;
}
@ -585,7 +585,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
__ tst(r0, Operand(r0));
__ cmp(r0, Operand(0));
BranchOrBacktrack(ne, on_no_match);
if (mode_ != ASCII) {
__ bind(&done);
@ -681,7 +681,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {

// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
__ tst(r1, Operand(r1));
__ cmp(r1, Operand(0));
__ mov(r1, Operand(1), LeaveCC, eq);
__ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ str(r1, MemOperand(frame_pointer(), kAtStart));
62
deps/v8/src/arm/stub-cache-arm.cc
vendored
@ -45,6 +45,7 @@ static void ProbeTable(Isolate* isolate,
|
||||
StubCache::Table table,
|
||||
Register name,
|
||||
Register offset,
|
||||
int offset_shift_bits,
|
||||
Register scratch,
|
||||
Register scratch2) {
|
||||
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
|
||||
@ -63,23 +64,34 @@ static void ProbeTable(Isolate* isolate,
|
||||
|
||||
// Check that the key in the entry matches the name.
|
||||
__ mov(offsets_base_addr, Operand(key_offset));
|
||||
__ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
|
||||
__ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
|
||||
__ cmp(name, ip);
|
||||
__ b(ne, &miss);
|
||||
|
||||
// Get the code entry from the cache.
|
||||
__ add(offsets_base_addr, offsets_base_addr,
|
||||
Operand(value_off_addr - key_off_addr));
|
||||
__ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
|
||||
__ ldr(scratch2,
|
||||
MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
|
||||
|
||||
// Check that the flags match what we're looking for.
|
||||
__ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
|
||||
__ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
|
||||
__ cmp(scratch2, Operand(flags));
|
||||
// It's a nice optimization if this constant is encodable in the bic insn.
|
||||
|
||||
uint32_t mask = Code::kFlagsNotUsedInLookup;
|
||||
ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
|
||||
__ bic(scratch2, scratch2, Operand(mask));
|
||||
// Using cmn and the negative instead of cmp means we can use movw.
|
||||
if (flags < 0) {
|
||||
__ cmn(scratch2, Operand(-flags));
|
||||
} else {
|
||||
__ cmp(scratch2, Operand(flags));
|
||||
}
|
||||
__ b(ne, &miss);
|
||||
|
||||
// Re-load code entry from cache.
|
||||
__ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
|
||||
__ ldr(offset,
|
||||
MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
|
||||
|
||||
// Jump to the first instruction in the code stub.
|
||||
__ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
@ -189,23 +201,41 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
|
||||
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
|
||||
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
||||
__ add(scratch, scratch, Operand(ip));
|
||||
__ eor(scratch, scratch, Operand(flags));
|
||||
__ and_(scratch,
|
||||
scratch,
|
||||
Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
|
||||
uint32_t mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize;
|
||||
// Mask down the eor argument to the minimum to keep the immediate
|
||||
// ARM-encodable.
|
||||
__ eor(scratch, scratch, Operand(flags & mask));
|
||||
// Prefer and_ to ubfx here because ubfx takes 2 cycles.
|
||||
__ and_(scratch, scratch, Operand(mask));
|
||||
__ mov(scratch, Operand(scratch, LSR, 1));
|
||||
|
||||
// Probe the primary table.
|
||||
ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
|
||||
ProbeTable(isolate,
|
||||
masm,
|
||||
flags,
|
||||
kPrimary,
|
||||
name,
|
||||
scratch,
|
||||
1,
|
||||
extra,
|
||||
extra2);
|
||||
|
||||
// Primary miss: Compute hash for secondary probe.
|
||||
__ sub(scratch, scratch, Operand(name));
|
||||
__ add(scratch, scratch, Operand(flags));
|
||||
__ and_(scratch,
|
||||
scratch,
|
||||
Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
|
||||
__ sub(scratch, scratch, Operand(name, LSR, 1));
|
||||
uint32_t mask2 = (kSecondaryTableSize - 1) << (kHeapObjectTagSize - 1);
|
||||
__ add(scratch, scratch, Operand((flags >> 1) & mask2));
|
||||
__ and_(scratch, scratch, Operand(mask2));
|
||||
|
||||
// Probe the secondary table.
|
||||
ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
|
||||
ProbeTable(isolate,
|
||||
masm,
|
||||
flags,
|
||||
kSecondary,
|
||||
name,
|
||||
scratch,
|
||||
1,
|
||||
extra,
|
||||
extra2);
|
||||
|
||||
// Cache miss: Fall-through and let caller handle the miss by
|
||||
// entering the runtime system.
|
||||
|
35
deps/v8/src/ast.cc
vendored
@ -730,33 +730,32 @@ void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
|
||||
|
||||
|
||||
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
|
||||
// If there is an interceptor, we can't compute the target for
|
||||
// a direct call.
|
||||
// If there is an interceptor, we can't compute the target for a direct call.
|
||||
if (type->has_named_interceptor()) return false;
|
||||
|
||||
if (check_type_ == RECEIVER_MAP_CHECK) {
|
||||
// For primitive checks the holder is set up to point to the
|
||||
// corresponding prototype object, i.e. one step of the algorithm
|
||||
// below has been already performed.
|
||||
// For non-primitive checks we clear it to allow computing targets
|
||||
// for polymorphic calls.
|
||||
// For primitive checks the holder is set up to point to the corresponding
|
||||
// prototype object, i.e. one step of the algorithm below has been already
|
||||
// performed. For non-primitive checks we clear it to allow computing
|
||||
// targets for polymorphic calls.
|
||||
holder_ = Handle<JSObject>::null();
|
||||
}
|
||||
LookupResult lookup(type->GetIsolate());
|
||||
while (true) {
|
||||
LookupResult lookup(type->GetIsolate());
|
||||
type->LookupInDescriptors(NULL, *name, &lookup);
|
||||
// If the function wasn't found directly in the map, we start
|
||||
// looking upwards through the prototype chain.
|
||||
if ((!lookup.IsFound() || IsTransitionType(lookup.type()))
|
||||
&& type->prototype()->IsJSObject()) {
|
||||
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
|
||||
type = Handle<Map>(holder()->map());
|
||||
} else if (lookup.IsFound() && lookup.type() == CONSTANT_FUNCTION) {
|
||||
target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
|
||||
return true;
|
||||
} else {
|
||||
// For properties we know the target iff we have a constant function.
|
||||
if (lookup.IsFound() && lookup.IsProperty()) {
|
||||
if (lookup.type() == CONSTANT_FUNCTION) {
|
||||
target_ = Handle<JSFunction>(lookup.GetConstantFunctionFromMap(*type));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
// If we reach the end of the prototype chain, we don't know the target.
|
||||
if (!type->prototype()->IsJSObject()) return false;
|
||||
// Go up the prototype chain, recording where we are currently.
|
||||
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
|
||||
type = Handle<Map>(holder()->map());
|
||||
}
|
||||
}
|
||||
|
||||
|
46
deps/v8/src/bootstrapper.cc
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -76,22 +76,15 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
|
||||
Factory* factory = isolate->factory();
|
||||
Heap* heap = isolate->heap();
|
||||
if (heap->natives_source_cache()->get(index)->IsUndefined()) {
|
||||
if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
|
||||
// We can use external strings for the natives.
|
||||
Vector<const char> source = Natives::GetRawScriptSource(index);
|
||||
NativesExternalStringResource* resource =
|
||||
new NativesExternalStringResource(this,
|
||||
source.start(),
|
||||
source.length());
|
||||
Handle<String> source_code =
|
||||
factory->NewExternalStringFromAscii(resource);
|
||||
heap->natives_source_cache()->set(index, *source_code);
|
||||
} else {
|
||||
// Old snapshot code can't cope with external strings at all.
|
||||
Handle<String> source_code =
|
||||
factory->NewStringFromAscii(Natives::GetRawScriptSource(index));
|
||||
heap->natives_source_cache()->set(index, *source_code);
|
||||
}
|
||||
// We can use external strings for the natives.
|
||||
Vector<const char> source = Natives::GetRawScriptSource(index);
|
||||
NativesExternalStringResource* resource =
|
||||
new NativesExternalStringResource(this,
|
||||
source.start(),
|
||||
source.length());
|
||||
Handle<String> source_code =
|
||||
factory->NewExternalStringFromAscii(resource);
|
||||
heap->natives_source_cache()->set(index, *source_code);
|
||||
}
|
||||
Handle<Object> cached_source(heap->natives_source_cache()->get(index));
|
||||
return Handle<String>::cast(cached_source);
|
||||
@ -894,15 +887,12 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
|
||||
factory->NewForeign(&Accessors::ArrayLength),
|
||||
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
|
||||
|
||||
// Cache the fast JavaScript array map
|
||||
global_context()->set_js_array_map(array_function->initial_map());
|
||||
global_context()->js_array_map()->set_instance_descriptors(
|
||||
*array_descriptors);
|
||||
// array_function is used internally. JS code creating array object should
|
||||
// search for the 'Array' property on the global object and use that one
|
||||
// as the constructor. 'Array' property on a global object can be
|
||||
// overwritten by JS code.
|
||||
global_context()->set_array_function(*array_function);
|
||||
array_function->initial_map()->set_instance_descriptors(*array_descriptors);
|
||||
}
|
||||
|
||||
{ // --- N u m b e r ---
|
||||
@ -1646,7 +1636,7 @@ bool Genesis::InstallNatives() {
|
||||
MaybeObject* maybe_map =
|
||||
array_function->initial_map()->CopyDropTransitions();
|
||||
Map* new_map;
|
||||
if (!maybe_map->To<Map>(&new_map)) return maybe_map;
|
||||
if (!maybe_map->To<Map>(&new_map)) return false;
|
||||
new_map->set_elements_kind(FAST_ELEMENTS);
|
||||
array_function->set_initial_map(new_map);
|
||||
|
||||
@ -1745,17 +1735,15 @@ bool Genesis::InstallNatives() {
|
||||
initial_map->set_prototype(*array_prototype);
|
||||
|
||||
// Update map with length accessor from Array and add "index" and "input".
|
||||
Handle<Map> array_map(global_context()->js_array_map());
|
||||
Handle<DescriptorArray> array_descriptors(
|
||||
array_map->instance_descriptors());
|
||||
ASSERT_EQ(1, array_descriptors->number_of_descriptors());
|
||||
|
||||
Handle<DescriptorArray> reresult_descriptors =
|
||||
factory()->NewDescriptorArray(3);
|
||||
|
||||
DescriptorArray::WhitenessWitness witness(*reresult_descriptors);
|
||||
|
||||
reresult_descriptors->CopyFrom(0, *array_descriptors, 0, witness);
|
||||
JSFunction* array_function = global_context()->array_function();
|
||||
Handle<DescriptorArray> array_descriptors(
|
||||
array_function->initial_map()->instance_descriptors());
|
||||
int index = array_descriptors->SearchWithCache(heap()->length_symbol());
|
||||
reresult_descriptors->CopyFrom(0, *array_descriptors, index, witness);
|
||||
|
||||
int enum_index = 0;
|
||||
{
|
||||
|
226
deps/v8/src/builtins.cc
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -193,13 +193,22 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
|
||||
JSArray* array;
|
||||
if (CalledAsConstructor(isolate)) {
|
||||
array = JSArray::cast((*args)[0]);
|
||||
// Initialize elements and length in case later allocations fail so that the
|
||||
// array object is initialized in a valid state.
|
||||
array->set_length(Smi::FromInt(0));
|
||||
array->set_elements(heap->empty_fixed_array());
|
||||
if (!FLAG_smi_only_arrays) {
|
||||
Context* global_context = isolate->context()->global_context();
|
||||
if (array->GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
|
||||
!global_context->object_js_array_map()->IsUndefined()) {
|
||||
array->set_map(Map::cast(global_context->object_js_array_map()));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Allocate the JS Array
|
||||
Object* obj;
|
||||
{ MaybeObject* maybe_obj = heap->AllocateJSObject(constructor);
|
||||
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
|
||||
}
|
||||
array = JSArray::cast(obj);
|
||||
MaybeObject* maybe_obj =
|
||||
heap->AllocateEmptyJSArray(FAST_SMI_ONLY_ELEMENTS);
|
||||
if (!maybe_obj->To(&array)) return maybe_obj;
|
||||
}
|
||||
|
||||
// Optimize the case where there is one argument and the argument is a
|
||||
@ -301,29 +310,6 @@ BUILTIN(ArrayCodeGeneric) {
|
||||
}
|
||||
|
||||
|
||||
MUST_USE_RESULT static MaybeObject* AllocateJSArray(Heap* heap) {
|
||||
JSFunction* array_function =
|
||||
heap->isolate()->context()->global_context()->array_function();
|
||||
Object* result;
|
||||
{ MaybeObject* maybe_result = heap->AllocateJSObject(array_function);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
MUST_USE_RESULT static MaybeObject* AllocateEmptyJSArray(Heap* heap) {
|
||||
Object* result;
|
||||
{ MaybeObject* maybe_result = AllocateJSArray(heap);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
JSArray* result_array = JSArray::cast(result);
|
||||
result_array->set_length(Smi::FromInt(0));
|
||||
result_array->set_elements(heap->empty_fixed_array());
|
||||
return result_array;
|
||||
}
|
||||
|
||||
|
||||
static void CopyElements(Heap* heap,
|
||||
AssertNoAllocation* no_gc,
|
||||
FixedArray* dst,
|
||||
@ -331,6 +317,7 @@ static void CopyElements(Heap* heap,
|
||||
FixedArray* src,
|
||||
int src_index,
|
||||
int len) {
|
||||
if (len == 0) return;
|
||||
ASSERT(dst != src); // Use MoveElements instead.
|
||||
ASSERT(dst->map() != HEAP->fixed_cow_array_map());
|
||||
ASSERT(len > 0);
|
||||
@ -352,6 +339,7 @@ static void MoveElements(Heap* heap,
|
||||
FixedArray* src,
|
||||
int src_index,
|
||||
int len) {
|
||||
if (len == 0) return;
|
||||
ASSERT(dst->map() != HEAP->fixed_cow_array_map());
|
||||
memmove(dst->data_start() + dst_index,
|
||||
src->data_start() + src_index,
|
||||
@ -543,9 +531,7 @@ BUILTIN(ArrayPush) {
|
||||
FixedArray* new_elms = FixedArray::cast(obj);
|
||||
|
||||
AssertNoAllocation no_gc;
|
||||
if (len > 0) {
|
||||
CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
|
||||
}
|
||||
CopyElements(heap, &no_gc, new_elms, 0, elms, 0, len);
|
||||
FillWithHoles(heap, new_elms, new_length, capacity);
|
||||
|
||||
elms = new_elms;
|
||||
@ -681,9 +667,7 @@ BUILTIN(ArrayUnshift) {
|
||||
}
|
||||
FixedArray* new_elms = FixedArray::cast(obj);
|
||||
AssertNoAllocation no_gc;
|
||||
if (len > 0) {
|
||||
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
|
||||
}
|
||||
CopyElements(heap, &no_gc, new_elms, to_add, elms, 0, len);
|
||||
FillWithHoles(heap, new_elms, new_length, capacity);
|
||||
elms = new_elms;
|
||||
array->set_elements(elms);
|
||||
@ -781,45 +765,22 @@ BUILTIN(ArraySlice) {
|
||||
int final = (relative_end < 0) ? Max(len + relative_end, 0)
|
||||
: Min(relative_end, len);
|
||||
|
||||
ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
|
||||
|
||||
// Calculate the length of result array.
|
||||
int result_len = final - k;
|
||||
if (result_len <= 0) {
|
||||
return AllocateEmptyJSArray(heap);
|
||||
}
|
||||
int result_len = Max(final - k, 0);
|
||||
|
||||
Object* result;
|
||||
{ MaybeObject* maybe_result = AllocateJSArray(heap);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
JSArray* result_array = JSArray::cast(result);
|
||||
|
||||
{ MaybeObject* maybe_result =
|
||||
heap->AllocateUninitializedFixedArray(result_len);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
FixedArray* result_elms = FixedArray::cast(result);
|
||||
|
||||
MaybeObject* maybe_object =
|
||||
result_array->EnsureCanContainElements(result_elms,
|
||||
DONT_ALLOW_DOUBLE_ELEMENTS);
|
||||
if (maybe_object->IsFailure()) return maybe_object;
|
||||
MaybeObject* maybe_array =
|
||||
heap->AllocateJSArrayAndStorage(elements_kind,
|
||||
result_len,
|
||||
result_len);
|
||||
JSArray* result_array;
|
||||
if (!maybe_array->To(&result_array)) return maybe_array;
|
||||
|
||||
AssertNoAllocation no_gc;
|
||||
CopyElements(heap, &no_gc, result_elms, 0, elms, k, result_len);
|
||||
CopyElements(heap, &no_gc, FixedArray::cast(result_array->elements()), 0,
|
||||
elms, k, result_len);
|
||||
|
||||
// Set elements.
|
||||
result_array->set_elements(result_elms);
|
||||
|
||||
// Set the length.
|
||||
result_array->set_length(Smi::FromInt(result_len));
|
||||
|
||||
// Set the ElementsKind.
|
||||
ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
|
||||
if (IsMoreGeneralElementsKindTransition(result_array->GetElementsKind(),
|
||||
elements_kind)) {
|
||||
MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
|
||||
if (maybe->IsFailure()) return maybe;
|
||||
}
|
||||
return result_array;
|
||||
}
|
||||
|
||||
@ -880,47 +841,22 @@ BUILTIN(ArraySplice) {
|
||||
}
|
||||
|
||||
JSArray* result_array = NULL;
|
||||
if (actual_delete_count == 0) {
|
||||
Object* result;
|
||||
{ MaybeObject* maybe_result = AllocateEmptyJSArray(heap);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
result_array = JSArray::cast(result);
|
||||
} else {
|
||||
// Allocate result array.
|
||||
Object* result;
|
||||
{ MaybeObject* maybe_result = AllocateJSArray(heap);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
result_array = JSArray::cast(result);
|
||||
|
||||
{ MaybeObject* maybe_result =
|
||||
heap->AllocateUninitializedFixedArray(actual_delete_count);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
FixedArray* result_elms = FixedArray::cast(result);
|
||||
ElementsKind elements_kind =
|
||||
JSObject::cast(receiver)->GetElementsKind();
|
||||
MaybeObject* maybe_array =
|
||||
heap->AllocateJSArrayAndStorage(elements_kind,
|
||||
actual_delete_count,
|
||||
actual_delete_count);
|
||||
if (!maybe_array->To(&result_array)) return maybe_array;
|
||||
|
||||
{
|
||||
AssertNoAllocation no_gc;
|
||||
// Fill newly created array.
|
||||
CopyElements(heap,
|
||||
&no_gc,
|
||||
result_elms, 0,
|
||||
FixedArray::cast(result_array->elements()), 0,
|
||||
elms, actual_start,
|
||||
actual_delete_count);
|
||||
|
||||
// Set elements.
|
||||
result_array->set_elements(result_elms);
|
||||
|
||||
// Set the length.
|
||||
result_array->set_length(Smi::FromInt(actual_delete_count));
|
||||
|
||||
// Set the ElementsKind.
|
||||
ElementsKind elements_kind = array->GetElementsKind();
|
||||
if (IsMoreGeneralElementsKindTransition(result_array->GetElementsKind(),
|
||||
elements_kind)) {
|
||||
MaybeObject* maybe = result_array->TransitionElementsKind(elements_kind);
|
||||
if (maybe->IsFailure()) return maybe;
|
||||
}
|
||||
}
|
||||
|
||||
int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
|
||||
@ -935,7 +871,7 @@ BUILTIN(ArraySplice) {
|
||||
if (trim_array) {
|
||||
const int delta = actual_delete_count - item_count;
|
||||
|
||||
if (actual_start > 0) {
|
||||
{
|
||||
AssertNoAllocation no_gc;
|
||||
MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
|
||||
}
|
||||
@ -967,18 +903,17 @@ BUILTIN(ArraySplice) {
|
||||
}
|
||||
FixedArray* new_elms = FixedArray::cast(obj);
|
||||
|
||||
AssertNoAllocation no_gc;
|
||||
// Copy the part before actual_start as is.
|
||||
if (actual_start > 0) {
|
||||
{
|
||||
AssertNoAllocation no_gc;
|
||||
// Copy the part before actual_start as is.
|
||||
CopyElements(heap, &no_gc, new_elms, 0, elms, 0, actual_start);
|
||||
}
|
||||
const int to_copy = len - actual_delete_count - actual_start;
|
||||
if (to_copy > 0) {
|
||||
const int to_copy = len - actual_delete_count - actual_start;
|
||||
CopyElements(heap, &no_gc,
|
||||
new_elms, actual_start + item_count,
|
||||
elms, actual_start + actual_delete_count,
|
||||
to_copy);
|
||||
}
|
||||
|
||||
FillWithHoles(heap, new_elms, new_length, capacity);
|
||||
|
||||
elms = new_elms;
|
||||
@ -1022,6 +957,7 @@ BUILTIN(ArrayConcat) {
|
||||
// and calculating total length.
|
||||
int n_arguments = args.length();
|
||||
int result_len = 0;
|
||||
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
|
||||
for (int i = 0; i < n_arguments; i++) {
|
||||
Object* arg = args[i];
|
||||
if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastTypeElements()
|
||||
@ -1041,54 +977,34 @@ BUILTIN(ArrayConcat) {
|
||||
if (result_len > FixedArray::kMaxLength) {
|
||||
return CallJsBuiltin(isolate, "ArrayConcat", args);
|
||||
}
|
||||
}
|
||||
|
||||
if (result_len == 0) {
|
||||
return AllocateEmptyJSArray(heap);
|
||||
if (!JSArray::cast(arg)->HasFastElements()) {
|
||||
elements_kind = FAST_ELEMENTS;
|
||||
}
|
||||
}
|
||||
|
||||
// Allocate result.
|
||||
Object* result;
|
||||
{ MaybeObject* maybe_result = AllocateJSArray(heap);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
JSArray* result_array = JSArray::cast(result);
|
||||
|
||||
{ MaybeObject* maybe_result =
|
||||
heap->AllocateUninitializedFixedArray(result_len);
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
FixedArray* result_elms = FixedArray::cast(result);
|
||||
|
||||
// Ensure element type transitions happen before copying elements in.
|
||||
if (result_array->HasFastSmiOnlyElements()) {
|
||||
for (int i = 0; i < n_arguments; i++) {
|
||||
JSArray* array = JSArray::cast(args[i]);
|
||||
if (!array->HasFastSmiOnlyElements()) {
|
||||
result_array->EnsureCanContainHeapObjectElements();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
JSArray* result_array;
|
||||
MaybeObject* maybe_array =
|
||||
heap->AllocateJSArrayAndStorage(elements_kind,
|
||||
result_len,
|
||||
result_len);
|
||||
if (!maybe_array->To(&result_array)) return maybe_array;
|
||||
if (result_len == 0) return result_array;
|
||||
|
||||
// Copy data.
|
||||
AssertNoAllocation no_gc;
|
||||
int start_pos = 0;
|
||||
FixedArray* result_elms(FixedArray::cast(result_array->elements()));
|
||||
for (int i = 0; i < n_arguments; i++) {
|
||||
JSArray* array = JSArray::cast(args[i]);
|
||||
int len = Smi::cast(array->length())->value();
|
||||
if (len > 0) {
|
||||
FixedArray* elms = FixedArray::cast(array->elements());
|
||||
CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
|
||||
start_pos += len;
|
||||
}
|
||||
FixedArray* elms = FixedArray::cast(array->elements());
|
||||
CopyElements(heap, &no_gc, result_elms, start_pos, elms, 0, len);
|
||||
start_pos += len;
|
||||
}
|
||||
ASSERT(start_pos == result_len);
|
||||
|
||||
// Set the length and elements.
|
||||
result_array->set_length(Smi::FromInt(result_len));
|
||||
result_array->set_elements(result_elms);
|
||||
|
||||
return result_array;
|
||||
}
|
||||
|
||||
@ -1592,11 +1508,6 @@ static void Generate_KeyedStoreIC_DebugBreak(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
static void Generate_ConstructCall_DebugBreak(MacroAssembler* masm) {
|
||||
Debug::GenerateConstructCallDebugBreak(masm);
|
||||
}
|
||||
|
||||
|
||||
static void Generate_Return_DebugBreak(MacroAssembler* masm) {
|
||||
Debug::GenerateReturnDebugBreak(masm);
|
||||
}
|
||||
@ -1607,6 +1518,23 @@ static void Generate_CallFunctionStub_DebugBreak(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
static void Generate_CallFunctionStub_Recording_DebugBreak(
|
||||
MacroAssembler* masm) {
|
||||
Debug::GenerateCallFunctionStubRecordDebugBreak(masm);
|
||||
}
|
||||
|
||||
|
||||
static void Generate_CallConstructStub_DebugBreak(MacroAssembler* masm) {
|
||||
Debug::GenerateCallConstructStubDebugBreak(masm);
|
||||
}
|
||||
|
||||
|
||||
static void Generate_CallConstructStub_Recording_DebugBreak(
|
||||
MacroAssembler* masm) {
|
||||
Debug::GenerateCallConstructStubRecordDebugBreak(masm);
|
||||
}
|
||||
|
||||
|
||||
static void Generate_Slot_DebugBreak(MacroAssembler* masm) {
|
||||
Debug::GenerateSlotDebugBreak(masm);
|
||||
}
|
||||
|
47
deps/v8/src/builtins.h
vendored
@ -67,8 +67,6 @@ enum BuiltinExtraArguments {
|
||||
#define BUILTIN_LIST_A(V) \
|
||||
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
|
||||
Code::kNoExtraICState) \
|
||||
V(JSConstructCall, BUILTIN, UNINITIALIZED, \
|
||||
Code::kNoExtraICState) \
|
||||
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
|
||||
Code::kNoExtraICState) \
|
||||
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
|
||||
@ -196,26 +194,30 @@ enum BuiltinExtraArguments {
|
||||
#ifdef ENABLE_DEBUGGER_SUPPORT
|
||||
// Define list of builtins used by the debugger implemented in assembly.
|
||||
#define BUILTIN_LIST_DEBUG_A(V) \
|
||||
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(ConstructCall_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState)
|
||||
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState) \
|
||||
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
|
||||
Code::kNoExtraICState)
|
||||
#else
|
||||
#define BUILTIN_LIST_DEBUG_A(V)
|
||||
#endif
|
||||
@ -346,7 +348,6 @@ class Builtins {
|
||||
static void Generate_Adaptor(MacroAssembler* masm,
|
||||
CFunctionId id,
|
||||
BuiltinExtraArguments extra_args);
|
||||
static void Generate_JSConstructCall(MacroAssembler* masm);
|
||||
static void Generate_JSConstructStubCountdown(MacroAssembler* masm);
|
||||
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
|
||||
static void Generate_JSConstructStubApi(MacroAssembler* masm);
|
||||
|
16
deps/v8/src/checks.h
vendored
@ -51,20 +51,12 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
#endif


// Used by the CHECK macro -- should not be called directly.
inline void CheckHelper(const char* file,
int line,
const char* source,
bool condition) {
if (!condition)
V8_Fatal(file, line, "CHECK(%s) failed", source);
}


// The CHECK macro checks that the given condition is true; if not, it
// prints a message to stderr and aborts.
#define CHECK(condition) do { \
if (!(condition)) CheckHelper(__FILE__, __LINE__, #condition, false); \
#define CHECK(condition) do { \
if (!(condition)) { \
V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
} \
} while (0)
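The hunk above folds the out-of-line CheckHelper into the macro so the failing file, line, and source text reach V8_Fatal directly. A standalone illustration of the reshaped macro follows (a sketch, not V8 code: this V8_Fatal is a reduced stand-in that only handles the single %s used by CHECK).

#include <cstdio>
#include <cstdlib>

// Reduced stand-in for V8_Fatal, enough for the "CHECK(%s) failed" format.
static void V8_Fatal(const char* file, int line,
                     const char* format, const char* detail) {
  std::fprintf(stderr, "%s:%d: ", file, line);
  std::fprintf(stderr, format, detail);
  std::fputc('\n', stderr);
  std::abort();
}

#define CHECK(condition) do {                                       \
    if (!(condition)) {                                             \
      V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
    }                                                               \
  } while (0)

int main() {
  CHECK(2 + 2 == 4);  // passes, no output
  CHECK(1 == 2);      // aborts, printing file, line, and "CHECK(1 == 2) failed"
  return 0;
}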
6
deps/v8/src/code-stubs.cc
vendored
@ -342,6 +342,12 @@ void CallFunctionStub::PrintName(StringStream* stream) {
}


void CallConstructStub::PrintName(StringStream* stream) {
stream->Add("CallConstructStub");
if (RecordCallTarget()) stream->Add("_Recording");
}


void ToBooleanStub::PrintName(StringStream* stream) {
stream->Add("ToBooleanStub_");
types_.Print(stream);
49
deps/v8/src/code-stubs.h
vendored
@ -38,6 +38,7 @@ namespace internal {
|
||||
// List of code stubs used on all platforms.
|
||||
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
|
||||
V(CallFunction) \
|
||||
V(CallConstruct) \
|
||||
V(UnaryOp) \
|
||||
V(BinaryOp) \
|
||||
V(StringAdd) \
|
||||
@ -738,32 +739,14 @@ class CallFunctionStub: public CodeStub {
|
||||
|
||||
void Generate(MacroAssembler* masm);
|
||||
|
||||
virtual void FinishCode(Handle<Code> code);
|
||||
|
||||
static void Clear(Heap* heap, Address address);
|
||||
|
||||
static Object* GetCachedValue(Address address);
|
||||
virtual void FinishCode(Handle<Code> code) {
|
||||
code->set_has_function_cache(RecordCallTarget());
|
||||
}
|
||||
|
||||
static int ExtractArgcFromMinorKey(int minor_key) {
|
||||
return ArgcBits::decode(minor_key);
|
||||
}
|
||||
|
||||
// The object that indicates an uninitialized cache.
|
||||
static Handle<Object> UninitializedSentinel(Isolate* isolate) {
|
||||
return isolate->factory()->the_hole_value();
|
||||
}
|
||||
|
||||
// A raw version of the uninitialized sentinel that's safe to read during
|
||||
// garbage collection (e.g., for patching the cache).
|
||||
static Object* RawUninitializedSentinel(Heap* heap) {
|
||||
return heap->raw_unchecked_the_hole_value();
|
||||
}
|
||||
|
||||
// The object that indicates a megamorphic state.
|
||||
static Handle<Object> MegamorphicSentinel(Isolate* isolate) {
|
||||
return isolate->factory()->undefined_value();
|
||||
}
|
||||
|
||||
private:
|
||||
int argc_;
|
||||
CallFunctionFlags flags_;
|
||||
@ -790,6 +773,30 @@ class CallFunctionStub: public CodeStub {
|
||||
};
|
||||
|
||||
|
||||
class CallConstructStub: public CodeStub {
|
||||
public:
|
||||
explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
|
||||
|
||||
void Generate(MacroAssembler* masm);
|
||||
|
||||
virtual void FinishCode(Handle<Code> code) {
|
||||
code->set_has_function_cache(RecordCallTarget());
|
||||
}
|
||||
|
||||
private:
|
||||
CallFunctionFlags flags_;
|
||||
|
||||
virtual void PrintName(StringStream* stream);
|
||||
|
||||
Major MajorKey() { return CallConstruct; }
|
||||
int MinorKey() { return flags_; }
|
||||
|
||||
bool RecordCallTarget() {
|
||||
return (flags_ & RECORD_CALL_TARGET) != 0;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
enum StringIndexFlags {
|
||||
// Accepts smis or heap numbers.
|
||||
STRING_INDEX_IS_NUMBER,
|
||||
|
4
deps/v8/src/compiler.cc
vendored
@ -194,7 +194,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
if (AlwaysFullCompiler() || !FLAG_use_hydrogen) {
if (AlwaysFullCompiler()) {
info->SetCode(code);
return true;
}
@ -291,7 +291,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
return false;
}

if (graph != NULL && FLAG_build_lithium) {
if (graph != NULL) {
Handle<Code> optimized_code = graph->Compile(info);
if (!optimized_code.is_null()) {
info->SetCode(optimized_code);
22
deps/v8/src/contexts.h
vendored
@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -106,6 +106,9 @@ enum BindingFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(SMI_JS_ARRAY_MAP_INDEX, Object, smi_js_array_map) \
V(DOUBLE_JS_ARRAY_MAP_INDEX, Object, double_js_array_map) \
V(OBJECT_JS_ARRAY_MAP_INDEX, Object, object_js_array_map) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@ -129,7 +132,6 @@ enum BindingFlags {
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
strict_mode_function_instance_map) \
V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
@ -231,7 +233,6 @@ class Context: public FixedArray {
ARGUMENTS_BOILERPLATE_INDEX,
ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
JS_ARRAY_MAP_INDEX,
REGEXP_RESULT_MAP_INDEX,
FUNCTION_MAP_INDEX,
STRICT_MODE_FUNCTION_MAP_INDEX,
@ -247,6 +248,9 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX,
INTERNAL_ARRAY_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
SMI_JS_ARRAY_MAP_INDEX,
DOUBLE_JS_ARRAY_MAP_INDEX,
OBJECT_JS_ARRAY_MAP_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
@ -365,6 +369,18 @@ class Context: public FixedArray {
Object* OptimizedFunctionsListHead();
void ClearOptimizedFunctions();

static int GetContextMapIndexFromElementsKind(
ElementsKind elements_kind) {
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
return Context::DOUBLE_JS_ARRAY_MAP_INDEX;
} else if (elements_kind == FAST_ELEMENTS) {
return Context::OBJECT_JS_ARRAY_MAP_INDEX;
} else {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
return Context::SMI_JS_ARRAY_MAP_INDEX;
}
}

#define GLOBAL_CONTEXT_FIELD_ACCESSORS(index, type, name) \
void set_##name(type* value) { \
ASSERT(IsGlobalContext()); \
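For reference, a small sketch (not part of the commit; the free function is hypothetical) of the mapping the new helper encodes between elements kinds and the three cached array-map slots added above.

// FAST_SMI_ONLY_ELEMENTS -> SMI_JS_ARRAY_MAP_INDEX
// FAST_DOUBLE_ELEMENTS   -> DOUBLE_JS_ARRAY_MAP_INDEX
// FAST_ELEMENTS          -> OBJECT_JS_ARRAY_MAP_INDEX
int CachedArrayMapSlot(ElementsKind kind) {
  return Context::GetContextMapIndexFromElementsKind(kind);
}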
8
deps/v8/src/d8.cc
vendored
@ -1485,6 +1485,14 @@ int Shell::Main(int argc, char* argv[]) {
}
printf("======== Full Deoptimization =======\n");
Testing::DeoptimizeAll();
#if !defined(V8_SHARED)
} else if (i::FLAG_stress_runs > 0) {
int stress_runs = i::FLAG_stress_runs;
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Run %d/%d ============\n", i + 1, stress_runs);
result = RunMain(argc, argv);
}
#endif
} else {
result = RunMain(argc, argv);
}
46
deps/v8/src/debug.cc
vendored
@ -85,12 +85,6 @@ static void PrintLn(v8::Local<v8::Value> value) {
|
||||
}
|
||||
|
||||
|
||||
static Handle<Code> ComputeCallDebugBreak(int argc, Code::Kind kind) {
|
||||
Isolate* isolate = Isolate::Current();
|
||||
return isolate->stub_cache()->ComputeCallDebugBreak(argc, kind);
|
||||
}
|
||||
|
||||
|
||||
static Handle<Code> ComputeCallDebugPrepareStepIn(int argc, Code::Kind kind) {
|
||||
Isolate* isolate = Isolate::Current();
|
||||
return isolate->stub_cache()->ComputeCallDebugPrepareStepIn(argc, kind);
|
||||
@ -1538,40 +1532,47 @@ bool Debug::IsBreakStub(Code* code) {
|
||||
|
||||
// Find the builtin to use for invoking the debug break
|
||||
Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
|
||||
Isolate* isolate = Isolate::Current();
|
||||
|
||||
// Find the builtin debug break function matching the calling convention
|
||||
// used by the call site.
|
||||
if (code->is_inline_cache_stub()) {
|
||||
switch (code->kind()) {
|
||||
case Code::CALL_IC:
|
||||
case Code::KEYED_CALL_IC:
|
||||
return ComputeCallDebugBreak(code->arguments_count(), code->kind());
|
||||
return isolate->stub_cache()->ComputeCallDebugBreak(
|
||||
code->arguments_count(), code->kind());
|
||||
|
||||
case Code::LOAD_IC:
|
||||
return Isolate::Current()->builtins()->LoadIC_DebugBreak();
|
||||
return isolate->builtins()->LoadIC_DebugBreak();
|
||||
|
||||
case Code::STORE_IC:
|
||||
return Isolate::Current()->builtins()->StoreIC_DebugBreak();
|
||||
return isolate->builtins()->StoreIC_DebugBreak();
|
||||
|
||||
case Code::KEYED_LOAD_IC:
|
||||
return Isolate::Current()->builtins()->KeyedLoadIC_DebugBreak();
|
||||
return isolate->builtins()->KeyedLoadIC_DebugBreak();
|
||||
|
||||
case Code::KEYED_STORE_IC:
|
||||
return Isolate::Current()->builtins()->KeyedStoreIC_DebugBreak();
|
||||
return isolate->builtins()->KeyedStoreIC_DebugBreak();
|
||||
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
if (RelocInfo::IsConstructCall(mode)) {
|
||||
Handle<Code> result =
|
||||
Isolate::Current()->builtins()->ConstructCall_DebugBreak();
|
||||
return result;
|
||||
if (code->has_function_cache()) {
|
||||
return isolate->builtins()->CallConstructStub_Recording_DebugBreak();
|
||||
} else {
|
||||
return isolate->builtins()->CallConstructStub_DebugBreak();
|
||||
}
|
||||
}
|
||||
if (code->kind() == Code::STUB) {
|
||||
ASSERT(code->major_key() == CodeStub::CallFunction);
|
||||
Handle<Code> result =
|
||||
Isolate::Current()->builtins()->CallFunctionStub_DebugBreak();
|
||||
return result;
|
||||
if (code->has_function_cache()) {
|
||||
return isolate->builtins()->CallFunctionStub_Recording_DebugBreak();
|
||||
} else {
|
||||
return isolate->builtins()->CallFunctionStub_DebugBreak();
|
||||
}
|
||||
}
|
||||
|
||||
UNREACHABLE();
|
||||
@ -1903,7 +1904,8 @@ void Debug::PrepareForBreakPoints() {
|
||||
{
|
||||
// We are going to iterate heap to find all functions without
|
||||
// debug break slots.
|
||||
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
|
||||
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
|
||||
"preparing for breakpoints");
|
||||
|
||||
// Ensure no GC in this scope as we are going to use gc_metadata
|
||||
// field in the Code object to mark active functions.
|
||||
@ -2229,8 +2231,9 @@ void Debug::CreateScriptCache() {
|
||||
// rid of all the cached script wrappers and the second gets rid of the
|
||||
// scripts which are no longer referenced. The second also sweeps precisely,
|
||||
// which saves us doing yet another GC to make the heap iterable.
|
||||
heap->CollectAllGarbage(Heap::kNoGCFlags);
|
||||
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
|
||||
heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache");
|
||||
heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
|
||||
"Debug::CreateScriptCache");
|
||||
|
||||
ASSERT(script_cache_ == NULL);
|
||||
script_cache_ = new ScriptCache();
|
||||
@ -2280,7 +2283,8 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
|
||||
|
||||
// Perform GC to get unreferenced scripts evicted from the cache before
|
||||
// returning the content.
|
||||
isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
|
||||
isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
|
||||
"Debug::GetLoadedScripts");
|
||||
|
||||
// Get the scripts from the cache.
|
||||
return script_cache_->GetScripts();
|
||||
|
4
deps/v8/src/debug.h
vendored
@ -402,9 +402,11 @@ class Debug {
static void GenerateStoreICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedLoadICDebugBreak(MacroAssembler* masm);
static void GenerateKeyedStoreICDebugBreak(MacroAssembler* masm);
static void GenerateConstructCallDebugBreak(MacroAssembler* masm);
static void GenerateReturnDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubDebugBreak(MacroAssembler* masm);
static void GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm);
static void GenerateCallConstructStubDebugBreak(MacroAssembler* masm);
static void GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm);
static void GenerateSlotDebugBreak(MacroAssembler* masm);
static void GeneratePlainReturnLiveEdit(MacroAssembler* masm);
6
deps/v8/src/deoptimizer.cc
vendored
@ -1603,6 +1603,11 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(
SetFunction(output_frame->GetFunction());
expression_count_ = output_frame->GetExpressionCount();
expression_stack_ = new Object*[expression_count_];
// Get the source position using the unoptimized code.
Address pc = reinterpret_cast<Address>(output_frame->GetPc());
Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc));
source_position_ = code->SourcePosition(pc);

for (int i = 0; i < expression_count_; i++) {
SetExpression(i, output_frame->GetExpression(i));
}
@ -1625,6 +1630,7 @@ DeoptimizedFrameInfo::~DeoptimizedFrameInfo() {
delete[] parameters_;
}


void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
v->VisitPointer(BitCast<Object**>(&function_));
v->VisitPointers(parameters_, parameters_ + parameters_count_);
11
deps/v8/src/deoptimizer.h
vendored
@ -267,7 +267,11 @@ class Deoptimizer : public Malloced {
int ConvertJSFrameIndexToFrameIndex(int jsframe_index);

private:
static const int kNumberOfEntries = 8192;
#ifdef V8_TARGET_ARCH_MIPS
static const int kNumberOfEntries = 4096;
#else
static const int kNumberOfEntries = 16384;
#endif

Deoptimizer(Isolate* isolate,
JSFunction* function,
@ -745,6 +749,10 @@ class DeoptimizedFrameInfo : public Malloced {
return expression_stack_[index];
}

int GetSourcePosition() {
return source_position_;
}

private:
// Set the frame function.
void SetFunction(JSFunction* function) {
@ -768,6 +776,7 @@ class DeoptimizedFrameInfo : public Malloced {
int expression_count_;
Object** parameters_;
Object** expression_stack_;
int source_position_;

friend class Deoptimizer;
};
2
deps/v8/src/execution.cc
vendored
@ -877,7 +877,7 @@ MaybeObject* Execution::HandleStackGuardInterrupt() {
StackGuard* stack_guard = isolate->stack_guard();

if (stack_guard->IsGCRequest()) {
isolate->heap()->CollectAllGarbage(false);
isolate->heap()->CollectAllGarbage(false, "StackGuard GC request");
stack_guard->Continue(GC_REQUEST);
}
2
deps/v8/src/extensions/gc-extension.cc
vendored
@ -40,7 +40,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(


v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
return v8::Undefined();
}
34
deps/v8/src/factory.cc
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -485,8 +485,9 @@ Handle<Map> Factory::CopyMapDropTransitions(Handle<Map> src) {
|
||||
Handle<Map> Factory::GetElementsTransitionMap(
|
||||
Handle<JSObject> src,
|
||||
ElementsKind elements_kind) {
|
||||
CALL_HEAP_FUNCTION(isolate(),
|
||||
src->GetElementsTransitionMap(elements_kind),
|
||||
Isolate* i = isolate();
|
||||
CALL_HEAP_FUNCTION(i,
|
||||
src->GetElementsTransitionMap(i, elements_kind),
|
||||
Map);
|
||||
}
|
||||
|
||||
@ -754,12 +755,9 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
|
||||
if (force_initial_map ||
|
||||
type != JS_OBJECT_TYPE ||
|
||||
instance_size != JSObject::kHeaderSize) {
|
||||
ElementsKind default_elements_kind = FLAG_smi_only_arrays
|
||||
? FAST_SMI_ONLY_ELEMENTS
|
||||
: FAST_ELEMENTS;
|
||||
Handle<Map> initial_map = NewMap(type,
|
||||
instance_size,
|
||||
default_elements_kind);
|
||||
FAST_SMI_ONLY_ELEMENTS);
|
||||
function->set_initial_map(*initial_map);
|
||||
initial_map->set_constructor(*function);
|
||||
}
|
||||
@ -938,22 +936,28 @@ Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
|
||||
|
||||
|
||||
Handle<JSArray> Factory::NewJSArray(int capacity,
|
||||
ElementsKind elements_kind,
|
||||
PretenureFlag pretenure) {
|
||||
Handle<JSObject> obj = NewJSObject(isolate()->array_function(), pretenure);
|
||||
CALL_HEAP_FUNCTION(isolate(),
|
||||
Handle<JSArray>::cast(obj)->Initialize(capacity),
|
||||
isolate()->heap()->AllocateJSArrayAndStorage(
|
||||
elements_kind,
|
||||
0,
|
||||
capacity,
|
||||
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
|
||||
pretenure),
|
||||
JSArray);
|
||||
}
|
||||
|
||||
|
||||
Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
|
||||
ElementsKind elements_kind,
|
||||
PretenureFlag pretenure) {
|
||||
Handle<JSArray> result =
|
||||
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
|
||||
pretenure));
|
||||
result->set_length(Smi::FromInt(0));
|
||||
SetContent(result, elements);
|
||||
return result;
|
||||
CALL_HEAP_FUNCTION(
|
||||
isolate(),
|
||||
isolate()->heap()->AllocateJSArrayWithElements(*elements,
|
||||
elements_kind,
|
||||
pretenure),
|
||||
JSArray);
|
||||
}
|
||||
|
||||
|
||||
|
4
deps/v8/src/factory.h
vendored
@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -262,10 +262,12 @@ class Factory {

// JS arrays are pretenured when allocated by the parser.
Handle<JSArray> NewJSArray(int capacity,
ElementsKind elements_kind = FAST_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);

Handle<JSArray> NewJSArrayWithElements(
Handle<FixedArrayBase> elements,
ElementsKind elements_kind = FAST_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);

void SetElementsCapacityAndLength(Handle<JSArray> array,
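An illustrative call site (a sketch, not from this commit) showing the effect of the new defaulted elements_kind parameter; `isolate` is assumed to be a v8::internal::Isolate* with a live factory.

// Pre-sized array with the default FAST_ELEMENTS kind, not tenured:
Handle<JSArray> plain = isolate->factory()->NewJSArray(16);
// Same capacity, but requesting a specific kind and old-space allocation:
Handle<JSArray> doubles =
    isolate->factory()->NewJSArray(16, FAST_DOUBLE_ELEMENTS, TENURED);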
23
deps/v8/src/flag-definitions.h
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -112,28 +112,24 @@ DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
|
||||
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
|
||||
DEFINE_bool(harmony_collections, false,
|
||||
"enable harmony collections (sets, maps, and weak maps)")
|
||||
DEFINE_bool(harmony, false, "enable all harmony features")
|
||||
DEFINE_implication(harmony, harmony_typeof)
|
||||
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
|
||||
DEFINE_implication(harmony, harmony_scoping)
|
||||
DEFINE_implication(harmony, harmony_proxies)
|
||||
DEFINE_implication(harmony, harmony_collections)
|
||||
|
||||
// Flags for experimental implementation features.
|
||||
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
|
||||
DEFINE_bool(smi_only_arrays, false, "tracks arrays with only smi values")
|
||||
DEFINE_bool(string_slices, true, "use string slices")
|
||||
|
||||
DEFINE_bool(clever_optimizations,
|
||||
true,
|
||||
"Optimize object size, Array shift, DOM strings and string +")
|
||||
|
||||
// Flags for data representation optimizations
|
||||
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
|
||||
DEFINE_bool(string_slices, true, "use string slices")
|
||||
|
||||
// Flags for Crankshaft.
|
||||
DEFINE_bool(crankshaft, true, "use crankshaft")
|
||||
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
|
||||
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
|
||||
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
|
||||
DEFINE_bool(alloc_lithium, true, "use lithium register allocator")
|
||||
DEFINE_bool(use_lithium, true, "use lithium code generator")
|
||||
DEFINE_bool(use_range, true, "use hydrogen range analysis")
|
||||
DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
|
||||
DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
|
||||
@ -166,6 +162,7 @@ DEFINE_bool(use_osr, true, "use on-stack replacement")
|
||||
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
|
||||
DEFINE_int(stress_runs, 0, "number of stress runs")
|
||||
DEFINE_bool(optimize_closures, true, "optimize closures")
|
||||
DEFINE_int(loop_weight, 1, "loop weight for representation inference")
|
||||
|
||||
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
|
||||
DEFINE_bool(debug_code, false,
|
||||
@ -250,7 +247,7 @@ DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
|
||||
|
||||
// execution.cc
|
||||
DEFINE_int(stack_size, kPointerSize * 128,
|
||||
"default size of stack region v8 is allowed to use (in KkBytes)")
|
||||
"default size of stack region v8 is allowed to use (in kBytes)")
|
||||
|
||||
// frames.cc
|
||||
DEFINE_int(max_stack_trace_source_length, 300,
|
||||
@ -326,10 +323,6 @@ DEFINE_int(max_map_space_pages, MapSpace::kMaxMapPageIndex - 1,
|
||||
"forwarding pointers. That's actually a constant, but it's useful "
|
||||
"to control it with a flag for better testing.")
|
||||
|
||||
// mksnapshot.cc
|
||||
DEFINE_bool(h, false, "print this message")
|
||||
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
|
||||
|
||||
// objects.cc
|
||||
DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
|
||||
|
||||
|
26
deps/v8/src/full-codegen.cc
vendored
@ -285,6 +285,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
|
||||
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
|
||||
code->set_optimizable(info->IsOptimizable());
|
||||
cgen.PopulateDeoptimizationData(code);
|
||||
cgen.PopulateTypeFeedbackCells(code);
|
||||
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
|
||||
code->set_handler_table(*cgen.handler_table());
|
||||
#ifdef ENABLE_DEBUGGER_SUPPORT
|
||||
@ -329,8 +330,7 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
|
||||
ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
|
||||
if (!info_->HasDeoptimizationSupport()) return;
|
||||
int length = bailout_entries_.length();
|
||||
Handle<DeoptimizationOutputData> data =
|
||||
isolate()->factory()->
|
||||
Handle<DeoptimizationOutputData> data = isolate()->factory()->
|
||||
NewDeoptimizationOutputData(length, TENURED);
|
||||
for (int i = 0; i < length; i++) {
|
||||
data->SetAstId(i, Smi::FromInt(bailout_entries_[i].id));
|
||||
@ -340,6 +340,21 @@ void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
|
||||
}
|
||||
|
||||
|
||||
void FullCodeGenerator::PopulateTypeFeedbackCells(Handle<Code> code) {
|
||||
if (type_feedback_cells_.is_empty()) return;
|
||||
int length = type_feedback_cells_.length();
|
||||
int array_size = TypeFeedbackCells::LengthOfFixedArray(length);
|
||||
Handle<TypeFeedbackCells> cache = Handle<TypeFeedbackCells>::cast(
|
||||
isolate()->factory()->NewFixedArray(array_size, TENURED));
|
||||
for (int i = 0; i < length; i++) {
|
||||
cache->SetAstId(i, Smi::FromInt(type_feedback_cells_[i].ast_id));
|
||||
cache->SetCell(i, *type_feedback_cells_[i].cell);
|
||||
}
|
||||
code->set_type_feedback_cells(*cache);
|
||||
}
|
||||
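// Sketch of the flat pair layout PopulateTypeFeedbackCells builds above, with
// plain STL containers standing in for V8's handles and TypeFeedbackCells
// (assumption: one AST id slot plus one cell slot per entry, stored side by
// side in a fixed array).
#include <cstdint>
#include <vector>

struct SketchPropertyCell { std::intptr_t value; };

class SketchTypeFeedbackCells {
 public:
  static int LengthOfFixedArray(int entry_count) { return entry_count * 2; }
  explicit SketchTypeFeedbackCells(int entry_count)
      : slots_(LengthOfFixedArray(entry_count)) {}
  void SetAstId(int i, int ast_id) { slots_[2 * i] = ast_id; }
  void SetCell(int i, SketchPropertyCell* cell) {
    slots_[2 * i + 1] = reinterpret_cast<std::intptr_t>(cell);
  }
 private:
  std::vector<std::intptr_t> slots_;  // [id0, cell0, id1, cell1, ...]
};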
|
||||
|
||||
|
||||
void FullCodeGenerator::PrepareForBailout(Expression* node, State state) {
|
||||
PrepareForBailoutForId(node->id(), state);
|
||||
}
|
||||
@ -385,6 +400,13 @@ void FullCodeGenerator::PrepareForBailoutForId(unsigned id, State state) {
|
||||
}
|
||||
|
||||
|
||||
void FullCodeGenerator::RecordTypeFeedbackCell(
|
||||
unsigned id, Handle<JSGlobalPropertyCell> cell) {
|
||||
TypeFeedbackCellEntry entry = { id, cell };
|
||||
type_feedback_cells_.Add(entry);
|
||||
}
|
||||
|
||||
|
||||
void FullCodeGenerator::RecordStackCheck(unsigned ast_id) {
|
||||
// The pc offset does not need to be encoded and packed together with a
|
||||
// state.
|
||||
|
14
deps/v8/src/full-codegen.h
vendored
@ -85,13 +85,15 @@ class FullCodeGenerator: public AstVisitor {
|
||||
loop_depth_(0),
|
||||
context_(NULL),
|
||||
bailout_entries_(0),
|
||||
stack_checks_(2) { // There's always at least one.
|
||||
stack_checks_(2), // There's always at least one.
|
||||
type_feedback_cells_(0) {
|
||||
}
|
||||
|
||||
static bool MakeCode(CompilationInfo* info);
|
||||
|
||||
void Generate(CompilationInfo* info);
|
||||
void PopulateDeoptimizationData(Handle<Code> code);
|
||||
void PopulateTypeFeedbackCells(Handle<Code> code);
|
||||
|
||||
Handle<FixedArray> handler_table() { return handler_table_; }
|
||||
|
||||
@ -394,6 +396,10 @@ class FullCodeGenerator: public AstVisitor {
|
||||
void PrepareForBailout(Expression* node, State state);
|
||||
void PrepareForBailoutForId(unsigned id, State state);
|
||||
|
||||
// Cache cell support. This associates AST ids with global property cells
|
||||
// that will be cleared during GC and collected by the type-feedback oracle.
|
||||
void RecordTypeFeedbackCell(unsigned id, Handle<JSGlobalPropertyCell> cell);
|
||||
|
||||
// Record a call's return site offset, used to rebuild the frame if the
|
||||
// called function was inlined at the site.
|
||||
void RecordJSReturnSite(Call* call);
|
||||
@ -573,6 +579,11 @@ class FullCodeGenerator: public AstVisitor {
|
||||
unsigned pc_and_state;
|
||||
};
|
||||
|
||||
struct TypeFeedbackCellEntry {
|
||||
unsigned ast_id;
|
||||
Handle<JSGlobalPropertyCell> cell;
|
||||
};
|
||||
|
||||
|
||||
class ExpressionContext BASE_EMBEDDED {
|
||||
public:
|
||||
@ -759,6 +770,7 @@ class FullCodeGenerator: public AstVisitor {
|
||||
const ExpressionContext* context_;
|
||||
ZoneList<BailoutEntry> bailout_entries_;
|
||||
ZoneList<BailoutEntry> stack_checks_;
|
||||
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
|
||||
Handle<FixedArray> handler_table_;
|
||||
|
||||
friend class NestedStatement;
|
||||
|
102
deps/v8/src/heap-inl.h
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -438,8 +438,10 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
|
||||
}
|
||||
|
||||
|
||||
bool Heap::CollectGarbage(AllocationSpace space) {
|
||||
return CollectGarbage(space, SelectGarbageCollector(space));
|
||||
bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
|
||||
const char* collector_reason = NULL;
|
||||
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
|
||||
return CollectGarbage(space, collector, gc_reason, collector_reason);
|
||||
}
|
||||
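// Minimal sketch of the pattern introduced here, with simplified stand-in
// types: the public CollectGarbage overload accepts a human-readable reason,
// asks collector selection for its own reason string via an out-parameter,
// and forwards both so the tracer can print them. Not V8's real API surface.
#include <cstdio>

enum SketchCollector { SKETCH_SCAVENGER, SKETCH_MARK_COMPACTOR };

static SketchCollector SketchSelectCollector(bool old_space_pressure,
                                             const char** collector_reason) {
  if (old_space_pressure) {
    *collector_reason = "promotion limit reached";
    return SKETCH_MARK_COMPACTOR;
  }
  *collector_reason = NULL;  // a plain scavenge needs no extra explanation
  return SKETCH_SCAVENGER;
}

static void SketchCollectGarbage(bool old_space_pressure,
                                 const char* gc_reason) {
  const char* collector_reason = NULL;
  SketchCollector collector =
      SketchSelectCollector(old_space_pressure, &collector_reason);
  std::printf("%s", collector == SKETCH_MARK_COMPACTOR ? "Mark-sweep"
                                                       : "Scavenge");
  if (gc_reason != NULL) std::printf(" [%s]", gc_reason);
  if (collector_reason != NULL) std::printf(" [%s]", collector_reason);
  std::printf("\n");
}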
|
||||
|
||||
@ -474,7 +476,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
|
||||
amount_of_external_allocated_memory_ -
|
||||
amount_of_external_allocated_memory_at_last_global_gc_;
|
||||
if (amount_since_last_global_gc > external_allocation_limit_) {
|
||||
CollectAllGarbage(kNoGCFlags);
|
||||
CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
|
||||
}
|
||||
} else {
|
||||
// Avoid underflow.
|
||||
@ -523,7 +525,8 @@ Isolate* Heap::isolate() {
|
||||
} \
|
||||
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
|
||||
ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)-> \
|
||||
allocation_space()); \
|
||||
allocation_space(), \
|
||||
"allocation failure"); \
|
||||
__maybe_object__ = FUNCTION_CALL; \
|
||||
if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE; \
|
||||
if (__maybe_object__->IsOutOfMemory()) { \
|
||||
@ -531,7 +534,7 @@ Isolate* Heap::isolate() {
|
||||
} \
|
||||
if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY; \
|
||||
ISOLATE->counters()->gc_last_resort_from_handles()->Increment(); \
|
||||
ISOLATE->heap()->CollectAllAvailableGarbage(); \
|
||||
ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc"); \
|
||||
{ \
|
||||
AlwaysAllocateScope __scope__; \
|
||||
__maybe_object__ = FUNCTION_CALL; \
|
||||
@ -700,11 +703,94 @@ MaybeObject* TranscendentalCache::SubCache::Get(double input) {
|
||||
}
|
||||
|
||||
|
||||
Heap* _inline_get_heap_() {
|
||||
return HEAP;
|
||||
AlwaysAllocateScope::AlwaysAllocateScope() {
|
||||
// We shouldn't hit any nested scopes, because that requires
|
||||
// non-handle code to call handle code. The code still works but
|
||||
// performance will degrade, so we want to catch this situation
|
||||
// in debug mode.
|
||||
ASSERT(HEAP->always_allocate_scope_depth_ == 0);
|
||||
HEAP->always_allocate_scope_depth_++;
|
||||
}
|
||||
|
||||
|
||||
AlwaysAllocateScope::~AlwaysAllocateScope() {
|
||||
HEAP->always_allocate_scope_depth_--;
|
||||
ASSERT(HEAP->always_allocate_scope_depth_ == 0);
|
||||
}
|
||||
|
||||
|
||||
LinearAllocationScope::LinearAllocationScope() {
|
||||
HEAP->linear_allocation_scope_depth_++;
|
||||
}
|
||||
|
||||
|
||||
LinearAllocationScope::~LinearAllocationScope() {
|
||||
HEAP->linear_allocation_scope_depth_--;
|
||||
ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
|
||||
}
|
||||
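// Sketch of the RAII pattern used by AlwaysAllocateScope and
// LinearAllocationScope above: the constructor bumps a depth counter on some
// shared state and the destructor restores it, so "is the scope active" is
// just a depth > 0 check. A plain global stands in for the heap object here.
#include <cassert>

static int g_always_allocate_depth = 0;

class SketchAlwaysAllocateScope {
 public:
  SketchAlwaysAllocateScope() {
    assert(g_always_allocate_depth == 0);  // nesting is unexpected
    ++g_always_allocate_depth;
  }
  ~SketchAlwaysAllocateScope() {
    --g_always_allocate_depth;
    assert(g_always_allocate_depth == 0);
  }
};

static bool SketchAlwaysAllocate() { return g_always_allocate_depth > 0; }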
|
||||
|
||||
#ifdef DEBUG
|
||||
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
|
||||
for (Object** current = start; current < end; current++) {
|
||||
if ((*current)->IsHeapObject()) {
|
||||
HeapObject* object = HeapObject::cast(*current);
|
||||
ASSERT(HEAP->Contains(object));
|
||||
ASSERT(object->map()->IsMap());
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
double GCTracer::SizeOfHeapObjects() {
|
||||
return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
|
||||
}
|
||||
|
||||
|
||||
#ifdef DEBUG
|
||||
DisallowAllocationFailure::DisallowAllocationFailure() {
|
||||
old_state_ = HEAP->disallow_allocation_failure_;
|
||||
HEAP->disallow_allocation_failure_ = true;
|
||||
}
|
||||
|
||||
|
||||
DisallowAllocationFailure::~DisallowAllocationFailure() {
|
||||
HEAP->disallow_allocation_failure_ = old_state_;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef DEBUG
|
||||
AssertNoAllocation::AssertNoAllocation() {
|
||||
old_state_ = HEAP->allow_allocation(false);
|
||||
}
|
||||
|
||||
|
||||
AssertNoAllocation::~AssertNoAllocation() {
|
||||
HEAP->allow_allocation(old_state_);
|
||||
}
|
||||
|
||||
|
||||
DisableAssertNoAllocation::DisableAssertNoAllocation() {
|
||||
old_state_ = HEAP->allow_allocation(true);
|
||||
}
|
||||
|
||||
|
||||
DisableAssertNoAllocation::~DisableAssertNoAllocation() {
|
||||
HEAP->allow_allocation(old_state_);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
AssertNoAllocation::AssertNoAllocation() { }
|
||||
AssertNoAllocation::~AssertNoAllocation() { }
|
||||
DisableAssertNoAllocation::DisableAssertNoAllocation() { }
|
||||
DisableAssertNoAllocation::~DisableAssertNoAllocation() { }
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
||||
#endif // V8_HEAP_INL_H_
|
||||
|
285
deps/v8/src/heap.cc
vendored
@ -236,16 +236,19 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
|
||||
}
|
||||
|
||||
|
||||
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
|
||||
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
|
||||
const char** reason) {
|
||||
// Is global GC requested?
|
||||
if (space != NEW_SPACE || FLAG_gc_global) {
|
||||
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
|
||||
*reason = "GC in old space requested";
|
||||
return MARK_COMPACTOR;
|
||||
}
|
||||
|
||||
// Is enough data promoted to justify a global GC?
|
||||
if (OldGenerationPromotionLimitReached()) {
|
||||
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
|
||||
*reason = "promotion limit reached";
|
||||
return MARK_COMPACTOR;
|
||||
}
|
||||
|
||||
@ -253,6 +256,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
|
||||
if (old_gen_exhausted_) {
|
||||
isolate_->counters()->
|
||||
gc_compactor_caused_by_oldspace_exhaustion()->Increment();
|
||||
*reason = "old generations exhausted";
|
||||
return MARK_COMPACTOR;
|
||||
}
|
||||
|
||||
@ -268,10 +272,12 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
|
||||
if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
|
||||
isolate_->counters()->
|
||||
gc_compactor_caused_by_oldspace_exhaustion()->Increment();
|
||||
*reason = "scavenge might not succeed";
|
||||
return MARK_COMPACTOR;
|
||||
}
|
||||
|
||||
// Default
|
||||
*reason = NULL;
|
||||
return SCAVENGER;
|
||||
}
|
||||
|
||||
@ -431,17 +437,17 @@ void Heap::GarbageCollectionEpilogue() {
|
||||
}
|
||||
|
||||
|
||||
void Heap::CollectAllGarbage(int flags) {
|
||||
void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
|
||||
// Since we are ignoring the return value, the exact choice of space does
|
||||
// not matter, so long as we do not specify NEW_SPACE, which would not
|
||||
// cause a full GC.
|
||||
mark_compact_collector_.SetFlags(flags);
|
||||
CollectGarbage(OLD_POINTER_SPACE);
|
||||
CollectGarbage(OLD_POINTER_SPACE, gc_reason);
|
||||
mark_compact_collector_.SetFlags(kNoGCFlags);
|
||||
}
|
||||
|
||||
|
||||
void Heap::CollectAllAvailableGarbage() {
|
||||
void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
|
||||
// Since we are ignoring the return value, the exact choice of space does
|
||||
// not matter, so long as we do not specify NEW_SPACE, which would not
|
||||
// cause a full GC.
|
||||
@ -453,11 +459,12 @@ void Heap::CollectAllAvailableGarbage() {
|
||||
// Note: as weak callbacks can execute arbitrary code, we cannot
|
||||
// hope that eventually there will be no weak callbacks invocations.
|
||||
// Therefore stop recollecting after several attempts.
|
||||
mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
|
||||
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
|
||||
kReduceMemoryFootprintMask);
|
||||
isolate_->compilation_cache()->Clear();
|
||||
const int kMaxNumberOfAttempts = 7;
|
||||
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
|
||||
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
|
||||
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
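// Standalone sketch of the bounded "last resort" loop above: run full GCs
// until one reports that another pass is unlikely to free more memory, but
// cap the number of attempts because weak callbacks can keep producing new
// garbage forever. The callback type is a stand-in for the real collector
// entry point.
#include <functional>

static void SketchCollectAllAvailable(
    const std::function<bool()>& run_full_gc) {
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    // run_full_gc() returns true if another major GC could collect more.
    if (!run_full_gc()) break;
  }
}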
@ -469,7 +476,10 @@ void Heap::CollectAllAvailableGarbage() {
|
||||
}
|
||||
|
||||
|
||||
bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
|
||||
bool Heap::CollectGarbage(AllocationSpace space,
|
||||
GarbageCollector collector,
|
||||
const char* gc_reason,
|
||||
const char* collector_reason) {
|
||||
// The VM is in the GC state until exiting this function.
|
||||
VMState state(isolate_, GC);
|
||||
|
||||
@ -497,11 +507,12 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
|
||||
PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
|
||||
}
|
||||
collector = SCAVENGER;
|
||||
collector_reason = "incremental marking delaying mark-sweep";
|
||||
}
|
||||
|
||||
bool next_gc_likely_to_collect_more = false;
|
||||
|
||||
{ GCTracer tracer(this);
|
||||
{ GCTracer tracer(this, gc_reason, collector_reason);
|
||||
GarbageCollectionPrologue();
|
||||
// The GC count was incremented in the prologue. Tell the tracer about
|
||||
// it.
|
||||
@ -533,7 +544,7 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
|
||||
|
||||
|
||||
void Heap::PerformScavenge() {
|
||||
GCTracer tracer(this);
|
||||
GCTracer tracer(this, NULL, NULL);
|
||||
if (incremental_marking()->IsStopped()) {
|
||||
PerformGarbageCollection(SCAVENGER, &tracer);
|
||||
} else {
|
||||
@ -588,27 +599,33 @@ void Heap::ReserveSpace(
|
||||
while (gc_performed && counter++ < kThreshold) {
|
||||
gc_performed = false;
|
||||
if (!new_space->ReserveSpace(new_space_size)) {
|
||||
Heap::CollectGarbage(NEW_SPACE);
|
||||
Heap::CollectGarbage(NEW_SPACE,
|
||||
"failed to reserve space in the new space");
|
||||
gc_performed = true;
|
||||
}
|
||||
if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
|
||||
Heap::CollectGarbage(OLD_POINTER_SPACE);
|
||||
Heap::CollectGarbage(OLD_POINTER_SPACE,
|
||||
"failed to reserve space in the old pointer space");
|
||||
gc_performed = true;
|
||||
}
|
||||
if (!(old_data_space->ReserveSpace(data_space_size))) {
|
||||
Heap::CollectGarbage(OLD_DATA_SPACE);
|
||||
Heap::CollectGarbage(OLD_DATA_SPACE,
|
||||
"failed to reserve space in the old data space");
|
||||
gc_performed = true;
|
||||
}
|
||||
if (!(code_space->ReserveSpace(code_space_size))) {
|
||||
Heap::CollectGarbage(CODE_SPACE);
|
||||
Heap::CollectGarbage(CODE_SPACE,
|
||||
"failed to reserve space in the code space");
|
||||
gc_performed = true;
|
||||
}
|
||||
if (!(map_space->ReserveSpace(map_space_size))) {
|
||||
Heap::CollectGarbage(MAP_SPACE);
|
||||
Heap::CollectGarbage(MAP_SPACE,
|
||||
"failed to reserve space in the map space");
|
||||
gc_performed = true;
|
||||
}
|
||||
if (!(cell_space->ReserveSpace(cell_space_size))) {
|
||||
Heap::CollectGarbage(CELL_SPACE);
|
||||
Heap::CollectGarbage(CELL_SPACE,
|
||||
"failed to reserve space in the cell space");
|
||||
gc_performed = true;
|
||||
}
|
||||
// We add a slack-factor of 2 in order to have space for a series of
|
||||
@ -620,7 +637,8 @@ void Heap::ReserveSpace(
|
||||
large_object_size += cell_space_size + map_space_size + code_space_size +
|
||||
data_space_size + pointer_space_size;
|
||||
if (!(lo_space->ReserveSpace(large_object_size))) {
|
||||
Heap::CollectGarbage(LO_SPACE);
|
||||
Heap::CollectGarbage(LO_SPACE,
|
||||
"failed to reserve space in the large object space");
|
||||
gc_performed = true;
|
||||
}
|
||||
}
|
||||
@ -902,8 +920,7 @@ void Heap::MarkCompactPrologue() {
|
||||
|
||||
CompletelyClearInstanceofCache();
|
||||
|
||||
// TODO(1605) select heuristic for flushing NumberString cache with
|
||||
// FlushNumberStringCache
|
||||
FlushNumberStringCache();
|
||||
if (FLAG_cleanup_code_caches_at_gc) {
|
||||
polymorphic_code_cache()->set_cache(undefined_value());
|
||||
}
|
||||
@ -2512,7 +2529,10 @@ bool Heap::CreateInitialObjects() {
|
||||
}
|
||||
set_intrinsic_function_names(StringDictionary::cast(obj));
|
||||
|
||||
if (InitializeNumberStringCache()->IsFailure()) return false;
|
||||
{ MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
|
||||
if (!maybe_obj->ToObject(&obj)) return false;
|
||||
}
|
||||
set_number_string_cache(FixedArray::cast(obj));
|
||||
|
||||
// Allocate cache for single character ASCII strings.
|
||||
{ MaybeObject* maybe_obj =
|
||||
@ -2622,20 +2642,44 @@ void StringSplitCache::Clear(FixedArray* cache) {
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* Heap::InitializeNumberStringCache() {
|
||||
// Compute the size of the number string cache based on the max heap size.
|
||||
// max_semispace_size_ == 512 KB => number_string_cache_size = 32.
|
||||
// max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
|
||||
int number_string_cache_size = max_semispace_size_ / 512;
|
||||
number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
|
||||
Object* obj;
|
||||
MaybeObject* Heap::AllocateInitialNumberStringCache() {
|
||||
MaybeObject* maybe_obj =
|
||||
AllocateFixedArray(number_string_cache_size * 2, TENURED);
|
||||
if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
|
||||
AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
|
||||
return maybe_obj;
|
||||
}
|
||||
|
||||
|
||||
int Heap::FullSizeNumberStringCacheLength() {
|
||||
// Compute the size of the number string cache based on the max newspace size.
|
||||
// The number string cache has a minimum size based on twice the initial cache
|
||||
// size to ensure that it is bigger after being made 'full size'.
|
||||
int number_string_cache_size = max_semispace_size_ / 512;
|
||||
number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
|
||||
Min(0x4000, number_string_cache_size));
|
||||
// There is a string and a number per entry so the length is twice the number
|
||||
// of entries.
|
||||
return number_string_cache_size * 2;
|
||||
}
|
||||
|
||||
|
||||
void Heap::AllocateFullSizeNumberStringCache() {
|
||||
// The idea is to have a small number string cache in the snapshot to keep
|
||||
// boot-time memory usage down. If we expand the number string cache already
|
||||
// while creating the snapshot then that didn't work out.
|
||||
ASSERT(!Serializer::enabled());
|
||||
MaybeObject* maybe_obj =
|
||||
AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
|
||||
Object* new_cache;
|
||||
if (maybe_obj->ToObject(&new_cache)) {
|
||||
// We don't bother to repopulate the cache with entries from the old cache.
|
||||
// It will be repopulated soon enough with new strings.
|
||||
set_number_string_cache(FixedArray::cast(new_cache));
|
||||
}
|
||||
// If allocation fails then we just return without doing anything. It is only
|
||||
// a cache, so best effort is OK here.
|
||||
}
|
||||
|
||||
|
||||
void Heap::FlushNumberStringCache() {
|
||||
// Flush the number to string cache.
|
||||
int len = number_string_cache()->length();
|
||||
@ -2681,11 +2725,17 @@ void Heap::SetNumberStringCache(Object* number, String* string) {
|
||||
int mask = (number_string_cache()->length() >> 1) - 1;
|
||||
if (number->IsSmi()) {
|
||||
hash = smi_get_hash(Smi::cast(number)) & mask;
|
||||
number_string_cache()->set(hash * 2, Smi::cast(number));
|
||||
} else {
|
||||
hash = double_get_hash(number->Number()) & mask;
|
||||
number_string_cache()->set(hash * 2, number);
|
||||
}
|
||||
if (number_string_cache()->get(hash * 2) != undefined_value() &&
|
||||
number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
|
||||
// The first time we have a hash collision, we move to the full sized
|
||||
// number string cache.
|
||||
AllocateFullSizeNumberStringCache();
|
||||
return;
|
||||
}
|
||||
number_string_cache()->set(hash * 2, number);
|
||||
number_string_cache()->set(hash * 2 + 1, string);
|
||||
}
|
||||
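// Sketch of the cache policy implemented above: start with a small
// direct-mapped number->string table (keeps the snapshot small) and, on the
// first insert that would overwrite a live entry, reallocate at full size
// instead of evicting. std::vector and int keys stand in for FixedArray/Smi;
// the sizes are illustrative.
#include <cstddef>
#include <string>
#include <vector>

class SketchNumberStringCache {
 public:
  SketchNumberStringCache() : entries_(kInitialSize) {}
  void Put(int number, const std::string& str) {
    std::size_t index = Index(number);
    if (entries_[index].used && entries_.size() < kFullSize) {
      entries_.assign(kFullSize, Entry());  // grow once; old entries dropped
      index = Index(number);
    }
    entries_[index].used = true;
    entries_[index].number = number;
    entries_[index].str = str;
  }
 private:
  struct Entry {
    Entry() : used(false), number(0) {}
    bool used;
    int number;
    std::string str;
  };
  std::size_t Index(int number) const {
    return static_cast<std::size_t>(number) & (entries_.size() - 1);
  }
  static const std::size_t kInitialSize = 256;   // must be a power of two
  static const std::size_t kFullSize = 16384;    // must be a power of two
  std::vector<Entry> entries_;
};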
|
||||
@ -3307,6 +3357,8 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
|
||||
code->set_check_type(RECEIVER_MAP_CHECK);
|
||||
}
|
||||
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
|
||||
code->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
|
||||
SKIP_WRITE_BARRIER);
|
||||
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
|
||||
code->set_gc_metadata(Smi::FromInt(0));
|
||||
// Allow self references to created code object by patching the handle to
|
||||
@ -3726,8 +3778,8 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
|
||||
Map::cast(initial_map)->set_constructor(constructor);
|
||||
}
|
||||
// Allocate the object based on the constructors initial map.
|
||||
MaybeObject* result =
|
||||
AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
|
||||
MaybeObject* result = AllocateJSObjectFromMap(
|
||||
constructor->initial_map(), pretenure);
|
||||
#ifdef DEBUG
|
||||
// Make sure result is NOT a global object if valid.
|
||||
Object* non_failure;
|
||||
@ -3737,6 +3789,64 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* Heap::AllocateJSArrayAndStorage(
|
||||
ElementsKind elements_kind,
|
||||
int length,
|
||||
int capacity,
|
||||
ArrayStorageAllocationMode mode,
|
||||
PretenureFlag pretenure) {
|
||||
ASSERT(capacity >= length);
|
||||
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
|
||||
JSArray* array;
|
||||
if (!maybe_array->To(&array)) return maybe_array;
|
||||
|
||||
if (capacity == 0) {
|
||||
array->set_length(Smi::FromInt(0));
|
||||
array->set_elements(empty_fixed_array());
|
||||
return array;
|
||||
}
|
||||
|
||||
FixedArrayBase* elms;
|
||||
MaybeObject* maybe_elms = NULL;
|
||||
if (elements_kind == FAST_DOUBLE_ELEMENTS) {
|
||||
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
|
||||
maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
|
||||
} else {
|
||||
ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
|
||||
maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
|
||||
}
|
||||
} else {
|
||||
ASSERT(elements_kind == FAST_ELEMENTS ||
|
||||
elements_kind == FAST_SMI_ONLY_ELEMENTS);
|
||||
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
|
||||
maybe_elms = AllocateUninitializedFixedArray(capacity);
|
||||
} else {
|
||||
ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
|
||||
maybe_elms = AllocateFixedArrayWithHoles(capacity);
|
||||
}
|
||||
}
|
||||
if (!maybe_elms->To(&elms)) return maybe_elms;
|
||||
|
||||
array->set_elements(elms);
|
||||
array->set_length(Smi::FromInt(length));
|
||||
return array;
|
||||
}
|
||||
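// Sketch of how the two storage modes above are intended to be chosen
// (assumed typical call-site logic, not code from this patch): skipping
// element initialization is only safe when the caller immediately writes
// every slot it allocated; any slot that might be read first must be a hole.
enum SketchStorageMode {
  SKETCH_DONT_INITIALIZE_ARRAY_ELEMENTS,
  SKETCH_INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

static SketchStorageMode SketchChooseMode(int length, int capacity) {
  return capacity > length ? SKETCH_INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
                           : SKETCH_DONT_INITIALIZE_ARRAY_ELEMENTS;
}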
|
||||
|
||||
MaybeObject* Heap::AllocateJSArrayWithElements(
|
||||
FixedArrayBase* elements,
|
||||
ElementsKind elements_kind,
|
||||
PretenureFlag pretenure) {
|
||||
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
|
||||
JSArray* array;
|
||||
if (!maybe_array->To(&array)) return maybe_array;
|
||||
|
||||
array->set_elements(elements);
|
||||
array->set_length(Smi::FromInt(elements->length()));
|
||||
return array;
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
|
||||
// Allocate map.
|
||||
// TODO(rossberg): Once we optimize proxies, think about a scheme to share
|
||||
@ -4241,6 +4351,25 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* Heap::AllocateJSArray(
|
||||
ElementsKind elements_kind,
|
||||
PretenureFlag pretenure) {
|
||||
Context* global_context = isolate()->context()->global_context();
|
||||
JSFunction* array_function = global_context->array_function();
|
||||
Map* map = array_function->initial_map();
|
||||
if (elements_kind == FAST_ELEMENTS || !FLAG_smi_only_arrays) {
|
||||
map = Map::cast(global_context->object_js_array_map());
|
||||
} else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
|
||||
map = Map::cast(global_context->double_js_array_map());
|
||||
} else {
|
||||
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
|
||||
ASSERT(map == global_context->smi_js_array_map());
|
||||
}
|
||||
|
||||
return AllocateJSObjectFromMap(map, pretenure);
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* Heap::AllocateEmptyFixedArray() {
|
||||
int size = FixedArray::SizeFor(0);
|
||||
Object* result;
|
||||
@ -4431,15 +4560,36 @@ MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
|
||||
PretenureFlag pretenure) {
|
||||
if (length == 0) return empty_fixed_double_array();
|
||||
|
||||
Object* obj;
|
||||
{ MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
|
||||
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
|
||||
Object* elements_object;
|
||||
MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
|
||||
if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
|
||||
FixedDoubleArray* elements =
|
||||
reinterpret_cast<FixedDoubleArray*>(elements_object);
|
||||
|
||||
elements->set_map_no_write_barrier(fixed_double_array_map());
|
||||
elements->set_length(length);
|
||||
return elements;
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
|
||||
int length,
|
||||
PretenureFlag pretenure) {
|
||||
if (length == 0) return empty_fixed_double_array();
|
||||
|
||||
Object* elements_object;
|
||||
MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
|
||||
if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
|
||||
FixedDoubleArray* elements =
|
||||
reinterpret_cast<FixedDoubleArray*>(elements_object);
|
||||
|
||||
for (int i = 0; i < length; ++i) {
|
||||
elements->set_the_hole(i);
|
||||
}
|
||||
|
||||
reinterpret_cast<FixedDoubleArray*>(obj)->set_map_no_write_barrier(
|
||||
fixed_double_array_map());
|
||||
FixedDoubleArray::cast(obj)->set_length(length);
|
||||
return obj;
|
||||
elements->set_map_no_write_barrier(fixed_double_array_map());
|
||||
elements->set_length(length);
|
||||
return elements;
|
||||
}
|
||||
|
||||
|
||||
@ -4488,6 +4638,9 @@ MaybeObject* Heap::AllocateGlobalContext() {
|
||||
}
|
||||
Context* context = reinterpret_cast<Context*>(result);
|
||||
context->set_map_no_write_barrier(global_context_map());
|
||||
context->set_smi_js_array_map(undefined_value());
|
||||
context->set_double_js_array_map(undefined_value());
|
||||
context->set_object_js_array_map(undefined_value());
|
||||
ASSERT(context->IsGlobalContext());
|
||||
ASSERT(result->IsContext());
|
||||
return result;
|
||||
@ -4607,7 +4760,7 @@ bool Heap::IsHeapIterable() {
|
||||
void Heap::EnsureHeapIsIterable() {
|
||||
ASSERT(IsAllocationAllowed());
|
||||
if (!IsHeapIterable()) {
|
||||
CollectAllGarbage(kMakeHeapIterableMask);
|
||||
CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
|
||||
}
|
||||
ASSERT(IsHeapIterable());
|
||||
}
|
||||
@ -4677,7 +4830,7 @@ bool Heap::IdleNotification(int hint) {
|
||||
isolate_->compilation_cache()->Clear();
|
||||
uncommit = true;
|
||||
}
|
||||
CollectAllGarbage(kNoGCFlags);
|
||||
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
|
||||
gc_count_at_last_idle_gc_ = gc_count_;
|
||||
if (uncommit) {
|
||||
new_space_.Shrink();
|
||||
@ -4718,9 +4871,10 @@ bool Heap::IdleGlobalGC() {
|
||||
if (number_idle_notifications_ == kIdlesBeforeScavenge) {
|
||||
if (contexts_disposed_ > 0) {
|
||||
HistogramTimerScope scope(isolate_->counters()->gc_context());
|
||||
CollectAllGarbage(kNoGCFlags);
|
||||
CollectAllGarbage(kReduceMemoryFootprintMask,
|
||||
"idle notification: contexts disposed");
|
||||
} else {
|
||||
CollectGarbage(NEW_SPACE);
|
||||
CollectGarbage(NEW_SPACE, "idle notification");
|
||||
}
|
||||
new_space_.Shrink();
|
||||
last_idle_notification_gc_count_ = gc_count_;
|
||||
@ -4730,12 +4884,12 @@ bool Heap::IdleGlobalGC() {
|
||||
// generated code for cached functions.
|
||||
isolate_->compilation_cache()->Clear();
|
||||
|
||||
CollectAllGarbage(kNoGCFlags);
|
||||
CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
|
||||
new_space_.Shrink();
|
||||
last_idle_notification_gc_count_ = gc_count_;
|
||||
|
||||
} else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
|
||||
CollectAllGarbage(kNoGCFlags);
|
||||
CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
|
||||
new_space_.Shrink();
|
||||
last_idle_notification_gc_count_ = gc_count_;
|
||||
number_idle_notifications_ = 0;
|
||||
@ -4745,7 +4899,8 @@ bool Heap::IdleGlobalGC() {
|
||||
contexts_disposed_ = 0;
|
||||
} else {
|
||||
HistogramTimerScope scope(isolate_->counters()->gc_context());
|
||||
CollectAllGarbage(kNoGCFlags);
|
||||
CollectAllGarbage(kReduceMemoryFootprintMask,
|
||||
"idle notification: contexts disposed");
|
||||
last_idle_notification_gc_count_ = gc_count_;
|
||||
}
|
||||
// If this is the first idle notification, we reset the
|
||||
@ -6376,18 +6531,24 @@ static intptr_t CountTotalHolesSize() {
|
||||
}
|
||||
|
||||
|
||||
GCTracer::GCTracer(Heap* heap)
|
||||
GCTracer::GCTracer(Heap* heap,
|
||||
const char* gc_reason,
|
||||
const char* collector_reason)
|
||||
: start_time_(0.0),
|
||||
start_size_(0),
|
||||
start_object_size_(0),
|
||||
start_memory_size_(0),
|
||||
gc_count_(0),
|
||||
full_gc_count_(0),
|
||||
allocated_since_last_gc_(0),
|
||||
spent_in_mutator_(0),
|
||||
promoted_objects_size_(0),
|
||||
heap_(heap) {
|
||||
heap_(heap),
|
||||
gc_reason_(gc_reason),
|
||||
collector_reason_(collector_reason) {
|
||||
if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
|
||||
start_time_ = OS::TimeCurrentMillis();
|
||||
start_size_ = heap_->SizeOfObjects();
|
||||
start_object_size_ = heap_->SizeOfObjects();
|
||||
start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
|
||||
|
||||
for (int i = 0; i < Scope::kNumberOfScopes; i++) {
|
||||
scopes_[i] = 0;
|
||||
@ -6434,13 +6595,20 @@ GCTracer::~GCTracer() {
|
||||
}
|
||||
}
|
||||
|
||||
PrintF("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
|
||||
|
||||
if (!FLAG_trace_gc_nvp) {
|
||||
int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
|
||||
|
||||
PrintF("%s %.1f -> %.1f MB, ",
|
||||
double end_memory_size_mb =
|
||||
static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
|
||||
|
||||
PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
|
||||
CollectorString(),
|
||||
static_cast<double>(start_size_) / MB,
|
||||
SizeOfHeapObjects());
|
||||
static_cast<double>(start_object_size_) / MB,
|
||||
static_cast<double>(start_memory_size_) / MB,
|
||||
SizeOfHeapObjects(),
|
||||
end_memory_size_mb);
|
||||
|
||||
if (external_time > 0) PrintF("%d / ", external_time);
|
||||
PrintF("%d ms", time);
|
||||
@ -6457,6 +6625,15 @@ GCTracer::~GCTracer() {
|
||||
longest_step_);
|
||||
}
|
||||
}
|
||||
|
||||
if (gc_reason_ != NULL) {
|
||||
PrintF(" [%s]", gc_reason_);
|
||||
}
|
||||
|
||||
if (collector_reason_ != NULL) {
|
||||
PrintF(" [%s]", collector_reason_);
|
||||
}
|
||||
|
||||
PrintF(".\n");
|
||||
} else {
|
||||
PrintF("pause=%d ", time);
|
||||
@ -6494,7 +6671,7 @@ GCTracer::~GCTracer() {
|
||||
PrintF("misc_compaction=%d ",
|
||||
static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
|
||||
|
||||
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
|
||||
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
|
||||
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
|
||||
PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
|
||||
in_free_list_or_wasted_before_gc_);
|
||||
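// Illustrative only: with the timestamp prefix, the memory-allocator sizes in
// parentheses and the reason strings added above, a --trace-gc line now has
// roughly this shape (all numbers and reasons below are invented):
//
//     2384 ms: Scavenge 4.1 (32.8) -> 3.2 (32.8) MB, 1 ms [allocation failure].
//     9512 ms: Mark-sweep 12.7 (48.3) -> 8.9 (40.1) MB, 54 ms [idle notification] [promotion limit reached].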
|
218
deps/v8/src/heap.h
vendored
@ -45,12 +45,6 @@
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
// TODO(isolates): remove HEAP here
|
||||
#define HEAP (_inline_get_heap_())
|
||||
class Heap;
|
||||
inline Heap* _inline_get_heap_();
|
||||
|
||||
|
||||
// Defines all the roots in Heap.
|
||||
#define STRONG_ROOT_LIST(V) \
|
||||
V(Map, byte_array_map, ByteArrayMap) \
|
||||
@ -432,6 +426,11 @@ class ExternalStringTable {
|
||||
};
|
||||
|
||||
|
||||
enum ArrayStorageAllocationMode {
|
||||
DONT_INITIALIZE_ARRAY_ELEMENTS,
|
||||
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
|
||||
};
|
||||
|
||||
class Heap {
|
||||
public:
|
||||
// Configure heap size before setup. Return false if the heap has been
|
||||
@ -533,6 +532,30 @@ class Heap {
|
||||
MUST_USE_RESULT MaybeObject* AllocateJSObject(
|
||||
JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
|
||||
|
||||
// Allocate a JSArray with no elements
|
||||
MUST_USE_RESULT MaybeObject* AllocateEmptyJSArray(
|
||||
ElementsKind elements_kind,
|
||||
PretenureFlag pretenure = NOT_TENURED) {
|
||||
return AllocateJSArrayAndStorage(elements_kind, 0, 0,
|
||||
DONT_INITIALIZE_ARRAY_ELEMENTS,
|
||||
pretenure);
|
||||
}
|
||||
|
||||
// Allocate a JSArray with a specified length but elements that are left
|
||||
// uninitialized.
|
||||
MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
|
||||
ElementsKind elements_kind,
|
||||
int length,
|
||||
int capacity,
|
||||
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
|
||||
PretenureFlag pretenure = NOT_TENURED);
|
||||
|
||||
// Allocate a JSArray with no elements
|
||||
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
|
||||
FixedArrayBase* array_base,
|
||||
ElementsKind elements_kind,
|
||||
PretenureFlag pretenure = NOT_TENURED);
|
||||
|
||||
// Allocates and initializes a new global object based on a constructor.
|
||||
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
|
||||
// failed.
|
||||
@ -779,6 +802,13 @@ class Heap {
|
||||
int length,
|
||||
PretenureFlag pretenure = NOT_TENURED);
|
||||
|
||||
// Allocates a fixed double array with hole values. Returns
|
||||
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
|
||||
// Please note this does not perform a garbage collection.
|
||||
MUST_USE_RESULT MaybeObject* AllocateFixedDoubleArrayWithHoles(
|
||||
int length,
|
||||
PretenureFlag pretenure = NOT_TENURED);
|
||||
|
||||
// AllocateHashTable is identical to AllocateFixedArray except
|
||||
// that the resulting object has hash_table_map as map.
|
||||
MUST_USE_RESULT MaybeObject* AllocateHashTable(
|
||||
@ -995,23 +1025,28 @@ class Heap {
|
||||
// Performs garbage collection operation.
|
||||
// Returns whether there is a chance that another major GC could
|
||||
// collect more garbage.
|
||||
bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
|
||||
bool CollectGarbage(AllocationSpace space,
|
||||
GarbageCollector collector,
|
||||
const char* gc_reason,
|
||||
const char* collector_reason);
|
||||
|
||||
// Performs garbage collection operation.
|
||||
// Returns whether there is a chance that another major GC could
|
||||
// collect more garbage.
|
||||
inline bool CollectGarbage(AllocationSpace space);
|
||||
inline bool CollectGarbage(AllocationSpace space,
|
||||
const char* gc_reason = NULL);
|
||||
|
||||
static const int kNoGCFlags = 0;
|
||||
static const int kMakeHeapIterableMask = 1;
|
||||
static const int kReduceMemoryFootprintMask = 2;
|
||||
|
||||
// Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
|
||||
// non-zero, then the slower precise sweeper is used, which leaves the heap
|
||||
// in a state where we can iterate over the heap visiting all objects.
|
||||
void CollectAllGarbage(int flags);
|
||||
void CollectAllGarbage(int flags, const char* gc_reason = NULL);
|
||||
|
||||
// Last hope GC, should try to squeeze as much as possible.
|
||||
void CollectAllAvailableGarbage();
|
||||
void CollectAllAvailableGarbage(const char* gc_reason = NULL);
|
||||
|
||||
// Check whether the heap is currently iterable.
|
||||
bool IsHeapIterable();
|
||||
@ -1711,7 +1746,8 @@ class Heap {
|
||||
}
|
||||
|
||||
// Checks whether a global GC is necessary
|
||||
GarbageCollector SelectGarbageCollector(AllocationSpace space);
|
||||
GarbageCollector SelectGarbageCollector(AllocationSpace space,
|
||||
const char** reason);
|
||||
|
||||
// Performs garbage collection
|
||||
// Returns whether there is a chance another major GC could
|
||||
@ -1751,6 +1787,11 @@ class Heap {
|
||||
Object* to_number,
|
||||
byte kind);
|
||||
|
||||
// Allocate a JSArray with no elements
|
||||
MUST_USE_RESULT MaybeObject* AllocateJSArray(
|
||||
ElementsKind elements_kind,
|
||||
PretenureFlag pretenure = NOT_TENURED);
|
||||
|
||||
// Allocate empty fixed array.
|
||||
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
|
||||
|
||||
@ -1798,8 +1839,13 @@ class Heap {
|
||||
GCTracer* tracer_;
|
||||
|
||||
|
||||
// Initializes the number to string cache based on the max semispace size.
|
||||
MUST_USE_RESULT MaybeObject* InitializeNumberStringCache();
|
||||
// Allocates a small number to string cache.
|
||||
MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
|
||||
// Creates and installs the full-sized number string cache.
|
||||
void AllocateFullSizeNumberStringCache();
|
||||
// Get the length of the number to string cache based on the max semispace
|
||||
// size.
|
||||
int FullSizeNumberStringCacheLength();
|
||||
// Flush the number to string cache.
|
||||
void FlushNumberStringCache();
|
||||
|
||||
@ -1896,6 +1942,7 @@ class Heap {
|
||||
|
||||
static const int kInitialSymbolTableSize = 2048;
|
||||
static const int kInitialEvalCacheSize = 64;
|
||||
static const int kInitialNumberStringCacheSize = 256;
|
||||
|
||||
// Maximum GC pause.
|
||||
int max_gc_pause_;
|
||||
@ -1995,32 +2042,15 @@ class HeapStats {
|
||||
|
||||
class AlwaysAllocateScope {
|
||||
public:
|
||||
AlwaysAllocateScope() {
|
||||
// We shouldn't hit any nested scopes, because that requires
|
||||
// non-handle code to call handle code. The code still works but
|
||||
// performance will degrade, so we want to catch this situation
|
||||
// in debug mode.
|
||||
ASSERT(HEAP->always_allocate_scope_depth_ == 0);
|
||||
HEAP->always_allocate_scope_depth_++;
|
||||
}
|
||||
|
||||
~AlwaysAllocateScope() {
|
||||
HEAP->always_allocate_scope_depth_--;
|
||||
ASSERT(HEAP->always_allocate_scope_depth_ == 0);
|
||||
}
|
||||
inline AlwaysAllocateScope();
|
||||
inline ~AlwaysAllocateScope();
|
||||
};
|
||||
|
||||
|
||||
class LinearAllocationScope {
|
||||
public:
|
||||
LinearAllocationScope() {
|
||||
HEAP->linear_allocation_scope_depth_++;
|
||||
}
|
||||
|
||||
~LinearAllocationScope() {
|
||||
HEAP->linear_allocation_scope_depth_--;
|
||||
ASSERT(HEAP->linear_allocation_scope_depth_ >= 0);
|
||||
}
|
||||
inline LinearAllocationScope();
|
||||
inline ~LinearAllocationScope();
|
||||
};
|
||||
|
||||
|
||||
@ -2032,15 +2062,7 @@ class LinearAllocationScope {
|
||||
// objects in a heap space but above the allocation pointer.
|
||||
class VerifyPointersVisitor: public ObjectVisitor {
|
||||
public:
|
||||
void VisitPointers(Object** start, Object** end) {
|
||||
for (Object** current = start; current < end; current++) {
|
||||
if ((*current)->IsHeapObject()) {
|
||||
HeapObject* object = HeapObject::cast(*current);
|
||||
ASSERT(HEAP->Contains(object));
|
||||
ASSERT(object->map()->IsMap());
|
||||
}
|
||||
}
|
||||
}
|
||||
inline void VisitPointers(Object** start, Object** end);
|
||||
};
|
||||
#endif
|
||||
|
||||
@ -2266,6 +2288,18 @@ class DescriptorLookupCache {
|
||||
};
|
||||
|
||||
|
||||
#ifdef DEBUG
|
||||
class DisallowAllocationFailure {
|
||||
public:
|
||||
inline DisallowAllocationFailure();
|
||||
inline ~DisallowAllocationFailure();
|
||||
|
||||
private:
|
||||
bool old_state_;
|
||||
};
|
||||
#endif
|
||||
|
||||
|
||||
// A helper class to document/test C++ scopes where we do not
|
||||
// expect a GC. Usage:
|
||||
//
|
||||
@ -2273,65 +2307,28 @@ class DescriptorLookupCache {
|
||||
// { AssertNoAllocation nogc;
|
||||
// ...
|
||||
// }
|
||||
class AssertNoAllocation {
|
||||
public:
|
||||
inline AssertNoAllocation();
|
||||
inline ~AssertNoAllocation();
|
||||
|
||||
#ifdef DEBUG
|
||||
|
||||
class DisallowAllocationFailure {
|
||||
public:
|
||||
DisallowAllocationFailure() {
|
||||
old_state_ = HEAP->disallow_allocation_failure_;
|
||||
HEAP->disallow_allocation_failure_ = true;
|
||||
}
|
||||
~DisallowAllocationFailure() {
|
||||
HEAP->disallow_allocation_failure_ = old_state_;
|
||||
}
|
||||
private:
|
||||
bool old_state_;
|
||||
};
|
||||
|
||||
class AssertNoAllocation {
|
||||
public:
|
||||
AssertNoAllocation() {
|
||||
old_state_ = HEAP->allow_allocation(false);
|
||||
}
|
||||
|
||||
~AssertNoAllocation() {
|
||||
HEAP->allow_allocation(old_state_);
|
||||
}
|
||||
|
||||
private:
|
||||
bool old_state_;
|
||||
};
|
||||
|
||||
class DisableAssertNoAllocation {
|
||||
public:
|
||||
DisableAssertNoAllocation() {
|
||||
old_state_ = HEAP->allow_allocation(true);
|
||||
}
|
||||
|
||||
~DisableAssertNoAllocation() {
|
||||
HEAP->allow_allocation(old_state_);
|
||||
}
|
||||
|
||||
private:
|
||||
bool old_state_;
|
||||
};
|
||||
|
||||
#else // ndef DEBUG
|
||||
|
||||
class AssertNoAllocation {
|
||||
public:
|
||||
AssertNoAllocation() { }
|
||||
~AssertNoAllocation() { }
|
||||
};
|
||||
|
||||
class DisableAssertNoAllocation {
|
||||
public:
|
||||
DisableAssertNoAllocation() { }
|
||||
~DisableAssertNoAllocation() { }
|
||||
};
|
||||
|
||||
#endif
|
||||
};
|
||||
|
||||
|
||||
class DisableAssertNoAllocation {
|
||||
public:
|
||||
inline DisableAssertNoAllocation();
|
||||
inline ~DisableAssertNoAllocation();
|
||||
|
||||
#ifdef DEBUG
|
||||
private:
|
||||
bool old_state_;
|
||||
#endif
|
||||
};
|
||||
|
||||
// GCTracer collects and prints ONE line after each garbage collector
|
||||
// invocation IFF --trace_gc is used.
|
||||
@ -2373,7 +2370,9 @@ class GCTracer BASE_EMBEDDED {
|
||||
double start_time_;
|
||||
};
|
||||
|
||||
explicit GCTracer(Heap* heap);
|
||||
explicit GCTracer(Heap* heap,
|
||||
const char* gc_reason,
|
||||
const char* collector_reason);
|
||||
~GCTracer();
|
||||
|
||||
// Sets the collector.
|
||||
@ -2394,13 +2393,19 @@ class GCTracer BASE_EMBEDDED {
|
||||
const char* CollectorString();
|
||||
|
||||
// Returns size of object in heap (in MB).
|
||||
double SizeOfHeapObjects() {
|
||||
return (static_cast<double>(HEAP->SizeOfObjects())) / MB;
|
||||
}
|
||||
inline double SizeOfHeapObjects();
|
||||
|
||||
double start_time_; // Timestamp set in the constructor.
|
||||
intptr_t start_size_; // Size of objects in heap set in constructor.
|
||||
GarbageCollector collector_; // Type of collector.
|
||||
// Timestamp set in the constructor.
|
||||
double start_time_;
|
||||
|
||||
// Size of objects in heap set in constructor.
|
||||
intptr_t start_object_size_;
|
||||
|
||||
// Size of memory allocated from OS set in constructor.
|
||||
intptr_t start_memory_size_;
|
||||
|
||||
// Type of collector.
|
||||
GarbageCollector collector_;
|
||||
|
||||
// A count (including this one, e.g. the first collection is 1) of the
|
||||
// number of garbage collections.
|
||||
@ -2435,6 +2440,9 @@ class GCTracer BASE_EMBEDDED {
|
||||
double steps_took_since_last_gc_;
|
||||
|
||||
Heap* heap_;
|
||||
|
||||
const char* gc_reason_;
|
||||
const char* collector_reason_;
|
||||
};
|
||||
|
||||
|
||||
@ -2646,6 +2654,4 @@ class PathTracer : public ObjectVisitor {
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
||||
#undef HEAP
|
||||
|
||||
#endif // V8_HEAP_H_
|
||||
|
10
deps/v8/src/hydrogen-instructions.cc
vendored
@ -67,6 +67,14 @@ const char* Representation::Mnemonic() const {
|
||||
}
|
||||
|
||||
|
||||
int HValue::LoopWeight() const {
|
||||
const int w = FLAG_loop_weight;
|
||||
static const int weights[] = { 1, w, w*w, w*w*w, w*w*w*w };
|
||||
return weights[Min(block()->LoopNestingDepth(),
|
||||
static_cast<int>(ARRAY_SIZE(weights)-1))];
|
||||
}
|
||||
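// Standalone sketch of the weighting above: a use that sits `depth` loops
// deep counts FLAG_loop_weight^depth times (capped at a nesting depth of 4)
// when representation votes are tallied for a phi. Plain ints stand in for
// HValue and the flag; assumes loop_nesting_depth >= 0.
static int SketchLoopWeight(int loop_nesting_depth, int flag_loop_weight) {
  const int w = flag_loop_weight;
  const int weights[] = { 1, w, w * w, w * w * w, w * w * w * w };
  const int max_index =
      static_cast<int>(sizeof(weights) / sizeof(weights[0])) - 1;
  const int index =
      loop_nesting_depth < max_index ? loop_nesting_depth : max_index;
  return weights[index];
}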
|
||||
|
||||
void HValue::AssumeRepresentation(Representation r) {
|
||||
if (CheckFlag(kFlexibleRepresentation)) {
|
||||
ChangeRepresentation(r);
|
||||
@ -1139,7 +1147,7 @@ void HPhi::InitRealUses(int phi_id) {
|
||||
HValue* value = it.value();
|
||||
if (!value->IsPhi()) {
|
||||
Representation rep = value->RequiredInputRepresentation(it.index());
|
||||
++non_phi_uses_[rep.kind()];
|
||||
non_phi_uses_[rep.kind()] += value->LoopWeight();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
1
deps/v8/src/hydrogen-instructions.h
vendored
@ -569,6 +569,7 @@ class HValue: public ZoneObject {
|
||||
|
||||
HBasicBlock* block() const { return block_; }
|
||||
void SetBlock(HBasicBlock* block);
|
||||
int LoopWeight() const;
|
||||
|
||||
int id() const { return id_; }
|
||||
void set_id(int id) { id_ = id; }
|
||||
|
33
deps/v8/src/hydrogen.cc
vendored
@ -625,25 +625,23 @@ HGraph::HGraph(CompilationInfo* info)
|
||||
|
||||
Handle<Code> HGraph::Compile(CompilationInfo* info) {
|
||||
int values = GetMaximumValueID();
|
||||
if (values > LAllocator::max_initial_value_ids()) {
|
||||
if (values > LUnallocated::kMaxVirtualRegisters) {
|
||||
if (FLAG_trace_bailout) {
|
||||
SmartArrayPointer<char> name(
|
||||
info->shared_info()->DebugName()->ToCString());
|
||||
PrintF("Function @\"%s\" is too big.\n", *name);
|
||||
PrintF("Not enough virtual registers for (values).\n");
|
||||
}
|
||||
return Handle<Code>::null();
|
||||
}
|
||||
|
||||
LAllocator allocator(values, this);
|
||||
LChunkBuilder builder(info, this, &allocator);
|
||||
LChunk* chunk = builder.Build();
|
||||
if (chunk == NULL) return Handle<Code>::null();
|
||||
|
||||
if (!FLAG_alloc_lithium) return Handle<Code>::null();
|
||||
|
||||
allocator.Allocate(chunk);
|
||||
|
||||
if (!FLAG_use_lithium) return Handle<Code>::null();
|
||||
if (!allocator.Allocate(chunk)) {
|
||||
if (FLAG_trace_bailout) {
|
||||
PrintF("Not enough virtual registers (regalloc).\n");
|
||||
}
|
||||
return Handle<Code>::null();
|
||||
}
|
||||
|
||||
MacroAssembler assembler(info->isolate(), NULL, 0);
|
||||
LCodeGen generator(chunk, &assembler, info);
|
||||
@ -1672,7 +1670,7 @@ Representation HInferRepresentation::TryChange(HValue* value) {
|
||||
Representation rep = use->RequiredInputRepresentation(it.index());
|
||||
if (rep.IsNone()) continue;
|
||||
if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
|
||||
++use_count[rep.kind()];
|
||||
use_count[rep.kind()] += use->LoopWeight();
|
||||
}
|
||||
int tagged_count = use_count[Representation::kTagged];
|
||||
int double_count = use_count[Representation::kDouble];
|
||||
@ -4798,7 +4796,8 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
|
||||
|
||||
// Do a quick check on source code length to avoid parsing large
|
||||
// inlining candidates.
|
||||
if (FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize) {
|
||||
if ((FLAG_limit_inlining && target->shared()->SourceSize() > kMaxSourceSize)
|
||||
|| target->shared()->SourceSize() > kUnlimitedMaxSourceSize) {
|
||||
TraceInline(target, caller, "target text too big");
|
||||
return false;
|
||||
}
|
||||
@ -4846,7 +4845,8 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
|
||||
}
|
||||
|
||||
// We don't want to add more than a certain number of nodes from inlining.
|
||||
if (FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) {
|
||||
if ((FLAG_limit_inlining && inlined_count_ > kMaxInlinedNodes) ||
|
||||
inlined_count_ > kUnlimitedMaxInlinedNodes) {
|
||||
TraceInline(target, caller, "cumulative AST node limit reached");
|
||||
return false;
|
||||
}
|
||||
@ -4874,7 +4874,8 @@ bool HGraphBuilder::TryInline(Call* expr, bool drop_extra) {
|
||||
|
||||
// Count the number of AST nodes added by inlining this call.
|
||||
int nodes_added = AstNode::Count() - count_before;
|
||||
if (FLAG_limit_inlining && nodes_added > kMaxInlinedSize) {
|
||||
if ((FLAG_limit_inlining && nodes_added > kMaxInlinedSize) ||
|
||||
nodes_added > kUnlimitedMaxInlinedSize) {
|
||||
TraceInline(target, caller, "target AST is too large");
|
||||
return false;
|
||||
}
|
||||
@ -7326,7 +7327,9 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type) {
|
||||
}
|
||||
LOperand* op = range->FirstHint();
|
||||
int hint_index = -1;
|
||||
if (op != NULL && op->IsUnallocated()) hint_index = op->VirtualRegister();
|
||||
if (op != NULL && op->IsUnallocated()) {
|
||||
hint_index = LUnallocated::cast(op)->virtual_register();
|
||||
}
|
||||
trace_.Add(" %d %d", parent_index, hint_index);
|
||||
UseInterval* cur_interval = range->first_interval();
|
||||
while (cur_interval != NULL && range->Covers(cur_interval->start())) {
|
||||
|
6
deps/v8/src/hydrogen.h
vendored
@ -773,6 +773,12 @@ class HGraphBuilder: public AstVisitor {
|
||||
static const int kMaxInlinedSize = 196;
|
||||
static const int kMaxSourceSize = 600;
|
||||
|
||||
// Even in the 'unlimited' case we have to have some limit in order not to
|
||||
// overflow the stack.
|
||||
static const int kUnlimitedMaxInlinedNodes = 1000;
|
||||
static const int kUnlimitedMaxInlinedSize = 1000;
|
||||
static const int kUnlimitedMaxSourceSize = 600;
|
||||
|
||||
// Simple accessors.
|
||||
void set_function_state(FunctionState* state) { function_state_ = state; }
|
||||
|
||||
|
2
deps/v8/src/ia32/assembler-ia32.h
vendored
@ -621,8 +621,6 @@ class Assembler : public AssemblerBase {
|
||||
// The debug break slot must be able to contain a call instruction.
|
||||
static const int kDebugBreakSlotLength = kCallInstructionLength;
|
||||
|
||||
// One byte opcode for test eax,0xXXXXXXXX.
|
||||
static const byte kTestEaxByte = 0xA9;
|
||||
// One byte opcode for test al, 0xXX.
|
||||
static const byte kTestAlByte = 0xA8;
|
||||
// One byte opcode for nop.
|
||||
|
64
deps/v8/src/ia32/builtins-ia32.cc
vendored
@ -74,50 +74,14 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
|
||||
}
|
||||
|
||||
|
||||
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
|
||||
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
bool is_api_function,
|
||||
bool count_constructions) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- eax: number of arguments
|
||||
// -- edi: constructor function
|
||||
// -----------------------------------
|
||||
|
||||
Label slow, non_function_call;
|
||||
// Check that function is not a smi.
|
||||
__ JumpIfSmi(edi, &non_function_call);
|
||||
// Check that function is a JSFunction.
|
||||
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
|
||||
__ j(not_equal, &slow);
|
||||
|
||||
// Jump to the function-specific construct stub.
|
||||
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
|
||||
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
|
||||
__ jmp(ebx);
|
||||
|
||||
// edi: called object
|
||||
// eax: number of arguments
|
||||
// ecx: object map
|
||||
Label do_call;
|
||||
__ bind(&slow);
|
||||
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
|
||||
__ j(not_equal, &non_function_call);
|
||||
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
|
||||
__ jmp(&do_call);
|
||||
|
||||
__ bind(&non_function_call);
|
||||
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
|
||||
__ bind(&do_call);
|
||||
// Set expected number of arguments to zero (not changing eax).
|
||||
__ Set(ebx, Immediate(0));
|
||||
Handle<Code> arguments_adaptor =
|
||||
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
|
||||
__ SetCallKind(ecx, CALL_AS_METHOD);
|
||||
__ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
|
||||
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
bool is_api_function,
|
||||
bool count_constructions) {
|
||||
// Should never count constructions for api objects.
|
||||
ASSERT(!is_api_function || !count_constructions);
|
||||
|
||||
@ -454,8 +418,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
|
||||
|
||||
// Invoke the code.
|
||||
if (is_construct) {
|
||||
__ call(masm->isolate()->builtins()->JSConstructCall(),
|
||||
RelocInfo::CODE_TARGET);
|
||||
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
|
||||
__ CallStub(&stub);
|
||||
} else {
|
||||
ParameterCount actual(eax);
|
||||
__ InvokeFunction(edi, actual, CALL_FUNCTION,
|
||||
@ -929,9 +893,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
|
||||
Label* gc_required) {
|
||||
const int initial_capacity = JSArray::kPreallocatedArrayElements;
|
||||
STATIC_ASSERT(initial_capacity >= 0);
|
||||
// Load the initial map from the array function.
|
||||
__ mov(scratch1, FieldOperand(array_function,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
|
||||
__ LoadInitialArrayMap(array_function, scratch2, scratch1);
|
||||
|
||||
// Allocate the JSArray object together with space for a fixed array with the
|
||||
// requested elements.
|
||||
@ -1034,10 +997,7 @@ static void AllocateJSArray(MacroAssembler* masm,
|
||||
ASSERT(!fill_with_hole || array_size.is(ecx)); // rep stos count
|
||||
ASSERT(!fill_with_hole || !result.is(eax)); // result is never eax
|
||||
|
||||
// Load the initial map from the array function.
|
||||
__ mov(elements_array,
|
||||
FieldOperand(array_function,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
__ LoadInitialArrayMap(array_function, scratch, elements_array);
|
||||
|
||||
// Allocate the JSArray object together with space for a FixedArray with the
|
||||
// requested elements.
|
||||
@ -1321,7 +1281,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
|
||||
__ LoadGlobalFunction(Context::INTERNAL_ARRAY_FUNCTION_INDEX, edi);
|
||||
|
||||
if (FLAG_debug_code) {
|
||||
// Initial map for the builtin InternalArray function shoud be a map.
|
||||
// Initial map for the builtin InternalArray function should be a map.
|
||||
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
|
||||
// Will both indicate a NULL and a Smi.
|
||||
__ test(ebx, Immediate(kSmiTagMask));
|
||||
@ -1334,8 +1294,8 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
|
||||
// function.
|
||||
ArrayNativeCode(masm, false, &generic_array_code);
|
||||
|
||||
// Jump to the generic array code in case the specialized code cannot handle
|
||||
// the construction.
|
||||
// Jump to the generic internal array code in case the specialized code cannot
|
||||
// handle the construction.
|
||||
__ bind(&generic_array_code);
|
||||
Handle<Code> array_code =
|
||||
masm->isolate()->builtins()->InternalArrayCodeGeneric();
|
||||
@ -1355,7 +1315,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
|
||||
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, edi);
|
||||
|
||||
if (FLAG_debug_code) {
|
||||
// Initial map for the builtin Array function shoud be a map.
|
||||
// Initial map for the builtin Array function should be a map.
|
||||
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
|
||||
// Will both indicate a NULL and a Smi.
|
||||
__ test(ebx, Immediate(kSmiTagMask));
|
||||
|
140
deps/v8/src/ia32/code-stubs-ia32.cc
vendored
@ -4573,30 +4573,46 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void CallFunctionStub::FinishCode(Handle<Code> code) {
|
||||
code->set_has_function_cache(RecordCallTarget());
|
||||
}
|
||||
static void GenerateRecordCallTarget(MacroAssembler* masm) {
|
||||
// Cache the called function in a global property cell. Cache states
|
||||
// are uninitialized, monomorphic (indicated by a JSFunction), and
|
||||
// megamorphic.
|
||||
// ebx : cache cell for call target
|
||||
// edi : the function to call
|
||||
Isolate* isolate = masm->isolate();
|
||||
Label initialize, done;
|
||||
|
||||
// Load the cache state into ecx.
|
||||
__ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
|
||||
|
||||
void CallFunctionStub::Clear(Heap* heap, Address address) {
|
||||
ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
|
||||
// 1 ~ size of the test eax opcode.
|
||||
Object* cell = Memory::Object_at(address + kPointerSize + 1);
|
||||
// Low-level because clearing happens during GC.
|
||||
reinterpret_cast<JSGlobalPropertyCell*>(cell)->set_value(
|
||||
RawUninitializedSentinel(heap));
|
||||
}
|
||||
// A monomorphic cache hit or an already megamorphic state: invoke the
|
||||
// function without changing the state.
|
||||
__ cmp(ecx, edi);
|
||||
__ j(equal, &done, Label::kNear);
|
||||
__ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
|
||||
__ j(equal, &done, Label::kNear);
|
||||
|
||||
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
|
||||
// megamorphic.
|
||||
__ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
|
||||
__ j(equal, &initialize, Label::kNear);
|
||||
// MegamorphicSentinel is an immortal immovable object (undefined) so no
|
||||
// write-barrier is needed.
|
||||
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
|
||||
Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
|
||||
__ jmp(&done, Label::kNear);
|
||||
|
||||
Object* CallFunctionStub::GetCachedValue(Address address) {
|
||||
ASSERT(Memory::uint8_at(address + kPointerSize) == Assembler::kTestEaxByte);
|
||||
// 1 ~ size of the test eax opcode.
|
||||
Object* cell = Memory::Object_at(address + kPointerSize + 1);
|
||||
return JSGlobalPropertyCell::cast(cell)->value();
|
||||
// An uninitialized cache is patched with the function.
|
||||
__ bind(&initialize);
|
||||
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
|
||||
// No need for a write barrier here - cells are rescanned.
|
||||
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
// ebx : cache cell for call target
|
||||
// edi : the function to call
|
||||
Isolate* isolate = masm->isolate();
|
||||
Label slow, non_function;
|
||||
@ -4613,9 +4629,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
__ cmp(eax, isolate->factory()->the_hole_value());
|
||||
__ j(not_equal, &receiver_ok, Label::kNear);
|
||||
// Patch the receiver on the stack with the global receiver object.
|
||||
__ mov(ebx, GlobalObjectOperand());
|
||||
__ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
|
||||
__ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
|
||||
__ mov(ecx, GlobalObjectOperand());
|
||||
__ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
|
||||
__ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
|
||||
__ bind(&receiver_ok);
|
||||
}
|
||||
|
||||
@ -4626,38 +4642,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
__ j(not_equal, &slow);
|
||||
|
||||
if (RecordCallTarget()) {
|
||||
// Cache the called function in a global property cell in the
|
||||
// instruction stream after the call. Cache states are uninitialized,
|
||||
// monomorphic (indicated by a JSFunction), and megamorphic.
|
||||
Label initialize, call;
|
||||
// Load the cache cell address into ebx and the cache state into ecx.
|
||||
__ mov(ebx, Operand(esp, 0)); // Return address.
|
||||
__ mov(ebx, Operand(ebx, 1)); // 1 ~ sizeof 'test eax' opcode in bytes.
|
||||
__ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
|
||||
|
||||
// A monomorphic cache hit or an already megamorphic state: invoke the
|
||||
// function without changing the state.
|
||||
__ cmp(ecx, edi);
|
||||
__ j(equal, &call, Label::kNear);
|
||||
__ cmp(ecx, Immediate(MegamorphicSentinel(isolate)));
|
||||
__ j(equal, &call, Label::kNear);
|
||||
|
||||
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
|
||||
// megamorphic.
|
||||
__ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
|
||||
__ j(equal, &initialize, Label::kNear);
|
||||
// MegamorphicSentinel is an immortal immovable object (undefined) so no
|
||||
// write-barrier is needed.
|
||||
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
|
||||
Immediate(MegamorphicSentinel(isolate)));
|
||||
__ jmp(&call, Label::kNear);
|
||||
|
||||
// An uninitialized cache is patched with the function.
|
||||
__ bind(&initialize);
|
||||
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
|
||||
// No need for a write barrier here - cells are rescanned.
|
||||
|
||||
__ bind(&call);
|
||||
GenerateRecordCallTarget(masm);
|
||||
}
|
||||
|
||||
// Fast-case: Just invoke the function.
|
||||
@ -4684,13 +4669,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
__ bind(&slow);
|
||||
if (RecordCallTarget()) {
|
||||
// If there is a call target cache, mark it megamorphic in the
|
||||
// non-function case.
|
||||
__ mov(ebx, Operand(esp, 0));
|
||||
__ mov(ebx, Operand(ebx, 1));
|
||||
// MegamorphicSentinel is an immortal immovable object (undefined) so no
|
||||
// write barrier is needed.
|
||||
// non-function case. MegamorphicSentinel is an immortal immovable
|
||||
// object (undefined) so no write barrier is needed.
|
||||
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
|
||||
Immediate(MegamorphicSentinel(isolate)));
|
||||
Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
|
||||
}
|
||||
// Check for function proxy.
|
||||
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
|
||||
@ -4720,6 +4702,50 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void CallConstructStub::Generate(MacroAssembler* masm) {
|
||||
// eax : number of arguments
|
||||
// ebx : cache cell for call target
|
||||
// edi : constructor function
|
||||
Label slow, non_function_call;
|
||||
|
||||
// Check that function is not a smi.
|
||||
__ JumpIfSmi(edi, &non_function_call);
|
||||
// Check that function is a JSFunction.
|
||||
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
|
||||
__ j(not_equal, &slow);
|
||||
|
||||
if (RecordCallTarget()) {
|
||||
GenerateRecordCallTarget(masm);
|
||||
}
|
||||
|
||||
// Jump to the function-specific construct stub.
|
||||
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
|
||||
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
|
||||
__ jmp(ebx);
|
||||
|
||||
// edi: called object
|
||||
// eax: number of arguments
|
||||
// ecx: object map
|
||||
Label do_call;
|
||||
__ bind(&slow);
|
||||
__ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
|
||||
__ j(not_equal, &non_function_call);
|
||||
__ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
|
||||
__ jmp(&do_call);
|
||||
|
||||
__ bind(&non_function_call);
|
||||
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
|
||||
__ bind(&do_call);
|
||||
// Set expected number of arguments to zero (not changing eax).
|
||||
__ Set(ebx, Immediate(0));
|
||||
Handle<Code> arguments_adaptor =
|
||||
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
|
||||
__ SetCallKind(ecx, CALL_AS_METHOD);
|
||||
__ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
|
||||
bool CEntryStub::NeedsImmovableCode() {
|
||||
return false;
|
||||
}
|
||||
|
52
deps/v8/src/ia32/debug-ia32.cc
vendored
@ -222,8 +222,36 @@ void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
}


void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-ia32.cc).
// ----------- S t a t e -------------
// -- eax: return value
// -----------------------------------
Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
}


void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
// -- edi: function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
}


void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-ia32.cc).
// ----------- S t a t e -------------
// -- ebx: cache cell for call target
// -- edi: function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), 0, false);
}


void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-ia32.cc).
// eax is the actual number of arguments not encoded as a smi see comment
// above IC call.
// ----------- S t a t e -------------
@ -235,21 +263,17 @@ void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
}


void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// Register state just before return from JS function (from codegen-ia32.cc).
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallConstructStub (from code-stubs-ia32.cc).
// eax is the actual number of arguments not encoded as a smi see comment
// above IC call.
// ----------- S t a t e -------------
// -- eax: return value
// -- eax: number of arguments (not smi)
// -- ebx: cache cell for call target
// -- edi: constructor function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, eax.bit(), 0, true);
}


void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
// Register state for stub CallFunction (from CallFunctionStub in ic-ia32.cc).
// ----------- S t a t e -------------
// -- edi: function
// -----------------------------------
Generate_DebugBreakCallHelper(masm, edi.bit(), 0, false);
// The number of arguments in eax is not smi encoded.
Generate_DebugBreakCallHelper(masm, ebx.bit() | edi.bit(), eax.bit(), false);
}

43
deps/v8/src/ia32/full-codegen-ia32.cc
vendored
@ -2130,27 +2130,19 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
|
||||
SetSourcePosition(expr->position());
|
||||
|
||||
// Record call targets in unoptimized code, but not in the snapshot.
|
||||
bool record_call_target = !Serializer::enabled();
|
||||
if (record_call_target) {
|
||||
if (!Serializer::enabled()) {
|
||||
flags = static_cast<CallFunctionFlags>(flags | RECORD_CALL_TARGET);
|
||||
Handle<Object> uninitialized =
|
||||
TypeFeedbackCells::UninitializedSentinel(isolate());
|
||||
Handle<JSGlobalPropertyCell> cell =
|
||||
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
|
||||
RecordTypeFeedbackCell(expr->id(), cell);
|
||||
__ mov(ebx, cell);
|
||||
}
|
||||
|
||||
CallFunctionStub stub(arg_count, flags);
|
||||
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
|
||||
__ CallStub(&stub, expr->id());
|
||||
if (record_call_target) {
|
||||
// There is a one element cache in the instruction stream.
|
||||
#ifdef DEBUG
|
||||
int return_site_offset = masm()->pc_offset();
|
||||
#endif
|
||||
Handle<Object> uninitialized =
|
||||
CallFunctionStub::UninitializedSentinel(isolate());
|
||||
Handle<JSGlobalPropertyCell> cell =
|
||||
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
|
||||
__ test(eax, Immediate(cell));
|
||||
// Patching code in the stub assumes the opcode is 1 byte and there is
|
||||
// word for a pointer in the operand.
|
||||
ASSERT(masm()->pc_offset() - return_site_offset >= 1 + kPointerSize);
|
||||
}
|
||||
|
||||
RecordJSReturnSite(expr);
|
||||
// Restore context register.
|
||||
@ -2325,9 +2317,22 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
|
||||
__ SafeSet(eax, Immediate(arg_count));
|
||||
__ mov(edi, Operand(esp, arg_count * kPointerSize));
|
||||
|
||||
Handle<Code> construct_builtin =
|
||||
isolate()->builtins()->JSConstructCall();
|
||||
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
|
||||
// Record call targets in unoptimized code, but not in the snapshot.
|
||||
CallFunctionFlags flags;
|
||||
if (!Serializer::enabled()) {
|
||||
flags = RECORD_CALL_TARGET;
|
||||
Handle<Object> uninitialized =
|
||||
TypeFeedbackCells::UninitializedSentinel(isolate());
|
||||
Handle<JSGlobalPropertyCell> cell =
|
||||
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
|
||||
RecordTypeFeedbackCell(expr->id(), cell);
|
||||
__ mov(ebx, cell);
|
||||
} else {
|
||||
flags = NO_CALL_FUNCTION_FLAGS;
|
||||
}
|
||||
|
||||
CallConstructStub stub(flags);
|
||||
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
|
||||
context()->Plug(eax);
|
||||
}
|
||||
|
||||
|
58
deps/v8/src/ia32/ic-ia32.cc
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -765,7 +765,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
|
||||
// -----------------------------------
|
||||
Label slow, fast_object_with_map_check, fast_object_without_map_check;
|
||||
Label fast_double_with_map_check, fast_double_without_map_check;
|
||||
Label check_if_double_array, array, extra;
|
||||
Label check_if_double_array, array, extra, transition_smi_elements;
|
||||
Label finish_object_store, non_double_value, transition_double_elements;
|
||||
|
||||
// Check that the object isn't a smi.
|
||||
__ JumpIfSmi(edx, &slow);
|
||||
@ -862,11 +863,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
|
||||
__ ret(0);
|
||||
|
||||
__ bind(&non_smi_value);
|
||||
// Escape to slow case when writing non-smi into smi-only array.
|
||||
// Escape to elements kind transition case.
|
||||
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
|
||||
__ CheckFastObjectElements(edi, &slow, Label::kNear);
|
||||
__ CheckFastObjectElements(edi, &transition_smi_elements);
|
||||
|
||||
// Fast elements array, store the value to the elements backing store.
|
||||
__ bind(&finish_object_store);
|
||||
__ mov(CodeGenerator::FixedArrayElementOperand(ebx, ecx), eax);
|
||||
// Update write barrier for the elements array address.
|
||||
__ mov(edx, eax); // Preserve the value which is returned.
|
||||
@ -882,8 +884,54 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
|
||||
__ bind(&fast_double_without_map_check);
|
||||
// If the value is a number, store it as a double in the FastDoubleElements
|
||||
// array.
|
||||
__ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0, &slow, false);
|
||||
__ StoreNumberToDoubleElements(eax, ebx, ecx, edx, xmm0,
|
||||
&transition_double_elements, false);
|
||||
__ ret(0);
|
||||
|
||||
__ bind(&transition_smi_elements);
|
||||
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
|
||||
|
||||
// Transition the array appropriately depending on the value type.
|
||||
__ CheckMap(eax,
|
||||
masm->isolate()->factory()->heap_number_map(),
|
||||
&non_double_value,
|
||||
DONT_DO_SMI_CHECK);
|
||||
|
||||
// Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
|
||||
// FAST_DOUBLE_ELEMENTS and complete the store.
|
||||
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
|
||||
FAST_DOUBLE_ELEMENTS,
|
||||
ebx,
|
||||
edi,
|
||||
&slow);
|
||||
ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
|
||||
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
|
||||
__ jmp(&fast_double_without_map_check);
|
||||
|
||||
__ bind(&non_double_value);
|
||||
// Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
|
||||
__ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
|
||||
FAST_ELEMENTS,
|
||||
ebx,
|
||||
edi,
|
||||
&slow);
|
||||
ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
|
||||
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
|
||||
__ jmp(&finish_object_store);
|
||||
|
||||
__ bind(&transition_double_elements);
|
||||
// Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
|
||||
// HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
|
||||
// transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
|
||||
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
|
||||
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
|
||||
FAST_ELEMENTS,
|
||||
ebx,
|
||||
edi,
|
||||
&slow);
|
||||
ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
|
||||
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
|
||||
__ jmp(&finish_object_store);
|
||||
}
|
||||
|
||||
|
||||
|
4
deps/v8/src/ia32/lithium-codegen-ia32.cc
vendored
@ -3219,9 +3219,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));

Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}

24
deps/v8/src/ia32/lithium-ia32.cc
vendored
@ -580,11 +580,6 @@ void LChunkBuilder::Abort(const char* format, ...) {
|
||||
}
|
||||
|
||||
|
||||
LRegister* LChunkBuilder::ToOperand(Register reg) {
|
||||
return LRegister::Create(Register::ToAllocationIndex(reg));
|
||||
}
|
||||
|
||||
|
||||
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
|
||||
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
|
||||
Register::ToAllocationIndex(reg));
|
||||
@ -675,7 +670,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
|
||||
HInstruction* instr = HInstruction::cast(value);
|
||||
VisitInstruction(instr);
|
||||
}
|
||||
allocator_->RecordUse(value, operand);
|
||||
operand->set_virtual_register(value->id());
|
||||
return operand;
|
||||
}
|
||||
|
||||
@ -683,18 +678,12 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
|
||||
LUnallocated* result) {
|
||||
allocator_->RecordDefinition(current_instruction_, result);
|
||||
result->set_virtual_register(current_instruction_->id());
|
||||
instr->set_result(result);
|
||||
return instr;
|
||||
}
|
||||
|
||||
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
|
||||
return Define(instr, new(zone()) LUnallocated(LUnallocated::NONE));
|
||||
}
|
||||
|
||||
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::DefineAsRegister(
|
||||
LTemplateInstruction<1, I, T>* instr) {
|
||||
@ -807,21 +796,24 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
|
||||
LUnallocated* LChunkBuilder::TempRegister() {
|
||||
LUnallocated* operand =
|
||||
new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
|
||||
allocator_->RecordTemporary(operand);
|
||||
operand->set_virtual_register(allocator_->GetVirtualRegister());
|
||||
if (!allocator_->AllocationOk()) {
|
||||
Abort("Not enough virtual registers (temps).");
|
||||
}
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(Register reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
allocator_->RecordTemporary(operand);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
allocator_->RecordTemporary(operand);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
3
deps/v8/src/ia32/lithium-ia32.h
vendored
@ -2273,7 +2273,6 @@ class LChunkBuilder BASE_EMBEDDED {
void Abort(const char* format, ...);

// Methods for getting operands for Use / Define / Temp.
LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);

@ -2323,8 +2322,6 @@ class LChunkBuilder BASE_EMBEDDED {
template<int I, int T>
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result);
template<int I, int T>
LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
40
deps/v8/src/ia32/macro-assembler-ia32.cc
vendored
@ -2168,6 +2168,46 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
// Load the global or builtins object from the current context.
mov(scratch, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
mov(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));

// Check that the function's map is the same as the expected cached map.
int expected_index =
Context::GetContextMapIndexFromElementsKind(expected_kind);
cmp(map_in_out, Operand(scratch, Context::SlotOffset(expected_index)));
j(not_equal, no_map_match);

// Use the transitioned cached map.
int trans_index =
Context::GetContextMapIndexFromElementsKind(transitioned_kind);
mov(map_in_out, Operand(scratch, Context::SlotOffset(trans_index)));
}


void MacroAssembler::LoadInitialArrayMap(
Register function_in, Register scratch, Register map_out) {
ASSERT(!function_in.is(map_out));
Label done;
mov(map_out, FieldOperand(function_in,
JSFunction::kPrototypeOrInitialMapOffset));
if (!FLAG_smi_only_arrays) {
LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
FAST_ELEMENTS,
map_out,
scratch,
&done);
}
bind(&done);
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
16
deps/v8/src/ia32/macro-assembler-ia32.h
vendored
@ -221,6 +221,22 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);

// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the global context if the map in register
// map_in_out is the cached Array map in the global context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);

// Load the initial map for new Arrays from a JSFunction.
void LoadInitialArrayMap(Register function_in,
Register scratch,
Register map_out);

// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);

1
deps/v8/src/isolate.cc
vendored
@ -1834,6 +1834,7 @@ bool Isolate::Init(Deserializer* des) {
}

state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
return true;
}

7
deps/v8/src/isolate.h
vendored
@ -1030,6 +1030,10 @@ class Isolate {
context_exit_happened_ = context_exit_happened;
}

double time_millis_since_init() {
return OS::TimeCurrentMillis() - time_millis_at_init_;
}

private:
Isolate();

@ -1200,6 +1204,9 @@ class Isolate {
// that a context was recently exited.
bool context_exit_happened_;

// Time stamp at initialization.
double time_millis_at_init_;

#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized_;
91
deps/v8/src/lithium-allocator.cc
vendored
@ -546,6 +546,7 @@ LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
|
||||
|
||||
LAllocator::LAllocator(int num_values, HGraph* graph)
|
||||
: chunk_(NULL),
|
||||
allocation_ok_(true),
|
||||
live_in_sets_(graph->blocks()->length()),
|
||||
live_ranges_(num_values * 2),
|
||||
fixed_live_ranges_(NULL),
|
||||
@ -697,7 +698,7 @@ LGap* LAllocator::GetLastGap(HBasicBlock* block) {
|
||||
|
||||
HPhi* LAllocator::LookupPhi(LOperand* operand) const {
|
||||
if (!operand->IsUnallocated()) return NULL;
|
||||
int index = operand->VirtualRegister();
|
||||
int index = LUnallocated::cast(operand)->virtual_register();
|
||||
HValue* instr = graph_->LookupValue(index);
|
||||
if (instr != NULL && instr->IsPhi()) {
|
||||
return HPhi::cast(instr);
|
||||
@ -765,7 +766,8 @@ void LAllocator::AddConstraintsGapMove(int index,
|
||||
LMoveOperands cur = move_operands->at(i);
|
||||
LOperand* cur_to = cur.destination();
|
||||
if (cur_to->IsUnallocated()) {
|
||||
if (cur_to->VirtualRegister() == from->VirtualRegister()) {
|
||||
if (LUnallocated::cast(cur_to)->virtual_register() ==
|
||||
LUnallocated::cast(from)->virtual_register()) {
|
||||
move->AddMove(cur.source(), to);
|
||||
return;
|
||||
}
|
||||
@ -786,6 +788,7 @@ void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
|
||||
if (i < end) instr = InstructionAt(i + 1);
|
||||
if (i > start) prev_instr = InstructionAt(i - 1);
|
||||
MeetConstraintsBetween(prev_instr, instr, i);
|
||||
if (!AllocationOk()) return;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -807,11 +810,11 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
|
||||
// Handle fixed output operand.
|
||||
if (first != NULL && first->Output() != NULL) {
|
||||
LUnallocated* first_output = LUnallocated::cast(first->Output());
|
||||
LiveRange* range = LiveRangeFor(first_output->VirtualRegister());
|
||||
LiveRange* range = LiveRangeFor(first_output->virtual_register());
|
||||
bool assigned = false;
|
||||
if (first_output->HasFixedPolicy()) {
|
||||
LUnallocated* output_copy = first_output->CopyUnconstrained();
|
||||
bool is_tagged = HasTaggedValue(first_output->VirtualRegister());
|
||||
bool is_tagged = HasTaggedValue(first_output->virtual_register());
|
||||
AllocateFixed(first_output, gap_index, is_tagged);
|
||||
|
||||
// This value is produced on the stack, we never need to spill it.
|
||||
@ -842,7 +845,7 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
|
||||
LUnallocated* cur_input = LUnallocated::cast(it.Current());
|
||||
if (cur_input->HasFixedPolicy()) {
|
||||
LUnallocated* input_copy = cur_input->CopyUnconstrained();
|
||||
bool is_tagged = HasTaggedValue(cur_input->VirtualRegister());
|
||||
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
|
||||
AllocateFixed(cur_input, gap_index + 1, is_tagged);
|
||||
AddConstraintsGapMove(gap_index, input_copy, cur_input);
|
||||
} else if (cur_input->policy() == LUnallocated::WRITABLE_REGISTER) {
|
||||
@ -851,7 +854,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
|
||||
ASSERT(!cur_input->IsUsedAtStart());
|
||||
|
||||
LUnallocated* input_copy = cur_input->CopyUnconstrained();
|
||||
cur_input->set_virtual_register(next_virtual_register_++);
|
||||
cur_input->set_virtual_register(GetVirtualRegister());
|
||||
if (!AllocationOk()) return;
|
||||
|
||||
if (RequiredRegisterKind(input_copy->virtual_register()) ==
|
||||
DOUBLE_REGISTERS) {
|
||||
@ -869,8 +873,8 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first,
|
||||
LUnallocated* second_output = LUnallocated::cast(second->Output());
|
||||
if (second_output->HasSameAsInputPolicy()) {
|
||||
LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
|
||||
int output_vreg = second_output->VirtualRegister();
|
||||
int input_vreg = cur_input->VirtualRegister();
|
||||
int output_vreg = second_output->virtual_register();
|
||||
int input_vreg = cur_input->virtual_register();
|
||||
|
||||
LUnallocated* input_copy = cur_input->CopyUnconstrained();
|
||||
cur_input->set_virtual_register(second_output->virtual_register());
|
||||
@ -925,9 +929,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
|
||||
}
|
||||
} else {
|
||||
if (to->IsUnallocated()) {
|
||||
if (live->Contains(to->VirtualRegister())) {
|
||||
if (live->Contains(LUnallocated::cast(to)->virtual_register())) {
|
||||
Define(curr_position, to, from);
|
||||
live->Remove(to->VirtualRegister());
|
||||
live->Remove(LUnallocated::cast(to)->virtual_register());
|
||||
} else {
|
||||
cur->Eliminate();
|
||||
continue;
|
||||
@ -938,7 +942,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
|
||||
}
|
||||
Use(block_start_position, curr_position, from, hint);
|
||||
if (from->IsUnallocated()) {
|
||||
live->Add(from->VirtualRegister());
|
||||
live->Add(LUnallocated::cast(from)->virtual_register());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -948,7 +952,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
|
||||
if (instr != NULL) {
|
||||
LOperand* output = instr->Output();
|
||||
if (output != NULL) {
|
||||
if (output->IsUnallocated()) live->Remove(output->VirtualRegister());
|
||||
if (output->IsUnallocated()) {
|
||||
live->Remove(LUnallocated::cast(output)->virtual_register());
|
||||
}
|
||||
Define(curr_position, output, NULL);
|
||||
}
|
||||
|
||||
@ -986,7 +992,9 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
|
||||
}
|
||||
|
||||
Use(block_start_position, use_pos, input, NULL);
|
||||
if (input->IsUnallocated()) live->Add(input->VirtualRegister());
|
||||
if (input->IsUnallocated()) {
|
||||
live->Add(LUnallocated::cast(input)->virtual_register());
|
||||
}
|
||||
}
|
||||
|
||||
for (TempIterator it(instr); !it.Done(); it.Advance()) {
|
||||
@ -1064,18 +1072,22 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
|
||||
}
|
||||
|
||||
|
||||
void LAllocator::Allocate(LChunk* chunk) {
|
||||
bool LAllocator::Allocate(LChunk* chunk) {
|
||||
ASSERT(chunk_ == NULL);
|
||||
chunk_ = chunk;
|
||||
MeetRegisterConstraints();
|
||||
if (!AllocationOk()) return false;
|
||||
ResolvePhis();
|
||||
BuildLiveRanges();
|
||||
AllocateGeneralRegisters();
|
||||
if (!AllocationOk()) return false;
|
||||
AllocateDoubleRegisters();
|
||||
if (!AllocationOk()) return false;
|
||||
PopulatePointerMaps();
|
||||
if (has_osr_entry_) ProcessOsrEntry();
|
||||
ConnectRanges();
|
||||
ResolveControlFlow();
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
@ -1086,6 +1098,7 @@ void LAllocator::MeetRegisterConstraints() {
|
||||
for (int i = 0; i < blocks->length(); ++i) {
|
||||
HBasicBlock* block = blocks->at(i);
|
||||
MeetRegisterConstraints(block);
|
||||
if (!AllocationOk()) return;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1270,7 +1283,8 @@ void LAllocator::BuildLiveRanges() {
|
||||
LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START);
|
||||
for (int j = 0; j < move->move_operands()->length(); ++j) {
|
||||
LOperand* to = move->move_operands()->at(j).destination();
|
||||
if (to->IsUnallocated() && to->VirtualRegister() == phi->id()) {
|
||||
if (to->IsUnallocated() &&
|
||||
LUnallocated::cast(to)->virtual_register() == phi->id()) {
|
||||
hint = move->move_operands()->at(j).source();
|
||||
phi_operand = to;
|
||||
break;
|
||||
@ -1538,6 +1552,7 @@ void LAllocator::AllocateRegisters() {
|
||||
// Do not spill live range eagerly if use position that can benefit from
|
||||
// the register is too close to the start of live range.
|
||||
SpillBetween(current, current->Start(), pos->pos());
|
||||
if (!AllocationOk()) return;
|
||||
ASSERT(UnhandledIsSorted());
|
||||
continue;
|
||||
}
|
||||
@ -1568,9 +1583,10 @@ void LAllocator::AllocateRegisters() {
|
||||
ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
|
||||
|
||||
bool result = TryAllocateFreeReg(current);
|
||||
if (!result) {
|
||||
AllocateBlockedReg(current);
|
||||
}
|
||||
if (!AllocationOk()) return;
|
||||
|
||||
if (!result) AllocateBlockedReg(current);
|
||||
if (!AllocationOk()) return;
|
||||
|
||||
if (current->HasRegisterAssigned()) {
|
||||
AddToActive(current);
|
||||
@ -1624,29 +1640,6 @@ RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
|
||||
}
|
||||
|
||||
|
||||
void LAllocator::RecordDefinition(HInstruction* instr, LUnallocated* operand) {
|
||||
operand->set_virtual_register(instr->id());
|
||||
}
|
||||
|
||||
|
||||
void LAllocator::RecordTemporary(LUnallocated* operand) {
|
||||
ASSERT(next_virtual_register_ < LUnallocated::kMaxVirtualRegisters);
|
||||
if (!operand->HasFixedPolicy()) {
|
||||
operand->set_virtual_register(next_virtual_register_++);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LAllocator::RecordUse(HValue* value, LUnallocated* operand) {
|
||||
operand->set_virtual_register(value->id());
|
||||
}
|
||||
|
||||
|
||||
int LAllocator::max_initial_value_ids() {
|
||||
return LUnallocated::kMaxVirtualRegisters / 16;
|
||||
}
|
||||
|
||||
|
||||
void LAllocator::AddToActive(LiveRange* range) {
|
||||
TraceAlloc("Add live range %d to active\n", range->id());
|
||||
active_live_ranges_.Add(range);
|
||||
@ -1841,7 +1834,8 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
|
||||
if (pos.Value() < current->End().Value()) {
|
||||
// Register reg is available at the range start but becomes blocked before
|
||||
// the range end. Split current at position where it becomes blocked.
|
||||
LiveRange* tail = SplitAt(current, pos);
|
||||
LiveRange* tail = SplitRangeAt(current, pos);
|
||||
if (!AllocationOk()) return false;
|
||||
AddToUnhandledSorted(tail);
|
||||
}
|
||||
|
||||
@ -1996,7 +1990,7 @@ bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
|
||||
}
|
||||
|
||||
|
||||
LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
|
||||
LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
|
||||
ASSERT(!range->IsFixed());
|
||||
TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
|
||||
|
||||
@ -2007,7 +2001,8 @@ LiveRange* LAllocator::SplitAt(LiveRange* range, LifetimePosition pos) {
|
||||
ASSERT(pos.IsInstructionStart() ||
|
||||
!chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
|
||||
|
||||
LiveRange* result = LiveRangeFor(next_virtual_register_++);
|
||||
LiveRange* result = LiveRangeFor(GetVirtualRegister());
|
||||
if (!AllocationOk()) return NULL;
|
||||
range->SplitAt(pos, result);
|
||||
return result;
|
||||
}
|
||||
@ -2024,7 +2019,7 @@ LiveRange* LAllocator::SplitBetween(LiveRange* range,
|
||||
|
||||
LifetimePosition split_pos = FindOptimalSplitPos(start, end);
|
||||
ASSERT(split_pos.Value() >= start.Value());
|
||||
return SplitAt(range, split_pos);
|
||||
return SplitRangeAt(range, split_pos);
|
||||
}
|
||||
|
||||
|
||||
@ -2063,7 +2058,8 @@ LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
|
||||
|
||||
|
||||
void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
|
||||
LiveRange* second_part = SplitAt(range, pos);
|
||||
LiveRange* second_part = SplitRangeAt(range, pos);
|
||||
if (!AllocationOk()) return;
|
||||
Spill(second_part);
|
||||
}
|
||||
|
||||
@ -2072,7 +2068,8 @@ void LAllocator::SpillBetween(LiveRange* range,
|
||||
LifetimePosition start,
|
||||
LifetimePosition end) {
|
||||
ASSERT(start.Value() < end.Value());
|
||||
LiveRange* second_part = SplitAt(range, start);
|
||||
LiveRange* second_part = SplitRangeAt(range, start);
|
||||
if (!AllocationOk()) return;
|
||||
|
||||
if (second_part->Start().Value() < end.Value()) {
|
||||
// The split result intersects with [start, end[.
|
||||
|
27
deps/v8/src/lithium-allocator.h
vendored
@ -431,24 +431,13 @@ class LAllocator BASE_EMBEDDED {
|
||||
|
||||
static void TraceAlloc(const char* msg, ...);
|
||||
|
||||
// Lithium translation support.
|
||||
// Record a use of an input operand in the current instruction.
|
||||
void RecordUse(HValue* value, LUnallocated* operand);
|
||||
// Record the definition of the output operand.
|
||||
void RecordDefinition(HInstruction* instr, LUnallocated* operand);
|
||||
// Record a temporary operand.
|
||||
void RecordTemporary(LUnallocated* operand);
|
||||
|
||||
// Checks whether the value of a given virtual register is tagged.
|
||||
bool HasTaggedValue(int virtual_register) const;
|
||||
|
||||
// Returns the register kind required by the given virtual register.
|
||||
RegisterKind RequiredRegisterKind(int virtual_register) const;
|
||||
|
||||
// Control max function size.
|
||||
static int max_initial_value_ids();
|
||||
|
||||
void Allocate(LChunk* chunk);
|
||||
bool Allocate(LChunk* chunk);
|
||||
|
||||
const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
|
||||
const Vector<LiveRange*>* fixed_live_ranges() const {
|
||||
@ -461,6 +450,15 @@ class LAllocator BASE_EMBEDDED {
|
||||
LChunk* chunk() const { return chunk_; }
|
||||
HGraph* graph() const { return graph_; }
|
||||
|
||||
int GetVirtualRegister() {
|
||||
if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) {
|
||||
allocation_ok_ = false;
|
||||
}
|
||||
return next_virtual_register_++;
|
||||
}
|
||||
|
||||
bool AllocationOk() { return allocation_ok_; }
|
||||
|
||||
void MarkAsOsrEntry() {
|
||||
// There can be only one.
|
||||
ASSERT(!has_osr_entry_);
|
||||
@ -533,7 +531,7 @@ class LAllocator BASE_EMBEDDED {
|
||||
// Otherwise returns the live range that starts at pos and contains
|
||||
// all uses from the original range that follow pos. Uses at pos will
|
||||
// still be owned by the original range after splitting.
|
||||
LiveRange* SplitAt(LiveRange* range, LifetimePosition pos);
|
||||
LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
|
||||
|
||||
// Split the given range in a position from the interval [start, end].
|
||||
LiveRange* SplitBetween(LiveRange* range,
|
||||
@ -591,6 +589,9 @@ class LAllocator BASE_EMBEDDED {
|
||||
|
||||
LChunk* chunk_;
|
||||
|
||||
// Indicates success or failure during register allocation.
|
||||
bool allocation_ok_;
|
||||
|
||||
// During liveness analysis keep a mapping from block id to live_in sets
|
||||
// for blocks already analyzed.
|
||||
ZoneList<BitVector*> live_in_sets_;
|
||||
|
6
deps/v8/src/lithium.cc
vendored
@ -95,12 +95,6 @@ void LOperand::PrintTo(StringStream* stream) {
}


int LOperand::VirtualRegister() {
LUnallocated* unalloc = LUnallocated::cast(this);
return unalloc->virtual_register();
}


bool LParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
13
deps/v8/src/lithium.h
vendored
@ -61,7 +61,6 @@ class LOperand: public ZoneObject {
|
||||
bool IsUnallocated() const { return kind() == UNALLOCATED; }
|
||||
bool IsIgnored() const { return kind() == INVALID; }
|
||||
bool Equals(LOperand* other) const { return value_ == other->value_; }
|
||||
int VirtualRegister();
|
||||
|
||||
void PrintTo(StringStream* stream);
|
||||
void ConvertTo(Kind kind, int index) {
|
||||
@ -169,7 +168,7 @@ class LUnallocated: public LOperand {
|
||||
return static_cast<int>(value_) >> kFixedIndexShift;
|
||||
}
|
||||
|
||||
unsigned virtual_register() const {
|
||||
int virtual_register() const {
|
||||
return VirtualRegisterField::decode(value_);
|
||||
}
|
||||
|
||||
@ -454,7 +453,7 @@ class LEnvironment: public ZoneObject {
|
||||
parameter_count_(parameter_count),
|
||||
pc_offset_(-1),
|
||||
values_(value_count),
|
||||
representations_(value_count),
|
||||
is_tagged_(value_count),
|
||||
spilled_registers_(NULL),
|
||||
spilled_double_registers_(NULL),
|
||||
outer_(outer) {
|
||||
@ -476,11 +475,13 @@ class LEnvironment: public ZoneObject {
|
||||
|
||||
void AddValue(LOperand* operand, Representation representation) {
|
||||
values_.Add(operand);
|
||||
representations_.Add(representation);
|
||||
if (representation.IsTagged()) {
|
||||
is_tagged_.Add(values_.length() - 1);
|
||||
}
|
||||
}
|
||||
|
||||
bool HasTaggedValueAt(int index) const {
|
||||
return representations_[index].IsTagged();
|
||||
return is_tagged_.Contains(index);
|
||||
}
|
||||
|
||||
void Register(int deoptimization_index,
|
||||
@ -515,7 +516,7 @@ class LEnvironment: public ZoneObject {
|
||||
int parameter_count_;
|
||||
int pc_offset_;
|
||||
ZoneList<LOperand*> values_;
|
||||
ZoneList<Representation> representations_;
|
||||
BitVector is_tagged_;
|
||||
|
||||
// Allocation index indexed arrays of spill slot operands for registers
|
||||
// that are also in spill slots at an OSR entry. NULL for environments
|
||||
|
9
deps/v8/src/log.cc
vendored
@ -1521,7 +1521,8 @@ void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {


void Logger::LogCodeObjects() {
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCodeObjects");
HeapIterator iterator;
AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
@ -1576,7 +1577,8 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,


void Logger::LogCompiledFunctions() {
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCompiledFunctions");
HandleScope scope;
const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@ -1595,7 +1597,8 @@ void Logger::LogCompiledFunctions() {


void Logger::LogAccessorCallbacks() {
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogAccessorCallbacks");
HeapIterator iterator;
AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
1
deps/v8/src/mark-compact-inl.h
vendored
@ -46,6 +46,7 @@ MarkBit Marking::MarkBitFrom(Address addr) {

void MarkCompactCollector::SetFlags(int flags) {
sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
}

178
deps/v8/src/mark-compact.cc
vendored
@ -230,6 +230,18 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
|
||||
}
|
||||
|
||||
|
||||
static void TraceFragmentation(PagedSpace* space) {
|
||||
int number_of_pages = space->CountTotalPages();
|
||||
intptr_t reserved = (number_of_pages * Page::kObjectAreaSize);
|
||||
intptr_t free = reserved - space->SizeOfObjects();
|
||||
PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
|
||||
AllocationSpaceName(space->identity()),
|
||||
number_of_pages,
|
||||
static_cast<int>(free),
|
||||
static_cast<double>(free) * 100 / reserved);
|
||||
}
|
||||
|
||||
|
||||
bool MarkCompactCollector::StartCompaction() {
|
||||
if (!compacting_) {
|
||||
ASSERT(evacuation_candidates_.length() == 0);
|
||||
@ -239,6 +251,13 @@ bool MarkCompactCollector::StartCompaction() {
|
||||
|
||||
if (FLAG_compact_code_space) {
|
||||
CollectEvacuationCandidates(heap()->code_space());
|
||||
} else if (FLAG_trace_fragmentation) {
|
||||
TraceFragmentation(heap()->code_space());
|
||||
}
|
||||
|
||||
if (FLAG_trace_fragmentation) {
|
||||
TraceFragmentation(heap()->map_space());
|
||||
TraceFragmentation(heap()->cell_space());
|
||||
}
|
||||
|
||||
heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
|
||||
@ -414,6 +433,65 @@ const char* AllocationSpaceName(AllocationSpace space) {
|
||||
}
|
||||
|
||||
|
||||
// Returns zero for pages that have so little fragmentation that it is not
|
||||
// worth defragmenting them. Otherwise a positive integer that gives an
|
||||
// estimate of fragmentation on an arbitrary scale.
|
||||
static int FreeListFragmentation(PagedSpace* space, Page* p) {
|
||||
// If page was not swept then there are no free list items on it.
|
||||
if (!p->WasSwept()) {
|
||||
if (FLAG_trace_fragmentation) {
|
||||
PrintF("%p [%s]: %d bytes live (unswept)\n",
|
||||
reinterpret_cast<void*>(p),
|
||||
AllocationSpaceName(space->identity()),
|
||||
p->LiveBytes());
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
FreeList::SizeStats sizes;
|
||||
space->CountFreeListItems(p, &sizes);
|
||||
|
||||
intptr_t ratio;
|
||||
intptr_t ratio_threshold;
|
||||
if (space->identity() == CODE_SPACE) {
|
||||
ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
|
||||
Page::kObjectAreaSize;
|
||||
ratio_threshold = 10;
|
||||
} else {
|
||||
ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
|
||||
Page::kObjectAreaSize;
|
||||
ratio_threshold = 15;
|
||||
}
|
||||
|
||||
if (FLAG_trace_fragmentation) {
|
||||
PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
|
||||
reinterpret_cast<void*>(p),
|
||||
AllocationSpaceName(space->identity()),
|
||||
static_cast<int>(sizes.small_size_),
|
||||
static_cast<double>(sizes.small_size_ * 100) /
|
||||
Page::kObjectAreaSize,
|
||||
static_cast<int>(sizes.medium_size_),
|
||||
static_cast<double>(sizes.medium_size_ * 100) /
|
||||
Page::kObjectAreaSize,
|
||||
static_cast<int>(sizes.large_size_),
|
||||
static_cast<double>(sizes.large_size_ * 100) /
|
||||
Page::kObjectAreaSize,
|
||||
static_cast<int>(sizes.huge_size_),
|
||||
static_cast<double>(sizes.huge_size_ * 100) /
|
||||
Page::kObjectAreaSize,
|
||||
(ratio > ratio_threshold) ? "[fragmented]" : "");
|
||||
}
|
||||
|
||||
if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (ratio <= ratio_threshold) return 0; // Not fragmented.
|
||||
|
||||
return static_cast<int>(ratio - ratio_threshold);
|
||||
}
|
||||
|
||||
|
||||
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
|
||||
ASSERT(space->identity() == OLD_POINTER_SPACE ||
|
||||
space->identity() == OLD_DATA_SPACE ||
|
||||
@ -421,7 +499,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
|
||||
|
||||
int number_of_pages = space->CountTotalPages();
|
||||
|
||||
PageIterator it(space);
|
||||
const int kMaxMaxEvacuationCandidates = 1000;
|
||||
int max_evacuation_candidates = Min(
|
||||
kMaxMaxEvacuationCandidates,
|
||||
@ -444,22 +521,89 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
|
||||
Page* page_;
|
||||
};
|
||||
|
||||
enum CompactionMode {
|
||||
COMPACT_FREE_LISTS,
|
||||
REDUCE_MEMORY_FOOTPRINT
|
||||
};
|
||||
|
||||
CompactionMode mode = COMPACT_FREE_LISTS;
|
||||
|
||||
intptr_t reserved = number_of_pages * Page::kObjectAreaSize;
|
||||
intptr_t over_reserved = reserved - space->SizeOfObjects();
|
||||
static const intptr_t kFreenessThreshold = 50;
|
||||
|
||||
if (over_reserved >= 2 * Page::kObjectAreaSize &&
|
||||
reduce_memory_footprint_) {
|
||||
mode = REDUCE_MEMORY_FOOTPRINT;
|
||||
|
||||
// We expect that empty pages are easier to compact so slightly bump the
|
||||
// limit.
|
||||
max_evacuation_candidates += 2;
|
||||
|
||||
if (FLAG_trace_fragmentation) {
|
||||
PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
|
||||
static_cast<double>(over_reserved) / MB,
|
||||
static_cast<int>(kFreenessThreshold));
|
||||
}
|
||||
}
|
||||
|
||||
intptr_t estimated_release = 0;
|
||||
|
||||
Candidate candidates[kMaxMaxEvacuationCandidates];
|
||||
|
||||
int count = 0;
|
||||
if (it.has_next()) it.next(); // Never compact the first page.
|
||||
int fragmentation = 0;
|
||||
Candidate* least = NULL;
|
||||
|
||||
PageIterator it(space);
|
||||
if (it.has_next()) it.next(); // Never compact the first page.
|
||||
|
||||
while (it.has_next()) {
|
||||
Page* p = it.next();
|
||||
p->ClearEvacuationCandidate();
|
||||
|
||||
if (FLAG_stress_compaction) {
|
||||
int counter = space->heap()->ms_count();
|
||||
uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
|
||||
if ((counter & 1) == (page_number & 1)) fragmentation = 1;
|
||||
} else if (mode == REDUCE_MEMORY_FOOTPRINT) {
|
||||
// Don't try to release too many pages.
|
||||
if (estimated_release >= ((over_reserved * 3) / 4)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
intptr_t free_bytes = 0;
|
||||
|
||||
if (!p->WasSwept()) {
|
||||
free_bytes = (Page::kObjectAreaSize - p->LiveBytes());
|
||||
} else {
|
||||
FreeList::SizeStats sizes;
|
||||
space->CountFreeListItems(p, &sizes);
|
||||
free_bytes = sizes.Total();
|
||||
}
|
||||
|
||||
int free_pct = static_cast<int>(free_bytes * 100 / Page::kObjectAreaSize);
|
||||
|
||||
if (free_pct >= kFreenessThreshold) {
|
||||
estimated_release += Page::kObjectAreaSize +
|
||||
(Page::kObjectAreaSize - free_bytes);
|
||||
fragmentation = free_pct;
|
||||
} else {
|
||||
fragmentation = 0;
|
||||
}
|
||||
|
||||
if (FLAG_trace_fragmentation) {
|
||||
PrintF("%p [%s]: %d (%.2f%%) free %s\n",
|
||||
reinterpret_cast<void*>(p),
|
||||
AllocationSpaceName(space->identity()),
|
||||
static_cast<int>(free_bytes),
|
||||
static_cast<double>(free_bytes * 100) / Page::kObjectAreaSize,
|
||||
(fragmentation > 0) ? "[fragmented]" : "");
|
||||
}
|
||||
} else {
|
||||
fragmentation = space->Fragmentation(p);
|
||||
fragmentation = FreeListFragmentation(space, p);
|
||||
}
|
||||
|
||||
if (fragmentation != 0) {
|
||||
if (count < max_evacuation_candidates) {
|
||||
candidates[count++] = Candidate(fragmentation, p);
|
||||
@ -479,6 +623,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < count; i++) {
|
||||
AddEvacuationCandidate(candidates[i].page());
|
||||
}
|
||||
@ -894,17 +1039,9 @@ class StaticMarkingVisitor : public StaticVisitorBase {
|
||||
heap->mark_compact_collector()->flush_monomorphic_ics_)) {
|
||||
IC::Clear(rinfo->pc());
|
||||
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
|
||||
} else {
|
||||
if (FLAG_cleanup_code_caches_at_gc &&
|
||||
target->kind() == Code::STUB &&
|
||||
target->major_key() == CodeStub::CallFunction &&
|
||||
target->has_function_cache()) {
|
||||
CallFunctionStub::Clear(heap, rinfo->pc());
|
||||
}
|
||||
}
|
||||
MarkBit code_mark = Marking::MarkBitFrom(target);
|
||||
heap->mark_compact_collector()->MarkObject(target, code_mark);
|
||||
|
||||
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
|
||||
}
|
||||
|
||||
@ -1025,8 +1162,17 @@ class StaticMarkingVisitor : public StaticVisitorBase {
|
||||
}
|
||||
|
||||
static void VisitCode(Map* map, HeapObject* object) {
|
||||
reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
|
||||
map->GetHeap());
|
||||
Heap* heap = map->GetHeap();
|
||||
Code* code = reinterpret_cast<Code*>(object);
|
||||
if (FLAG_cleanup_code_caches_at_gc) {
|
||||
TypeFeedbackCells* type_feedback_cells = code->type_feedback_cells();
|
||||
for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
|
||||
ASSERT(type_feedback_cells->AstId(i)->IsSmi());
|
||||
JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
|
||||
cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
|
||||
}
|
||||
}
|
||||
code->CodeIterateBody<StaticMarkingVisitor>(heap);
|
||||
}
|
||||
|
||||
// Code flushing support.
|
||||
@ -2368,9 +2514,9 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
|
||||
void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
|
||||
MarkBit map_mark) {
|
||||
// Follow the chain of back pointers to find the prototype.
|
||||
Map* real_prototype = map;
|
||||
Object* real_prototype = map;
|
||||
while (real_prototype->IsMap()) {
|
||||
real_prototype = reinterpret_cast<Map*>(real_prototype->prototype());
|
||||
real_prototype = Map::cast(real_prototype)->prototype();
|
||||
ASSERT(real_prototype->IsHeapObject());
|
||||
}
|
||||
|
||||
@ -3241,6 +3387,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
|
||||
p->set_scan_on_scavenge(false);
|
||||
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
|
||||
p->ClearEvacuationCandidate();
|
||||
p->ResetLiveBytes();
|
||||
space->ReleasePage(p);
|
||||
}
|
||||
evacuation_candidates_.Rewind(0);
|
||||
compacting_ = false;
|
||||
|
4
deps/v8/src/mark-compact.h
vendored
@ -374,7 +374,7 @@ class SlotsBuffer {
static const int kNumberOfElements = 1021;

private:
static const int kChainLengthThreshold = 6;
static const int kChainLengthThreshold = 15;

intptr_t idx_;
intptr_t chain_length_;
@ -572,6 +572,8 @@ class MarkCompactCollector {
// heap.
bool sweep_precisely_;

bool reduce_memory_footprint_;

// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
56
deps/v8/src/mips/builtins-mips.cc
vendored
@ -116,9 +116,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
|
||||
Label* gc_required) {
|
||||
const int initial_capacity = JSArray::kPreallocatedArrayElements;
|
||||
STATIC_ASSERT(initial_capacity >= 0);
|
||||
// Load the initial map from the array function.
|
||||
__ lw(scratch1, FieldMemOperand(array_function,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
__ LoadGlobalInitialConstructedArrayMap(array_function, scratch2, scratch1);
|
||||
|
||||
// Allocate the JSArray object together with space for a fixed array with the
|
||||
// requested elements.
|
||||
@ -214,9 +212,8 @@ static void AllocateJSArray(MacroAssembler* masm,
|
||||
bool fill_with_hole,
|
||||
Label* gc_required) {
|
||||
// Load the initial map from the array function.
|
||||
__ lw(elements_array_storage,
|
||||
FieldMemOperand(array_function,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
__ LoadGlobalInitialConstructedArrayMap(array_function, scratch2,
|
||||
elements_array_storage);
|
||||
|
||||
if (FLAG_debug_code) { // Assert that array size is not zero.
|
||||
__ Assert(
|
||||
@ -681,7 +678,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
|
||||
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
bool is_api_function,
|
||||
bool count_constructions) {
|
||||
// ----------- S t a t e -------------
|
||||
// -- a0 : number of arguments
|
||||
// -- a1 : constructor function
|
||||
@ -689,45 +688,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
|
||||
// -- sp[...]: constructor arguments
|
||||
// -----------------------------------
|
||||
|
||||
Label slow, non_function_call;
|
||||
// Check that the function is not a smi.
|
||||
__ JumpIfSmi(a1, &non_function_call);
|
||||
// Check that the function is a JSFunction.
|
||||
__ GetObjectType(a1, a2, a2);
|
||||
__ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
|
||||
|
||||
// Jump to the function-specific construct stub.
|
||||
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
|
||||
__ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Jump(t9);
|
||||
|
||||
// a0: number of arguments
|
||||
// a1: called object
|
||||
// a2: object type
|
||||
Label do_call;
|
||||
__ bind(&slow);
|
||||
__ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
|
||||
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
|
||||
__ jmp(&do_call);
|
||||
|
||||
__ bind(&non_function_call);
|
||||
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
|
||||
__ bind(&do_call);
|
||||
// CALL_NON_FUNCTION expects the non-function constructor as receiver
|
||||
// (instead of the original receiver from the call site). The receiver is
|
||||
// stack element argc.
|
||||
// Set expected number of arguments to zero (not changing a0).
|
||||
__ mov(a2, zero_reg);
|
||||
__ SetCallKind(t1, CALL_AS_METHOD);
|
||||
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
||||
RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
|
||||
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
|
||||
bool is_api_function,
|
||||
bool count_constructions) {
|
||||
// Should never count constructions for api objects.
|
||||
ASSERT(!is_api_function || !count_constructions);
|
||||
|
||||
@ -1150,7 +1110,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
|
||||
// Invoke the code and pass argc as a0.
|
||||
__ mov(a0, a3);
|
||||
if (is_construct) {
|
||||
__ Call(masm->isolate()->builtins()->JSConstructCall());
|
||||
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
|
||||
__ CallStub(&stub);
|
||||
} else {
|
||||
ParameterCount actual(a0);
|
||||
__ InvokeFunction(a1, actual, CALL_FUNCTION,
|
||||
@ -1800,6 +1761,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
|
||||
|
||||
__ Call(a3);
|
||||
|
||||
masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
|
||||
// Exit frame and return.
|
||||
LeaveArgumentsAdaptorFrame(masm);
|
||||
__ Ret();
|
||||
|
172
deps/v8/src/mips/code-stubs-mips.cc
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -158,20 +158,18 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
|
||||
__ lw(a3, MemOperand(sp, 0));
|
||||
|
||||
// Set up the object header.
|
||||
__ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
|
||||
__ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
|
||||
__ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
|
||||
__ li(a2, Operand(Smi::FromInt(length)));
|
||||
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
|
||||
__ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
|
||||
|
||||
// Set up the fixed slots.
|
||||
// Set up the fixed slots, copy the global object from the previous context.
|
||||
__ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
__ li(a1, Operand(Smi::FromInt(0)));
|
||||
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
|
||||
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
|
||||
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
|
||||
|
||||
// Copy the global object from the previous context.
|
||||
__ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
__ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
|
||||
// Initialize the rest of the slots to undefined.
|
||||
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
|
||||
@ -229,14 +227,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
|
||||
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
|
||||
__ bind(&after_sentinel);
|
||||
|
||||
// Set up the fixed slots.
|
||||
// Set up the fixed slots, copy the global object from the previous context.
|
||||
__ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
|
||||
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
|
||||
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
|
||||
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
|
||||
|
||||
// Copy the global object from the previous context.
|
||||
__ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
|
||||
__ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));
|
||||
__ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
|
||||
|
||||
// Initialize the rest of the slots to the hole value.
|
||||
__ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
|
||||
@ -592,7 +588,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
||||
|
||||
Label is_smi, done;
|
||||
|
||||
__ JumpIfSmi(object, &is_smi);
|
||||
// Smi-check
|
||||
__ UntagAndJumpIfSmi(scratch1, object, &is_smi);
|
||||
// Heap number check
|
||||
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
|
||||
|
||||
// Handle loading a double from a heap number.
|
||||
@ -619,7 +617,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
|
||||
if (CpuFeatures::IsSupported(FPU)) {
|
||||
CpuFeatures::Scope scope(FPU);
|
||||
// Convert smi to double using FPU instructions.
|
||||
__ SmiUntag(scratch1, object);
|
||||
__ mtc1(scratch1, dst);
|
||||
__ cvt_d_w(dst, dst);
|
||||
if (destination == kCoreRegisters) {
|
||||
@ -654,11 +651,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
|
||||
Heap::kHeapNumberMapRootIndex,
|
||||
"HeapNumberMap register clobbered.");
|
||||
}
|
||||
Label is_smi;
|
||||
Label done;
|
||||
Label not_in_int32_range;
|
||||
|
||||
__ JumpIfSmi(object, &is_smi);
|
||||
__ UntagAndJumpIfSmi(dst, object, &done);
|
||||
__ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
|
||||
__ Branch(not_number, ne, scratch1, Operand(heap_number_map));
|
||||
__ ConvertToInt32(object,
|
||||
@ -678,10 +674,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
|
||||
scratch2,
|
||||
scratch3);
|
||||
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&is_smi);
|
||||
__ SmiUntag(dst, object);
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
@ -863,10 +855,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
|
||||
|
||||
Label done;
|
||||
|
||||
// Untag the object into the destination register.
|
||||
__ SmiUntag(dst, object);
|
||||
// Just return if the object is a smi.
|
||||
__ JumpIfSmi(object, &done);
|
||||
__ UntagAndJumpIfSmi(dst, object, &done);
|
||||
|
||||
if (FLAG_debug_code) {
|
||||
__ AbortIfNotRootValue(heap_number_map,
|
||||
@ -3605,7 +3594,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
const Register scratch = t5;
|
||||
const Register scratch2 = t3;
|
||||
|
||||
Label call_runtime, done, exponent_not_smi, int_exponent;
|
||||
Label call_runtime, done, int_exponent;
|
||||
if (exponent_type_ == ON_STACK) {
|
||||
Label base_is_smi, unpack_exponent;
|
||||
// The exponent and base are supplied as arguments on the stack.
|
||||
@ -3616,7 +3605,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
|
||||
|
||||
__ JumpIfSmi(base, &base_is_smi);
|
||||
__ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
|
||||
__ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
|
||||
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
|
||||
|
||||
@ -3624,27 +3613,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
__ jmp(&unpack_exponent);
|
||||
|
||||
__ bind(&base_is_smi);
|
||||
__ SmiUntag(base);
|
||||
__ mtc1(base, single_scratch);
|
||||
__ mtc1(scratch, single_scratch);
|
||||
__ cvt_d_w(double_base, single_scratch);
|
||||
__ bind(&unpack_exponent);
|
||||
|
||||
__ JumpIfNotSmi(exponent, &exponent_not_smi);
|
||||
__ SmiUntag(exponent);
|
||||
__ jmp(&int_exponent);
|
||||
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
||||
|
||||
__ bind(&exponent_not_smi);
|
||||
__ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
|
||||
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
|
||||
__ ldc1(double_exponent,
|
||||
FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
||||
} else if (exponent_type_ == TAGGED) {
|
||||
// Base is already in double_base.
|
||||
__ JumpIfNotSmi(exponent, &exponent_not_smi);
|
||||
__ SmiUntag(exponent);
|
||||
__ jmp(&int_exponent);
|
||||
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
|
||||
|
||||
__ bind(&exponent_not_smi);
|
||||
__ ldc1(double_exponent,
|
||||
FieldMemOperand(exponent, HeapNumber::kValueOffset));
|
||||
}
|
||||
@ -3724,13 +3706,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
|
||||
__ jmp(&done);
|
||||
|
||||
__ bind(&int_exponent_convert);
|
||||
__ mfc1(exponent, single_scratch);
|
||||
__ mfc1(scratch, single_scratch);
|
||||
}
|
||||
|
||||
// Calculate power with integer exponent.
|
||||
__ bind(&int_exponent);
|
||||
|
||||
__ mov(scratch, exponent); // Back up exponent.
|
||||
// Get two copies of exponent in the registers scratch and exponent.
|
||||
if (exponent_type_ == INTEGER) {
|
||||
__ mov(scratch, exponent);
|
||||
} else {
|
||||
// Exponent has previously been stored into scratch as untagged integer.
|
||||
__ mov(exponent, scratch);
|
||||
}
|
||||
|
||||
__ mov_d(double_scratch, double_base); // Back up base.
|
||||
__ Move(double_result, 1.0);
|
||||
|
||||
@ -5298,11 +5287,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
// Set input, index and length fields from arguments.
|
||||
__ lw(a1, MemOperand(sp, kPointerSize * 0));
|
||||
__ lw(a2, MemOperand(sp, kPointerSize * 1));
|
||||
__ lw(t2, MemOperand(sp, kPointerSize * 2));
|
||||
__ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
|
||||
__ lw(a1, MemOperand(sp, kPointerSize * 1));
|
||||
__ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
|
||||
__ lw(a1, MemOperand(sp, kPointerSize * 2));
|
||||
__ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
|
||||
__ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
|
||||
__ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
|
||||
|
||||
// Fill out the elements FixedArray.
|
||||
// v0: JSArray, tagged.
|
||||
@ -5341,24 +5330,49 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void CallFunctionStub::FinishCode(Handle<Code> code) {
|
||||
code->set_has_function_cache(false);
|
||||
}
|
||||
static void GenerateRecordCallTarget(MacroAssembler* masm) {
|
||||
// Cache the called function in a global property cell. Cache states
|
||||
// are uninitialized, monomorphic (indicated by a JSFunction), and
|
||||
// megamorphic.
|
||||
// a1 : the function to call
|
||||
// a2 : cache cell for call target
|
||||
Label done;
|
||||
|
||||
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
|
||||
masm->isolate()->heap()->undefined_value());
|
||||
ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
|
||||
masm->isolate()->heap()->the_hole_value());
|
||||
|
||||
void CallFunctionStub::Clear(Heap* heap, Address address) {
|
||||
UNREACHABLE();
|
||||
}
|
||||
// Load the cache state into a3.
|
||||
__ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
|
||||
|
||||
// A monomorphic cache hit or an already megamorphic state: invoke the
|
||||
// function without changing the state.
|
||||
__ Branch(&done, eq, a3, Operand(a1));
|
||||
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
|
||||
__ Branch(&done, eq, a3, Operand(at));
|
||||
|
||||
Object* CallFunctionStub::GetCachedValue(Address address) {
|
||||
UNREACHABLE();
|
||||
return NULL;
|
||||
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
|
||||
// megamorphic.
|
||||
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
|
||||
__ Branch(&done, eq, a3, Operand(at));
|
||||
// MegamorphicSentinel is an immortal immovable object (undefined) so no
|
||||
// write-barrier is needed.
|
||||
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
|
||||
__ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
|
||||
__ Branch(&done);
|
||||
|
||||
// An uninitialized cache is patched with the function.
|
||||
__ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
|
||||
// No need for a write barrier here - cells are rescanned.
|
||||
|
||||
__ bind(&done);
|
||||
}
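// Illustrative sketch, not part of this patch: the cache cell patched by
// GenerateRecordCallTarget above moves through three states -- uninitialized
// (the hole), monomorphic (a concrete JSFunction), and megamorphic
// (undefined). A minimal C++ model with invented names:
#include <cstdint>

enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

struct CallTargetCell {
  CacheState state = CacheState::kUninitialized;
  uintptr_t function = 0;  // Identity of the cached callee.

  void Record(uintptr_t callee) {
    if (state == CacheState::kMegamorphic) return;  // Stays megamorphic.
    if (state == CacheState::kMonomorphic) {
      // A hit leaves the cell alone; a miss goes megamorphic for good.
      if (function != callee) state = CacheState::kMegamorphic;
      return;
    }
    state = CacheState::kMonomorphic;  // First recorded target for this site.
    function = callee;
  }
};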
|
||||
|
||||
|
||||
void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
// a1 : the function to call
|
||||
// a2 : cache cell for call target
|
||||
Label slow, non_function;
|
||||
|
||||
// The receiver might implicitly be the global object. This is
|
||||
@ -5435,6 +5449,48 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void CallConstructStub::Generate(MacroAssembler* masm) {
|
||||
// a0 : number of arguments
|
||||
// a1 : the function to call
|
||||
// a2 : cache cell for call target
|
||||
Label slow, non_function_call;
|
||||
|
||||
// Check that the function is not a smi.
|
||||
__ JumpIfSmi(a1, &non_function_call);
|
||||
// Check that the function is a JSFunction.
|
||||
__ GetObjectType(a1, a3, a3);
|
||||
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
|
||||
|
||||
if (RecordCallTarget()) {
|
||||
GenerateRecordCallTarget(masm);
|
||||
}
|
||||
|
||||
// Jump to the function-specific construct stub.
|
||||
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
||||
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
|
||||
__ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
|
||||
__ Jump(at);
|
||||
|
||||
// a0: number of arguments
|
||||
// a1: called object
|
||||
// a3: object type
|
||||
Label do_call;
|
||||
__ bind(&slow);
|
||||
__ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
|
||||
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
|
||||
__ jmp(&do_call);
|
||||
|
||||
__ bind(&non_function_call);
|
||||
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
|
||||
__ bind(&do_call);
|
||||
// Set expected number of arguments to zero (not changing a0).
|
||||
__ li(a2, Operand(0, RelocInfo::NONE));
|
||||
__ SetCallKind(t1, CALL_AS_METHOD);
|
||||
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
|
||||
RelocInfo::CODE_TARGET);
|
||||
}
|
||||
|
||||
|
||||
// Unfortunately you have to run without snapshots to see most of these
|
||||
// names in the profile since most compare stubs end up in the snapshot.
|
||||
void CompareStub::PrintName(StringStream* stream) {
|
||||
@ -6002,10 +6058,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
// Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
|
||||
// safe in this case.
|
||||
__ JumpIfSmi(a2, &runtime, at, USE_DELAY_SLOT);
|
||||
__ SmiUntag(a2);
|
||||
__ JumpIfSmi(a3, &runtime, at, USE_DELAY_SLOT);
|
||||
__ SmiUntag(a3);
|
||||
__ UntagAndJumpIfSmi(a2, a2, &runtime);
|
||||
__ UntagAndJumpIfSmi(a3, a3, &runtime);
|
||||
|
||||
// Both a2 and a3 are untagged integers.
|
||||
|
||||
@ -6089,10 +6143,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
|
||||
|
||||
__ bind(&sliced_string);
|
||||
// Sliced string. Fetch parent and correct start index by offset.
|
||||
__ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
|
||||
__ sra(t1, t1, 1);
|
||||
__ Addu(a3, a3, t1);
|
||||
__ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
|
||||
__ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
|
||||
__ sra(t0, t0, 1); // Add offset to index.
|
||||
__ Addu(a3, a3, t0);
|
||||
// Update instance type.
|
||||
__ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
|
||||
__ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
|
||||
|
18
deps/v8/src/mips/codegen-mips.cc
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -105,10 +105,10 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
|
||||
__ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
|
||||
__ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
|
||||
// t2: destination FixedDoubleArray, not tagged as heap object
|
||||
// Set destination FixedDoubleArray's length and map.
|
||||
__ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
|
||||
__ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
|
||||
// Set destination FixedDoubleArray's length.
|
||||
__ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
|
||||
__ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
|
||||
// Update receiver's map.
|
||||
|
||||
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
|
||||
@ -159,10 +159,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
|
||||
__ lw(t5, MemOperand(a3));
|
||||
__ Addu(a3, a3, kIntSize);
|
||||
// t5: current element
|
||||
__ JumpIfNotSmi(t5, &convert_hole);
|
||||
__ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
|
||||
|
||||
// Normal smi, convert to double and store.
|
||||
__ SmiUntag(t5);
|
||||
if (fpu_supported) {
|
||||
CpuFeatures::Scope scope(FPU);
|
||||
__ mtc1(t5, f0);
|
||||
@ -187,6 +186,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
|
||||
// Hole found, store the-hole NaN.
|
||||
__ bind(&convert_hole);
|
||||
if (FLAG_debug_code) {
|
||||
// Restore a "smi-untagged" heap object.
|
||||
__ SmiTag(t5);
|
||||
__ Or(t5, t5, Operand(1));
|
||||
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
|
||||
__ Assert(eq, "object found in smi-only array", at, Operand(t5));
|
||||
}
|
||||
@ -225,10 +227,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
|
||||
__ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
|
||||
__ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
|
||||
// t2: destination FixedArray, not tagged as heap object
|
||||
// Set destination FixedDoubleArray's length and map.
|
||||
__ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
|
||||
__ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
|
||||
// Set destination FixedDoubleArray's length.
|
||||
__ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
|
||||
__ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
|
||||
|
||||
// Prepare for conversion loop.
|
||||
__ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
|
||||
@ -333,9 +335,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
|
||||
// Handle slices.
|
||||
Label indirect_string_loaded;
|
||||
__ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
|
||||
__ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
|
||||
__ sra(at, result, kSmiTagSize);
|
||||
__ Addu(index, index, at);
|
||||
__ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
|
||||
__ jmp(&indirect_string_loaded);
|
||||
|
||||
// Handle cons strings.
|
||||
|
12
deps/v8/src/mips/cpu-mips.cc
vendored
@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -64,15 +64,19 @@ void CPU::FlushICache(void* start, size_t size) {
}

#if !defined (USE_SIMULATOR)
#if defined(ANDROID)
// Bionic cacheflush can typically run in userland, avoiding kernel call.
char *end = reinterpret_cast<char *>(start) + size;
cacheflush(
reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
#else // ANDROID
int res;

// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);

if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}

#endif // ANDROID
#else // USE_SIMULATOR.
// Not generating mips instructions for C-code. This means that we are
// building a mips emulator based target. We should notify the simulator
40
deps/v8/src/mips/debug-mips.cc
vendored
@ -243,14 +243,6 @@ void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
|
||||
// Calling convention for construct call (from builtins-mips.cc).
|
||||
// -- a0 : number of arguments (not smi)
|
||||
// -- a1 : constructor function
|
||||
Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
|
||||
// In places other than IC call sites it is expected that v0 is TOS which
|
||||
// is an object - this is not generally the case so this should be used with
|
||||
@ -260,6 +252,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
|
||||
|
||||
|
||||
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
|
||||
// Register state for CallFunctionStub (from code-stubs-mips.cc).
|
||||
// ----------- S t a t e -------------
|
||||
// -- a1 : function
|
||||
// -----------------------------------
|
||||
@ -267,6 +260,37 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
|
||||
// Register state for CallFunctionStub (from code-stubs-mips.cc).
|
||||
// ----------- S t a t e -------------
|
||||
// -- a1 : function
|
||||
// -- a2 : cache cell for call target
|
||||
// -----------------------------------
|
||||
Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
|
||||
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
|
||||
// ----------- S t a t e -------------
|
||||
// -- a0 : number of arguments (not smi)
|
||||
// -- a1 : constructor function
|
||||
// -----------------------------------
|
||||
Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
|
||||
// Calling convention for CallConstructStub (from code-stubs-mips.cc).
|
||||
// ----------- S t a t e -------------
|
||||
// -- a0 : number of arguments (not smi)
|
||||
// -- a1 : constructor function
|
||||
// -- a2 : cache cell for call target
|
||||
// -----------------------------------
|
||||
Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
|
||||
}
|
||||
|
||||
|
||||
void Debug::GenerateSlot(MacroAssembler* masm) {
|
||||
// Generate enough nop's to make space for a call instruction. Avoid emitting
|
||||
// the trampoline pool in the debug break slot code.
|
||||
|
123
deps/v8/src/mips/deoptimizer-mips.cc
vendored
@ -218,12 +218,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
|
||||
ASSERT(Translation::BEGIN == opcode);
|
||||
USE(opcode);
|
||||
int count = iterator.Next();
|
||||
iterator.Skip(1); // Drop JS frame count.
|
||||
ASSERT(count == 1);
|
||||
USE(count);
|
||||
|
||||
opcode = static_cast<Translation::Opcode>(iterator.Next());
|
||||
USE(opcode);
|
||||
ASSERT(Translation::FRAME == opcode);
|
||||
ASSERT(Translation::JS_FRAME == opcode);
|
||||
unsigned node_id = iterator.Next();
|
||||
USE(node_id);
|
||||
ASSERT(node_id == ast_id);
|
||||
@ -259,9 +260,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
|
||||
output_ = new FrameDescription*[1];
|
||||
output_[0] = new(output_frame_size) FrameDescription(
|
||||
output_frame_size, function_);
|
||||
#ifdef DEBUG
|
||||
output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
|
||||
#endif
|
||||
output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
|
||||
|
||||
// Clear the incoming parameters in the optimized frame to avoid
|
||||
// confusing the garbage collector.
|
||||
@ -349,15 +348,115 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
|
||||
}
|
||||
|
||||
|
||||
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
|
||||
int frame_index) {
|
||||
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
|
||||
unsigned height = iterator->Next();
|
||||
unsigned height_in_bytes = height * kPointerSize;
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
|
||||
}
|
||||
|
||||
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
|
||||
unsigned input_frame_size = input_->GetFrameSize();
|
||||
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
|
||||
|
||||
// Allocate and store the output frame description.
|
||||
FrameDescription* output_frame =
|
||||
new(output_frame_size) FrameDescription(output_frame_size, function);
|
||||
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
|
||||
|
||||
// Arguments adaptor can not be topmost or bottommost.
|
||||
ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
|
||||
ASSERT(output_[frame_index] == NULL);
|
||||
output_[frame_index] = output_frame;
|
||||
|
||||
// The top address of the frame is computed from the previous
|
||||
// frame's top and this frame's size.
|
||||
uint32_t top_address;
|
||||
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
|
||||
output_frame->SetTop(top_address);
|
||||
|
||||
// Compute the incoming parameter translation.
|
||||
int parameter_count = height;
|
||||
unsigned output_offset = output_frame_size;
|
||||
unsigned input_offset = input_frame_size;
|
||||
for (int i = 0; i < parameter_count; ++i) {
|
||||
output_offset -= kPointerSize;
|
||||
DoTranslateCommand(iterator, frame_index, output_offset);
|
||||
}
|
||||
input_offset -= (parameter_count * kPointerSize);
|
||||
|
||||
// Read caller's PC from the previous frame.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
|
||||
output_frame->SetFrameSlot(output_offset, callers_pc);
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
|
||||
top_address + output_offset, output_offset, callers_pc);
|
||||
}
|
||||
|
||||
// Read caller's FP from the previous frame, and set this frame's FP.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
intptr_t value = output_[frame_index - 1]->GetFp();
|
||||
output_frame->SetFrameSlot(output_offset, value);
|
||||
intptr_t fp_value = top_address + output_offset;
|
||||
output_frame->SetFp(fp_value);
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
|
||||
fp_value, output_offset, value);
|
||||
}
|
||||
|
||||
// A marker value is used in place of the context.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
intptr_t context = reinterpret_cast<intptr_t>(
|
||||
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
||||
output_frame->SetFrameSlot(output_offset, context);
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
|
||||
top_address + output_offset, output_offset, context);
|
||||
}
|
||||
|
||||
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
value = reinterpret_cast<intptr_t>(function);
|
||||
output_frame->SetFrameSlot(output_offset, value);
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
|
||||
top_address + output_offset, output_offset, value);
|
||||
}
|
||||
|
||||
// Number of incoming arguments.
|
||||
output_offset -= kPointerSize;
|
||||
input_offset -= kPointerSize;
|
||||
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
|
||||
output_frame->SetFrameSlot(output_offset, value);
|
||||
if (FLAG_trace_deopt) {
|
||||
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
|
||||
top_address + output_offset, output_offset, value, height - 1);
|
||||
}
|
||||
|
||||
ASSERT(0 == output_offset);
|
||||
|
||||
Builtins* builtins = isolate_->builtins();
|
||||
Code* adaptor_trampoline =
|
||||
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
|
||||
uint32_t pc = reinterpret_cast<uint32_t>(
|
||||
adaptor_trampoline->instruction_start() +
|
||||
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
|
||||
output_frame->SetPc(pc);
|
||||
}
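// Illustrative sketch, not part of this patch: the fixed slots that
// DoComputeArgumentsAdaptorFrame writes, in memory order from the frame top
// (lowest address) upward on a 32-bit target. The struct is hypothetical; the
// slot order and meanings are taken from the code above.
#include <cstdint>

struct ArgumentsAdaptorFrameImage {
  uint32_t argc;        // Smi(height - 1): the actual argument count.
  uint32_t function;    // The JSFunction whose arguments are being adapted.
  uint32_t context;     // Smi sentinel StackFrame::ARGUMENTS_ADAPTOR, not a real context.
  uint32_t callers_fp;  // Saved fp; the address of this slot becomes the frame's fp.
  uint32_t callers_pc;  // Return address into the caller's frame.
  // ...followed by the `height` translated parameter slots.
};
// ArgumentsAdaptorFrameConstants::kFrameSize (kFixedFrameSize + kPointerSize)
// covers exactly these five fixed slots.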
|
||||
|
||||
|
||||
// This code is very similar to ia32/arm code, but relies on register names
|
||||
// (fp, sp) and how the frame is laid out.
|
||||
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
|
||||
int frame_index) {
|
||||
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
|
||||
int frame_index) {
|
||||
// Read the ast node id, function, and frame height for this output frame.
|
||||
Translation::Opcode opcode =
|
||||
static_cast<Translation::Opcode>(iterator->Next());
|
||||
USE(opcode);
|
||||
ASSERT(Translation::FRAME == opcode);
|
||||
int node_id = iterator->Next();
|
||||
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
|
||||
unsigned height = iterator->Next();
|
||||
@ -377,9 +476,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
|
||||
// Allocate and store the output frame description.
|
||||
FrameDescription* output_frame =
|
||||
new(output_frame_size) FrameDescription(output_frame_size, function);
|
||||
#ifdef DEBUG
|
||||
output_frame->SetKind(Code::FUNCTION);
|
||||
#endif
|
||||
output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
|
||||
|
||||
bool is_bottommost = (0 == frame_index);
|
||||
bool is_topmost = (output_count_ - 1 == frame_index);
|
||||
|
5
deps/v8/src/mips/frames-mips.h
vendored
@ -195,6 +195,9 @@ class ExitFrameConstants : public AllStatic {

class StandardFrameConstants : public AllStatic {
public:
// Fixed part of the frame consists of return address, caller fp,
// context and function.
static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@ -230,6 +233,8 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
static const int kFrameSize =
StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
19
deps/v8/src/mips/full-codegen-mips.cc
vendored
@ -2403,9 +2403,22 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));

Handle<Code> construct_builtin =
isolate()->builtins()->JSConstructCall();
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
// Record call targets in unoptimized code, but not in the snapshot.
CallFunctionFlags flags;
if (!Serializer::enabled()) {
flags = RECORD_CALL_TARGET;
Handle<Object> uninitialized =
TypeFeedbackCells::UninitializedSentinel(isolate());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
RecordTypeFeedbackCell(expr->id(), cell);
__ li(a2, Operand(cell));
} else {
flags = NO_CALL_FUNCTION_FLAGS;
}

CallConstructStub stub(flags);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
context()->Plug(v0);
}
69
deps/v8/src/mips/lithium-codegen-mips.cc
vendored
@ -447,7 +447,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
|
||||
|
||||
WriteTranslation(environment->outer(), translation);
|
||||
int closure_id = DefineDeoptimizationLiteral(environment->closure());
|
||||
translation->BeginFrame(environment->ast_id(), closure_id, height);
|
||||
if (environment->is_arguments_adaptor()) {
|
||||
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
|
||||
} else {
|
||||
translation->BeginJSFrame(environment->ast_id(), closure_id, height);
|
||||
}
|
||||
for (int i = 0; i < translation_size; ++i) {
|
||||
LOperand* value = environment->values()->at(i);
|
||||
// spilled_registers_ and spilled_double_registers_ are either
|
||||
@ -573,10 +577,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
|
||||
// |>------------ translation_size ------------<|
|
||||
|
||||
int frame_count = 0;
|
||||
int jsframe_count = 0;
|
||||
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
|
||||
++frame_count;
|
||||
if (!e->is_arguments_adaptor()) {
|
||||
++jsframe_count;
|
||||
}
|
||||
}
|
||||
Translation translation(&translations_, frame_count);
|
||||
Translation translation(&translations_, frame_count, jsframe_count);
|
||||
WriteTranslation(environment, &translation);
|
||||
int deoptimization_index = deoptimizations_.length();
|
||||
int pc_offset = masm()->pc_offset();
|
||||
@ -3269,9 +3277,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
|
||||
ASSERT(ToRegister(instr->InputAt(0)).is(a1));
|
||||
ASSERT(ToRegister(instr->result()).is(v0));
|
||||
|
||||
Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
|
||||
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
|
||||
__ li(a0, Operand(instr->arity()));
|
||||
CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
|
||||
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
|
||||
}
|
||||
|
||||
|
||||
@ -3706,13 +3714,12 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
||||
LNumberTagI* instr_;
|
||||
};
|
||||
|
||||
LOperand* input = instr->InputAt(0);
|
||||
ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
||||
Register reg = ToRegister(input);
|
||||
Register src = ToRegister(instr->InputAt(0));
|
||||
Register dst = ToRegister(instr->result());
|
||||
Register overflow = scratch0();
|
||||
|
||||
DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
|
||||
__ SmiTagCheckOverflow(reg, overflow);
|
||||
__ SmiTagCheckOverflow(dst, src, overflow);
|
||||
__ BranchOnOverflow(deferred->entry(), overflow);
|
||||
__ bind(deferred->exit());
|
||||
}
|
||||
@ -3720,7 +3727,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
||||
|
||||
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
||||
Label slow;
|
||||
Register reg = ToRegister(instr->InputAt(0));
|
||||
Register src = ToRegister(instr->InputAt(0));
|
||||
Register dst = ToRegister(instr->result());
|
||||
FPURegister dbl_scratch = double_scratch0();
|
||||
|
||||
// Preserve the value of all registers.
|
||||
@ -3730,14 +3738,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
||||
// disagree. Try to allocate a heap number in new space and store
|
||||
// the value in there. If that fails, call the runtime system.
|
||||
Label done;
|
||||
__ SmiUntag(reg);
|
||||
__ Xor(reg, reg, Operand(0x80000000));
|
||||
__ mtc1(reg, dbl_scratch);
|
||||
if (dst.is(src)) {
|
||||
__ SmiUntag(src, dst);
|
||||
__ Xor(src, src, Operand(0x80000000));
|
||||
}
|
||||
__ mtc1(src, dbl_scratch);
|
||||
__ cvt_d_w(dbl_scratch, dbl_scratch);
|
||||
if (FLAG_inline_new) {
|
||||
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
|
||||
__ AllocateHeapNumber(t1, a3, t0, t2, &slow);
|
||||
if (!reg.is(t1)) __ mov(reg, t1);
|
||||
__ Move(dst, t1);
|
||||
__ Branch(&done);
|
||||
}
|
||||
|
||||
@ -3747,15 +3757,15 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
||||
// TODO(3095996): Put a valid pointer value in the stack slot where the result
|
||||
// register is stored, as this register is in the pointer map, but contains an
|
||||
// integer value.
|
||||
__ StoreToSafepointRegisterSlot(zero_reg, reg);
|
||||
__ StoreToSafepointRegisterSlot(zero_reg, dst);
|
||||
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
|
||||
if (!reg.is(v0)) __ mov(reg, v0);
|
||||
__ Move(dst, v0);
|
||||
|
||||
// Done. Put the value in dbl_scratch into the value of the allocated heap
|
||||
// number.
|
||||
__ bind(&done);
|
||||
__ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
||||
__ StoreToSafepointRegisterSlot(reg, reg);
|
||||
__ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
|
||||
__ StoreToSafepointRegisterSlot(dst, dst);
|
||||
}
|
||||
|
||||
|
||||
@ -3802,25 +3812,23 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
|
||||
|
||||
|
||||
void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
||||
LOperand* input = instr->InputAt(0);
|
||||
ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
||||
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
|
||||
__ SmiTag(ToRegister(input));
|
||||
__ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
|
||||
}
|
||||
|
||||
|
||||
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
|
||||
Register scratch = scratch0();
|
||||
LOperand* input = instr->InputAt(0);
|
||||
ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
||||
Register input = ToRegister(instr->InputAt(0));
|
||||
Register result = ToRegister(instr->result());
|
||||
if (instr->needs_check()) {
|
||||
STATIC_ASSERT(kHeapObjectTag == 1);
|
||||
// If the input is a HeapObject, value of scratch won't be zero.
|
||||
__ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
|
||||
__ SmiUntag(ToRegister(input));
|
||||
__ And(scratch, input, Operand(kHeapObjectTag));
|
||||
__ SmiUntag(result, input);
|
||||
DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
|
||||
} else {
|
||||
__ SmiUntag(ToRegister(input));
|
||||
__ SmiUntag(result, input);
|
||||
}
|
||||
}
|
||||
|
||||
@ -3835,7 +3843,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
|
||||
Label load_smi, heap_number, done;
|
||||
|
||||
// Smi check.
|
||||
__ JumpIfSmi(input_reg, &load_smi);
|
||||
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
|
||||
|
||||
// Heap number map check.
|
||||
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
||||
@ -3868,10 +3876,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
|
||||
|
||||
// Smi to double register conversion
|
||||
__ bind(&load_smi);
|
||||
__ SmiUntag(input_reg); // Untag smi before converting to float.
|
||||
__ mtc1(input_reg, result_reg);
|
||||
// scratch: untagged value of input_reg
|
||||
__ mtc1(scratch, result_reg);
|
||||
__ cvt_d_w(result_reg, result_reg);
|
||||
__ SmiTag(input_reg); // Retag smi.
|
||||
__ bind(&done);
|
||||
}
|
||||
|
||||
@ -4152,7 +4159,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
||||
Label is_smi, done, heap_number;
|
||||
|
||||
// Both smi and heap number cases are handled.
|
||||
__ JumpIfSmi(input_reg, &is_smi);
|
||||
__ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
|
||||
|
||||
// Check for heap number
|
||||
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
||||
@ -4172,9 +4179,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
||||
__ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
|
||||
__ jmp(&done);
|
||||
|
||||
// smi
|
||||
__ bind(&is_smi);
|
||||
__ SmiUntag(scratch, input_reg);
|
||||
__ ClampUint8(result_reg, scratch);
|
||||
|
||||
__ bind(&done);
|
||||
|
46
deps/v8/src/mips/lithium-mips.cc
vendored
@ -581,11 +581,6 @@ void LChunkBuilder::Abort(const char* format, ...) {
|
||||
}
|
||||
|
||||
|
||||
LRegister* LChunkBuilder::ToOperand(Register reg) {
|
||||
return LRegister::Create(Register::ToAllocationIndex(reg));
|
||||
}
|
||||
|
||||
|
||||
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
|
||||
return new LUnallocated(LUnallocated::FIXED_REGISTER,
|
||||
Register::ToAllocationIndex(reg));
|
||||
@ -676,7 +671,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
|
||||
HInstruction* instr = HInstruction::cast(value);
|
||||
VisitInstruction(instr);
|
||||
}
|
||||
allocator_->RecordUse(value, operand);
|
||||
operand->set_virtual_register(value->id());
|
||||
return operand;
|
||||
}
|
||||
|
||||
@ -684,18 +679,12 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
|
||||
LUnallocated* result) {
|
||||
allocator_->RecordDefinition(current_instruction_, result);
|
||||
result->set_virtual_register(current_instruction_->id());
|
||||
instr->set_result(result);
|
||||
return instr;
|
||||
}
|
||||
|
||||
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
|
||||
return Define(instr, new LUnallocated(LUnallocated::NONE));
|
||||
}
|
||||
|
||||
|
||||
template<int I, int T>
|
||||
LInstruction* LChunkBuilder::DefineAsRegister(
|
||||
LTemplateInstruction<1, I, T>* instr) {
|
||||
@ -802,21 +791,22 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
|
||||
|
||||
LUnallocated* LChunkBuilder::TempRegister() {
|
||||
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
|
||||
allocator_->RecordTemporary(operand);
|
||||
operand->set_virtual_register(allocator_->GetVirtualRegister());
|
||||
if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(Register reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
allocator_->RecordTemporary(operand);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
|
||||
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
|
||||
LUnallocated* operand = ToUnallocated(reg);
|
||||
allocator_->RecordTemporary(operand);
|
||||
ASSERT(operand->HasFixedPolicy());
|
||||
return operand;
|
||||
}
|
||||
|
||||
@ -1005,14 +995,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
|
||||
LEnvironment* outer =
|
||||
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
|
||||
int ast_id = hydrogen_env->ast_id();
|
||||
ASSERT(ast_id != AstNode::kNoNumber);
|
||||
ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
|
||||
int value_count = hydrogen_env->length();
|
||||
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
|
||||
hydrogen_env->is_arguments_adaptor(),
|
||||
ast_id,
|
||||
hydrogen_env->parameter_count(),
|
||||
argument_count_,
|
||||
value_count,
|
||||
outer);
|
||||
int argument_index = *argument_index_accumulator;
|
||||
for (int i = 0; i < value_count; ++i) {
|
||||
if (hydrogen_env->is_special_index(i)) continue;
|
||||
|
||||
@ -1021,13 +1013,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
|
||||
if (value->IsArgumentsObject()) {
|
||||
op = NULL;
|
||||
} else if (value->IsPushArgument()) {
|
||||
op = new LArgument((*argument_index_accumulator)++);
|
||||
op = new LArgument(argument_index++);
|
||||
} else {
|
||||
op = UseAny(value);
|
||||
}
|
||||
result->AddValue(op, value->representation());
|
||||
}
|
||||
|
||||
if (!hydrogen_env->is_arguments_adaptor()) {
|
||||
*argument_index_accumulator = argument_index;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1627,11 +1623,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
return AssignEnvironment(DefineAsRegister(res));
|
||||
} else {
|
||||
ASSERT(to.IsInteger32());
|
||||
LOperand* value = UseRegister(instr->value());
|
||||
LOperand* value = UseRegisterAtStart(instr->value());
|
||||
bool needs_check = !instr->value()->type().IsSmi();
|
||||
LInstruction* res = NULL;
|
||||
if (!needs_check) {
|
||||
res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
|
||||
res = DefineAsRegister(new LSmiUntag(value, needs_check));
|
||||
} else {
|
||||
LOperand* temp1 = TempRegister();
|
||||
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
|
||||
@ -1667,12 +1663,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
|
||||
} else if (from.IsInteger32()) {
|
||||
if (to.IsTagged()) {
|
||||
HValue* val = instr->value();
|
||||
LOperand* value = UseRegister(val);
|
||||
LOperand* value = UseRegisterAtStart(val);
|
||||
if (val->HasRange() && val->range()->IsInSmiRange()) {
|
||||
return DefineSameAsFirst(new LSmiTag(value));
|
||||
return DefineAsRegister(new LSmiTag(value));
|
||||
} else {
|
||||
LNumberTagI* result = new LNumberTagI(value);
|
||||
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
|
||||
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
|
||||
}
|
||||
} else {
|
||||
ASSERT(to.IsDouble());
|
||||
@ -2247,6 +2243,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
|
||||
HEnvironment* outer = current_block_->last_environment();
|
||||
HConstant* undefined = graph()->GetConstantUndefined();
|
||||
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
|
||||
instr->arguments_count(),
|
||||
instr->function(),
|
||||
undefined,
|
||||
instr->call_kind());
|
||||
@ -2257,7 +2254,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
|
||||
|
||||
|
||||
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
|
||||
HEnvironment* outer = current_block_->last_environment()->outer();
|
||||
HEnvironment* outer = current_block_->last_environment()->
|
||||
DiscardInlined(false);
|
||||
current_block_->UpdateEnvironment(outer);
|
||||
return NULL;
|
||||
}
|
||||
|
3
deps/v8/src/mips/lithium-mips.h
vendored
@ -2161,7 +2161,6 @@ class LChunkBuilder BASE_EMBEDDED {
void Abort(const char* format, ...);

// Methods for getting operands for Use / Define / Temp.
LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
|
||||
|
||||
@ -2211,8 +2210,6 @@ class LChunkBuilder BASE_EMBEDDED {
|
||||
template<int I, int T>
|
||||
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
|
||||
LUnallocated* result);
|
||||
template<int I, int T>
|
||||
LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
|
||||
template<int I, int T>
|
||||
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
|
||||
template<int I, int T>
|
||||
|
83
deps/v8/src/mips/macro-assembler-mips.cc
vendored
@ -4279,6 +4279,31 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LoadGlobalInitialConstructedArrayMap(
|
||||
Register function_in, Register scratch, Register map_out) {
|
||||
ASSERT(!function_in.is(map_out));
|
||||
Label done;
|
||||
lw(map_out, FieldMemOperand(function_in,
|
||||
JSFunction::kPrototypeOrInitialMapOffset));
|
||||
if (!FLAG_smi_only_arrays) {
|
||||
// Load the global or builtins object from the current context.
|
||||
lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
|
||||
|
||||
// Check that the function's map is same as the cached map.
|
||||
lw(at, MemOperand(
|
||||
scratch, Context::SlotOffset(Context::SMI_JS_ARRAY_MAP_INDEX)));
|
||||
Branch(&done, ne, map_out, Operand(at));
|
||||
|
||||
// Use the cached transitioned map.
|
||||
lw(map_out,
|
||||
MemOperand(scratch,
|
||||
Context::SlotOffset(Context::OBJECT_JS_ARRAY_MAP_INDEX)));
|
||||
}
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
|
||||
// Load the global or builtins object from the current context.
|
||||
lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
||||
@ -4492,6 +4517,64 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
|
||||
ASSERT(!reg.is(overflow));
|
||||
mov(overflow, reg); // Save original value.
|
||||
SmiTag(reg);
|
||||
xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::SmiTagCheckOverflow(Register dst,
|
||||
Register src,
|
||||
Register overflow) {
|
||||
if (dst.is(src)) {
|
||||
// Fall back to slower case.
|
||||
SmiTagCheckOverflow(dst, overflow);
|
||||
} else {
|
||||
ASSERT(!dst.is(src));
|
||||
ASSERT(!dst.is(overflow));
|
||||
ASSERT(!src.is(overflow));
|
||||
SmiTag(dst, src);
|
||||
xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
|
||||
}
|
||||
}
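// Illustrative sketch, not part of this patch: what the tag-and-check above
// computes, written as a plain C++ helper with a hypothetical name. Tagging a
// smi shifts the value left by one, and the sign of (value ^ 2 * value) flips
// exactly when the value does not fit in 31 bits.
#include <cstdint>

inline bool SmiTagCheckOverflowSketch(int32_t value, int32_t* tagged) {
  int32_t doubled = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  if ((value ^ doubled) < 0) return false;  // Sign changed: out of smi range.
  *tagged = doubled;                        // Tag bit (LSB) is 0 for smis.
  return true;
}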
|
||||
|
||||
|
||||
void MacroAssembler::UntagAndJumpIfSmi(Register dst,
|
||||
Register src,
|
||||
Label* smi_case) {
|
||||
JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
|
||||
SmiUntag(dst, src);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
|
||||
Register src,
|
||||
Label* non_smi_case) {
|
||||
JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
|
||||
SmiUntag(dst, src);
|
||||
}
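// Illustrative sketch, not part of this patch: the observable effect of
// UntagAndJumpIfSmi above. The SmiUntag sits in the branch delay slot, so the
// destination is written whether or not the branch is taken; only the smi
// case goes on to use it. Hypothetical helper name, assuming arithmetic
// right shift on signed values:
#include <cstdint>

inline bool UntagAndCheckSmi(int32_t src, int32_t* dst) {
  *dst = src >> 1;        // Untag: always performed (the delay slot).
  return (src & 1) == 0;  // kSmiTag == 0: LSB clear means src was a smi.
}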
|
||||
|
||||
void MacroAssembler::JumpIfSmi(Register value,
|
||||
Label* smi_label,
|
||||
Register scratch,
|
||||
BranchDelaySlot bd) {
|
||||
ASSERT_EQ(0, kSmiTag);
|
||||
andi(scratch, value, kSmiTagMask);
|
||||
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
|
||||
}
|
||||
|
||||
void MacroAssembler::JumpIfNotSmi(Register value,
|
||||
Label* not_smi_label,
|
||||
Register scratch,
|
||||
BranchDelaySlot bd) {
|
||||
ASSERT_EQ(0, kSmiTag);
|
||||
andi(scratch, value, kSmiTagMask);
|
||||
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
|
||||
Register reg2,
|
||||
Label* on_not_both_smi) {
|
||||
|
49
deps/v8/src/mips/macro-assembler-mips.h
vendored
@ -772,6 +772,11 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
void LoadContext(Register dst, int context_chain_length);
|
||||
|
||||
// Load the initial map for new Arrays of a given type.
|
||||
void LoadGlobalInitialConstructedArrayMap(Register function_in,
|
||||
Register scratch,
|
||||
Register map_out);
|
||||
|
||||
void LoadGlobalFunction(int index, Register function);
|
||||
|
||||
// Load the initial map from the global function. The registers
|
||||
@ -1217,24 +1222,13 @@ class MacroAssembler: public Assembler {
|
||||
// -------------------------------------------------------------------------
|
||||
// Smi utilities.
|
||||
|
||||
// Try to convert int32 to smi. If the value is too large, preserve
|
||||
// the original value and jump to not_a_smi. Destroys scratch and
|
||||
// sets flags.
|
||||
// This is only used by crankshaft atm so it is unimplemented on MIPS.
|
||||
void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
|
||||
UNIMPLEMENTED_MIPS();
|
||||
}
|
||||
|
||||
void SmiTag(Register reg) {
|
||||
Addu(reg, reg, reg);
|
||||
}
|
||||
|
||||
// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
|
||||
void SmiTagCheckOverflow(Register reg, Register overflow) {
|
||||
mov(overflow, reg); // Save original value.
|
||||
addu(reg, reg, reg);
|
||||
xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
|
||||
}
|
||||
void SmiTagCheckOverflow(Register reg, Register overflow);
|
||||
void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
|
||||
|
||||
void SmiTag(Register dst, Register src) {
|
||||
Addu(dst, src, src);
|
||||
@ -1248,22 +1242,25 @@ class MacroAssembler: public Assembler {
|
||||
sra(dst, src, kSmiTagSize);
|
||||
}
|
||||
|
||||
// Untag the source value into destination and jump if source is a smi.
|
||||
// Source and destination can be the same register.
|
||||
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
|
||||
|
||||
// Untag the source value into destination and jump if source is not a smi.
|
||||
// Source and destination can be the same register.
|
||||
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
|
||||
|
||||
// Jump if the register contains a smi.
|
||||
inline void JumpIfSmi(Register value, Label* smi_label,
|
||||
Register scratch = at,
|
||||
BranchDelaySlot bd = PROTECT) {
|
||||
ASSERT_EQ(0, kSmiTag);
|
||||
andi(scratch, value, kSmiTagMask);
|
||||
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
|
||||
}
|
||||
void JumpIfSmi(Register value,
|
||||
Label* smi_label,
|
||||
Register scratch = at,
|
||||
BranchDelaySlot bd = PROTECT);
|
||||
|
||||
// Jump if the register contains a non-smi.
|
||||
inline void JumpIfNotSmi(Register value, Label* not_smi_label,
|
||||
Register scratch = at) {
|
||||
ASSERT_EQ(0, kSmiTag);
|
||||
andi(scratch, value, kSmiTagMask);
|
||||
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
|
||||
}
|
||||
void JumpIfNotSmi(Register value,
|
||||
Label* not_smi_label,
|
||||
Register scratch = at,
|
||||
BranchDelaySlot bd = PROTECT);
|
||||
|
||||
// Jump if either of the registers contain a non-smi.
|
||||
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
|
||||
|
2
deps/v8/src/mksnapshot.cc
vendored
@ -312,7 +312,7 @@ int main(int argc, char** argv) {
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
context.Dispose();
CppByteSink sink(argv[1]);
132
deps/v8/src/objects-inl.h
vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -45,7 +45,7 @@
|
||||
#include "spaces.h"
|
||||
#include "store-buffer.h"
|
||||
#include "v8memory.h"
|
||||
|
||||
#include "factory.h"
|
||||
#include "incremental-marking.h"
|
||||
|
||||
namespace v8 {
|
||||
@ -554,6 +554,16 @@ bool Object::IsDeoptimizationOutputData() {
|
||||
}
|
||||
|
||||
|
||||
bool Object::IsTypeFeedbackCells() {
|
||||
if (!IsFixedArray()) return false;
|
||||
// There's actually no way to see the difference between a fixed array and
|
||||
// a cache cells array. Since this is used for asserts we can check that
|
||||
// the length is plausible though.
|
||||
if (FixedArray::cast(this)->length() % 2 != 0) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
bool Object::IsContext() {
|
||||
if (Object::IsHeapObject()) {
|
||||
Map* map = HeapObject::cast(this)->map();
|
||||
@ -1290,6 +1300,29 @@ MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* JSObject::GetElementsTransitionMap(Isolate* isolate,
|
||||
ElementsKind to_kind) {
|
||||
Map* current_map = map();
|
||||
ElementsKind from_kind = current_map->elements_kind();
|
||||
|
||||
if (from_kind == to_kind) return current_map;
|
||||
|
||||
Context* global_context = isolate->context()->global_context();
|
||||
if (current_map == global_context->smi_js_array_map()) {
|
||||
if (to_kind == FAST_ELEMENTS) {
|
||||
return global_context->object_js_array_map();
|
||||
} else {
|
||||
if (to_kind == FAST_DOUBLE_ELEMENTS) {
|
||||
return global_context->double_js_array_map();
|
||||
} else {
|
||||
ASSERT(to_kind == DICTIONARY_ELEMENTS);
|
||||
}
|
||||
}
|
||||
}
|
||||
return GetElementsTransitionMapSlow(to_kind);
|
||||
}
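// Illustrative sketch, not part of this patch: the fast path above reuses the
// transitioned JSArray maps cached on the global context when the receiver
// still has the cached smi-only map; anything else falls through to
// GetElementsTransitionMapSlow. Plain C++ model with invented names:
enum class ElementsKindSketch { kFastSmiOnly, kFastDouble, kFast, kDictionary };

struct GlobalContextMapCache {
  const void* smi_js_array_map;
  const void* double_js_array_map;
  const void* object_js_array_map;
};

inline const void* CachedTransitionMap(const GlobalContextMapCache& ctx,
                                       const void* current_map,
                                       ElementsKindSketch to_kind) {
  if (current_map != ctx.smi_js_array_map) return nullptr;  // Slow path.
  if (to_kind == ElementsKindSketch::kFast) return ctx.object_js_array_map;
  if (to_kind == ElementsKindSketch::kFastDouble) return ctx.double_js_array_map;
  return nullptr;  // e.g. DICTIONARY_ELEMENTS: no cached transition.
}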
|
||||
|
||||
|
||||
void JSObject::set_map_and_elements(Map* new_map,
|
||||
FixedArrayBase* value,
|
||||
WriteBarrierMode mode) {
|
||||
@ -1339,7 +1372,8 @@ MaybeObject* JSObject::ResetElements() {
|
||||
ElementsKind elements_kind = FLAG_smi_only_arrays
|
||||
? FAST_SMI_ONLY_ELEMENTS
|
||||
: FAST_ELEMENTS;
|
||||
MaybeObject* maybe_obj = GetElementsTransitionMap(elements_kind);
|
||||
MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
|
||||
elements_kind);
|
||||
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
|
||||
set_map(Map::cast(obj));
|
||||
initialize_elements();
|
||||
@ -1957,8 +1991,28 @@ bool DescriptorArray::IsProperty(int descriptor_number) {
|
||||
}
|
||||
|
||||
|
||||
bool DescriptorArray::IsTransition(int descriptor_number) {
|
||||
return IsTransitionType(GetType(descriptor_number));
|
||||
bool DescriptorArray::IsTransitionOnly(int descriptor_number) {
|
||||
switch (GetType(descriptor_number)) {
|
||||
case MAP_TRANSITION:
|
||||
case CONSTANT_TRANSITION:
|
||||
case ELEMENTS_TRANSITION:
|
||||
return true;
|
||||
case CALLBACKS: {
|
||||
Object* value = GetValue(descriptor_number);
|
||||
if (!value->IsAccessorPair()) return false;
|
||||
AccessorPair* accessors = AccessorPair::cast(value);
|
||||
return accessors->getter()->IsMap() && accessors->setter()->IsMap();
|
||||
}
|
||||
case NORMAL:
|
||||
case FIELD:
|
||||
case CONSTANT_FUNCTION:
|
||||
case HANDLER:
|
||||
case INTERCEPTOR:
|
||||
case NULL_DESCRIPTOR:
|
||||
return false;
|
||||
}
|
||||
UNREACHABLE(); // Keep the compiler happy.
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@ -2101,6 +2155,7 @@ CAST_ACCESSOR(FixedDoubleArray)
|
||||
CAST_ACCESSOR(DescriptorArray)
|
||||
CAST_ACCESSOR(DeoptimizationInputData)
|
||||
CAST_ACCESSOR(DeoptimizationOutputData)
|
||||
CAST_ACCESSOR(TypeFeedbackCells)
|
||||
CAST_ACCESSOR(SymbolTable)
|
||||
CAST_ACCESSOR(JSFunctionResultCache)
|
||||
CAST_ACCESSOR(NormalizedMapCache)
|
||||
@ -3878,6 +3933,36 @@ void JSFunction::set_initial_map(Map* value) {
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
|
||||
Map* initial_map) {
|
||||
Context* global_context = context()->global_context();
|
||||
Object* array_function =
|
||||
global_context->get(Context::ARRAY_FUNCTION_INDEX);
|
||||
if (array_function->IsJSFunction() &&
|
||||
this == JSFunction::cast(array_function)) {
|
||||
ASSERT(initial_map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
|
||||
|
||||
MaybeObject* maybe_map = initial_map->CopyDropTransitions();
|
||||
Map* new_double_map = NULL;
|
||||
if (!maybe_map->To<Map>(&new_double_map)) return maybe_map;
|
||||
new_double_map->set_elements_kind(FAST_DOUBLE_ELEMENTS);
|
||||
initial_map->AddElementsTransition(FAST_DOUBLE_ELEMENTS, new_double_map);
|
||||
|
||||
maybe_map = new_double_map->CopyDropTransitions();
|
||||
Map* new_object_map = NULL;
|
||||
if (!maybe_map->To<Map>(&new_object_map)) return maybe_map;
|
||||
new_object_map->set_elements_kind(FAST_ELEMENTS);
|
||||
new_double_map->AddElementsTransition(FAST_ELEMENTS, new_object_map);
|
||||
|
||||
global_context->set_smi_js_array_map(initial_map);
|
||||
global_context->set_double_js_array_map(new_double_map);
|
||||
global_context->set_object_js_array_map(new_object_map);
|
||||
}
|
||||
set_initial_map(initial_map);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
||||
bool JSFunction::has_initial_map() {
|
||||
return prototype_or_initial_map()->IsMap();
|
||||
}
|
||||
@ -4042,6 +4127,8 @@ INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
|
||||
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
|
||||
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
|
||||
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
|
||||
ACCESSORS(Code, type_feedback_cells, TypeFeedbackCells,
|
||||
kTypeFeedbackCellsOffset)
|
||||
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
|
||||
|
||||
|
||||
@ -4682,6 +4769,41 @@ MaybeObject* FixedDoubleArray::Copy() {
|
||||
}
|
||||
|
||||
|
||||
void TypeFeedbackCells::SetAstId(int index, Smi* id) {
|
||||
set(1 + index * 2, id);
|
||||
}
|
||||
|
||||
|
||||
Smi* TypeFeedbackCells::AstId(int index) {
|
||||
return Smi::cast(get(1 + index * 2));
|
||||
}
|
||||
|
||||
|
||||
void TypeFeedbackCells::SetCell(int index, JSGlobalPropertyCell* cell) {
|
||||
set(index * 2, cell);
|
||||
}
|
||||
|
||||
|
||||
JSGlobalPropertyCell* TypeFeedbackCells::Cell(int index) {
|
||||
return JSGlobalPropertyCell::cast(get(index * 2));
|
||||
}
|
||||
|
||||
|
||||
Handle<Object> TypeFeedbackCells::UninitializedSentinel(Isolate* isolate) {
|
||||
return isolate->factory()->the_hole_value();
|
||||
}
|
||||
|
||||
|
||||
Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
|
||||
return isolate->factory()->undefined_value();
|
||||
}
|
||||
|
||||
|
||||
Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
|
||||
return heap->raw_unchecked_the_hole_value();
|
||||
}
|
||||
|
||||
|
||||
Relocatable::Relocatable(Isolate* isolate) {
|
||||
ASSERT(isolate == Isolate::Current());
|
||||
isolate_ = isolate;
|
||||
|
8  deps/v8/src/objects-printer.cc  vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -569,7 +569,11 @@ void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
|
||||
HeapObject::PrintHeader(out, "FixedDoubleArray");
|
||||
PrintF(out, " - length: %d", length());
|
||||
for (int i = 0; i < length(); i++) {
|
||||
PrintF(out, "\n [%d]: %g", i, get_scalar(i));
|
||||
if (is_the_hole(i)) {
|
||||
PrintF(out, "\n [%d]: <the hole>", i);
|
||||
} else {
|
||||
PrintF(out, "\n [%d]: %g", i, get_scalar(i));
|
||||
}
|
||||
}
|
||||
PrintF(out, "\n");
|
||||
}
|
||||
|
4  deps/v8/src/objects-visiting-inl.h  vendored
@ -109,6 +109,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
|
||||
IteratePointer(v, kRelocationInfoOffset);
|
||||
IteratePointer(v, kHandlerTableOffset);
|
||||
IteratePointer(v, kDeoptimizationDataOffset);
|
||||
IteratePointer(v, kTypeFeedbackCellsOffset);
|
||||
|
||||
RelocIterator it(this, mode_mask);
|
||||
for (; !it.done(); it.next()) {
|
||||
@ -138,6 +139,9 @@ void Code::CodeIterateBody(Heap* heap) {
|
||||
StaticVisitor::VisitPointer(
|
||||
heap,
|
||||
reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
|
||||
StaticVisitor::VisitPointer(
|
||||
heap,
|
||||
reinterpret_cast<Object**>(this->address() + kTypeFeedbackCellsOffset));
|
||||
|
||||
RelocIterator it(this, mode_mask);
|
||||
for (; !it.done(); it.next()) {
|
||||
|
295  deps/v8/src/objects.cc  vendored
@ -1823,7 +1823,7 @@ MaybeObject* JSObject::ReplaceSlowProperty(String* name,
|
||||
int new_enumeration_index = 0; // 0 means "Use the next available index."
|
||||
if (old_index != -1) {
|
||||
// All calls to ReplaceSlowProperty have had all transitions removed.
|
||||
ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
|
||||
ASSERT(!dictionary->ContainsTransition(old_index));
|
||||
new_enumeration_index = dictionary->DetailsAt(old_index).index();
|
||||
}
|
||||
|
||||
@ -2456,12 +2456,12 @@ Handle<Map> JSObject::GetElementsTransitionMap(Handle<JSObject> object,
|
||||
ElementsKind to_kind) {
|
||||
Isolate* isolate = object->GetIsolate();
|
||||
CALL_HEAP_FUNCTION(isolate,
|
||||
object->GetElementsTransitionMap(to_kind),
|
||||
object->GetElementsTransitionMap(isolate, to_kind),
|
||||
Map);
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* JSObject::GetElementsTransitionMap(ElementsKind to_kind) {
|
||||
MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
|
||||
Map* current_map = map();
|
||||
ElementsKind from_kind = current_map->elements_kind();
|
||||
|
||||
@ -2503,9 +2503,9 @@ MaybeObject* JSObject::GetElementsTransitionMap(ElementsKind to_kind) {
|
||||
// Only remember the map transition if the object's map is NOT equal to the
|
||||
// global object_function's map and there is not an already existing
|
||||
// non-matching element transition.
|
||||
Context* global_context = GetIsolate()->context()->global_context();
|
||||
bool allow_map_transition = safe_to_add_transition &&
|
||||
(GetIsolate()->context()->global_context()->object_function()->map() !=
|
||||
map());
|
||||
(global_context->object_function()->map() != map());
|
||||
if (allow_map_transition) {
|
||||
MaybeObject* maybe_transition =
|
||||
current_map->AddElementsTransition(to_kind, new_map);
|
||||
@ -3578,7 +3578,8 @@ MaybeObject* JSObject::NormalizeElements() {
|
||||
// Set the new map first to satisfy the elements type assert in
|
||||
// set_elements().
|
||||
Object* new_map;
|
||||
MaybeObject* maybe = GetElementsTransitionMap(DICTIONARY_ELEMENTS);
|
||||
MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(),
|
||||
DICTIONARY_ELEMENTS);
|
||||
if (!maybe->ToObject(&new_map)) return maybe;
|
||||
set_map(Map::cast(new_map));
|
||||
set_elements(dictionary);
|
||||
@ -5728,7 +5729,7 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
|
||||
// Conversely, we filter after replacing, so replacing a transition and
|
||||
// removing all other transitions is not supported.
|
||||
bool remove_transitions = transition_flag == REMOVE_TRANSITIONS;
|
||||
ASSERT(remove_transitions == !descriptor->GetDetails().IsTransition());
|
||||
ASSERT(remove_transitions == !descriptor->ContainsTransition());
|
||||
ASSERT(descriptor->GetDetails().type() != NULL_DESCRIPTOR);
|
||||
|
||||
// Ensure the key is a symbol.
|
||||
@ -5737,29 +5738,18 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
|
||||
if (!maybe_result->ToObject(&result)) return maybe_result;
|
||||
}
|
||||
|
||||
int transitions = 0;
|
||||
int null_descriptors = 0;
|
||||
if (remove_transitions) {
|
||||
for (int i = 0; i < number_of_descriptors(); i++) {
|
||||
if (IsTransition(i)) transitions++;
|
||||
if (IsNullDescriptor(i)) null_descriptors++;
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < number_of_descriptors(); i++) {
|
||||
if (IsNullDescriptor(i)) null_descriptors++;
|
||||
}
|
||||
int new_size = 0;
|
||||
for (int i = 0; i < number_of_descriptors(); i++) {
|
||||
if (IsNullDescriptor(i)) continue;
|
||||
if (remove_transitions && IsTransitionOnly(i)) continue;
|
||||
new_size++;
|
||||
}
|
||||
int new_size = number_of_descriptors() - transitions - null_descriptors;
|
||||
|
||||
// If key is in descriptor, we replace it in-place when filtering.
|
||||
// Count a null descriptor for key as inserted, not replaced.
|
||||
int index = Search(descriptor->GetKey());
|
||||
const bool inserting = (index == kNotFound);
|
||||
const bool replacing = !inserting;
|
||||
const bool replacing = (index != kNotFound);
|
||||
bool keep_enumeration_index = false;
|
||||
if (inserting) {
|
||||
++new_size;
|
||||
}
|
||||
if (replacing) {
|
||||
// We are replacing an existing descriptor. We keep the enumeration
|
||||
// index of a visible property.
|
||||
@ -5774,6 +5764,8 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
|
||||
// a transition that will be replaced. Adjust count in this case.
|
||||
++new_size;
|
||||
}
|
||||
} else {
|
||||
++new_size;
|
||||
}
|
||||
|
||||
DescriptorArray* new_descriptors;
|
||||
@ -5788,7 +5780,7 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
|
||||
// Set the enumeration index in the descriptors and set the enumeration index
|
||||
// in the result.
|
||||
int enumeration_index = NextEnumerationIndex();
|
||||
if (!descriptor->GetDetails().IsTransition()) {
|
||||
if (!descriptor->ContainsTransition()) {
|
||||
if (keep_enumeration_index) {
|
||||
descriptor->SetEnumerationIndex(
|
||||
PropertyDetails(GetDetails(index)).index());
|
||||
@ -5811,7 +5803,7 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
|
||||
break;
|
||||
}
|
||||
if (IsNullDescriptor(from_index)) continue;
|
||||
if (remove_transitions && IsTransition(from_index)) continue;
|
||||
if (remove_transitions && IsTransitionOnly(from_index)) continue;
|
||||
new_descriptors->CopyFrom(to_index++, this, from_index, witness);
|
||||
}
|
||||
|
||||
@ -5820,7 +5812,7 @@ MaybeObject* DescriptorArray::CopyInsert(Descriptor* descriptor,
|
||||
|
||||
for (; from_index < number_of_descriptors(); from_index++) {
|
||||
if (IsNullDescriptor(from_index)) continue;
|
||||
if (remove_transitions && IsTransition(from_index)) continue;
|
||||
if (remove_transitions && IsTransitionOnly(from_index)) continue;
|
||||
new_descriptors->CopyFrom(to_index++, this, from_index, witness);
|
||||
}
|
||||
|
||||
@ -7221,7 +7213,9 @@ void String::PrintOn(FILE* file) {
|
||||
}
|
||||
|
||||
|
||||
void Map::CreateOneBackPointer(Map* target) {
|
||||
void Map::CreateOneBackPointer(Object* transition_target) {
|
||||
if (!transition_target->IsMap()) return;
|
||||
Map* target = Map::cast(transition_target);
|
||||
#ifdef DEBUG
|
||||
// Verify target.
|
||||
Object* source_prototype = prototype();
|
||||
@ -7243,86 +7237,131 @@ void Map::CreateOneBackPointer(Map* target) {
|
||||
void Map::CreateBackPointers() {
|
||||
DescriptorArray* descriptors = instance_descriptors();
|
||||
for (int i = 0; i < descriptors->number_of_descriptors(); i++) {
|
||||
if (descriptors->IsTransition(i)) {
|
||||
Object* object = reinterpret_cast<Object*>(descriptors->GetValue(i));
|
||||
if (object->IsMap()) {
|
||||
CreateOneBackPointer(reinterpret_cast<Map*>(object));
|
||||
} else {
|
||||
ASSERT(object->IsFixedArray());
|
||||
ASSERT(descriptors->GetType(i) == ELEMENTS_TRANSITION);
|
||||
FixedArray* array = reinterpret_cast<FixedArray*>(object);
|
||||
for (int i = 0; i < array->length(); ++i) {
|
||||
Map* target = reinterpret_cast<Map*>(array->get(i));
|
||||
if (!target->IsUndefined()) {
|
||||
CreateOneBackPointer(target);
|
||||
switch (descriptors->GetType(i)) {
|
||||
case MAP_TRANSITION:
|
||||
case CONSTANT_TRANSITION:
|
||||
CreateOneBackPointer(descriptors->GetValue(i));
|
||||
break;
|
||||
case ELEMENTS_TRANSITION: {
|
||||
Object* object = descriptors->GetValue(i);
|
||||
if (object->IsMap()) {
|
||||
CreateOneBackPointer(object);
|
||||
} else {
|
||||
FixedArray* array = FixedArray::cast(object);
|
||||
for (int i = 0; i < array->length(); ++i) {
|
||||
CreateOneBackPointer(array->get(i));
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CALLBACKS: {
|
||||
Object* object = descriptors->GetValue(i);
|
||||
if (object->IsAccessorPair()) {
|
||||
AccessorPair* accessors = AccessorPair::cast(object);
|
||||
CreateOneBackPointer(accessors->getter());
|
||||
CreateOneBackPointer(accessors->setter());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case NORMAL:
|
||||
case FIELD:
|
||||
case CONSTANT_FUNCTION:
|
||||
case HANDLER:
|
||||
case INTERCEPTOR:
|
||||
case NULL_DESCRIPTOR:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool Map::RestoreOneBackPointer(Object* object,
|
||||
Object* real_prototype,
|
||||
bool* keep_entry) {
|
||||
if (!object->IsMap()) return false;
|
||||
Map* map = Map::cast(object);
|
||||
if (Marking::MarkBitFrom(map).Get()) {
|
||||
*keep_entry = true;
|
||||
return false;
|
||||
}
|
||||
ASSERT(map->prototype() == this || map->prototype() == real_prototype);
|
||||
// Getter prototype() is read-only, set_prototype() has side effects.
|
||||
*RawField(map, Map::kPrototypeOffset) = real_prototype;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) {
|
||||
// Live DescriptorArray objects will be marked, so we must use
|
||||
// low-level accessors to get and modify their data.
|
||||
DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
|
||||
DescriptorArray* d = DescriptorArray::cast(
|
||||
*RawField(this, Map::kInstanceDescriptorsOrBitField3Offset));
|
||||
if (d->IsEmpty()) return;
|
||||
Smi* NullDescriptorDetails =
|
||||
PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi();
|
||||
FixedArray* contents = reinterpret_cast<FixedArray*>(
|
||||
FixedArray* contents = FixedArray::cast(
|
||||
d->get(DescriptorArray::kContentArrayIndex));
|
||||
ASSERT(contents->length() >= 2);
|
||||
for (int i = 0; i < contents->length(); i += 2) {
|
||||
// If the pair (value, details) is a map transition,
|
||||
// check if the target is live. If not, null the descriptor.
|
||||
// Also drop the back pointer for that map transition, so that this
|
||||
// map is not reached again by following a back pointer from a
|
||||
// non-live object.
|
||||
// If the pair (value, details) is a map transition, check if the target is
|
||||
// live. If not, null the descriptor. Also drop the back pointer for that
|
||||
// map transition, so that this map is not reached again by following a back
|
||||
// pointer from a non-live object.
|
||||
bool keep_entry = false;
|
||||
PropertyDetails details(Smi::cast(contents->get(i + 1)));
|
||||
if (IsTransitionType(details.type())) {
|
||||
Object* object = reinterpret_cast<Object*>(contents->get(i));
|
||||
if (object->IsMap()) {
|
||||
Map* target = reinterpret_cast<Map*>(object);
|
||||
ASSERT(target->IsHeapObject());
|
||||
MarkBit map_mark = Marking::MarkBitFrom(target);
|
||||
if (!map_mark.Get()) {
|
||||
ASSERT(target->IsMap());
|
||||
contents->set_unchecked(i + 1, NullDescriptorDetails);
|
||||
contents->set_null_unchecked(heap, i);
|
||||
ASSERT(target->prototype() == this ||
|
||||
target->prototype() == real_prototype);
|
||||
// Getter prototype() is read-only, set_prototype() has side effects.
|
||||
*RawField(target, Map::kPrototypeOffset) = real_prototype;
|
||||
}
|
||||
} else {
|
||||
ASSERT(object->IsFixedArray());
|
||||
ASSERT(details.type() == ELEMENTS_TRANSITION);
|
||||
FixedArray* array = reinterpret_cast<FixedArray*>(object);
|
||||
bool reachable_map_found = false;
|
||||
for (int j = 0; j < array->length(); ++j) {
|
||||
Map* target = reinterpret_cast<Map*>(array->get(j));
|
||||
ASSERT(target->IsHeapObject());
|
||||
MarkBit map_mark = Marking::MarkBitFrom(target);
|
||||
if (!map_mark.Get()) {
|
||||
ASSERT(target->IsMap());
|
||||
array->set_undefined(j);
|
||||
ASSERT(target->prototype() == this ||
|
||||
target->prototype() == real_prototype);
|
||||
// Getter prototype() is read-only, set_prototype() has side
|
||||
// effects.
|
||||
*RawField(target, Map::kPrototypeOffset) = real_prototype;
|
||||
} else if (target->IsMap()) {
|
||||
reachable_map_found = true;
|
||||
switch (details.type()) {
|
||||
case MAP_TRANSITION:
|
||||
case CONSTANT_TRANSITION:
|
||||
RestoreOneBackPointer(contents->get(i), real_prototype, &keep_entry);
|
||||
break;
|
||||
case ELEMENTS_TRANSITION: {
|
||||
Object* object = contents->get(i);
|
||||
if (object->IsMap()) {
|
||||
RestoreOneBackPointer(object, real_prototype, &keep_entry);
|
||||
} else {
|
||||
FixedArray* array = FixedArray::cast(object);
|
||||
for (int j = 0; j < array->length(); ++j) {
|
||||
if (RestoreOneBackPointer(array->get(j),
|
||||
real_prototype,
|
||||
&keep_entry)) {
|
||||
array->set_undefined(j);
|
||||
}
|
||||
}
|
||||
}
|
||||
// If no map was found, make sure the FixedArray also gets collected.
|
||||
if (!reachable_map_found) {
|
||||
contents->set_unchecked(i + 1, NullDescriptorDetails);
|
||||
contents->set_null_unchecked(heap, i);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case CALLBACKS: {
|
||||
Object* object = contents->get(i);
|
||||
if (object->IsAccessorPair()) {
|
||||
AccessorPair* accessors = AccessorPair::cast(object);
|
||||
if (RestoreOneBackPointer(accessors->getter(),
|
||||
real_prototype,
|
||||
&keep_entry)) {
|
||||
accessors->set_getter(heap->the_hole_value());
|
||||
}
|
||||
if (RestoreOneBackPointer(accessors->setter(),
|
||||
real_prototype,
|
||||
&keep_entry)) {
|
||||
accessors->set_setter(heap->the_hole_value());
|
||||
}
|
||||
} else {
|
||||
keep_entry = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case NORMAL:
|
||||
case FIELD:
|
||||
case CONSTANT_FUNCTION:
|
||||
case HANDLER:
|
||||
case INTERCEPTOR:
|
||||
case NULL_DESCRIPTOR:
|
||||
keep_entry = true;
|
||||
break;
|
||||
}
|
||||
// Make sure that an entry containing only dead transitions gets collected.
|
||||
// What we *really* want to do here is removing this entry completely, but
|
||||
// for technical reasons we can't do this, so we zero it out instead.
|
||||
if (!keep_entry) {
|
||||
contents->set_unchecked(i + 1, NullDescriptorDetails);
|
||||
contents->set_null_unchecked(heap, i);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -7445,11 +7484,19 @@ bool JSFunction::IsInlineable() {
|
||||
}
|
||||
|
||||
|
||||
Object* JSFunction::SetInstancePrototype(Object* value) {
|
||||
MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
|
||||
ASSERT(value->IsJSObject());
|
||||
Heap* heap = GetHeap();
|
||||
if (has_initial_map()) {
|
||||
initial_map()->set_prototype(value);
|
||||
// If the function has allocated the initial map
|
||||
// replace it with a copy containing the new prototype.
|
||||
Map* new_map;
|
||||
MaybeObject* maybe_new_map = initial_map()->CopyDropTransitions();
|
||||
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
|
||||
new_map->set_prototype(value);
|
||||
MaybeObject* maybe_object =
|
||||
set_initial_map_and_cache_transitions(new_map);
|
||||
if (maybe_object->IsFailure()) return maybe_object;
|
||||
} else {
|
||||
// Put the value in the initial map field until an initial map is
|
||||
// needed. At that point, a new initial map is created and the
|
||||
@ -8474,7 +8521,7 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
|
||||
ElementsKind elements_kind = has_fast_smi_only_elements
|
||||
? FAST_SMI_ONLY_ELEMENTS
|
||||
: FAST_ELEMENTS;
|
||||
MaybeObject* maybe = GetElementsTransitionMap(elements_kind);
|
||||
MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
|
||||
if (!maybe->ToObject(&object)) return maybe;
|
||||
new_map = Map::cast(object);
|
||||
}
|
||||
@ -8558,7 +8605,7 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
|
||||
|
||||
if (FLAG_trace_elements_transitions) {
|
||||
PrintElementsTransition(stdout, elements_kind, old_elements_raw,
|
||||
FAST_ELEMENTS, new_elements);
|
||||
GetElementsKind(), new_elements);
|
||||
}
|
||||
|
||||
// Update the length if necessary.
|
||||
@ -8585,7 +8632,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
|
||||
FixedDoubleArray* elems = FixedDoubleArray::cast(obj);
|
||||
|
||||
{ MaybeObject* maybe_obj =
|
||||
GetElementsTransitionMap(FAST_DOUBLE_ELEMENTS);
|
||||
GetElementsTransitionMap(heap->isolate(), FAST_DOUBLE_ELEMENTS);
|
||||
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
|
||||
}
|
||||
Map* new_map = Map::cast(obj);
|
||||
@ -9395,7 +9442,8 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
|
||||
}
|
||||
// Change elements kind from SMI_ONLY to generic FAST if necessary.
|
||||
if (HasFastSmiOnlyElements() && !value->IsSmi()) {
|
||||
MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
|
||||
MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
|
||||
FAST_ELEMENTS);
|
||||
Map* new_map;
|
||||
if (!maybe_new_map->To<Map>(&new_map)) return maybe_new_map;
|
||||
set_map(new_map);
|
||||
@ -9805,9 +9853,24 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
|
||||
}
|
||||
|
||||
|
||||
MUST_USE_RESULT MaybeObject* JSObject::TransitionElementsKind(
|
||||
ElementsKind to_kind) {
|
||||
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
|
||||
ElementsKind from_kind = map()->elements_kind();
|
||||
|
||||
Isolate* isolate = GetIsolate();
|
||||
if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
|
||||
(to_kind == FAST_ELEMENTS ||
|
||||
elements() == isolate->heap()->empty_fixed_array())) {
|
||||
MaybeObject* maybe_new_map = GetElementsTransitionMap(isolate, to_kind);
|
||||
Map* new_map;
|
||||
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
|
||||
set_map(new_map);
|
||||
if (FLAG_trace_elements_transitions) {
|
||||
FixedArrayBase* elms = FixedArrayBase::cast(elements());
|
||||
PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
FixedArrayBase* elms = FixedArrayBase::cast(elements());
|
||||
uint32_t capacity = static_cast<uint32_t>(elms->length());
|
||||
uint32_t length = capacity;
|
||||
@ -9823,18 +9886,6 @@ MUST_USE_RESULT MaybeObject* JSObject::TransitionElementsKind(
|
||||
}
|
||||
}
|
||||
|
||||
if ((from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) ||
|
||||
(length == 0)) {
|
||||
MaybeObject* maybe_new_map = GetElementsTransitionMap(to_kind);
|
||||
Map* new_map;
|
||||
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
|
||||
if (FLAG_trace_elements_transitions) {
|
||||
PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
|
||||
}
|
||||
set_map(new_map);
|
||||
return this;
|
||||
}
|
||||
|
||||
if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
|
||||
to_kind == FAST_DOUBLE_ELEMENTS) {
|
||||
MaybeObject* maybe_result =
|
||||
@ -11073,6 +11124,31 @@ int StringDictionary::FindEntry(String* key) {
|
||||
}
|
||||
|
||||
|
||||
bool StringDictionary::ContainsTransition(int entry) {
|
||||
switch (DetailsAt(entry).type()) {
|
||||
case MAP_TRANSITION:
|
||||
case CONSTANT_TRANSITION:
|
||||
case ELEMENTS_TRANSITION:
|
||||
return true;
|
||||
case CALLBACKS: {
|
||||
Object* value = ValueAt(entry);
|
||||
if (!value->IsAccessorPair()) return false;
|
||||
AccessorPair* accessors = AccessorPair::cast(value);
|
||||
return accessors->getter()->IsMap() || accessors->setter()->IsMap();
|
||||
}
|
||||
case NORMAL:
|
||||
case FIELD:
|
||||
case CONSTANT_FUNCTION:
|
||||
case HANDLER:
|
||||
case INTERCEPTOR:
|
||||
case NULL_DESCRIPTOR:
|
||||
return false;
|
||||
}
|
||||
UNREACHABLE(); // Keep the compiler happy.
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
template<typename Shape, typename Key>
|
||||
MaybeObject* HashTable<Shape, Key>::Rehash(HashTable* new_table, Key key) {
|
||||
ASSERT(NumberOfElements() < new_table->Capacity());
|
||||
@ -11401,7 +11477,8 @@ MaybeObject* JSObject::PrepareElementsForSort(uint32_t limit) {
|
||||
// Convert to fast elements.
|
||||
|
||||
Object* obj;
|
||||
{ MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
|
||||
{ MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
|
||||
FAST_ELEMENTS);
|
||||
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
|
||||
}
|
||||
Map* new_map = Map::cast(obj);
|
||||
|
77  deps/v8/src/objects.h  vendored
@ -793,6 +793,7 @@ class MaybeObject BASE_EMBEDDED {
|
||||
V(DescriptorArray) \
|
||||
V(DeoptimizationInputData) \
|
||||
V(DeoptimizationOutputData) \
|
||||
V(TypeFeedbackCells) \
|
||||
V(FixedArray) \
|
||||
V(FixedDoubleArray) \
|
||||
V(Context) \
|
||||
@ -1854,7 +1855,10 @@ class JSObject: public JSReceiver {
|
||||
// map and the ElementsKind set.
|
||||
static Handle<Map> GetElementsTransitionMap(Handle<JSObject> object,
|
||||
ElementsKind to_kind);
|
||||
MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
|
||||
inline MUST_USE_RESULT MaybeObject* GetElementsTransitionMap(
|
||||
Isolate* isolate,
|
||||
ElementsKind elements_kind);
|
||||
MUST_USE_RESULT MaybeObject* GetElementsTransitionMapSlow(
|
||||
ElementsKind elements_kind);
|
||||
|
||||
static Handle<Object> TransitionElementsKind(Handle<JSObject> object,
|
||||
@ -2408,7 +2412,7 @@ class DescriptorArray: public FixedArray {
|
||||
inline Object* GetCallbacksObject(int descriptor_number);
|
||||
inline AccessorDescriptor* GetCallbacks(int descriptor_number);
|
||||
inline bool IsProperty(int descriptor_number);
|
||||
inline bool IsTransition(int descriptor_number);
|
||||
inline bool IsTransitionOnly(int descriptor_number);
|
||||
inline bool IsNullDescriptor(int descriptor_number);
|
||||
inline bool IsDontEnum(int descriptor_number);
|
||||
|
||||
@ -3034,6 +3038,8 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> {
|
||||
// Find entry for key, otherwise return kNotFound. Optimized version of
|
||||
// HashTable::FindEntry.
|
||||
int FindEntry(String* key);
|
||||
|
||||
bool ContainsTransition(int entry);
|
||||
};
|
||||
|
||||
|
||||
@ -3977,8 +3983,44 @@ class DeoptimizationOutputData: public FixedArray {
|
||||
};
|
||||
|
||||
|
||||
class SafepointEntry;
|
||||
// Forward declaration.
|
||||
class JSGlobalPropertyCell;
|
||||
|
||||
// TypeFeedbackCells is a fixed array used to hold the association between
|
||||
// cache cells and AST ids for code generated by the full compiler.
|
||||
// The format of these objects is
|
||||
// [i * 2]: Global property cell of ith cache cell.
|
||||
// [i * 2 + 1]: Ast ID for ith cache cell.
|
||||
class TypeFeedbackCells: public FixedArray {
|
||||
public:
|
||||
int CellCount() { return length() / 2; }
|
||||
static int LengthOfFixedArray(int cell_count) { return cell_count * 2; }
|
||||
|
||||
// Accessors for AST ids associated with cache values.
|
||||
inline Smi* AstId(int index);
|
||||
inline void SetAstId(int index, Smi* id);
|
||||
|
||||
// Accessors for global property cells holding the cache values.
|
||||
inline JSGlobalPropertyCell* Cell(int index);
|
||||
inline void SetCell(int index, JSGlobalPropertyCell* cell);
|
||||
|
||||
// The object that indicates an uninitialized cache.
|
||||
static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
|
||||
|
||||
// The object that indicates a megamorphic state.
|
||||
static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
|
||||
|
||||
// A raw version of the uninitialized sentinel that's safe to read during
|
||||
// garbage collection (e.g., for patching the cache).
|
||||
static inline Object* RawUninitializedSentinel(Heap* heap);
|
||||
|
||||
// Casting.
|
||||
static inline TypeFeedbackCells* cast(Object* obj);
|
||||
};
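A minimal standalone model of the interleaved layout described in the comment above (slot 2*i holds the cache cell, slot 2*i + 1 the AST id). The types here are illustrative only, not V8 source:

// Standalone sketch (not V8 source) of the TypeFeedbackCells layout.
#include <cassert>
#include <vector>

struct Cell { int value; };

class TypeFeedbackCellsModel {
 public:
  explicit TypeFeedbackCellsModel(int cell_count) : slots_(2 * cell_count) {}
  int CellCount() const { return static_cast<int>(slots_.size()) / 2; }
  void SetCell(int index, Cell* cell) { slots_[2 * index].cell = cell; }
  void SetAstId(int index, int id)    { slots_[2 * index + 1].ast_id = id; }
  Cell* GetCell(int index) const      { return slots_[2 * index].cell; }
  int AstId(int index) const          { return slots_[2 * index + 1].ast_id; }

 private:
  // Each slot holds either a cell pointer or an AST id, mirroring how the
  // real object reuses plain FixedArray storage for both kinds of value.
  union Slot {
    Cell* cell;
    int ast_id;
  };
  std::vector<Slot> slots_;
};

int main() {
  Cell c0 = {0}, c1 = {1};
  TypeFeedbackCellsModel cells(2);
  cells.SetCell(0, &c0);  cells.SetAstId(0, 17);
  cells.SetCell(1, &c1);  cells.SetAstId(1, 42);
  assert(cells.CellCount() == 2);
  assert(cells.GetCell(1) == &c1 && cells.AstId(1) == 42);
  return 0;
}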
|
||||
|
||||
|
||||
// Forward declaration.
|
||||
class SafepointEntry;
|
||||
|
||||
// Code describes objects with on-the-fly generated machine code.
|
||||
class Code: public HeapObject {
|
||||
@ -4050,6 +4092,9 @@ class Code: public HeapObject {
|
||||
// [deoptimization_data]: Array containing data for deopt.
|
||||
DECL_ACCESSORS(deoptimization_data, FixedArray)
|
||||
|
||||
// [type_feedback_cells]: Array containing cache cells used for type feedback.
|
||||
DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
|
||||
|
||||
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
|
||||
// field does not have to be traced during garbage collection since
|
||||
// it is only used by the garbage collector itself.
|
||||
@ -4158,8 +4203,8 @@ class Code: public HeapObject {
|
||||
inline byte to_boolean_state();
|
||||
inline void set_to_boolean_state(byte value);
|
||||
|
||||
// For kind STUB, major_key == CallFunction, tells whether there is
|
||||
// a function cache in the instruction stream.
|
||||
// [has_function_cache]: For kind STUB tells whether a function
// cache is passed to the stub.
|
||||
inline bool has_function_cache();
|
||||
inline void set_has_function_cache(bool flag);
|
||||
|
||||
@ -4277,7 +4322,9 @@ class Code: public HeapObject {
|
||||
static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
|
||||
static const int kDeoptimizationDataOffset =
|
||||
kHandlerTableOffset + kPointerSize;
|
||||
static const int kGCMetadataOffset = kDeoptimizationDataOffset + kPointerSize;
|
||||
static const int kTypeFeedbackCellsOffset =
|
||||
kDeoptimizationDataOffset + kPointerSize;
|
||||
static const int kGCMetadataOffset = kTypeFeedbackCellsOffset + kPointerSize;
|
||||
static const int kFlagsOffset = kGCMetadataOffset + kPointerSize;
|
||||
|
||||
static const int kKindSpecificFlagsOffset = kFlagsOffset + kIntSize;
|
||||
@ -4319,8 +4366,8 @@ class Code: public HeapObject {
|
||||
// Flags layout. BitField<type, shift, size>.
|
||||
class ICStateField: public BitField<InlineCacheState, 0, 3> {};
|
||||
class TypeField: public BitField<PropertyType, 3, 4> {};
|
||||
class KindField: public BitField<Kind, 7, 4> {};
|
||||
class CacheHolderField: public BitField<InlineCacheHolderFlag, 11, 1> {};
|
||||
class CacheHolderField: public BitField<InlineCacheHolderFlag, 7, 1> {};
|
||||
class KindField: public BitField<Kind, 8, 4> {};
|
||||
class ExtraICStateField: public BitField<ExtraICState, 12, 2> {};
|
||||
class IsPregeneratedField: public BitField<bool, 14, 1> {};
|
||||
|
||||
@ -4328,6 +4375,7 @@ class Code: public HeapObject {
|
||||
static const int kArgumentsCountShift = 15;
|
||||
static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1);
|
||||
|
||||
// This constant should be encodable in an ARM instruction.
|
||||
static const int kFlagsNotUsedInLookup =
|
||||
TypeField::kMask | CacheHolderField::kMask;
|
||||
|
||||
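The repacked flags word above keeps the IC state in bits 0-2 and the property type in bits 3-6, moves the cache-holder flag down to bit 7 and the kind up to bits 8-11, with the extra IC state following at bits 12-13. A standalone sketch of the shift-and-mask encoding using those shifts and sizes (an illustrative helper, not V8's BitField template):

// Standalone sketch (not V8 source): packing/unpacking the Code flags word.
#include <cassert>
#include <stdint.h>

template <typename T, int kShift, int kSize>
struct Field {
  static uint32_t mask() { return ((1u << kSize) - 1u) << kShift; }
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & mask();
  }
  static T decode(uint32_t word) {
    return static_cast<T>((word & mask()) >> kShift);
  }
};

// Shifts and sizes as in the updated layout shown in the hunk above.
typedef Field<unsigned, 0, 3>  ICState;       // bits 0..2, unchanged
typedef Field<unsigned, 3, 4>  Type;          // bits 3..6
typedef Field<unsigned, 7, 1>  CacheHolder;   // moved down from bit 11
typedef Field<unsigned, 8, 4>  Kind;          // moved up from bit 7
typedef Field<unsigned, 12, 2> ExtraICState;  // bits 12..13

int main() {
  uint32_t flags = ICState::encode(2) | Type::encode(5) |
                   CacheHolder::encode(1) | Kind::encode(9) |
                   ExtraICState::encode(3);
  assert(ICState::decode(flags) == 2);
  assert(Type::decode(flags) == 5);
  assert(CacheHolder::decode(flags) == 1);
  assert(Kind::decode(flags) == 9);
  assert(ExtraICState::decode(flags) == 3);
  return 0;
}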
@ -4625,7 +4673,7 @@ class Map: public HeapObject {
|
||||
// This is undone in MarkCompactCollector::ClearNonLiveTransitions().
|
||||
void CreateBackPointers();
|
||||
|
||||
void CreateOneBackPointer(Map* transition_target);
|
||||
void CreateOneBackPointer(Object* transition_target);
|
||||
|
||||
// Set all map transitions from this map to dead maps to null.
|
||||
// Also, restore the original prototype on the targets of these
|
||||
@ -4633,6 +4681,13 @@ class Map: public HeapObject {
|
||||
// following back pointers.
|
||||
void ClearNonLiveTransitions(Heap* heap, Object* real_prototype);
|
||||
|
||||
// Restore a possible back pointer in the prototype field of object.
|
||||
// Return true in that case and false otherwise. Set *keep_entry to
|
||||
// true when a live map transition has been found.
|
||||
bool RestoreOneBackPointer(Object* object,
|
||||
Object* real_prototype,
|
||||
bool* keep_entry);
|
||||
|
||||
// Computes a hash value for this map, to be used in HashTables and such.
|
||||
int Hash();
|
||||
|
||||
@ -5565,6 +5620,7 @@ class JSFunction: public JSObject {
|
||||
// The initial map for an object created by this constructor.
|
||||
inline Map* initial_map();
|
||||
inline void set_initial_map(Map* value);
|
||||
inline MaybeObject* set_initial_map_and_cache_transitions(Map* value);
|
||||
inline bool has_initial_map();
|
||||
|
||||
// Get and set the prototype property on a JSFunction. If the
|
||||
@ -5575,7 +5631,7 @@ class JSFunction: public JSObject {
|
||||
inline bool has_instance_prototype();
|
||||
inline Object* prototype();
|
||||
inline Object* instance_prototype();
|
||||
Object* SetInstancePrototype(Object* value);
|
||||
MaybeObject* SetInstancePrototype(Object* value);
|
||||
MUST_USE_RESULT MaybeObject* SetPrototype(Object* value);
|
||||
|
||||
// After prototype is removed, it will not be created when accessed, and
|
||||
@ -5697,7 +5753,6 @@ class JSGlobalProxy : public JSObject {
|
||||
|
||||
// Forward declaration.
|
||||
class JSBuiltinsObject;
|
||||
class JSGlobalPropertyCell;
|
||||
|
||||
// Common super class for JavaScript global objects and the special
|
||||
// builtins global objects.
|
||||
|
6  deps/v8/src/parser.cc  vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -4682,8 +4682,8 @@ Expression* Parser::NewThrowError(Handle<String> constructor,
|
||||
elements->set(i, *element);
|
||||
}
|
||||
}
|
||||
Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(elements,
|
||||
TENURED);
|
||||
Handle<JSArray> array = isolate()->factory()->NewJSArrayWithElements(
|
||||
elements, FAST_ELEMENTS, TENURED);
|
||||
|
||||
ZoneList<Expression*>* args = new(zone()) ZoneList<Expression*>(2);
|
||||
args->Add(NewLiteral(type));
|
||||
|
32  deps/v8/src/platform-linux.cc  vendored
@ -944,6 +944,38 @@ typedef struct ucontext {
|
||||
} ucontext_t;
|
||||
enum ArmRegisters {R15 = 15, R13 = 13, R11 = 11};
|
||||
|
||||
#elif !defined(__GLIBC__) && defined(__mips__)
|
||||
// MIPS version of sigcontext, for Android bionic.
|
||||
struct sigcontext {
|
||||
uint32_t regmask;
|
||||
uint32_t status;
|
||||
uint64_t pc;
|
||||
uint64_t gregs[32];
|
||||
uint64_t fpregs[32];
|
||||
uint32_t acx;
|
||||
uint32_t fpc_csr;
|
||||
uint32_t fpc_eir;
|
||||
uint32_t used_math;
|
||||
uint32_t dsp;
|
||||
uint64_t mdhi;
|
||||
uint64_t mdlo;
|
||||
uint32_t hi1;
|
||||
uint32_t lo1;
|
||||
uint32_t hi2;
|
||||
uint32_t lo2;
|
||||
uint32_t hi3;
|
||||
uint32_t lo3;
|
||||
};
|
||||
typedef uint32_t __sigset_t;
|
||||
typedef struct sigcontext mcontext_t;
|
||||
typedef struct ucontext {
|
||||
uint32_t uc_flags;
|
||||
struct ucontext* uc_link;
|
||||
stack_t uc_stack;
|
||||
mcontext_t uc_mcontext;
|
||||
__sigset_t uc_sigmask;
|
||||
} ucontext_t;
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
|
199  deps/v8/src/profile-generator.cc  vendored
@ -1308,19 +1308,6 @@ HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
|
||||
int retainers_count) {
|
||||
ASSERT(natives_root_entry_ == NULL);
|
||||
return (natives_root_entry_ = AddEntry(
|
||||
HeapEntry::kObject,
|
||||
"(Native objects)",
|
||||
HeapObjectsMap::kNativesRootObjectId,
|
||||
0,
|
||||
children_count,
|
||||
retainers_count));
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
|
||||
const char* name,
|
||||
uint64_t id,
|
||||
@ -1402,10 +1389,8 @@ void HeapSnapshot::Print(int max_depth) {
|
||||
const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
|
||||
const uint64_t HeapObjectsMap::kGcRootsObjectId =
|
||||
HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
|
||||
const uint64_t HeapObjectsMap::kNativesRootObjectId =
|
||||
HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
|
||||
const uint64_t HeapObjectsMap::kGcRootsFirstSubrootId =
|
||||
HeapObjectsMap::kNativesRootObjectId + HeapObjectsMap::kObjectIdStep;
|
||||
HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
|
||||
const uint64_t HeapObjectsMap::kFirstAvailableObjectId =
|
||||
HeapObjectsMap::kGcRootsFirstSubrootId +
|
||||
VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
|
||||
@ -1577,7 +1562,8 @@ void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
|
||||
|
||||
Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
|
||||
// First perform a full GC in order to avoid dead objects.
|
||||
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
|
||||
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
|
||||
"HeapSnapshotsCollection::FindHeapObjectById");
|
||||
AssertNoAllocation no_allocation;
|
||||
HeapObject* object = NULL;
|
||||
HeapIterator iterator(HeapIterator::kFilterUnreachable);
|
||||
@ -2712,11 +2698,6 @@ class GlobalHandlesExtractor : public ObjectVisitor {
|
||||
NativeObjectsExplorer* explorer_;
|
||||
};
|
||||
|
||||
HeapThing const NativeObjectsExplorer::kNativesRootObject =
|
||||
reinterpret_cast<HeapThing>(
|
||||
static_cast<intptr_t>(HeapObjectsMap::kNativesRootObjectId));
|
||||
|
||||
|
||||
NativeObjectsExplorer::NativeObjectsExplorer(
|
||||
HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
|
||||
: snapshot_(snapshot),
|
||||
@ -2724,6 +2705,7 @@ NativeObjectsExplorer::NativeObjectsExplorer(
|
||||
progress_(progress),
|
||||
embedder_queried_(false),
|
||||
objects_by_info_(RetainedInfosMatch),
|
||||
native_groups_(StringsMatch),
|
||||
filler_(NULL) {
|
||||
}
|
||||
|
||||
@ -2739,37 +2721,34 @@ NativeObjectsExplorer::~NativeObjectsExplorer() {
|
||||
reinterpret_cast<List<HeapObject*>* >(p->value);
|
||||
delete objects;
|
||||
}
|
||||
for (HashMap::Entry* p = native_groups_.Start();
|
||||
p != NULL;
|
||||
p = native_groups_.Next(p)) {
|
||||
v8::RetainedObjectInfo* info =
|
||||
reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
|
||||
info->Dispose();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* NativeObjectsExplorer::AllocateEntry(
|
||||
HeapThing ptr, int children_count, int retainers_count) {
|
||||
if (ptr == kNativesRootObject) {
|
||||
return snapshot_->AddNativesRootEntry(children_count, retainers_count);
|
||||
} else {
|
||||
v8::RetainedObjectInfo* info =
|
||||
reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
|
||||
intptr_t elements = info->GetElementCount();
|
||||
intptr_t size = info->GetSizeInBytes();
|
||||
return snapshot_->AddEntry(
|
||||
HeapEntry::kNative,
|
||||
elements != -1 ?
|
||||
collection_->names()->GetFormatted(
|
||||
"%s / %" V8_PTR_PREFIX "d entries",
|
||||
info->GetLabel(),
|
||||
info->GetElementCount()) :
|
||||
collection_->names()->GetCopy(info->GetLabel()),
|
||||
HeapObjectsMap::GenerateId(info),
|
||||
size != -1 ? static_cast<int>(size) : 0,
|
||||
children_count,
|
||||
retainers_count);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void NativeObjectsExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
|
||||
if (EstimateObjectsCount() <= 0) return;
|
||||
filler->AddEntry(kNativesRootObject, this);
|
||||
v8::RetainedObjectInfo* info =
|
||||
reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
|
||||
intptr_t elements = info->GetElementCount();
|
||||
intptr_t size = info->GetSizeInBytes();
|
||||
return snapshot_->AddEntry(
|
||||
HeapEntry::kNative,
|
||||
elements != -1 ?
|
||||
collection_->names()->GetFormatted(
|
||||
"%s / %" V8_PTR_PREFIX "d entries",
|
||||
info->GetLabel(),
|
||||
info->GetElementCount()) :
|
||||
collection_->names()->GetCopy(info->GetLabel()),
|
||||
HeapObjectsMap::GenerateId(info),
|
||||
size != -1 ? static_cast<int>(size) : 0,
|
||||
children_count,
|
||||
retainers_count);
|
||||
}
|
||||
|
||||
|
||||
@ -2804,6 +2783,27 @@ void NativeObjectsExplorer::FillRetainedObjects() {
|
||||
embedder_queried_ = true;
|
||||
}
|
||||
|
||||
void NativeObjectsExplorer::FillImplicitReferences() {
|
||||
Isolate* isolate = Isolate::Current();
|
||||
List<ImplicitRefGroup*>* groups =
|
||||
isolate->global_handles()->implicit_ref_groups();
|
||||
for (int i = 0; i < groups->length(); ++i) {
|
||||
ImplicitRefGroup* group = groups->at(i);
|
||||
HeapObject* parent = *group->parent_;
|
||||
HeapEntry* parent_entry = filler_->FindOrAddEntry(parent, this);
|
||||
ASSERT(parent_entry != NULL);
|
||||
Object*** children = group->children_;
|
||||
for (size_t j = 0; j < group->length_; ++j) {
|
||||
Object* child = *children[j];
|
||||
HeapEntry* child_entry = filler_->FindOrAddEntry(child, this);
|
||||
filler_->SetNamedReference(
|
||||
HeapGraphEdge::kInternal,
|
||||
parent, parent_entry,
|
||||
"native",
|
||||
child, child_entry);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
|
||||
v8::RetainedObjectInfo* info) {
|
||||
@ -2820,34 +2820,80 @@ List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
|
||||
|
||||
bool NativeObjectsExplorer::IterateAndExtractReferences(
|
||||
SnapshotFillerInterface* filler) {
|
||||
if (EstimateObjectsCount() <= 0) return true;
|
||||
filler_ = filler;
|
||||
FillRetainedObjects();
|
||||
for (HashMap::Entry* p = objects_by_info_.Start();
|
||||
p != NULL;
|
||||
p = objects_by_info_.Next(p)) {
|
||||
v8::RetainedObjectInfo* info =
|
||||
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
|
||||
SetNativeRootReference(info);
|
||||
List<HeapObject*>* objects =
|
||||
reinterpret_cast<List<HeapObject*>* >(p->value);
|
||||
for (int i = 0; i < objects->length(); ++i) {
|
||||
SetWrapperNativeReferences(objects->at(i), info);
|
||||
FillImplicitReferences();
|
||||
if (EstimateObjectsCount() > 0) {
|
||||
for (HashMap::Entry* p = objects_by_info_.Start();
|
||||
p != NULL;
|
||||
p = objects_by_info_.Next(p)) {
|
||||
v8::RetainedObjectInfo* info =
|
||||
reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
|
||||
SetNativeRootReference(info);
|
||||
List<HeapObject*>* objects =
|
||||
reinterpret_cast<List<HeapObject*>* >(p->value);
|
||||
for (int i = 0; i < objects->length(); ++i) {
|
||||
SetWrapperNativeReferences(objects->at(i), info);
|
||||
}
|
||||
}
|
||||
SetRootNativeRootsReference();
|
||||
}
|
||||
SetRootNativesRootReference();
|
||||
filler_ = NULL;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
|
||||
public:
|
||||
explicit NativeGroupRetainedObjectInfo(const char* label)
|
||||
: disposed_(false),
|
||||
hash_(reinterpret_cast<intptr_t>(label)),
|
||||
label_(label) {
|
||||
}
|
||||
|
||||
virtual ~NativeGroupRetainedObjectInfo() {}
|
||||
virtual void Dispose() {
|
||||
CHECK(!disposed_);
|
||||
disposed_ = true;
|
||||
delete this;
|
||||
}
|
||||
virtual bool IsEquivalent(RetainedObjectInfo* other) {
|
||||
return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
|
||||
}
|
||||
virtual intptr_t GetHash() { return hash_; }
|
||||
virtual const char* GetLabel() { return label_; }
|
||||
|
||||
private:
|
||||
bool disposed_;
|
||||
intptr_t hash_;
|
||||
const char* label_;
|
||||
};
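With this change the explorer buckets embedder-retained objects by their group label and emits one synthetic top-level entry per bucket instead of a single "(Native objects)" root. A standalone sketch of that grouping step, with made-up labels and simplified types rather than the real RetainedObjectInfo interface:

// Standalone sketch (not V8 source): bucketing retained objects by group label.
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct RetainedInfo {
  std::string label;        // per-object label
  std::string group_label;  // falls back to label when no group is given
};

typedef std::map<std::string, std::vector<const RetainedInfo*> > Groups;

void AddToGroup(Groups* groups, const RetainedInfo& info) {
  const std::string& key =
      info.group_label.empty() ? info.label : info.group_label;
  (*groups)[key].push_back(&info);
}

int main() {
  // Labels are hypothetical, for illustration only.
  RetainedInfo a = {"Buffer #1", "node / Buffer"};
  RetainedInfo b = {"Buffer #2", "node / Buffer"};
  RetainedInfo c = {"lonely object", ""};
  Groups groups;
  AddToGroup(&groups, a);
  AddToGroup(&groups, b);
  AddToGroup(&groups, c);
  // Each map entry corresponds to one top-level group entry in the snapshot.
  for (Groups::const_iterator it = groups.begin(); it != groups.end(); ++it) {
    std::printf("%s: %d object(s)\n", it->first.c_str(),
                static_cast<int>(it->second.size()));
  }
  return 0;
}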
|
||||
|
||||
|
||||
NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
|
||||
const char* label) {
|
||||
const char* label_copy = collection_->names()->GetCopy(label);
|
||||
uint32_t hash = HashSequentialString(label_copy,
|
||||
static_cast<int>(strlen(label_copy)),
|
||||
HEAP->HashSeed());
|
||||
HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
|
||||
hash, true);
|
||||
if (entry->value == NULL)
|
||||
entry->value = new NativeGroupRetainedObjectInfo(label);
|
||||
return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
|
||||
}
|
||||
|
||||
|
||||
void NativeObjectsExplorer::SetNativeRootReference(
|
||||
v8::RetainedObjectInfo* info) {
|
||||
HeapEntry* child_entry = filler_->FindOrAddEntry(info, this);
|
||||
ASSERT(child_entry != NULL);
|
||||
filler_->SetIndexedAutoIndexReference(
|
||||
HeapGraphEdge::kElement,
|
||||
kNativesRootObject, snapshot_->natives_root(),
|
||||
NativeGroupRetainedObjectInfo* group_info =
|
||||
FindOrAddGroupInfo(info->GetGroupLabel());
|
||||
HeapEntry* group_entry = filler_->FindOrAddEntry(group_info, this);
|
||||
filler_->SetNamedAutoIndexReference(
|
||||
HeapGraphEdge::kInternal,
|
||||
group_info, group_entry,
|
||||
info, child_entry);
|
||||
}
|
||||
|
||||
@ -2868,11 +2914,19 @@ void NativeObjectsExplorer::SetWrapperNativeReferences(
|
||||
}
|
||||
|
||||
|
||||
void NativeObjectsExplorer::SetRootNativesRootReference() {
|
||||
filler_->SetIndexedAutoIndexReference(
|
||||
HeapGraphEdge::kElement,
|
||||
V8HeapExplorer::kInternalRootObject, snapshot_->root(),
|
||||
kNativesRootObject, snapshot_->natives_root());
|
||||
void NativeObjectsExplorer::SetRootNativeRootsReference() {
|
||||
for (HashMap::Entry* entry = native_groups_.Start();
|
||||
entry;
|
||||
entry = native_groups_.Next(entry)) {
|
||||
NativeGroupRetainedObjectInfo* group_info =
|
||||
static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
|
||||
HeapEntry* group_entry = filler_->FindOrAddEntry(group_info, this);
|
||||
ASSERT(group_entry != NULL);
|
||||
filler_->SetIndexedAutoIndexReference(
|
||||
HeapGraphEdge::kElement,
|
||||
V8HeapExplorer::kInternalRootObject, snapshot_->root(),
|
||||
group_info, group_entry);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -3026,8 +3080,12 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
|
||||
// full GC is reachable from the root when computing dominators.
|
||||
// This is not true for weakly reachable objects.
|
||||
// As a temporary solution we call GC twice.
|
||||
Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
|
||||
Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
|
||||
Isolate::Current()->heap()->CollectAllGarbage(
|
||||
Heap::kMakeHeapIterableMask,
|
||||
"HeapSnapshotGenerator::GenerateSnapshot");
|
||||
Isolate::Current()->heap()->CollectAllGarbage(
|
||||
Heap::kMakeHeapIterableMask,
|
||||
"HeapSnapshotGenerator::GenerateSnapshot");
|
||||
|
||||
#ifdef DEBUG
|
||||
Heap* debug_heap = Isolate::Current()->heap();
|
||||
@ -3107,7 +3165,6 @@ void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
|
||||
bool HeapSnapshotGenerator::CountEntriesAndReferences() {
|
||||
SnapshotCounter counter(&entries_);
|
||||
v8_heap_explorer_.AddRootEntries(&counter);
|
||||
dom_explorer_.AddRootEntries(&counter);
|
||||
return
|
||||
v8_heap_explorer_.IterateAndExtractReferences(&counter) &&
|
||||
dom_explorer_.IterateAndExtractReferences(&counter);
|
||||
|
11  deps/v8/src/profile-generator.h  vendored
@ -1026,6 +1026,7 @@ class V8HeapExplorer : public HeapEntriesAllocator {
|
||||
DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
|
||||
};
|
||||
|
||||
class NativeGroupRetainedObjectInfo;
|
||||
|
||||
// An implementation of retained native objects extractor.
|
||||
class NativeObjectsExplorer : public HeapEntriesAllocator {
|
||||
@ -1041,9 +1042,10 @@ class NativeObjectsExplorer : public HeapEntriesAllocator {
|
||||
|
||||
private:
|
||||
void FillRetainedObjects();
|
||||
void FillImplicitReferences();
|
||||
List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
|
||||
void SetNativeRootReference(v8::RetainedObjectInfo* info);
|
||||
void SetRootNativesRootReference();
|
||||
void SetRootNativeRootsReference();
|
||||
void SetWrapperNativeReferences(HeapObject* wrapper,
|
||||
v8::RetainedObjectInfo* info);
|
||||
void VisitSubtreeWrapper(Object** p, uint16_t class_id);
|
||||
@ -1057,6 +1059,12 @@ class NativeObjectsExplorer : public HeapEntriesAllocator {
|
||||
(reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
|
||||
reinterpret_cast<v8::RetainedObjectInfo*>(key2));
|
||||
}
|
||||
INLINE(static bool StringsMatch(void* key1, void* key2)) {
|
||||
return strcmp(reinterpret_cast<char*>(key1),
|
||||
reinterpret_cast<char*>(key2)) == 0;
|
||||
}
|
||||
|
||||
NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
|
||||
|
||||
HeapSnapshot* snapshot_;
|
||||
HeapSnapshotsCollection* collection_;
|
||||
@ -1065,6 +1073,7 @@ class NativeObjectsExplorer : public HeapEntriesAllocator {
|
||||
HeapObjectsSet in_groups_;
|
||||
// RetainedObjectInfo* -> List<HeapObject*>*
|
||||
HashMap objects_by_info_;
|
||||
HashMap native_groups_;
|
||||
// Used during references extraction.
|
||||
SnapshotFillerInterface* filler_;
|
||||
|
||||
|
28  deps/v8/src/property-details.h  vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -73,26 +73,6 @@ enum PropertyType {
|
||||
};
|
||||
|
||||
|
||||
inline bool IsTransitionType(PropertyType type) {
|
||||
switch (type) {
|
||||
case MAP_TRANSITION:
|
||||
case CONSTANT_TRANSITION:
|
||||
case ELEMENTS_TRANSITION:
|
||||
return true;
|
||||
case NORMAL:
|
||||
case FIELD:
|
||||
case CONSTANT_FUNCTION:
|
||||
case CALLBACKS:
|
||||
case HANDLER:
|
||||
case INTERCEPTOR:
|
||||
case NULL_DESCRIPTOR:
|
||||
return false;
|
||||
}
|
||||
UNREACHABLE(); // keep the compiler happy
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
inline bool IsRealProperty(PropertyType type) {
|
||||
switch (type) {
|
||||
case NORMAL:
|
||||
@ -139,12 +119,6 @@ class PropertyDetails BASE_EMBEDDED {
|
||||
|
||||
PropertyType type() { return TypeField::decode(value_); }
|
||||
|
||||
bool IsTransition() {
|
||||
PropertyType t = type();
|
||||
ASSERT(t != INTERCEPTOR);
|
||||
return IsTransitionType(t);
|
||||
}
|
||||
|
||||
bool IsProperty() {
|
||||
return IsRealProperty(type());
|
||||
}
|
||||
|
29  deps/v8/src/property.cc  vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -91,6 +91,9 @@ void LookupResult::Print(FILE* out) {
|
||||
break;
|
||||
case CONSTANT_TRANSITION:
|
||||
PrintF(out, " -type = constant property transition\n");
|
||||
PrintF(out, " -map:\n");
|
||||
GetTransitionMap()->Print(out);
|
||||
PrintF(out, "\n");
|
||||
break;
|
||||
case NULL_DESCRIPTOR:
|
||||
PrintF(out, " =type = null descriptor\n");
|
||||
@ -111,4 +114,28 @@ void Descriptor::Print(FILE* out) {
|
||||
#endif
|
||||
|
||||
|
||||
bool Descriptor::ContainsTransition() {
|
||||
switch (details_.type()) {
|
||||
case MAP_TRANSITION:
|
||||
case CONSTANT_TRANSITION:
|
||||
case ELEMENTS_TRANSITION:
|
||||
return true;
|
||||
case CALLBACKS: {
|
||||
if (!value_->IsAccessorPair()) return false;
|
||||
AccessorPair* accessors = AccessorPair::cast(value_);
|
||||
return accessors->getter()->IsMap() || accessors->setter()->IsMap();
|
||||
}
|
||||
case NORMAL:
|
||||
case FIELD:
|
||||
case CONSTANT_FUNCTION:
|
||||
case HANDLER:
|
||||
case INTERCEPTOR:
|
||||
case NULL_DESCRIPTOR:
|
||||
return false;
|
||||
}
|
||||
UNREACHABLE(); // Keep the compiler happy.
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
8  deps/v8/src/property.h  vendored
@ -1,4 +1,4 @@
|
||||
// Copyright 2011 the V8 project authors. All rights reserved.
|
||||
// Copyright 2012 the V8 project authors. All rights reserved.
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
@ -71,6 +71,8 @@ class Descriptor BASE_EMBEDDED {
|
||||
details_ = PropertyDetails(details_.attributes(), details_.type(), index);
|
||||
}
|
||||
|
||||
bool ContainsTransition();
|
||||
|
||||
private:
|
||||
String* key_;
|
||||
Object* value_;
|
||||
@ -290,7 +292,9 @@ class LookupResult BASE_EMBEDDED {
|
||||
|
||||
Map* GetTransitionMap() {
|
||||
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
|
||||
ASSERT(IsTransitionType(type()));
|
||||
ASSERT(type() == MAP_TRANSITION ||
|
||||
type() == ELEMENTS_TRANSITION ||
|
||||
type() == CONSTANT_TRANSITION);
|
||||
return Map::cast(GetValue());
|
||||
}
|
||||
|
||||
|
96  deps/v8/src/runtime.cc  vendored
@ -428,6 +428,23 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
|
||||
}
|
||||
|
||||
|
||||
MaybeObject* TransitionElements(Handle<Object> object,
|
||||
ElementsKind to_kind,
|
||||
Isolate* isolate) {
|
||||
HandleScope scope(isolate);
|
||||
if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
|
||||
ElementsKind from_kind =
|
||||
Handle<JSObject>::cast(object)->map()->elements_kind();
|
||||
if (Map::IsValidElementsTransition(from_kind, to_kind)) {
|
||||
Handle<Object> result = JSObject::TransitionElementsKind(
|
||||
Handle<JSObject>::cast(object), to_kind);
|
||||
if (result.is_null()) return isolate->ThrowIllegalOperation();
|
||||
return *result;
|
||||
}
|
||||
return isolate->ThrowIllegalOperation();
|
||||
}
|
||||
|
||||
|
||||
static const int kSmiOnlyLiteralMinimumLength = 1024;
|
||||
|
||||
|
||||
@ -446,25 +463,13 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
|
||||
Handle<FixedArrayBase> constant_elements_values(
|
||||
FixedArrayBase::cast(elements->get(1)));
|
||||
|
||||
ASSERT(FLAG_smi_only_arrays || constant_elements_kind == FAST_ELEMENTS ||
|
||||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS);
|
||||
bool allow_literal_kind_transition = FLAG_smi_only_arrays &&
|
||||
constant_elements_kind > object->GetElementsKind();
|
||||
|
||||
if (!FLAG_smi_only_arrays &&
|
||||
constant_elements_values->length() > kSmiOnlyLiteralMinimumLength &&
|
||||
constant_elements_kind != object->GetElementsKind()) {
|
||||
allow_literal_kind_transition = true;
|
||||
}
|
||||
|
||||
// If the ElementsKind of the constant values of the array literal are less
|
||||
// specific than the ElementsKind of the boilerplate array object, change the
|
||||
// boilerplate array object's map to reflect that kind.
|
||||
if (allow_literal_kind_transition) {
|
||||
Handle<Map> transitioned_array_map =
|
||||
isolate->factory()->GetElementsTransitionMap(object,
|
||||
constant_elements_kind);
|
||||
object->set_map(*transitioned_array_map);
|
||||
Context* global_context = isolate->context()->global_context();
|
||||
if (constant_elements_kind == FAST_SMI_ONLY_ELEMENTS) {
|
||||
object->set_map(Map::cast(global_context->smi_js_array_map()));
|
||||
} else if (constant_elements_kind == FAST_DOUBLE_ELEMENTS) {
|
||||
object->set_map(Map::cast(global_context->double_js_array_map()));
|
||||
} else {
|
||||
object->set_map(Map::cast(global_context->object_js_array_map()));
|
||||
}
|
||||
|
||||
Handle<FixedArrayBase> copied_elements_values;
|
||||
@ -509,6 +514,16 @@ Handle<Object> Runtime::CreateArrayLiteralBoilerplate(
  }
  object->set_elements(*copied_elements_values);
  object->set_length(Smi::FromInt(copied_elements_values->length()));

  // Ensure that the boilerplate object has FAST_ELEMENTS, unless the flag is
  // on or the object is larger than the threshold.
  if (!FLAG_smi_only_arrays &&
      constant_elements_values->length() < kSmiOnlyLiteralMinimumLength) {
    if (object->GetElementsKind() != FAST_ELEMENTS) {
      CHECK(!TransitionElements(object, FAST_ELEMENTS, isolate)->IsFailure());
    }
  }

  return object;
}

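
Taken together, the two CreateArrayLiteralBoilerplate hunks above implement a small policy: transition the boilerplate to the literal's constant elements kind, then, when smi-only arrays are disabled and the literal is shorter than kSmiOnlyLiteralMinimumLength (1024), force it back to FAST_ELEMENTS. A self-contained restatement of that decision, purely illustrative (the helper name and its bool/int parameters are not from the diff):

    enum DemoElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    static const int kSmiOnlyLiteralMinimumLength = 1024;  // value from the hunk above

    // Returns true when the boilerplate should be transitioned back to
    // FAST_ELEMENTS after its constant elements have been copied in.
    bool ShouldForceFastElements(bool smi_only_arrays_enabled,
                                 int literal_length,
                                 DemoElementsKind current_kind) {
      return !smi_only_arrays_enabled &&
             literal_length < kSmiOnlyLiteralMinimumLength &&
             current_kind != FAST_ELEMENTS;
    }
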
@ -4202,23 +4217,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
}


MaybeObject* TransitionElements(Handle<Object> object,
                                ElementsKind to_kind,
                                Isolate* isolate) {
  HandleScope scope(isolate);
  if (!object->IsJSObject()) return isolate->ThrowIllegalOperation();
  ElementsKind from_kind =
      Handle<JSObject>::cast(object)->map()->elements_kind();
  if (Map::IsValidElementsTransition(from_kind, to_kind)) {
    Handle<Object> result = JSObject::TransitionElementsKind(
        Handle<JSObject>::cast(object), to_kind);
    if (result.is_null()) return isolate->ThrowIllegalOperation();
    return *result;
  }
  return isolate->ThrowIllegalOperation();
}


// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
  NoHandleAllocation ha;
@ -10236,7 +10234,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) {
  } else {
    elements_kind = DICTIONARY_ELEMENTS;
  }
  maybe_new_map = to->GetElementsTransitionMap(elements_kind);
  maybe_new_map = to->GetElementsTransitionMap(isolate, elements_kind);
  Object* new_map;
  if (!maybe_new_map->ToObject(&new_map)) return maybe_new_map;
  to->set_map(Map::cast(new_map));
@ -10749,6 +10747,11 @@ class FrameInspector {
        ? deoptimized_frame_->GetExpression(index)
        : frame_->GetExpression(index);
  }
  int GetSourcePosition() {
    return is_optimized_
        ? deoptimized_frame_->GetSourcePosition()
        : frame_->LookupCode()->SourcePosition(frame_->pc());
  }

  // To inspect all the provided arguments the frame might need to be
  // replaced with the arguments frame.
@ -10854,9 +10857,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
  // Get the frame id.
  Handle<Object> frame_id(WrapFrameId(it.frame()->id()), isolate);

  // Find source position.
  int position =
      it.frame()->LookupCode()->SourcePosition(it.frame()->pc());
  // Find source position in unoptimized code.
  int position = frame_inspector.GetSourcePosition();

  // Check for constructor frame. Inlined frames cannot be construct calls.
  bool inlined_frame = is_optimized && inlined_jsframe_index != 0;
@ -12443,7 +12445,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
  ASSERT(args.length() == 3);

  // First perform a full GC in order to avoid references from dead objects.
  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                                     "%DebugReferencedBy");
  // The heap iterator reserves the right to do a GC to make the heap iterable.
  // Due to the GC above we know it won't need to do that, but it seems cleaner
  // to get the heap iterator constructed before we start having unprotected
@ -12534,7 +12537,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
  ASSERT(args.length() == 2);

  // First perform a full GC in order to avoid dead objects.
  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
                                     "%DebugConstructedBy");

  // Check parameters.
  CONVERT_CHECKED(JSFunction, constructor, args[0]);
@ -12932,7 +12936,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
// Performs a GC.
// Presently, it only does a full GC.
RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
  isolate->heap()->CollectAllGarbage(true);
  isolate->heap()->CollectAllGarbage(true, "%CollectGarbage");
  return isolate->heap()->undefined_value();
}

@ -13643,12 +13647,14 @@ void Runtime::PerformGC(Object* result) {
    }
    // Try to do a garbage collection; ignore it if it fails. The C
    // entry stub will throw an out-of-memory exception in that case.
    isolate->heap()->CollectGarbage(failure->allocation_space());
    isolate->heap()->CollectGarbage(failure->allocation_space(),
                                    "Runtime::PerformGC");
  } else {
    // Handle last resort GC and make sure to allow future allocations
    // to grow the heap without causing GCs (if possible).
    isolate->counters()->gc_last_resort_from_js()->Increment();
    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
                                       "Runtime::PerformGC");
  }
}
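
A pattern repeated across the runtime.cc GC hunks above: each collector call now passes a short caller-identifying string as an extra argument ("%DebugReferencedBy", "%CollectGarbage", "Runtime::PerformGC"). A minimal, self-contained sketch of that calling convention; DemoHeap is a stand-in rather than V8's Heap class, and the defaulted-parameter signature is an assumption:

    #include <cstdio>

    class DemoHeap {
     public:
      // Extra reason string, defaulted so callers that omit it keep compiling.
      void CollectAllGarbage(int flags, const char* gc_reason = 0) {
        if (gc_reason != 0) {
          std::printf("GC (flags=%d) requested by %s\n", flags, gc_reason);
        }
        // ... actual collection elided ...
      }
    };

    // Usage mirrors the hunks above:
    //   heap.CollectAllGarbage(kNoGCFlags, "Runtime::PerformGC");
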

deps/v8/src/scopes.cc | 8 (vendored)
@ -149,12 +149,10 @@ Scope::Scope(Scope* inner_scope,
  SetDefaults(type, NULL, scope_info);
  if (!scope_info.is_null()) {
    num_heap_slots_ = scope_info_->ContextLength();
    if (*scope_info != ScopeInfo::Empty()) {
      language_mode_ = scope_info->language_mode();
    }
  } else if (is_with_scope()) {
    num_heap_slots_ = Context::MIN_CONTEXT_SLOTS;
  }
  // Ensure at least MIN_CONTEXT_SLOTS to indicate a materialized context.
  num_heap_slots_ = Max(num_heap_slots_,
                        static_cast<int>(Context::MIN_CONTEXT_SLOTS));
  AddInnerScope(inner_scope);
}
|
46
deps/v8/src/spaces.h
vendored
46
deps/v8/src/spaces.h
vendored
@ -1589,50 +1589,8 @@ class PagedSpace : public Space {
  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }

  // Returns zero for pages that have so little fragmentation that it is not
  // worth defragmenting them. Otherwise a positive integer that gives an
  // estimate of fragmentation on an arbitrary scale.
  int Fragmentation(Page* p) {
    FreeList::SizeStats sizes;
    free_list_.CountFreeListItems(p, &sizes);

    intptr_t ratio;
    intptr_t ratio_threshold;
    if (identity() == CODE_SPACE) {
      ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
          Page::kObjectAreaSize;
      ratio_threshold = 10;
    } else {
      ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
          Page::kObjectAreaSize;
      ratio_threshold = 15;
    }

    if (FLAG_trace_fragmentation) {
      PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
             reinterpret_cast<void*>(p),
             identity(),
             static_cast<int>(sizes.small_size_),
             static_cast<double>(sizes.small_size_ * 100) /
             Page::kObjectAreaSize,
             static_cast<int>(sizes.medium_size_),
             static_cast<double>(sizes.medium_size_ * 100) /
             Page::kObjectAreaSize,
             static_cast<int>(sizes.large_size_),
             static_cast<double>(sizes.large_size_ * 100) /
             Page::kObjectAreaSize,
             static_cast<int>(sizes.huge_size_),
             static_cast<double>(sizes.huge_size_ * 100) /
             Page::kObjectAreaSize,
             (ratio > ratio_threshold) ? "[fragmented]" : "");
    }

    if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
      return 1;
    }
    if (ratio <= ratio_threshold) return 0;  // Not fragmented.

    return static_cast<int>(ratio - ratio_threshold);
  void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
    free_list_.CountFreeListItems(p, sizes);
  }

  void EvictEvacuationCandidatesFromFreeLists();
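
For reference, the Fragmentation() heuristic removed above can be exercised in isolation. A self-contained sketch using the non-code-space weighting from the removed code (ratio = (small * 5 + medium) * 100 / page area, threshold 15); the 1 MB page area and the byte counts below are made-up inputs, not values taken from V8:

    #include <cstdint>
    #include <cstdio>

    // Same weighting as the removed code above, restated for a non-code space.
    int FragmentationEstimate(intptr_t small_size, intptr_t medium_size,
                              intptr_t page_area) {
      intptr_t ratio = (small_size * 5 + medium_size) * 100 / page_area;
      const intptr_t ratio_threshold = 15;
      if (ratio <= ratio_threshold) return 0;  // Not fragmented.
      return static_cast<int>(ratio - ratio_threshold);
    }

    int main() {
      // Illustrative numbers only: 20 KB of small and 40 KB of medium free-list
      // items on a hypothetical 1 MB page give ratio = 13, i.e. "not fragmented".
      std::printf("%d\n", FragmentationEstimate(20 * 1024, 40 * 1024, 1024 * 1024));
      return 0;
    }
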

deps/v8/src/stub-cache.h | 6 (vendored)
@ -343,8 +343,10 @@ class StubCache {
        reinterpret_cast<Address>(table) + (offset << shift_amount));
  }

  static const int kPrimaryTableSize = 2048;
  static const int kSecondaryTableSize = 512;
  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);

  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
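
Rewriting the table sizes as (1 << kPrimaryTableBits) and (1 << kSecondaryTableBits) keeps the old values (1 << 11 == 2048, 1 << 9 == 512) while exposing the bit widths as named constants. A small sketch of the usual payoff of power-of-two sizing; the hash and index helper below are illustrative, not the actual StubCache probe:

    #include <cstdint>

    static const int kPrimaryTableBits = 11;
    static const int kPrimaryTableSize = (1 << kPrimaryTableBits);

    // With a power-of-two table, "hash % size" reduces to masking the low bits.
    inline int PrimaryIndex(uint32_t hash) {
      return static_cast<int>(hash & (kPrimaryTableSize - 1));
    }
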

deps/v8/src/type-info.cc | 36 (vendored)
@ -140,6 +140,12 @@ bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
}


bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
  Handle<Object> value = GetInfo(expr->id());
  return value->IsJSFunction();
}


Handle<Map> TypeFeedbackOracle::LoadMonomorphicReceiverType(Property* expr) {
  ASSERT(LoadIsMonomorphicNormal(expr));
  Handle<Object> map_or_code = GetInfo(expr->id());
@ -541,6 +547,7 @@ void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
  GetRelocInfos(code, &infos);
  CreateDictionary(code, &infos);
  ProcessRelocInfos(&infos);
  ProcessTypeFeedbackCells(code);
  // Allocate handle in the parent scope.
  dictionary_ = scope.CloseAndEscape(dictionary_);
}
@ -558,8 +565,9 @@ void TypeFeedbackOracle::GetRelocInfos(Handle<Code> code,
void TypeFeedbackOracle::CreateDictionary(Handle<Code> code,
                                          ZoneList<RelocInfo>* infos) {
  DisableAssertNoAllocation allocation_allowed;
  int length = infos->length() + code->type_feedback_cells()->CellCount();
  byte* old_start = code->instruction_start();
  dictionary_ = FACTORY->NewUnseededNumberDictionary(infos->length());
  dictionary_ = FACTORY->NewUnseededNumberDictionary(length);
  byte* new_start = code->instruction_start();
  RelocateRelocInfos(infos, old_start, new_start);
}
@ -619,18 +627,6 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
        SetInfo(ast_id, target);
        break;

      case Code::STUB:
        if (target->major_key() == CodeStub::CallFunction &&
            target->has_function_cache()) {
          Object* value = CallFunctionStub::GetCachedValue(reloc_entry.pc());
          if (value->IsJSFunction() &&
              !CanRetainOtherContext(JSFunction::cast(value),
                                     *global_context_)) {
            SetInfo(ast_id, value);
          }
        }
        break;

      default:
        break;
    }
@ -638,6 +634,20 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
}


void TypeFeedbackOracle::ProcessTypeFeedbackCells(Handle<Code> code) {
  Handle<TypeFeedbackCells> cache(code->type_feedback_cells());
  for (int i = 0; i < cache->CellCount(); i++) {
    unsigned ast_id = cache->AstId(i)->value();
    Object* value = cache->Cell(i)->value();
    if (value->IsJSFunction() &&
        !CanRetainOtherContext(JSFunction::cast(value),
                               *global_context_)) {
      SetInfo(ast_id, value);
    }
  }
}


void TypeFeedbackOracle::SetInfo(unsigned ast_id, Object* target) {
  ASSERT(dictionary_->FindEntry(ast_id) == UnseededNumberDictionary::kNotFound);
  MaybeObject* maybe_result = dictionary_->AtNumberPut(ast_id, target);
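
The type-info.cc hunks above add two cooperating pieces: ProcessTypeFeedbackCells() records the JSFunction found in each type feedback cell under its AST id, and CallNewIsMonomorphic() later asks whether the value recorded for a given id is a function. A self-contained analogue of that record/query pairing; the container and names here are stand-ins, not V8's oracle:

    #include <map>

    class DemoOracle {
     public:
      // Analogue of ProcessTypeFeedbackCells() + SetInfo(): remember, per AST
      // id, whether the cached cell value was a function.
      void RecordCell(unsigned ast_id, bool value_is_function) {
        if (value_is_function) function_by_ast_id_[ast_id] = true;
      }

      // Analogue of CallNewIsMonomorphic(): the query just inspects what was
      // recorded for that AST id.
      bool CallNewIsMonomorphic(unsigned ast_id) const {
        return function_by_ast_id_.count(ast_id) != 0;
      }

     private:
      std::map<unsigned, bool> function_by_ast_id_;
    };
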
Some files were not shown because too many files have changed in this diff.