deps: upgrade v8 to 4.2.77.18

This commit applies a secondary change in order to make `make test`
pass cleanly, specifically re-disabling post-mortem debugging in
common.gypi.

PR-URL: https://github.com/iojs/io.js/pull/1506
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Author: Chris Dickinson
Date:   2015-04-24 17:19:24 -07:00
Parent: 01e6632d70
Commit: 01652c7709

14 changed files with 186 additions and 147 deletions

common.gypi

@@ -20,16 +20,17 @@
# Enable disassembler for `--print-code` v8 options
'v8_enable_disassembler': 1,
# Disable support for postmortem debugging, continuously broken.
'v8_postmortem_support%': 'false',
# Don't bake anything extra into the snapshot.
'v8_use_external_startup_data%': 0,
'conditions': [
['OS == "win"', {
'os_posix': 0,
'v8_postmortem_support%': 'false',
}, {
'os_posix': 1,
'v8_postmortem_support%': 'true',
}],
['GENERATOR == "ninja" or OS== "mac"', {
'OBJ_DIR': '<(PRODUCT_DIR)/obj',

deps/v8/AUTHORS

@@ -61,7 +61,7 @@ Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Joel Stanley <joel.stan@gmail.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>

deps/v8/include/v8-version.h

@@ -11,7 +11,7 @@
#define V8_MAJOR_VERSION 4
#define V8_MINOR_VERSION 2
#define V8_BUILD_NUMBER 77
#define V8_PATCH_LEVEL 15
#define V8_PATCH_LEVEL 18
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

deps/v8/include/v8.h

@@ -932,6 +932,24 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
internal::Object** escape_slot_;
};
class V8_EXPORT SealHandleScope {
public:
SealHandleScope(Isolate* isolate);
~SealHandleScope();
private:
// Make it hard to create heap-allocated or illegal handle scopes by
// disallowing certain operations.
SealHandleScope(const SealHandleScope&);
void operator=(const SealHandleScope&);
void* operator new(size_t size);
void operator delete(void*, size_t);
internal::Isolate* isolate_;
int prev_level_;
internal::Object** prev_limit_;
};
/**
* A simple Maybe type, representing an object which may or may not have a
@@ -1004,24 +1022,6 @@ class ScriptOrigin {
Handle<Integer> script_id_;
};
class V8_EXPORT SealHandleScope {
public:
SealHandleScope(Isolate* isolate);
~SealHandleScope();
private:
// Make it hard to create heap-allocated or illegal handle scopes by
// disallowing certain operations.
SealHandleScope(const SealHandleScope&);
void operator=(const SealHandleScope&);
void* operator new(size_t size);
void operator delete(void*, size_t);
internal::Isolate* isolate_;
int prev_level_;
internal::Object** prev_limit_;
};
/**
* A compiled JavaScript script, not yet tied to a Context.
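
Note that the two v8.h hunks above are a pure move: the existing SealHandleScope declaration is relocated earlier in the header. While such a scope is active, creating local handles trips a fatal check in debug builds until a nested HandleScope is opened. A minimal usage sketch, mirroring the cctest cases re-added at the end of test-api.cc below (the function name and setup are illustrative, not part of this commit):

// Sketch only; assumes an initialized v8::Isolate with an entered Context.
#include "v8.h"

void SealedSection(v8::Isolate* isolate) {
  v8::HandleScope handle_scope(isolate);
  v8::SealHandleScope seal(isolate);
  // Creating a local handle here would abort in debug builds: the scope
  // is sealed.
  {
    v8::HandleScope inner(isolate);
    // Inside a nested HandleScope, handle creation is allowed again.
    v8::Local<v8::Object> obj = v8::Object::New(isolate);
    (void)obj;  // silence unused-variable warnings
  }
}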

deps/v8/src/compiler/x64/instruction-selector-x64.cc

@@ -812,22 +812,12 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Node* value = node->InputAt(0);
if (CanCover(node, value)) {
switch (value->opcode()) {
case IrOpcode::kWord64Sar: {
case IrOpcode::kWord64Sar:
case IrOpcode::kWord64Shr: {
Int64BinopMatcher m(value);
if (m.right().IsInRange(1, 32)) {
if (m.right().Is(32)) {
Emit(kX64Shr, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()));
return;
}
break;
}
case IrOpcode::kWord64Shl: {
Int64BinopMatcher m(value);
if (m.right().IsInRange(1, 31)) {
Emit(kX64Shl32, g.DefineSameAsFirst(node),
g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()));
g.UseRegister(m.left().node()), g.TempImmediate(32));
return;
}
break;
@@ -836,9 +826,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
break;
}
}
// Otherwise truncation from 64-bit to 32-bit is a no-nop, as 32-bit
// operations just ignore the upper 64-bit.
Emit(kArchNop, g.DefineAsRegister(node), g.Use(value));
Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
}
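
Two things change in VisitTruncateInt64ToInt32 above: a bare 64-to-32-bit truncation is no longer modelled as kArchNop but as an explicit 32-bit move (kX64Movl, which on x86-64 zero-extends into the full register), and a truncation of a 64-bit shift by exactly 32 folds into a single kX64Shr. A standalone C++ analogue of the matched patterns (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

// TruncateInt64ToInt32(Word64Sar/Shr(x, 32)): one 64-bit shift suffices.
// Arithmetic and logical shifts by 32 differ only in the upper word, which
// the truncation discards, so kX64Shr covers both input opcodes.
int32_t TruncateAfterShift32(int64_t x) {
  return static_cast<int32_t>(x >> 32);
}

// Bare truncation: previously a no-op node, now an explicit 32-bit move.
int32_t TruncateOnly(int64_t x) {
  return static_cast<int32_t>(x);
}

int main() {
  assert(TruncateAfterShift32(INT64_C(0x1234567800000000)) == 0x12345678);
  assert(TruncateOnly(INT64_C(0x7FFFFFFF0000002A)) == 0x2A);
  return 0;
}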

deps/v8/src/heap/gc-idle-time-handler.h

@@ -125,6 +125,9 @@ class GCIdleTimeHandler {
// That is the maximum idle time we will have during frame rendering.
static const size_t kMaxFrameRenderingIdleTime = 16;
// Minimum idle time to start incremental marking.
static const size_t kMinIdleTimeToStartIncrementalMarking = 10;
// If we haven't recorded any scavenger events yet, we use a conservative
// lower bound for the scavenger speed.
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
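
The new kMinIdleTimeToStartIncrementalMarking constant feeds the limit selection added to Heap::IdleNotification in the heap.cc hunks below: given more than 10 ms of idle time, the smaller idle allocation limit is used, so incremental marking can start sooner. A reduced sketch of that decision (names are illustrative, not V8's):

#include <cstddef>
#include <cstdint>

const size_t kMinIdleTimeToStartIncrementalMarking = 10;  // milliseconds

// The idle limit is computed with a growing factor capped at 1.5 (see
// SetOldGenerationAllocationLimit below), so it is at most the regular
// limit; a smaller limit makes NextGCIsLikelyToBeFull() true earlier.
intptr_t PickAllocationLimit(double idle_time_in_ms, intptr_t regular_limit,
                             intptr_t idle_limit) {
  if (static_cast<size_t>(idle_time_in_ms) >
      kMinIdleTimeToStartIncrementalMarking) {
    return idle_limit;
  }
  return regular_limit;
}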

deps/v8/src/heap/heap.cc

@@ -104,6 +104,8 @@ Heap::Heap()
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(initial_old_generation_size_),
idle_old_generation_allocation_limit_(
kMinimumOldGenerationAllocationLimit),
old_gen_exhausted_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
@@ -1159,8 +1161,7 @@ bool Heap::PerformGarbageCollection(
// Temporarily set the limit for case when PostGarbageCollectionProcessing
// allocates and triggers GC. The real limit is set at after
// PostGarbageCollectionProcessing.
old_generation_allocation_limit_ =
OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
old_generation_size_configured_ = true;
} else {
@@ -1194,8 +1195,8 @@ bool Heap::PerformGarbageCollection(
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
old_generation_allocation_limit_ = OldGenerationAllocationLimit(
PromotedSpaceSizeOfObjects(), freed_global_handles);
SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
freed_global_handles);
// We finished a marking cycle. We can uncommit the marking deque until
// we start marking again.
mark_compact_collector_.UncommitMarkingDeque();
@@ -4558,7 +4559,7 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
bool Heap::WorthActivatingIncrementalMarking() {
return incremental_marking()->IsStopped() &&
incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
incremental_marking()->ShouldActivate();
}
@@ -4583,6 +4584,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
static_cast<double>(base::Time::kMillisecondsPerSecond);
HistogramTimerScope idle_notification_scope(
isolate_->counters()->gc_idle_notification());
double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
GCIdleTimeHandler::HeapState heap_state;
heap_state.contexts_disposed = contexts_disposed_;
@@ -4591,8 +4593,15 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
// TODO(ulan): Start incremental marking only for large heaps.
intptr_t limit = old_generation_allocation_limit_;
if (static_cast<size_t>(idle_time_in_ms) >
GCIdleTimeHandler::kMinIdleTimeToStartIncrementalMarking) {
limit = idle_old_generation_allocation_limit_;
}
heap_state.can_start_incremental_marking =
incremental_marking()->ShouldActivate() && FLAG_incremental_marking;
incremental_marking()->WorthActivating() &&
NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking;
heap_state.sweeping_in_progress =
mark_compact_collector()->sweeping_in_progress();
heap_state.mark_compact_speed_in_bytes_per_ms =
@@ -4610,7 +4619,6 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
static_cast<size_t>(
tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
GCIdleTimeAction action =
gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
@@ -5358,21 +5366,37 @@ int64_t Heap::PromotedExternalMemorySize() {
}
intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
int freed_global_handles) {
intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
intptr_t old_gen_size) {
CHECK(factor > 1.0);
CHECK(old_gen_size > 0);
intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
limit = Max(limit, kMinimumOldGenerationAllocationLimit);
limit += new_space_.Capacity();
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
}
void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
int freed_global_handles) {
const int kMaxHandles = 1000;
const int kMinHandles = 100;
double min_factor = 1.1;
const double min_factor = 1.1;
double max_factor = 4;
const double idle_max_factor = 1.5;
// We set the old generation growing factor to 2 to grow the heap slower on
// memory-constrained devices.
if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
max_factor = 2;
}
// If there are many freed global handles, then the next full GC will
// likely collect a lot of garbage. Choose the heap growing factor
// depending on freed global handles.
// TODO(ulan, hpayer): Take into account mutator utilization.
// TODO(hpayer): The idle factor could make the handles heuristic obsolete.
// Look into that.
double factor;
if (freed_global_handles <= kMinHandles) {
factor = max_factor;
@@ -5391,11 +5415,10 @@ intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
factor = min_factor;
}
intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
limit = Max(limit, kMinimumOldGenerationAllocationLimit);
limit += new_space_.Capacity();
intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
return Min(limit, halfway_to_the_max);
old_generation_allocation_limit_ =
CalculateOldGenerationAllocationLimit(factor, old_gen_size);
idle_old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
Min(factor, idle_max_factor), old_gen_size);
}
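
The refactoring above splits the old OldGenerationAllocationLimit into a pure calculation plus a setter that maintains two limits from one measurement: the regular limit (growing factor up to 4, or 2 on memory-constrained devices) and an idle limit whose factor is capped at 1.5. A self-contained sketch of the shared calculation (parameter names stand in for V8's fields and constants):

#include <algorithm>
#include <cstdint>

int64_t CalculateLimit(double factor, int64_t old_gen_size, int64_t min_limit,
                       int64_t new_space_capacity, int64_t max_old_gen_size) {
  int64_t limit = static_cast<int64_t>(old_gen_size * factor);
  limit = std::max(limit, min_limit);  // never drop below the floor
  limit += new_space_capacity;         // leave room for promoted objects
  // Cap growth so the next full GC is scheduled no later than halfway
  // between the current size and the hard maximum.
  int64_t halfway = (old_gen_size + max_old_gen_size) / 2;
  return std::min(limit, halfway);
}

// The setter then derives both limits from the same old-generation size:
//   regular: CalculateLimit(factor, ...)
//   idle:    CalculateLimit(std::min(factor, 1.5), ...)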

deps/v8/src/heap/heap.h

@@ -628,6 +628,10 @@ class Heap {
// Returns of size of all objects residing in the heap.
intptr_t SizeOfObjects();
intptr_t old_generation_allocation_limit() const {
return old_generation_allocation_limit_;
}
// Return the starting address and a mask for the new space. And-masking an
// address with the mask will result in the start address of the new space
// for all addresses in either semispace.
@@ -1112,8 +1116,14 @@ class Heap {
static const int kMaxExecutableSizeHugeMemoryDevice =
256 * kPointerMultiplier;
intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
int freed_global_handles);
// Calculates the allocation limit based on a given growing factor and a
// given old generation size.
intptr_t CalculateOldGenerationAllocationLimit(double factor,
intptr_t old_gen_size);
// Sets the allocation limit to trigger the next full garbage collection.
void SetOldGenerationAllocationLimit(intptr_t old_gen_size,
int freed_global_handles);
// Indicates whether inline bump-pointer allocation has been disabled.
bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1219,13 +1229,12 @@ class Heap {
survived_since_last_expansion_ += survived;
}
inline bool NextGCIsLikelyToBeFull() {
inline bool NextGCIsLikelyToBeFull(intptr_t limit) {
if (FLAG_gc_global) return true;
if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
intptr_t adjusted_allocation_limit =
old_generation_allocation_limit_ - new_space_.Capacity();
intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
@@ -1604,6 +1613,10 @@ class Heap {
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
// The allocation limit when there is > kMinIdleTimeToStartIncrementalMarking
// idle time in the idle time handler.
intptr_t idle_old_generation_allocation_limit_;
// Indicates that an allocation has failed in the old generation since the
// last GC.
bool old_gen_exhausted_;

deps/v8/src/heap/incremental-marking.cc

@@ -422,7 +422,9 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
bool IncrementalMarking::ShouldActivate() {
return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
return WorthActivating() &&
heap_->NextGCIsLikelyToBeFull(
heap_->old_generation_allocation_limit());
}

deps/v8/src/hydrogen.cc

@@ -2391,6 +2391,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
PropertyAccessType access_type,
LoadKeyedHoleMode load_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() ||
checked_object->IsCheckMaps());
DCHECK((!IsExternalArrayElementsKind(elements_kind) &&
!IsFixedTypedArrayElementsKind(elements_kind)) ||
!is_js_array);
@@ -8401,11 +8403,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
BuildUncheckedMonomorphicElementAccess(array, length,
value_to_push, is_array,
elements_kind, STORE,
NEVER_RETURN_HOLE,
STORE_AND_GROW_NO_TRANSITION);
HValue* checked_array = Add<HCheckMaps>(array, receiver_map);
BuildUncheckedMonomorphicElementAccess(
checked_array, length, value_to_push, is_array, elements_kind,
STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
if (!ast_context()->IsEffect()) Push(new_size);
Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
@@ -8763,18 +8764,9 @@ void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
int args_count_no_receiver = arguments_count - 1;
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
HValue* receiver = environment()->ExpressionStackAt(args_count_no_receiver);
Handle<Map> receiver_map;
if (receiver->IsConstant() &&
HConstant::cast(receiver)->handle(isolate())->IsHeapObject()) {
receiver_map =
handle(Handle<HeapObject>::cast(
HConstant::cast(receiver)->handle(isolate()))->map());
}
known_function =
Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
if (TryInlineBuiltinMethodCall(expr, known_function, receiver_map,
if (TryInlineBuiltinMethodCall(expr, known_function, Handle<Map>(),
args_count_no_receiver)) {
if (FLAG_trace_inlining) {
PrintF("Inlining builtin ");

deps/v8/test/cctest/test-api.cc

@@ -18883,38 +18883,6 @@ void CallCompletedCallbackException() {
}
TEST(SealHandleScope) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
LocalContext env;
v8::SealHandleScope seal(isolate);
// Should fail
v8::Local<v8::Object> obj = v8::Object::New(isolate);
USE(obj);
}
TEST(SealHandleScopeNested) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
LocalContext env;
v8::SealHandleScope seal(isolate);
{
v8::HandleScope handle_scope(isolate);
// Should work
v8::Local<v8::Object> obj = v8::Object::New(isolate);
USE(obj);
}
}
TEST(CallCompletedCallbackOneException) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -21978,3 +21946,35 @@ TEST(NewStringRangeError) {
}
free(buffer);
}
TEST(SealHandleScope) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
LocalContext env;
v8::SealHandleScope seal(isolate);
// Should fail
v8::Local<v8::Object> obj = v8::Object::New(isolate);
USE(obj);
}
TEST(SealHandleScopeNested) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
LocalContext env;
v8::SealHandleScope seal(isolate);
{
v8::HandleScope handle_scope(isolate);
// Should work
v8::Local<v8::Object> obj = v8::Object::New(isolate);
USE(obj);
}
}


@@ -0,0 +1,20 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax
var a = [1.5];
function p() {
Array.prototype.push.call(a, 1.7);
}
p();
p();
p();
%OptimizeFunctionOnNextCall(p);
p();
a.push({});
p();
assertEquals(1.7, a[a.length - 1]);

deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc

@@ -63,6 +63,15 @@ TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
}
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
StreamBuilder m(this, kMachInt32, kMachInt64);
m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
}
// -----------------------------------------------------------------------------
// Loads and stores
@@ -197,49 +206,37 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
// TruncateInt64ToInt32.
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
StreamBuilder m(this, kMachInt32, kMachInt64);
m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(0U, s.size());
}
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
TRACED_FORRANGE(int32_t, k, 1, 32) {
StreamBuilder m(this, kMachInt32, kMachInt64);
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(k)));
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
StreamBuilder m(this, kMachInt32, kMachInt64);
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shl) {
TRACED_FORRANGE(int32_t, k, 1, 31) {
StreamBuilder m(this, kMachInt32, kMachInt64);
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Shl(p, m.Int64Constant(k)));
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Shl32, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
StreamBuilder m(this, kMachInt32, kMachInt64);
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
m.Return(t);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
}

deps/v8/tools/gen-postmortem-metadata.py

@@ -91,7 +91,7 @@ consts_misc = [
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'DATA' },
'value': 'FIELD' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
{ 'name': 'prop_index_mask',