deps: upgrade v8 to 4.2.77.18
This commit applies a secondary change in order to make `make test` pass
cleanly, specifically re-disabling post-mortem debugging in common.gypi.

PR-URL: https://github.com/iojs/io.js/pull/1506
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Parent: 01e6632d70
Commit: 01652c7709
--- a/common.gypi
+++ b/common.gypi
@@ -20,16 +20,17 @@
     # Enable disassembler for `--print-code` v8 options
     'v8_enable_disassembler': 1,
 
+    # Disable support for postmortem debugging, continuously broken.
+    'v8_postmortem_support%': 'false',
+
     # Don't bake anything extra into the snapshot.
     'v8_use_external_startup_data%': 0,
 
     'conditions': [
       ['OS == "win"', {
         'os_posix': 0,
-        'v8_postmortem_support%': 'false',
       }, {
         'os_posix': 1,
-        'v8_postmortem_support%': 'true',
       }],
       ['GENERATOR == "ninja" or OS== "mac"', {
        'OBJ_DIR': '<(PRODUCT_DIR)/obj',
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -61,7 +61,7 @@ Jan de Mooij <jandemooij@gmail.com>
 Jay Freeman <saurik@saurik.com>
 James Pike <g00gle@chilon.net>
 Jianghua Yang <jianghua.yjh@alibaba-inc.com>
-Joel Stanley <joel.stan@gmail.com>
+Joel Stanley <joel@jms.id.au>
 Johan Bergström <johan@bergstroem.nu>
 Jonathan Liu <net147@gmail.com>
 Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 4
 #define V8_MINOR_VERSION 2
 #define V8_BUILD_NUMBER 77
-#define V8_PATCH_LEVEL 15
+#define V8_PATCH_LEVEL 18
 
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
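Note: the only change above is the patch level, so the bundled V8 moves from
4.2.77.15 to 4.2.77.18. A minimal sketch for confirming which version an
embedder actually linked against; v8::V8::GetVersion() returns the string
assembled from these macros, and the expected output here is an assumption
based on this diff:

    #include <cstdio>
    #include "v8.h"

    int main() {
      // Prints the compile-time V8 version, e.g. "4.2.77.18" after this upgrade.
      std::printf("%s\n", v8::V8::GetVersion());
      return 0;
    }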
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -932,6 +932,24 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
   internal::Object** escape_slot_;
 };
 
+class V8_EXPORT SealHandleScope {
+ public:
+  SealHandleScope(Isolate* isolate);
+  ~SealHandleScope();
+
+ private:
+  // Make it hard to create heap-allocated or illegal handle scopes by
+  // disallowing certain operations.
+  SealHandleScope(const SealHandleScope&);
+  void operator=(const SealHandleScope&);
+  void* operator new(size_t size);
+  void operator delete(void*, size_t);
+
+  internal::Isolate* isolate_;
+  int prev_level_;
+  internal::Object** prev_limit_;
+};
+
 
 /**
  * A simple Maybe type, representing an object which may or may not have a
@@ -1004,24 +1022,6 @@ class ScriptOrigin {
   Handle<Integer> script_id_;
 };
 
-class V8_EXPORT SealHandleScope {
- public:
-  SealHandleScope(Isolate* isolate);
-  ~SealHandleScope();
-
- private:
-  // Make it hard to create heap-allocated or illegal handle scopes by
-  // disallowing certain operations.
-  SealHandleScope(const SealHandleScope&);
-  void operator=(const SealHandleScope&);
-  void* operator new(size_t size);
-  void operator delete(void*, size_t);
-
-  internal::Isolate* isolate_;
-  int prev_level_;
-  internal::Object** prev_limit_;
-};
-
 
 /**
  * A compiled JavaScript script, not yet tied to a Context.
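Note: the two hunks above only move the SealHandleScope declaration from its
spot after ScriptOrigin to just below EscapableHandleScope; the API itself is
unchanged. As a reminder of what it does, a minimal sketch mirroring the tests
relocated in test-api.cc further down: while a SealHandleScope is active,
allocating a local handle is disallowed until a nested HandleScope is opened.

    #include "v8.h"

    void Demo(v8::Isolate* isolate) {
      v8::HandleScope handle_scope(isolate);
      v8::SealHandleScope seal(isolate);
      {
        v8::HandleScope nested(isolate);  // re-enables handle allocation
        v8::Local<v8::Object> obj = v8::Object::New(isolate);  // OK here
        (void)obj;  // creating obj outside the nested scope would abort
      }
    }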
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -812,22 +812,12 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Node* value = node->InputAt(0);
   if (CanCover(node, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord64Sar: {
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shr: {
         Int64BinopMatcher m(value);
-        if (m.right().IsInRange(1, 32)) {
+        if (m.right().Is(32)) {
           Emit(kX64Shr, g.DefineSameAsFirst(node),
-               g.UseRegister(m.left().node()),
-               g.UseImmediate(m.right().node()));
-          return;
-        }
-        break;
-      }
-      case IrOpcode::kWord64Shl: {
-        Int64BinopMatcher m(value);
-        if (m.right().IsInRange(1, 31)) {
-          Emit(kX64Shl32, g.DefineSameAsFirst(node),
-               g.UseRegister(m.left().node()),
-               g.UseImmediate(m.right().node()));
+               g.UseRegister(m.left().node()), g.TempImmediate(32));
           return;
         }
         break;
@@ -836,9 +826,7 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
         break;
     }
   }
-  // Otherwise truncation from 64-bit to 32-bit is a no-nop, as 32-bit
-  // operations just ignore the upper 64-bit.
-  Emit(kArchNop, g.DefineAsRegister(node), g.Use(value));
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
 
 
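Note: the fix above replaces the old "truncation is a no-op" assumption
(kArchNop) with an explicit kX64Movl, and now folds only shifts by exactly 32
into a single shr. The underlying x64 rule, as an illustrative sketch in plain
C++ rather than V8 code: writing a 32-bit register zero-extends to 64 bits, so
both functions below typically compile to a single instruction:

    #include <cstdint>

    int32_t Truncate(int64_t x) {
      return static_cast<int32_t>(x);        // a single movl
    }

    int32_t UpperWord(int64_t x) {
      return static_cast<int32_t>(x >> 32);  // a single shift by 32, as matched above
    }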
3
deps/v8/src/heap/gc-idle-time-handler.h
vendored
3
deps/v8/src/heap/gc-idle-time-handler.h
vendored
@ -125,6 +125,9 @@ class GCIdleTimeHandler {
|
|||||||
// That is the maximum idle time we will have during frame rendering.
|
// That is the maximum idle time we will have during frame rendering.
|
||||||
static const size_t kMaxFrameRenderingIdleTime = 16;
|
static const size_t kMaxFrameRenderingIdleTime = 16;
|
||||||
|
|
||||||
|
// Minimum idle time to start incremental marking.
|
||||||
|
static const size_t kMinIdleTimeToStartIncrementalMarking = 10;
|
||||||
|
|
||||||
// If we haven't recorded any scavenger events yet, we use a conservative
|
// If we haven't recorded any scavenger events yet, we use a conservative
|
||||||
// lower bound for the scavenger speed.
|
// lower bound for the scavenger speed.
|
||||||
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
|
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
|
||||||
|
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -104,6 +104,8 @@ Heap::Heap()
       allocation_timeout_(0),
 #endif  // DEBUG
       old_generation_allocation_limit_(initial_old_generation_size_),
+      idle_old_generation_allocation_limit_(
+          kMinimumOldGenerationAllocationLimit),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
@@ -1159,8 +1161,7 @@ bool Heap::PerformGarbageCollection(
     // Temporarily set the limit for case when PostGarbageCollectionProcessing
     // allocates and triggers GC. The real limit is set at after
     // PostGarbageCollectionProcessing.
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
+    SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
     old_generation_size_configured_ = true;
   } else {
@@ -1194,8 +1195,8 @@ bool Heap::PerformGarbageCollection(
     // Register the amount of external allocated memory.
     amount_of_external_allocated_memory_at_last_global_gc_ =
         amount_of_external_allocated_memory_;
-    old_generation_allocation_limit_ = OldGenerationAllocationLimit(
-        PromotedSpaceSizeOfObjects(), freed_global_handles);
+    SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
+                                    freed_global_handles);
     // We finished a marking cycle. We can uncommit the marking deque until
     // we start marking again.
     mark_compact_collector_.UncommitMarkingDeque();
@@ -4558,7 +4559,7 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
 
 bool Heap::WorthActivatingIncrementalMarking() {
   return incremental_marking()->IsStopped() &&
-         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
+         incremental_marking()->ShouldActivate();
 }
 
 
@@ -4583,6 +4584,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
       static_cast<double>(base::Time::kMillisecondsPerSecond);
   HistogramTimerScope idle_notification_scope(
       isolate_->counters()->gc_idle_notification());
+  double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
 
   GCIdleTimeHandler::HeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
@@ -4591,8 +4593,15 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
   // TODO(ulan): Start incremental marking only for large heaps.
+  intptr_t limit = old_generation_allocation_limit_;
+  if (static_cast<size_t>(idle_time_in_ms) >
+      GCIdleTimeHandler::kMinIdleTimeToStartIncrementalMarking) {
+    limit = idle_old_generation_allocation_limit_;
+  }
+
   heap_state.can_start_incremental_marking =
-      incremental_marking()->ShouldActivate() && FLAG_incremental_marking;
+      incremental_marking()->WorthActivating() &&
+      NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking;
   heap_state.sweeping_in_progress =
       mark_compact_collector()->sweeping_in_progress();
   heap_state.mark_compact_speed_in_bytes_per_ms =
@@ -4610,7 +4619,6 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
       static_cast<size_t>(
           tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
 
-  double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
   GCIdleTimeAction action =
       gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
   isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
@@ -5358,21 +5366,37 @@ int64_t Heap::PromotedExternalMemorySize() {
 }
 
 
-intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                            int freed_global_handles) {
+intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
+                                                     intptr_t old_gen_size) {
+  CHECK(factor > 1.0);
+  CHECK(old_gen_size > 0);
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}
+
+
+void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                           int freed_global_handles) {
   const int kMaxHandles = 1000;
   const int kMinHandles = 100;
-  double min_factor = 1.1;
+  const double min_factor = 1.1;
   double max_factor = 4;
+  const double idle_max_factor = 1.5;
   // We set the old generation growing factor to 2 to grow the heap slower on
   // memory-constrained devices.
   if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
     max_factor = 2;
   }
 
   // If there are many freed global handles, then the next full GC will
   // likely collect a lot of garbage. Choose the heap growing factor
   // depending on freed global handles.
   // TODO(ulan, hpayer): Take into account mutator utilization.
+  // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
+  // Look into that.
   double factor;
   if (freed_global_handles <= kMinHandles) {
     factor = max_factor;
@@ -5391,11 +5415,10 @@ intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
     factor = min_factor;
   }
 
-  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
-  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-  limit += new_space_.Capacity();
-  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-  return Min(limit, halfway_to_the_max);
+  old_generation_allocation_limit_ =
+      CalculateOldGenerationAllocationLimit(factor, old_gen_size);
+  idle_old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
+      Min(factor, idle_max_factor), old_gen_size);
 }
 
 
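Note: the refactoring above splits limit computation
(CalculateOldGenerationAllocationLimit) from limit assignment
(SetOldGenerationAllocationLimit), which now derives two limits per GC: the
regular one from the chosen factor and an idle-time one from
Min(factor, 1.5). A standalone sketch of the formula, with parameter names of
my own choosing (the real constants live in heap.h):

    #include <algorithm>
    #include <cstdint>

    int64_t CalculateLimit(double factor, int64_t old_gen_size,
                           int64_t min_limit, int64_t new_space_capacity,
                           int64_t max_old_generation_size) {
      int64_t limit = static_cast<int64_t>(old_gen_size * factor);
      limit = std::max(limit, min_limit);  // never shrink below the floor
      limit += new_space_capacity;         // leave room for promotions
      // Cap at halfway between the current size and the configured maximum.
      int64_t halfway = (old_gen_size + max_old_generation_size) / 2;
      return std::min(limit, halfway);
    }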
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -628,6 +628,10 @@ class Heap {
   // Returns of size of all objects residing in the heap.
   intptr_t SizeOfObjects();
 
+  intptr_t old_generation_allocation_limit() const {
+    return old_generation_allocation_limit_;
+  }
+
   // Return the starting address and a mask for the new space. And-masking an
   // address with the mask will result in the start address of the new space
   // for all addresses in either semispace.
@@ -1112,8 +1116,14 @@ class Heap {
   static const int kMaxExecutableSizeHugeMemoryDevice =
       256 * kPointerMultiplier;
 
-  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                        int freed_global_handles);
+  // Calculates the allocation limit based on a given growing factor and a
+  // given old generation size.
+  intptr_t CalculateOldGenerationAllocationLimit(double factor,
+                                                 intptr_t old_gen_size);
+
+  // Sets the allocation limit to trigger the next full garbage collection.
+  void SetOldGenerationAllocationLimit(intptr_t old_gen_size,
+                                       int freed_global_handles);
 
   // Indicates whether inline bump-pointer allocation has been disabled.
   bool inline_allocation_disabled() { return inline_allocation_disabled_; }
@@ -1219,13 +1229,12 @@ class Heap {
     survived_since_last_expansion_ += survived;
   }
 
-  inline bool NextGCIsLikelyToBeFull() {
+  inline bool NextGCIsLikelyToBeFull(intptr_t limit) {
     if (FLAG_gc_global) return true;
 
     if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
 
-    intptr_t adjusted_allocation_limit =
-        old_generation_allocation_limit_ - new_space_.Capacity();
+    intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
 
     if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
 
@@ -1604,6 +1613,10 @@ class Heap {
   // generation and on every allocation in large object space.
   intptr_t old_generation_allocation_limit_;
 
+  // The allocation limit when there is > kMinIdleTimeToStartIncrementalMarking
+  // idle time in the idle time handler.
+  intptr_t idle_old_generation_allocation_limit_;
+
   // Indicates that an allocation has failed in the old generation since the
   // last GC.
   bool old_gen_exhausted_;
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -422,7 +422,9 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
 
 
 bool IncrementalMarking::ShouldActivate() {
-  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
+  return WorthActivating() &&
+         heap_->NextGCIsLikelyToBeFull(
+             heap_->old_generation_allocation_limit());
 }
 
 
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -2391,6 +2391,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
     PropertyAccessType access_type,
     LoadKeyedHoleMode load_mode,
     KeyedAccessStoreMode store_mode) {
+  DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() ||
+         checked_object->IsCheckMaps());
   DCHECK((!IsExternalArrayElementsKind(elements_kind) &&
           !IsFixedTypedArrayElementsKind(elements_kind)) ||
          !is_js_array);
@@ -8401,11 +8403,10 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
       new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
 
       bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-      BuildUncheckedMonomorphicElementAccess(array, length,
-                                             value_to_push, is_array,
-                                             elements_kind, STORE,
-                                             NEVER_RETURN_HOLE,
-                                             STORE_AND_GROW_NO_TRANSITION);
+      HValue* checked_array = Add<HCheckMaps>(array, receiver_map);
+      BuildUncheckedMonomorphicElementAccess(
+          checked_array, length, value_to_push, is_array, elements_kind,
+          STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
 
       if (!ast_context()->IsEffect()) Push(new_size);
       Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
@@ -8763,18 +8764,9 @@ void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
   int args_count_no_receiver = arguments_count - 1;
   if (function->IsConstant() &&
       HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
-    HValue* receiver = environment()->ExpressionStackAt(args_count_no_receiver);
-    Handle<Map> receiver_map;
-    if (receiver->IsConstant() &&
-        HConstant::cast(receiver)->handle(isolate())->IsHeapObject()) {
-      receiver_map =
-          handle(Handle<HeapObject>::cast(
-              HConstant::cast(receiver)->handle(isolate()))->map());
-    }
-
     known_function =
         Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
-    if (TryInlineBuiltinMethodCall(expr, known_function, receiver_map,
+    if (TryInlineBuiltinMethodCall(expr, known_function, Handle<Map>(),
                                    args_count_no_receiver)) {
       if (FLAG_trace_inlining) {
         PrintF("Inlining builtin ");
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -18883,38 +18883,6 @@ void CallCompletedCallbackException() {
 }
 
 
-TEST(SealHandleScope) {
-  v8::Isolate* isolate = CcTest::isolate();
-  v8::HandleScope handle_scope(isolate);
-  LocalContext env;
-
-  v8::SealHandleScope seal(isolate);
-
-  // Should fail
-  v8::Local<v8::Object> obj = v8::Object::New(isolate);
-
-  USE(obj);
-}
-
-
-TEST(SealHandleScopeNested) {
-  v8::Isolate* isolate = CcTest::isolate();
-  v8::HandleScope handle_scope(isolate);
-  LocalContext env;
-
-  v8::SealHandleScope seal(isolate);
-
-  {
-    v8::HandleScope handle_scope(isolate);
-
-    // Should work
-    v8::Local<v8::Object> obj = v8::Object::New(isolate);
-
-    USE(obj);
-  }
-}
-
-
 TEST(CallCompletedCallbackOneException) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -21978,3 +21946,35 @@ TEST(NewStringRangeError) {
   }
   free(buffer);
 }
+
+
+TEST(SealHandleScope) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  LocalContext env;
+
+  v8::SealHandleScope seal(isolate);
+
+  // Should fail
+  v8::Local<v8::Object> obj = v8::Object::New(isolate);
+
+  USE(obj);
+}
+
+
+TEST(SealHandleScopeNested) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  LocalContext env;
+
+  v8::SealHandleScope seal(isolate);
+
+  {
+    v8::HandleScope handle_scope(isolate);
+
+    // Should work
+    v8::Local<v8::Object> obj = v8::Object::New(isolate);
+
+    USE(obj);
+  }
+}
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-indirect-push-unchecked.js
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [1.5];
+
+function p() {
+  Array.prototype.push.call(a, 1.7);
+}
+
+p();
+p();
+p();
+%OptimizeFunctionOnNextCall(p);
+p();
+a.push({});
+p();
+assertEquals(1.7, a[a.length - 1]);
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -63,6 +63,15 @@ TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
 }
 
 
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
 // -----------------------------------------------------------------------------
 // Loads and stores
 
@@ -197,49 +206,37 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
 // TruncateInt64ToInt32.
 
 
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
-  StreamBuilder m(this, kMachInt32, kMachInt64);
-  m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
-  Stream s = m.Build();
-  ASSERT_EQ(0U, s.size());
-}
-
-
 TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
-  TRACED_FORRANGE(int32_t, k, 1, 32) {
-    StreamBuilder m(this, kMachInt32, kMachInt64);
-    Node* const p = m.Parameter(0);
-    Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(k)));
-    m.Return(t);
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
-    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
-    ASSERT_EQ(1U, s[0]->OutputCount());
-    EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
-    EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
-  }
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
 }
 
 
-TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shl) {
-  TRACED_FORRANGE(int32_t, k, 1, 31) {
-    StreamBuilder m(this, kMachInt32, kMachInt64);
-    Node* const p = m.Parameter(0);
-    Node* const t = m.TruncateInt64ToInt32(m.Word64Shl(p, m.Int64Constant(k)));
-    m.Return(t);
-    Stream s = m.Build();
-    ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kX64Shl32, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
-    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(1)));
-    ASSERT_EQ(1U, s[0]->OutputCount());
-    EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
-    EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
-  }
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
 }
 
 
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -91,7 +91,7 @@ consts_misc = [
     { 'name': 'prop_idx_first',
         'value': 'DescriptorArray::kFirstIndex' },
     { 'name': 'prop_type_field',
-        'value': 'DATA' },
+        'value': 'FIELD' },
     { 'name': 'prop_type_mask',
         'value': 'PropertyDetails::TypeField::kMask' },
     { 'name': 'prop_index_mask',