deps: patch V8 to 7.4.288.27
Refs: https://github.com/v8/v8/compare/7.4.288.21...7.4.288.27
PR-URL: https://github.com/nodejs/node/pull/27615
Reviewed-By: Michaël Zasso <targos@protonmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: Rich Trott <rtrott@gmail.com>
This commit is contained in:
parent 3b2633e0bf
commit 95c1cb4c2f
deps/v8/include/v8-version.h (vendored): 2 changes

@@ -11,7 +11,7 @@
 #define V8_MAJOR_VERSION 7
 #define V8_MINOR_VERSION 4
 #define V8_BUILD_NUMBER 288
-#define V8_PATCH_LEVEL 21
+#define V8_PATCH_LEVEL 27

 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
deps/v8/src/api.cc (vendored): 3 changes

@@ -8538,7 +8538,8 @@ void Isolate::EnqueueMicrotask(Local<Function> v8_function) {
   if (!i::JSReceiver::GetContextForMicrotask(function).ToHandle(
           &handler_context))
     handler_context = isolate->native_context();
-  handler_context->microtask_queue()->EnqueueMicrotask(this, v8_function);
+  MicrotaskQueue* microtask_queue = handler_context->microtask_queue();
+  if (microtask_queue) microtask_queue->EnqueueMicrotask(this, v8_function);
 }

 void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
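The guarded enqueue above (repeated in objects.cc and runtime-promise.cc below) accounts for `native_context()->microtask_queue()` returning null once a context's global has been detached; the DetachGlobal_InactiveHandler test at the end of this diff exercises that case. A minimal standalone sketch of the guard pattern, with hypothetical stand-in types (Microtask, MicrotaskQueue, NativeContext here are simplified placeholders, not V8's real internals):

#include <cstdio>

// Hypothetical stand-ins for V8's internal types, for illustration only.
struct Microtask {};
struct MicrotaskQueue {
  void EnqueueMicrotask(const Microtask&) { std::puts("enqueued"); }
};
struct NativeContext {
  MicrotaskQueue* queue = nullptr;  // becomes null after DetachGlobal()
  MicrotaskQueue* microtask_queue() { return queue; }
};

void Enqueue(NativeContext* context, const Microtask& task) {
  // Before the patch the call was unconditional and would dereference null
  // for a detached context; the patch fetches the queue first and checks it.
  MicrotaskQueue* microtask_queue = context->microtask_queue();
  if (microtask_queue) microtask_queue->EnqueueMicrotask(task);
}

int main() {
  MicrotaskQueue queue;
  NativeContext live{&queue};
  NativeContext detached;           // microtask_queue() returns null
  Enqueue(&live, Microtask{});      // prints "enqueued"
  Enqueue(&detached, Microtask{});  // dropped without crashing
}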
deps/v8/src/arm64/assembler-arm64.cc (vendored): 15 changes

@@ -109,14 +109,9 @@ CPURegList CPURegList::GetCalleeSavedV(int size) {


 CPURegList CPURegList::GetCallerSaved(int size) {
-#if defined(V8_OS_WIN)
-  // x18 is reserved as platform register on Windows arm64.
+  // x18 is the platform register and is reserved for the use of platform ABIs.
   // Registers x0-x17 and lr (x30) are caller-saved.
   CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 17);
-#else
-  // Registers x0-x18 and lr (x30) are caller-saved.
-  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
-#endif
   list.Combine(lr);
   return list;
 }

@@ -149,13 +144,7 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
   list.Remove(16);
   list.Remove(17);

-  // Don't add x18 to safepoint list on Windows arm64 because it is reserved
-  // as platform register.
-#if !defined(V8_OS_WIN)
-  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
-  // is a caller-saved register according to the procedure call standard.
-  list.Combine(18);
-#endif
+  // x18 is the platform register and is reserved for the use of platform ABIs.

   // Add the link register (x30) to the safepoint list.
   list.Combine(30);
deps/v8/src/arm64/deoptimizer-arm64.cc (vendored): 12 changes

@@ -55,6 +55,12 @@ void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
   masm->Sub(dst, dst, dst_offset);
 }

+// TODO(jgruber): There's a hack here to explicitly skip restoration of the
+// so-called 'arm64 platform register' x18. The register may be in use by the
+// OS, thus we should not clobber it. Instead of this hack, it would be nicer
+// not to add x18 to the list of saved registers in the first place. The
+// complication here is that we require `reg_list.Count() % 2 == 0` in multiple
+// spots.
 void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
                     const Register& src_base, int src_offset) {
   DCHECK_EQ(reg_list.Count() % 2, 0);

@@ -68,10 +74,8 @@ void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
   Register src = temps.AcquireX();
   masm->Add(src, src_base, src_offset);

-#if defined(V8_OS_WIN)
-  // x18 is reserved as platform register on Windows.
+  // x18 is the platform register and is reserved for the use of platform ABIs.
   restore_list.Remove(x18);
-#endif

   // Restore every register in restore_list from src.
   while (!restore_list.IsEmpty()) {

@@ -79,12 +83,10 @@ void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
     CPURegister reg1 = restore_list.PopLowestIndex();
     int offset0 = reg0.code() * reg_size;

-#if defined(V8_OS_WIN)
     if (reg1 == NoCPUReg) {
       masm->Ldr(reg0, MemOperand(src, offset0));
       break;
     }
-#endif

     int offset1 = reg1.code() * reg_size;
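The TODO above, together with the now-unconditional `reg1 == NoCPUReg` escape, turns the restore loop from strictly pair-wise (ldp) into pair-wise with a possible single-register tail, since removing x18 can leave the list with an odd count. A self-contained sketch of that loop shape, with `int` standing in for CPURegister and -1 for NoCPUReg (illustrative stand-ins, not V8's API):

#include <cstdio>
#include <vector>

constexpr int kNoCPUReg = -1;  // stand-in for V8's NoCPUReg sentinel

// Pops the lowest-index "register", or the sentinel when the list is empty.
int PopLowestIndex(std::vector<int>& list) {
  if (list.empty()) return kNoCPUReg;
  int reg = list.front();
  list.erase(list.begin());
  return reg;
}

void RestoreRegList(std::vector<int> restore_list) {
  while (!restore_list.empty()) {
    int reg0 = PopLowestIndex(restore_list);
    int reg1 = PopLowestIndex(restore_list);
    if (reg1 == kNoCPUReg) {
      std::printf("ldr x%d\n", reg0);  // odd tail: single-register load
      break;
    }
    std::printf("ldp x%d, x%d\n", reg0, reg1);  // usual case: load a pair
  }
}

int main() {
  RestoreRegList({0, 1, 2});  // odd count, e.g. after x18 was removed
}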
deps/v8/src/arm64/macro-assembler-arm64.cc (vendored): 50 changes

@@ -48,20 +48,15 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
   // However, we leave it in the argument list to mirror the prototype for
   // Push/PopCallerSaved().

-#if defined(V8_OS_WIN)
-  // X18 is excluded from caller-saved register list on Windows ARM64 which
-  // makes caller-saved registers in odd number. padreg is used accordingly
-  // to maintain the alignment.
+  // X18 is excluded from caller-saved register list on ARM64 which makes
+  // caller-saved registers in odd number. padreg is used accordingly to
+  // maintain the alignment.
   DCHECK_EQ(list.Count() % 2, 1);
   if (exclusion.Is(no_reg)) {
     bytes += kXRegSizeInBits / 8;
   } else {
     bytes -= kXRegSizeInBits / 8;
   }
-#else
-  DCHECK_EQ(list.Count() % 2, 0);
-  USE(exclusion);
-#endif

   bytes += list.Count() * kXRegSizeInBits / 8;

@@ -77,21 +72,13 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
   int bytes = 0;
   auto list = kCallerSaved;

-#if defined(V8_OS_WIN)
-  // X18 is excluded from caller-saved register list on Windows ARM64, use
-  // padreg accordingly to maintain alignment.
+  // X18 is excluded from caller-saved register list on ARM64, use padreg
+  // accordingly to maintain alignment.
   if (!exclusion.Is(no_reg)) {
     list.Remove(exclusion);
   } else {
     list.Combine(padreg);
   }
-#else
-  if (!exclusion.Is(no_reg)) {
-    // Replace the excluded register with padding to maintain alignment.
-    list.Remove(exclusion);
-    list.Combine(padreg);
-  }
-#endif

   DCHECK_EQ(list.Count() % 2, 0);
   PushCPURegList(list);

@@ -115,21 +102,13 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {

   auto list = kCallerSaved;

-#if defined(V8_OS_WIN)
-  // X18 is excluded from caller-saved register list on Windows ARM64, use
-  // padreg accordingly to maintain alignment.
+  // X18 is excluded from caller-saved register list on ARM64, use padreg
+  // accordingly to maintain alignment.
   if (!exclusion.Is(no_reg)) {
     list.Remove(exclusion);
   } else {
     list.Combine(padreg);
   }
-#else
-  if (!exclusion.Is(no_reg)) {
-    // Replace the excluded register with padding to maintain alignment.
-    list.Remove(exclusion);
-    list.Combine(padreg);
-  }
-#endif

   DCHECK_EQ(list.Count() % 2, 0);
   PopCPURegList(list);

@@ -3389,14 +3368,20 @@ void MacroAssembler::Printf(const char * format,
   TmpList()->set_list(0);
   FPTmpList()->set_list(0);

+  // x18 is the platform register and is reserved for the use of platform ABIs.
+  // It is not part of the kCallerSaved list, but we add it here anyway to
+  // ensure `reg_list.Count() % 2 == 0` which is required in multiple spots.
+  CPURegList saved_registers = kCallerSaved;
+  saved_registers.Combine(x18.code());
+
   // Preserve all caller-saved registers as well as NZCV.
   // PushCPURegList asserts that the size of each list is a multiple of 16
   // bytes.
-  PushCPURegList(kCallerSaved);
+  PushCPURegList(saved_registers);
   PushCPURegList(kCallerSavedV);

   // We can use caller-saved registers as scratch values (except for argN).
-  CPURegList tmp_list = kCallerSaved;
+  CPURegList tmp_list = saved_registers;
   CPURegList fp_tmp_list = kCallerSavedV;
   tmp_list.Remove(arg0, arg1, arg2, arg3);
   fp_tmp_list.Remove(arg0, arg1, arg2, arg3);

@@ -3416,7 +3401,8 @@ void MacroAssembler::Printf(const char * format,
   // to PrintfNoPreserve as an argument.
   Register arg_sp = temps.AcquireX();
   Add(arg_sp, sp,
-      kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
+      saved_registers.TotalSizeInBytes() +
+          kCallerSavedV.TotalSizeInBytes());
   if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
   if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
   if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());

@@ -3441,7 +3427,7 @@ void MacroAssembler::Printf(const char * format,
   }

   PopCPURegList(kCallerSavedV);
-  PopCPURegList(kCallerSaved);
+  PopCPURegList(saved_registers);

   TmpList()->set_list(old_tmp_list);
   FPTmpList()->set_list(old_fp_tmp_list);
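The arithmetic in RequiredStackSizeForCallerSaved keeps the pushed frame a multiple of 16 bytes now that the caller-saved list always has an odd count (x18 excluded): one padding slot is added when nothing else is excluded, and the excluded register's slot is dropped otherwise. A small sketch of that computation, assuming 64-bit registers and an illustrative list count of 11 (the real count comes from kCallerSaved):

#include <cassert>

constexpr int kXRegSizeInBits = 64;  // one general-purpose arm64 register

// Sketch of the stack-size computation for an odd-sized caller-saved list.
int RequiredStackSize(int list_count, bool has_exclusion) {
  int bytes = 0;
  if (list_count % 2 == 1) {
    // Either pad with one extra slot, or drop the excluded register's slot,
    // so the pushed frame stays 16-byte aligned.
    bytes += has_exclusion ? -(kXRegSizeInBits / 8) : (kXRegSizeInBits / 8);
  }
  bytes += list_count * kXRegSizeInBits / 8;
  assert(bytes % 16 == 0);
  return bytes;
}

int main() {
  // 11 registers: 11 * 8 bytes + 8 bytes of padreg = 96 bytes.
  return RequiredStackSize(11, false) == 96 ? 0 : 1;
}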
deps/v8/src/arm64/register-arm64.h (vendored): 16 changes

@@ -28,20 +28,13 @@ namespace internal {
   R(x16) R(x17) R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) \
   R(x24) R(x25) R(x26) R(x27) R(x28) R(x29) R(x30) R(x31)

-#if defined(V8_OS_WIN)
-// x18 is reserved as platform register on Windows ARM64.
+// x18 is the platform register and is reserved for the use of platform ABIs.
+// It is known to be reserved by the OS at least on Windows and iOS.
 #define ALLOCATABLE_GENERAL_REGISTERS(R)                  \
   R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
   R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
   R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25)        \
   R(x27) R(x28)
-#else
-#define ALLOCATABLE_GENERAL_REGISTERS(R)                  \
-  R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
-  R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
-  R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x25) \
-  R(x27) R(x28)
-#endif

 #define FLOAT_REGISTERS(V)                                \
   V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  \

@@ -728,12 +721,7 @@ constexpr Register kJSFunctionRegister = x1;
 constexpr Register kContextRegister = cp;
 constexpr Register kAllocateSizeRegister = x1;

-#if defined(V8_OS_WIN)
-// x18 is reserved as platform register on Windows ARM64.
 constexpr Register kSpeculationPoisonRegister = x23;
-#else
-constexpr Register kSpeculationPoisonRegister = x18;
-#endif

 constexpr Register kInterpreterAccumulatorRegister = x0;
 constexpr Register kInterpreterBytecodeOffsetRegister = x19;
deps/v8/src/builtins/arm64/builtins-arm64.cc (vendored): 12 changes

@@ -1278,15 +1278,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ Mov(
       kInterpreterDispatchTableRegister,
       ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
-#if defined(V8_OS_WIN)
   __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister));
   __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
-#else
-  __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
-                          kInterpreterBytecodeOffsetRegister));
-  __ Mov(x1, Operand(x18, LSL, kSystemPointerSizeLog2));
-#endif
   __ Ldr(kJavaScriptCallCodeStartRegister,
          MemOperand(kInterpreterDispatchTableRegister, x1));
   __ Call(kJavaScriptCallCodeStartRegister);

@@ -1531,15 +1525,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);

   // Dispatch to the target bytecode.
-#if defined(V8_OS_WIN)
   __ Ldrb(x23, MemOperand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister));
   __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2));
-#else
-  __ Ldrb(x18, MemOperand(kInterpreterBytecodeArrayRegister,
-                          kInterpreterBytecodeOffsetRegister));
-  __ Mov(x1, Operand(x18, LSL, kSystemPointerSizeLog2));
-#endif
   __ Ldr(kJavaScriptCallCodeStartRegister,
          MemOperand(kInterpreterDispatchTableRegister, x1));
   __ Jump(kJavaScriptCallCodeStartRegister);
deps/v8/src/compiler/access-info.cc (vendored): 8 changes

@@ -327,6 +327,14 @@ bool AccessInfoFactory::ComputeDataFieldAccessInfo(
       PropertyDetails const details = descriptors->GetDetails(number);
       int index = descriptors->GetFieldIndex(number);
       Representation details_representation = details.representation();
+      if (details_representation.IsNone()) {
+        // The ICs collect feedback in PREMONOMORPHIC state already,
+        // but at this point the {receiver_map} might still contain
+        // fields for which the representation has not yet been
+        // determined by the runtime. So we need to catch this case
+        // here and fall back to use the regular IC logic instead.
+        return false;
+      }
       FieldIndex field_index =
           FieldIndex::ForPropertyIndex(*map, index, details_representation);
       Type field_type = Type::NonInternal();
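The regress-crbug-944865.js file added near the end of this diff appears to be the regression test for this very check: it constructs an object whose map still carries a field with no determined representation and then optimizes a function that accesses it.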
deps/v8/src/compiler/int64-lowering.cc (vendored): 21 changes

@@ -119,6 +119,7 @@ int GetReturnCountAfterLowering(Signature<MachineRepresentation>* signature) {

 void Int64Lowering::LowerWord64AtomicBinop(Node* node, const Operator* op) {
   DCHECK_EQ(5, node->InputCount());
+  LowerMemoryBaseAndIndex(node);
   Node* value = node->InputAt(2);
   node->ReplaceInput(2, GetReplacementLow(value));
   node->InsertInput(zone(), 3, GetReplacementHigh(value));

@@ -143,9 +144,6 @@ int Int64Lowering::GetParameterCountAfterLowering(

 void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
                                   Node*& index_high) {
-  if (HasReplacementLow(index)) {
-    index = GetReplacementLow(index);
-  }
 #if defined(V8_TARGET_LITTLE_ENDIAN)
   index_low = index;
   index_high = graph()->NewNode(machine()->Int32Add(), index,

@@ -179,6 +177,7 @@ void Int64Lowering::LowerNode(Node* node) {
     }

     if (rep == MachineRepresentation::kWord64) {
+      LowerMemoryBaseAndIndex(node);
      Node* base = node->InputAt(0);
      Node* index = node->InputAt(1);
      Node* index_low;

@@ -228,6 +227,7 @@ void Int64Lowering::LowerNode(Node* node) {
      // a new store node to store the high word. The effect and control edges
      // are copied from the original store to the new store node, the effect
      // edge of the original store is redirected to the new store.
+      LowerMemoryBaseAndIndex(node);
      Node* base = node->InputAt(0);
      Node* index = node->InputAt(1);
      Node* index_low;

@@ -900,6 +900,7 @@ void Int64Lowering::LowerNode(Node* node) {
      DCHECK_EQ(5, node->InputCount());
      MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
      if (rep == MachineRepresentation::kWord64) {
+        LowerMemoryBaseAndIndex(node);
        Node* value = node->InputAt(2);
        node->ReplaceInput(2, GetReplacementLow(value));
        node->InsertInput(zone(), 3, GetReplacementHigh(value));

@@ -930,6 +931,7 @@ void Int64Lowering::LowerNode(Node* node) {
    case IrOpcode::kWord64AtomicCompareExchange: {
      MachineType type = AtomicOpType(node->op());
      if (type == MachineType::Uint64()) {
+        LowerMemoryBaseAndIndex(node);
        Node* old_value = node->InputAt(2);
        Node* new_value = node->InputAt(3);
        node->ReplaceInput(2, GetReplacementLow(old_value));

@@ -1051,6 +1053,19 @@ void Int64Lowering::ReplaceNodeWithProjections(Node* node) {
   ReplaceNode(node, low_node, high_node);
 }

+void Int64Lowering::LowerMemoryBaseAndIndex(Node* node) {
+  DCHECK(node != nullptr);
+  // Low word only replacements for memory operands for 32-bit address space.
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  if (HasReplacementLow(base)) {
+    node->ReplaceInput(0, GetReplacementLow(base));
+  }
+  if (HasReplacementLow(index)) {
+    node->ReplaceInput(1, GetReplacementLow(index));
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
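The new LowerMemoryBaseAndIndex helper swaps 64-bit base and index operands for their low words: on a 32-bit target the address space is 32 bits wide, so the high word cannot contribute to the address. A conceptual sketch of the idea outside TurboFan's node machinery (LoweredInt64 and EffectiveAddress are illustrative names, not V8 API):

#include <cstdint>
#include <cstdio>

// A lowered int64 is a (low, high) pair of 32-bit words.
struct LoweredInt64 {
  uint32_t low;
  uint32_t high;
};

LoweredInt64 Lower(uint64_t v) {
  return {static_cast<uint32_t>(v), static_cast<uint32_t>(v >> 32)};
}

// "Low word only replacements for memory operands for 32-bit address
// space": only the low words feed the address computation.
uint32_t EffectiveAddress(LoweredInt64 base, LoweredInt64 index) {
  return base.low + index.low;
}

int main() {
  std::printf("%u\n", EffectiveAddress(Lower(0x100), Lower(0x20)));  // 288
}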
deps/v8/src/compiler/int64-lowering.h (vendored): 1 change

@@ -61,6 +61,7 @@ class V8_EXPORT_PRIVATE Int64Lowering {
   void PreparePhiReplacement(Node* phi);
   void GetIndexNodes(Node* index, Node*& index_low, Node*& index_high);
   void ReplaceNodeWithProjections(Node* node);
+  void LowerMemoryBaseAndIndex(Node* node);

   struct NodeState {
     Node* node;
deps/v8/src/conversions-inl.h (vendored): 21 changes

@@ -59,9 +59,24 @@ inline unsigned int FastD2UI(double x) {


 inline float DoubleToFloat32(double x) {
-  typedef std::numeric_limits<float> limits;
-  if (x > limits::max()) return limits::infinity();
-  if (x < limits::lowest()) return -limits::infinity();
+  using limits = std::numeric_limits<float>;
+  if (x > limits::max()) {
+    // kRoundingThreshold is the maximum double that rounds down to
+    // the maximum representable float. Its mantissa bits are:
+    // 1111111111111111111111101111111111111111111111111111
+    // [<--- float range --->]
+    // Note the zero-bit right after the float mantissa range, which
+    // determines the rounding-down.
+    static const double kRoundingThreshold = 3.4028235677973362e+38;
+    if (x <= kRoundingThreshold) return limits::max();
+    return limits::infinity();
+  }
+  if (x < limits::lowest()) {
+    // Same as above, mirrored to negative numbers.
+    static const double kRoundingThreshold = -3.4028235677973362e+38;
+    if (x >= kRoundingThreshold) return limits::lowest();
+    return -limits::infinity();
+  }
   return static_cast<float>(x);
 }
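The constant 3.4028235677973362e+38 can be checked directly: under the default IEEE-754 round-to-nearest mode it is the largest double that still converts to FLT_MAX, and the next representable double up already converts to infinity. A small verification sketch (assuming the default rounding mode):

#include <cassert>
#include <cfloat>
#include <cmath>
#include <limits>

int main() {
  const double kRoundingThreshold = 3.4028235677973362e+38;
  // Largest double that rounds down to the largest finite float...
  assert(static_cast<float>(kRoundingThreshold) == FLT_MAX);
  // ...while one double ulp higher rounds up to infinity.
  const double just_above =
      std::nextafter(kRoundingThreshold, std::numeric_limits<double>::max());
  assert(static_cast<float>(just_above) ==
         std::numeric_limits<float>::infinity());
  return 0;
}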
deps/v8/src/objects.cc (vendored): 11 changes

@@ -5999,7 +5999,9 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
                           promise)
         .Check();
   }
-  isolate->native_context()->microtask_queue()->EnqueueMicrotask(*task);
+  MicrotaskQueue* microtask_queue =
+      isolate->native_context()->microtask_queue();
+  if (microtask_queue) microtask_queue->EnqueueMicrotask(*task);

   // 13. Return undefined.
   return isolate->factory()->undefined_value();

@@ -6081,8 +6083,11 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
               PromiseRejectReactionJobTask::kPromiseOrCapabilityOffset));
     }

-    handler_context->microtask_queue()->EnqueueMicrotask(
-        *Handle<PromiseReactionJobTask>::cast(task));
+    MicrotaskQueue* microtask_queue = handler_context->microtask_queue();
+    if (microtask_queue) {
+      microtask_queue->EnqueueMicrotask(
+          *Handle<PromiseReactionJobTask>::cast(task));
+    }
   }

   return isolate->factory()->undefined_value();
deps/v8/src/objects/js-promise.h (vendored): 4 changes

@@ -53,8 +53,8 @@ class JSPromise : public JSObject {
   void set_status(Promise::PromiseState status);

   // ES section #sec-fulfillpromise
-  static Handle<Object> Fulfill(Handle<JSPromise> promise,
-                                Handle<Object> value);
+  V8_EXPORT_PRIVATE static Handle<Object> Fulfill(Handle<JSPromise> promise,
+                                                  Handle<Object> value);
   // ES section #sec-rejectpromise
   static Handle<Object> Reject(Handle<JSPromise> promise, Handle<Object> reason,
                                bool debug_event = true);
deps/v8/src/runtime/runtime-promise.cc (vendored): 4 changes

@@ -79,7 +79,9 @@ RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {

   Handle<CallableTask> microtask = isolate->factory()->NewCallableTask(
       function, handle(function->native_context(), isolate));
-  function->native_context()->microtask_queue()->EnqueueMicrotask(*microtask);
+  MicrotaskQueue* microtask_queue =
+      function->native_context()->microtask_queue();
+  if (microtask_queue) microtask_queue->EnqueueMicrotask(*microtask);
   return ReadOnlyRoots(isolate).undefined_value();
 }
deps/v8/src/snapshot/code-serializer.cc (vendored): 61 changes

@@ -187,6 +187,18 @@ void CodeSerializer::SerializeObject(HeapObject obj) {
     return;
   }

+  // NOTE(mmarchini): If we try to serialize an InterpreterData our process
+  // will crash since it stores a code object. Instead, we serialize the
+  // bytecode array stored within the InterpreterData, which is the important
+  // information. On deserialization we'll create our code objects again, if
+  // --interpreted-frames-native-stack is on. See v8:9122 for more context
+#ifndef V8_TARGET_ARCH_ARM
+  if (V8_UNLIKELY(FLAG_interpreted_frames_native_stack) &&
+      obj->IsInterpreterData()) {
+    obj = InterpreterData::cast(obj)->bytecode_array();
+  }
+#endif  // V8_TARGET_ARCH_ARM
+
   if (obj->IsBytecodeArray()) {
     // Clear the stack frame cache if present
     BytecodeArray::cast(obj)->ClearFrameCacheFromSourcePositionTable();

@@ -210,6 +222,48 @@ void CodeSerializer::SerializeGeneric(HeapObject heap_object) {
   serializer.Serialize();
 }

+#ifndef V8_TARGET_ARCH_ARM
+// NOTE(mmarchini): when FLAG_interpreted_frames_native_stack is on, we want to
+// create duplicates of InterpreterEntryTrampoline for the deserialized
+// functions, otherwise we'll call the builtin IET for those functions (which
+// is not what a user of this flag wants).
+void CreateInterpreterDataForDeserializedCode(Isolate* isolate,
+                                              Handle<SharedFunctionInfo> sfi,
+                                              bool log_code_creation) {
+  Script script = Script::cast(sfi->script());
+  Handle<Script> script_handle(script, isolate);
+  String name = ReadOnlyRoots(isolate).empty_string();
+  if (script->name()->IsString()) name = String::cast(script->name());
+  Handle<String> name_handle(name, isolate);
+
+  SharedFunctionInfo::ScriptIterator iter(isolate, script);
+  for (SharedFunctionInfo info = iter.Next(); !info.is_null();
+       info = iter.Next()) {
+    if (!info->HasBytecodeArray()) continue;
+    Handle<Code> code = isolate->factory()->CopyCode(Handle<Code>::cast(
+        isolate->factory()->interpreter_entry_trampoline_for_profiling()));
+
+    Handle<InterpreterData> interpreter_data =
+        Handle<InterpreterData>::cast(isolate->factory()->NewStruct(
+            INTERPRETER_DATA_TYPE, TENURED));
+
+    interpreter_data->set_bytecode_array(info->GetBytecodeArray());
+    interpreter_data->set_interpreter_trampoline(*code);
+
+    info->set_interpreter_data(*interpreter_data);
+
+    if (!log_code_creation) continue;
+    Handle<AbstractCode> abstract_code = Handle<AbstractCode>::cast(code);
+    int line_num = script->GetLineNumber(info->StartPosition()) + 1;
+    int column_num = script->GetColumnNumber(info->StartPosition()) + 1;
+    PROFILE(isolate,
+            CodeCreateEvent(CodeEventListener::INTERPRETED_FUNCTION_TAG,
+                            *abstract_code, info, *name_handle, line_num,
+                            column_num));
+  }
+}
+#endif  // V8_TARGET_ARCH_ARM
+
 MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
     Isolate* isolate, ScriptData* cached_data, Handle<String> source,
     ScriptOriginOptions origin_options) {

@@ -253,6 +307,13 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
       isolate->logger()->is_listening_to_code_events() ||
      isolate->is_profiling() ||
      isolate->code_event_dispatcher()->IsListeningToCodeEvents();

+#ifndef V8_TARGET_ARCH_ARM
+  if (V8_UNLIKELY(FLAG_interpreted_frames_native_stack))
+    CreateInterpreterDataForDeserializedCode(isolate, result,
+                                             log_code_creation);
+#endif  // V8_TARGET_ARCH_ARM
+
   if (log_code_creation || FLAG_log_function_events) {
     String name = ReadOnlyRoots(isolate).empty_string();
     Script script = Script::cast(result->script());
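The LogInterpretedFramesNativeStackWithSerialization test added to test-log.cc below appears to exercise this path end to end: it compiles a function, produces a code cache, and verifies that the function deserialized from that cache is still logged as an InterpretedFunction.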
@@ -60,10 +60,11 @@

 #elif V8_TARGET_ARCH_ARM64

-// x16: ip0, x17: ip1, x26: root, x27: cp, x29: fp, x30: lr, x31: xzr.
+// x16: ip0, x17: ip1, x18: platform register, x26: root, x27: cp, x29: fp,
+// x30: lr, x31: xzr.
 constexpr RegList kLiftoffAssemblerGpCacheRegs =
     CPURegister::ListOf<x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12,
-                        x13, x14, x15, x18, x19, x20, x21, x22, x23, x24, x25,
+                        x13, x14, x15, x19, x20, x21, x22, x23, x24, x25,
                         x28>();

 // d15: fp_zero, d30-d31: macro-assembler scratch V Registers.
deps/v8/test/cctest/cctest.status (vendored): 10 changes

@@ -580,6 +580,14 @@
   'test-cpu-profiler/TickLinesBaseline': [SKIP],
   'test-cpu-profiler/TickLinesOptimized': [SKIP],
   'test-cpu-profiler/Inlining2': [SKIP],
+
+  # TODO(mythria): Code logging tests that currently fail with lazy feedback
+  # allocation. Fix logging to work without feedback vectors and enable these
+  # tests in lite_mode.
+  'test-log/ExternalCodeEventListenerWithInterpretedFramesNativeStack': [SKIP],
+  'test-log/LogInterpretedFramesNativeStack': [SKIP],
+  'test-log/LogInterpretedFramesNativeStackWithSerialization': [SKIP],
+  'test-serialize/CodeSerializerOnePlusOneWithInterpretedFramesNativeStack': [SKIP]
 }], # lite_mode

 ##############################################################################

@@ -618,6 +626,8 @@
   # --interpreted-frames-native-stack tests
   'test-log/ExternalCodeEventListenerWithInterpretedFramesNativeStack': [SKIP],
   'test-log/LogInterpretedFramesNativeStack': [SKIP],
+  'test-log/LogInterpretedFramesNativeStackWithSerialization': [SKIP],
+  'test-serialize/CodeSerializerOnePlusOneWithInterpretedFramesNativeStack': [SKIP],

   # Crashes on native arm.
   'test-macro-assembler-arm/ExtractLane': [PASS, ['arch == arm and not simulator_run', SKIP]],
deps/v8/test/cctest/test-log.cc (vendored): 63 changes

@@ -31,6 +31,7 @@
 #include <vector>
 #include "src/api-inl.h"
+#include "src/builtins/builtins.h"
 #include "src/compilation-cache.h"
 #include "src/log-utils.h"
 #include "src/log.h"
 #include "src/objects-inl.h"

@@ -659,6 +660,68 @@ TEST(LogInterpretedFramesNativeStack) {
   }
   isolate->Dispose();
 }
+
+TEST(LogInterpretedFramesNativeStackWithSerialization) {
+  SETUP_FLAGS();
+  i::FLAG_interpreted_frames_native_stack = true;
+  i::FLAG_always_opt = false;
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+  v8::ScriptCompiler::CachedData* cache = nullptr;
+
+  bool has_cache = cache != nullptr;
+  // NOTE(mmarchini): Runs the test two times. The first time it will compile
+  // our script and will create a code cache for it. The second time we'll
+  // deserialize the cache and check if our function was logged correctly.
+  // We disallow compilation on the second run to ensure we're loading from
+  // cache.
+  do {
+    v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+    {
+      ScopedLoggerInitializer logger(saved_log, saved_prof, isolate);
+
+      has_cache = cache != nullptr;
+      v8::ScriptCompiler::CompileOptions options =
+          has_cache ? v8::ScriptCompiler::kConsumeCodeCache
+                    : v8::ScriptCompiler::kEagerCompile;
+
+      v8::HandleScope scope(isolate);
+      v8::Isolate::Scope isolate_scope(isolate);
+      v8::Local<v8::Context> context = v8::Context::New(isolate);
+      v8::Local<v8::String> source = v8_str(
+          "function eyecatcher() { return a * a; } return eyecatcher();");
+      v8::Local<v8::String> arg_str = v8_str("a");
+      v8::ScriptOrigin origin(v8_str("filename"));
+
+      i::DisallowCompilation* no_compile_expected =
+          has_cache ? new i::DisallowCompilation(
+                          reinterpret_cast<i::Isolate*>(isolate))
+                    : nullptr;
+
+      v8::ScriptCompiler::Source script_source(source, origin, cache);
+      v8::Local<v8::Function> fun =
+          v8::ScriptCompiler::CompileFunctionInContext(
+              context, &script_source, 1, &arg_str, 0, nullptr, options)
+              .ToLocalChecked();
+      if (has_cache) {
+        logger.StopLogging();
+        CHECK(logger.ContainsLine({"InterpretedFunction", "eyecatcher"}));
+      }
+      v8::Local<v8::Value> arg = v8_num(3);
+      v8::Local<v8::Value> result =
+          fun->Call(context, v8::Undefined(isolate), 1, &arg).ToLocalChecked();
+      CHECK_EQ(9, result->Int32Value(context).FromJust());
+      cache = v8::ScriptCompiler::CreateCodeCacheForFunction(fun);
+
+      if (no_compile_expected != nullptr) delete no_compile_expected;
+    }
+
+    isolate->Dispose();
+  } while (!has_cache);
+  delete cache;
+}
 #endif  // V8_TARGET_ARCH_ARM

 TEST(ExternalCodeEventListener) {
deps/v8/test/cctest/test-serialize.cc (vendored): 14 changes

@@ -1578,7 +1578,7 @@ static Handle<SharedFunctionInfo> CompileScriptAndProduceCache(
   return sfi;
 }

-void TestCodeSerializerOnePlusOneImpl() {
+void TestCodeSerializerOnePlusOneImpl(bool verify_builtins_count = true) {
   LocalContext context;
   Isolate* isolate = CcTest::i_isolate();
   isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.

@@ -1622,13 +1622,23 @@
       Execution::Call(isolate, copy_fun, global, 0, nullptr).ToHandleChecked();
   CHECK_EQ(2, Handle<Smi>::cast(copy_result)->value());

-  CHECK_EQ(builtins_count, CountBuiltins());
+  if (verify_builtins_count) CHECK_EQ(builtins_count, CountBuiltins());

   delete cache;
 }

 TEST(CodeSerializerOnePlusOne) { TestCodeSerializerOnePlusOneImpl(); }

+// See bug v8:9122
+#ifndef V8_TARGET_ARCH_ARM
+TEST(CodeSerializerOnePlusOneWithInterpretedFramesNativeStack) {
+  FLAG_interpreted_frames_native_stack = true;
+  // We pass false because this test will create IET copies (which are
+  // builtins).
+  TestCodeSerializerOnePlusOneImpl(false);
+}
+#endif
+
 TEST(CodeSerializerOnePlusOneWithDebugger) {
   v8::HandleScope scope(CcTest::isolate());
   static v8::debug::DebugDelegate dummy_delegate;
@@ -541,14 +541,23 @@ void RunNonConstIndexTest(ExecutionTier execution_tier, WasmOpcode wasm_op,
            static_cast<uint32_t>(r.builder().ReadMemory(&memory[0])));
 }

+// Test a set of Narrow operations
 #define TEST_OPERATION(Name)                                               \
-  WASM_EXEC_TEST(I64AtomicConstIndex##Name) {                              \
+  WASM_EXEC_TEST(I64AtomicConstIndex##Name##Narrow) {                      \
     RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name##32U, Name); \
   }
 OPERATION_LIST(TEST_OPERATION)
 #undef TEST_OPERATION

-WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchange) {
+// Test a set of Regular operations
+#define TEST_OPERATION(Name)                                          \
+  WASM_EXEC_TEST(I64AtomicConstIndex##Name) {                         \
+    RunNonConstIndexTest(execution_tier, kExprI64Atomic##Name, Name); \
+  }
+OPERATION_LIST(TEST_OPERATION)
+#undef TEST_OPERATION
+
+WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchangeNarrow) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint32_t, uint64_t, uint64_t> r(execution_tier);
   uint64_t* memory =

@@ -567,6 +576,25 @@ WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchange) {
            static_cast<uint16_t>(r.builder().ReadMemory(&memory[0])));
 }

+WASM_EXEC_TEST(I64AtomicNonConstIndexCompareExchange) {
+  EXPERIMENTAL_FLAG_SCOPE(threads);
+  WasmRunner<uint32_t, uint64_t, uint64_t> r(execution_tier);
+  uint64_t* memory =
+      r.builder().AddMemoryElems<uint64_t>(kWasmPageSize / sizeof(uint64_t));
+  r.builder().SetHasSharedMemory();
+
+  BUILD(r, WASM_I32_CONVERT_I64(WASM_ATOMICS_TERNARY_OP(
+               kExprI64AtomicCompareExchange,
+               WASM_I64_EQ(WASM_I64V(1), WASM_I64V(0)), WASM_GET_LOCAL(0),
+               WASM_GET_LOCAL(1), MachineRepresentation::kWord16)));
+
+  uint64_t initial = 4444333322221111, local = 0x9999888877776666;
+  r.builder().WriteMemory(&memory[0], initial);
+  CHECK_EQ(static_cast<uint32_t>(initial), r.Call(initial, local));
+  CHECK_EQ(CompareExchange(initial, initial, local),
+           r.builder().ReadMemory(&memory[0]));
+}
+
 WASM_EXEC_TEST(I64AtomicNonConstIndexLoad8U) {
   EXPERIMENTAL_FLAG_SCOPE(threads);
   WasmRunner<uint32_t> r(execution_tier);
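These tests are stamped out with an X-macro: OPERATION_LIST applies TEST_OPERATION to each operation name, so one macro body generates a test per operation. A minimal standalone sketch of the pattern (Add/Sub/Exchange are illustrative stand-ins for the real operation list):

#include <cstdio>

// The list macro names every operation once...
#define OPERATION_LIST(V) V(Add) V(Sub) V(Exchange)

// ...and applying a per-name macro stamps out one function per entry.
#define TEST_OPERATION(Name) \
  void Test##Name() { std::printf("running Test%s\n", #Name); }
OPERATION_LIST(TEST_OPERATION)
#undef TEST_OPERATION

int main() {
  TestAdd();
  TestSub();
  TestExchange();
}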
deps/v8/test/mjsunit/regress/regress-crbug-944865.js (vendored, new file): 15 changes

@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+  const r = {e: NaN, g: undefined, c: undefined};
+  const u = {__proto__: {}, e: new Set(), g: 0, c: undefined};
+  return r;
+}
+foo();
+%OptimizeFunctionOnNextCall(foo);
+const o = foo();
+Object.defineProperty(o, 'c', {value: 42});
@@ -36,11 +36,11 @@ class BackgroundCompileTaskTest : public TestWithNativeContext {
   static void SetUpTestCase() {
     CHECK_NULL(save_flags_);
     save_flags_ = new SaveFlags();
-    TestWithNativeContext ::SetUpTestCase();
+    TestWithNativeContext::SetUpTestCase();
   }

   static void TearDownTestCase() {
-    TestWithNativeContext ::TearDownTestCase();
+    TestWithNativeContext::TearDownTestCase();
     CHECK_NOT_NULL(save_flags_);
     delete save_flags_;
     save_flags_ = nullptr;

@@ -59,11 +59,11 @@ class CompilerDispatcherTest : public TestWithNativeContext {

   static void SetUpTestCase() {
     CompilerDispatcherTestFlags::SetFlagsForTest();
-    TestWithNativeContext ::SetUpTestCase();
+    TestWithNativeContext::SetUpTestCase();
   }

   static void TearDownTestCase() {
-    TestWithNativeContext ::TearDownTestCase();
+    TestWithNativeContext::TearDownTestCase();
     CompilerDispatcherTestFlags::RestoreFlags();
   }
@@ -32,17 +32,34 @@ void RunStdFunction(void* data) {
 template <typename TMixin>
 class WithFinalizationGroupMixin : public TMixin {
  public:
-  WithFinalizationGroupMixin() {
+  WithFinalizationGroupMixin() = default;
+  ~WithFinalizationGroupMixin() override = default;
+
+  static void SetUpTestCase() {
+    CHECK_NULL(save_flags_);
+    save_flags_ = new SaveFlags();
     FLAG_harmony_weak_refs = true;
     FLAG_expose_gc = true;
     FLAG_allow_natives_syntax = true;
+    TMixin::SetUpTestCase();
+  }
+
+  static void TearDownTestCase() {
+    TMixin::TearDownTestCase();
+    CHECK_NOT_NULL(save_flags_);
+    delete save_flags_;
+    save_flags_ = nullptr;
   }

  private:
-  SaveFlags save_flags_;
+  static SaveFlags* save_flags_;

   DISALLOW_COPY_AND_ASSIGN(WithFinalizationGroupMixin);
 };

+template <typename TMixin>
+SaveFlags* WithFinalizationGroupMixin<TMixin>::save_flags_ = nullptr;
+
 using TestWithNativeContextAndFinalizationGroup =  //
     WithInternalIsolateMixin<                      //
         WithContextMixin<                          //

@@ -498,5 +515,47 @@ TEST_F(MicrotaskQueueTest, DetachGlobal_HandlerContext) {
           .FromJust());
 }

+TEST_F(MicrotaskQueueTest, DetachGlobal_InactiveHandler) {
+  Local<v8::Context> sub_context = v8::Context::New(v8_isolate());
+  Utils::OpenHandle(*sub_context)
+      ->native_context()
+      ->set_microtask_queue(microtask_queue());
+
+  Handle<JSArray> result;
+  Handle<JSFunction> stale_handler;
+  Handle<JSPromise> stale_promise;
+  {
+    v8::Context::Scope scope(sub_context);
+    result = RunJS<JSArray>("var result = [false, false]; result");
+    stale_handler = RunJS<JSFunction>("() => { result[0] = true; }");
+    stale_promise = RunJS<JSPromise>(
+        "var stale_promise = new Promise(()=>{});"
+        "stale_promise");
+    RunJS("stale_promise.then(() => { result [1] = true; });");
+  }
+  sub_context->DetachGlobal();
+  sub_context.Clear();
+
+  // The context of |stale_handler| and |stale_promise| is detached at this
+  // point.
+  // Ensure that resolution handling for |stale_handler| is cancelled without
+  // crash. Also, the resolution of |stale_promise| is also cancelled.
+
+  SetGlobalProperty("stale_handler", Utils::ToLocal(stale_handler));
+  RunJS("%EnqueueMicrotask(stale_handler)");
+
+  v8_isolate()->EnqueueMicrotask(Utils::ToLocal(stale_handler));
+
+  JSPromise::Fulfill(
+      stale_promise,
+      handle(ReadOnlyRoots(isolate()).undefined_value(), isolate()));
+
+  microtask_queue()->RunMicrotasks(isolate());
+  EXPECT_TRUE(
+      Object::GetElement(isolate(), result, 0).ToHandleChecked()->IsFalse());
+  EXPECT_TRUE(
+      Object::GetElement(isolate(), result, 1).ToHandleChecked()->IsFalse());
+}
+
 }  // namespace internal
 }  // namespace v8