Upgrade V8 to 2.4.4
This commit is contained in:
parent d2de8ba400
commit 431e43009c
5 deps/v8/AUTHORS vendored
@@ -9,6 +9,8 @@ ARM Ltd.
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexandre Vassalotti <avassalotti@gmail.com>
Andreas Anyuru <andreas.anyuru@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
@@ -21,6 +23,7 @@ John Jozwiak <jjozwiak@codeaurora.org>
Kun Zhang <zhangk@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Michael Smith <mike@w3.org>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Rafal Krypa <rafal@krypa.net>
@@ -28,6 +31,4 @@ Rene Rebe <rene@exactcode.de>
Rodolph Perfetta <rodolph.perfetta@arm.com>
Ryan Dahl <coldredlemur@gmail.com>
Subrato K De <subratokde@codeaurora.org>
Burcu Dogan <burcujdogan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
18 deps/v8/ChangeLog vendored
@@ -1,3 +1,21 @@
2010-09-15: Version 2.4.4

        Fix bug with hangs on very large sparse arrays.

        Try harder to free up memory when running out of space.

        Add heap snapshots to JSON format to API.

        Recalibrate benchmarks.


2010-09-13: Version 2.4.3

        Made Date.parse properly handle TZ offsets (issue 857).

        Performance improvements on all platforms.


2010-09-08: Version 2.4.2

        Fixed GC crash bug.
2 deps/v8/benchmarks/crypto.js vendored
@@ -31,7 +31,7 @@
// The code has been adapted for use as a benchmark by Google.
var Crypto = new BenchmarkSuite('Crypto', 110465, [
var Crypto = new BenchmarkSuite('Crypto', 266181, [
  new Benchmark("Encrypt", encrypt),
  new Benchmark("Decrypt", decrypt)
]);
2 deps/v8/benchmarks/deltablue.js vendored
@@ -23,7 +23,7 @@
// more like a JavaScript program.

var DeltaBlue = new BenchmarkSuite('DeltaBlue', 30282, [
var DeltaBlue = new BenchmarkSuite('DeltaBlue', 66118, [
  new Benchmark('DeltaBlue', deltaBlue)
]);
2 deps/v8/benchmarks/earley-boyer.js vendored
@@ -1,7 +1,7 @@
// This file is automatically generated by scheme2js, except for the
// benchmark harness code at the beginning and end of the file.

var EarleyBoyer = new BenchmarkSuite('EarleyBoyer', 280581, [
var EarleyBoyer = new BenchmarkSuite('EarleyBoyer', 666463, [
  new Benchmark("Earley", function () { BgL_earleyzd2benchmarkzd2(); }),
  new Benchmark("Boyer", function () { BgL_nboyerzd2benchmarkzd2(); })
]);
2 deps/v8/benchmarks/raytrace.js vendored
@@ -8,7 +8,7 @@
// untouched. This file also contains a copy of parts of the Prototype
// JavaScript framework which is used by the ray tracer.

var RayTrace = new BenchmarkSuite('RayTrace', 533115, [
var RayTrace = new BenchmarkSuite('RayTrace', 739989, [
  new Benchmark('RayTrace', renderScene)
]);
2 deps/v8/benchmarks/regexp.js vendored
@@ -35,7 +35,7 @@
// letters in the data are encoded using ROT13 in a way that does not
// affect how the regexps match their input.

var RegRxp = new BenchmarkSuite('RegExp', 601250, [
var RegRxp = new BenchmarkSuite('RegExp', 910985, [
  new Benchmark("RegExp", runRegExpBenchmark)
]);
2 deps/v8/benchmarks/richards.js vendored
@@ -35,7 +35,7 @@
// Martin Richards.

var Richards = new BenchmarkSuite('Richards', 20687, [
var Richards = new BenchmarkSuite('Richards', 35302, [
  new Benchmark("Richards", runRichards)
]);
2 deps/v8/benchmarks/splay.js vendored
@@ -33,7 +33,7 @@
// also has to deal with a lot of changes to the large tree object
// graph.

var Splay = new BenchmarkSuite('Splay', 21915, [
var Splay = new BenchmarkSuite('Splay', 81491, [
  new Benchmark("Splay", SplayRun, SplaySetup, SplayTearDown)
]);
29 deps/v8/include/v8-profiler.h vendored
@@ -323,7 +323,10 @@ class V8EXPORT HeapSnapshot {
  enum Type {
    kFull = 0,       // Heap snapshot with all instances and references.
    kAggregated = 1  // Snapshot doesn't contain individual heap entries,
                     //instead they are grouped by constructor name.
                     // instead they are grouped by constructor name.
  };
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };

  /** Returns heap snapshot type. */
@@ -343,6 +346,30 @@ class V8EXPORT HeapSnapshot {
   * of the same type can be compared.
   */
  const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;

  /**
   * Prepare a serialized representation of the snapshot. The result
   * is written into the stream provided in chunks of specified size.
   * The total length of the serialized snapshot is unknown in
   * advance, it is can be roughly equal to JS heap size (that means,
   * it can be really big - tens of megabytes).
   *
   * For the JSON format, heap contents are represented as an object
   * with the following structure:
   *
   *  {
   *    snapshot: {title: "...", uid: nnn},
   *    nodes: [
   *      meta-info (JSON string),
   *      nodes themselves
   *    ],
   *    strings: [strings]
   *  }
   *
   * Outgoing node links are stored after each node. Nodes reference strings
   * and other nodes by their indexes in corresponding arrays.
   */
  void Serialize(OutputStream* stream, SerializationFormat format) const;
};
28 deps/v8/include/v8.h vendored
@@ -3196,6 +3196,34 @@ class V8EXPORT Locker {
};


/**
 * An interface for exporting data from V8, using "push" model.
 */
class V8EXPORT OutputStream {
 public:
  enum OutputEncoding {
    kAscii = 0  // 7-bit ASCII.
  };
  enum WriteResult {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~OutputStream() {}
  /** Notify about the end of stream. */
  virtual void EndOfStream() = 0;
  /** Get preferred output chunk size. Called only once. */
  virtual int GetChunkSize() { return 1024; }
  /** Get preferred output encoding. Called only once. */
  virtual OutputEncoding GetOutputEncoding() { return kAscii; }
  /**
   * Writes the next chunk of snapshot data into the stream. Writing
   * can be stopped by returning kAbort as function result. EndOfStream
   * will not be called in case writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
};
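A minimal sketch of how this new push-model interface might be consumed, assuming only the declarations above; the StringOutputStream name and the usage lines are illustrative, not part of this commit:

#include <string>
#include "v8.h"
#include "v8-profiler.h"

// Collects the ASCII chunks pushed by the serializer into a std::string.
class StringOutputStream : public v8::OutputStream {
 public:
  virtual void EndOfStream() {}
  virtual WriteResult WriteAsciiChunk(char* data, int size) {
    buffer_.append(data, size);
    return kContinue;  // returning kAbort would stop serialization early
  }
  const std::string& buffer() const { return buffer_; }
 private:
  std::string buffer_;
};

// Hypothetical usage, once a snapshot has been taken:
//   const v8::HeapSnapshot* snapshot =
//       v8::HeapProfiler::TakeSnapshot(v8::String::New("dump"));
//   StringOutputStream stream;
//   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);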


// --- I m p l e m e n t a t i o n ---
17 deps/v8/src/api.cc vendored
@@ -4739,6 +4739,23 @@ const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
}


void HeapSnapshot::Serialize(OutputStream* stream,
                             HeapSnapshot::SerializationFormat format) const {
  IsDeadCheck("v8::HeapSnapshot::Serialize");
  ApiCheck(format == kJSON,
           "v8::HeapSnapshot::Serialize",
           "Unknown serialization format");
  ApiCheck(stream->GetOutputEncoding() == OutputStream::kAscii,
           "v8::HeapSnapshot::Serialize",
           "Unsupported output encoding");
  ApiCheck(stream->GetChunkSize() > 0,
           "v8::HeapSnapshot::Serialize",
           "Invalid stream chunk size");
  i::HeapSnapshotJSONSerializer serializer(ToInternal(this));
  serializer.Serialize(stream);
}


int HeapProfiler::GetSnapshotsCount() {
  IsDeadCheck("v8::HeapProfiler::GetSnapshotsCount");
  return i::HeapProfiler::GetSnapshotsCount();
152 deps/v8/src/arm/code-stubs-arm.cc vendored
@@ -930,6 +930,24 @@ void CompareStub::Generate(MacroAssembler* masm) {
  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  if (include_smi_compare_) {
    Label not_two_smis, smi_done;
    __ orr(r2, r1, r0);
    __ tst(r2, Operand(kSmiTagMask));
    __ b(ne, &not_two_smis);
    __ sub(r0, r1, r0);
    __ b(vc, &smi_done);
    // Correct the sign in case of overflow.
    __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
    __ bind(&smi_done);
    __ Ret();
    __ bind(&not_two_smis);
  } else if (FLAG_debug_code) {
    __ orr(r2, r1, r0);
    __ tst(r2, Operand(kSmiTagMask));
    __ Assert(nz, "CompareStub: unexpected smi operands.");
  }

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.
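The fast path above works because two tagged smis compare like plain integers: subtracting them gives a result with the correct sign unless the subtraction overflows, in which case the sign is simply flipped (the rsb above). A hedged C sketch of the same idea, with an illustrative helper name:

#include <stdint.h>

// Compare two tagged smis by subtraction, correcting the sign on
// 32-bit overflow -- mirroring the sub / b(vc) / rsb sequence above.
int32_t SmiCompare(int32_t lhs, int32_t rhs) {
  int64_t diff = (int64_t)lhs - (int64_t)rhs;  // exact difference
  if ((int32_t)diff != diff) {
    // The 32-bit subtraction overflowed; the truncated result has the
    // wrong sign, so report based on the exact difference instead.
    return diff < 0 ? -1 : 1;
  }
  return (int32_t)diff;  // <0, 0 or >0, like the stub's result in r0
}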
@@ -2288,7 +2306,7 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
  __ push(r0);
  __ TailCallRuntime(Runtime::kStackGuard, 1, 1);

  __ StubReturn(1);
  __ Ret();
}


@@ -2299,32 +2317,37 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  if (op_ == Token::SUB) {
    // Check whether the value is a smi.
    Label try_float;
    __ tst(r0, Operand(kSmiTagMask));
    __ b(ne, &try_float);
    if (include_smi_code_) {
      // Check whether the value is a smi.
      Label try_float;
      __ tst(r0, Operand(kSmiTagMask));
      __ b(ne, &try_float);

    // Go slow case if the value of the expression is zero
    // to make sure that we switch between 0 and -0.
    if (negative_zero_ == kStrictNegativeZero) {
      // If we have to check for zero, then we can check for the max negative
      // smi while we are at it.
      __ bic(ip, r0, Operand(0x80000000), SetCC);
      __ b(eq, &slow);
      __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
      __ StubReturn(1);
    } else {
      // The value of the expression is a smi and 0 is OK for -0. Try
      // optimistic subtraction '0 - value'.
      __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
      __ StubReturn(1, vc);
      // We don't have to reverse the optimistic neg since the only case
      // where we fall through is the minimum negative Smi, which is the case
      // where the neg leaves the register unchanged.
      __ jmp(&slow);  // Go slow on max negative Smi.
      // Go slow case if the value of the expression is zero
      // to make sure that we switch between 0 and -0.
      if (negative_zero_ == kStrictNegativeZero) {
        // If we have to check for zero, then we can check for the max negative
        // smi while we are at it.
        __ bic(ip, r0, Operand(0x80000000), SetCC);
        __ b(eq, &slow);
        __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
        __ Ret();
      } else {
        // The value of the expression is a smi and 0 is OK for -0. Try
        // optimistic subtraction '0 - value'.
        __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
        __ Ret(vc);
        // We don't have to reverse the optimistic neg since the only case
        // where we fall through is the minimum negative Smi, which is the case
        // where the neg leaves the register unchanged.
        __ jmp(&slow);  // Go slow on max negative Smi.
      }
      __ bind(&try_float);
    } else if (FLAG_debug_code) {
      __ tst(r0, Operand(kSmiTagMask));
      __ Assert(ne, "Unexpected smi operand.");
    }

    __ bind(&try_float);
    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
    __ cmp(r1, heap_number_map);
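The kStrictNegativeZero case above goes to the slow path for a zero input because integer smi arithmetic cannot represent JavaScript's -0; only a heap number can. A hedged sketch of the distinction, using C doubles:

#include <assert.h>
#include <math.h>

int main(void) {
  // Smi negation via '0 - value' maps 0 to +0: integers have no -0,
  // so under kStrictNegativeZero the stub must take the slow path.
  assert(0 - 0 == 0);
  // JavaScript's -0 lives in doubles: it has the sign bit set but
  // still compares equal to +0, which is why it needs special care.
  double negated = 0.0 * -1.0;
  assert(signbit(negated));
  assert(negated == 0.0);
  return 0;
}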
@@ -2344,6 +2367,19 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
      __ mov(r0, Operand(r1));
    }
  } else if (op_ == Token::BIT_NOT) {
    if (include_smi_code_) {
      Label non_smi;
      __ BranchOnNotSmi(r0, &non_smi);
      __ mvn(r0, Operand(r0));
      // Bit-clear inverted smi-tag.
      __ bic(r0, r0, Operand(kSmiTagMask));
      __ Ret();
      __ bind(&non_smi);
    } else if (FLAG_debug_code) {
      __ tst(r0, Operand(kSmiTagMask));
      __ Assert(ne, "Unexpected smi operand.");
    }

    // Check if the operand is a heap number.
    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
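The inline smi path above leans on the tagging scheme: with the tag bit clear, mvn (bitwise NOT) of a tagged smi produces the NOT of the payload with the tag bit set, so a single bic re-tags the result. A hedged C sketch of that identity (helper names are illustrative):

#include <assert.h>
#include <stdint.h>

static int32_t SmiTag(int32_t value) { return value << 1; }

// Mirrors the mvn + bic pair: ~(v << 1) == (~v << 1) | 1, so clearing
// the low (tag) bit yields the tagged representation of ~v.
static int32_t SmiBitNot(int32_t smi) { return ~smi & ~1; }

int main(void) {
  assert(SmiBitNot(SmiTag(5)) == SmiTag(~5));
  assert(SmiBitNot(SmiTag(0)) == SmiTag(~0));
  assert(SmiBitNot(SmiTag(-7)) == SmiTag(6));
  return 0;
}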
@@ -2391,7 +2427,7 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
  }

  __ bind(&done);
  __ StubReturn(1);
  __ Ret();

  // Handle the slow case by jumping to the JavaScript builtin.
  __ bind(&slow);
@@ -3499,6 +3535,11 @@ const char* CompareStub::GetName() {
    include_number_compare_name = "_NO_NUMBER";
  }

  const char* include_smi_compare_name = "";
  if (!include_smi_compare_) {
    include_smi_compare_name = "_NO_SMI";
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "CompareStub_%s%s%s%s%s%s",
               cc_name,
@@ -3506,7 +3547,8 @@ const char* CompareStub::GetName() {
               rhs_name,
               strict_name,
               never_nan_nan_name,
               include_number_compare_name);
               include_number_compare_name,
               include_smi_compare_name);
  return name_;
}

@@ -3522,7 +3564,8 @@ int CompareStub::MinorKey() {
         | RegisterField::encode(lhs_.is(r0))
         | StrictField::encode(strict_)
         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
         | IncludeNumberCompareField::encode(include_number_compare_);
         | IncludeNumberCompareField::encode(include_number_compare_)
         | IncludeSmiCompareField::encode(include_smi_compare_);
}

@@ -4144,17 +4187,21 @@ void SubStringStub::Generate(MacroAssembler* masm) {

  // Check bounds and smi-ness.
  __ ldr(r7, MemOperand(sp, kToOffset));
  __ ldr(r6, MemOperand(sp, kFromOffset));
  Register to = r6;
  Register from = r7;
  __ Ldrd(to, from, MemOperand(sp, kToOffset));
  STATIC_ASSERT(kFromOffset == kToOffset + 4);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  // I.e., arithmetic shift right by one un-smi-tags.
  __ mov(r2, Operand(r7, ASR, 1), SetCC);
  __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
  // If either r2 or r6 had the smi tag bit set, then carry is set now.
  __ mov(r2, Operand(to, ASR, 1), SetCC);
  __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
  // If either to or from had the smi tag bit set, then carry is set now.
  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
  __ b(mi, &runtime);  // From is negative.
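The two mov-with-ASR instructions above fold untagging and smi checks together: the tag bit shifted out of each operand lands in the carry flag (the second mov executes only if the first left carry clear), so a set carry afterwards means at least one operand was not a smi. A hedged C sketch of the check (names illustrative):

#include <stdbool.h>
#include <stdint.h>

// Untag two candidate smis and report whether both really were smis,
// mirroring the "ASR shifts the tag bit into carry" trick used above.
static bool UntagTwoSmis(int32_t to, int32_t from,
                         int32_t* to_out, int32_t* from_out) {
  bool not_smi = ((to | from) & 1) != 0;  // any set tag bit -> not a smi
  *to_out = to >> 1;                      // arithmetic shift untags
  *from_out = from >> 1;
  return !not_smi;                        // false -> runtime slow path
}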

  // Both to and from are smis.

  __ sub(r2, r2, Operand(r3), SetCC);
  __ b(mi, &runtime);  // Fail if from > to.
  // Special handling of sub-strings of length 1 and 2. One character strings
@@ -4165,8 +4212,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {

  // r2: length
  // r3: from index (untaged smi)
  // r6: from (smi)
  // r7: to (smi)
  // r6 (a.k.a. to): to (smi)
  // r7 (a.k.a. from): from offset (smi)

  // Make sure first argument is a sequential (or flat) string.
  __ ldr(r5, MemOperand(sp, kStringOffset));
@@ -4178,10 +4225,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {

  // r1: instance type
  // r2: length
  // r3: from index (untaged smi)
  // r3: from index (untagged smi)
  // r5: string
  // r6: from (smi)
  // r7: to (smi)
  // r6 (a.k.a. to): to (smi)
  // r7 (a.k.a. from): from offset (smi)
  Label seq_string;
  __ and_(r4, r1, Operand(kStringRepresentationMask));
  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
@@ -4207,17 +4254,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // r2: length
  // r3: from index (untaged smi)
  // r5: string
  // r6: from (smi)
  // r7: to (smi)
  // r6 (a.k.a. to): to (smi)
  // r7 (a.k.a. from): from offset (smi)
  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
  __ cmp(r4, Operand(r7));
  __ cmp(r4, Operand(to));
  __ b(lt, &runtime);  // Fail if to > length.
  to = no_reg;

  // r1: instance type.
  // r2: result string length.
  // r3: from index (untaged smi)
  // r5: string.
  // r6: from offset (smi)
  // r7 (a.k.a. from): from offset (smi)
  // Check for flat ascii string.
  Label non_ascii_flat;
  __ tst(r1, Operand(kStringEncodingMask));
@@ -4259,12 +4307,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // r0: result string.
  // r2: result string length.
  // r5: string.
  // r6: from offset (smi)
  // r7 (a.k.a. from): from offset (smi)
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Locate 'from' character of string.
  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ add(r5, r5, Operand(r6, ASR, 1));
  __ add(r5, r5, Operand(from, ASR, 1));

  // r0: result string.
  // r1: first character of result string.
@@ -4280,7 +4328,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  __ bind(&non_ascii_flat);
  // r2: result string length.
  // r5: string.
  // r6: from offset (smi)
  // r7 (a.k.a. from): from offset (smi)
  // Check for flat two byte string.

  // Allocate the result.
@@ -4292,18 +4340,19 @@ void SubStringStub::Generate(MacroAssembler* masm) {
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Locate 'from' character of string.
  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // As "from" is a smi it is 2 times the value which matches the size of a two
  // byte character.
  __ add(r5, r5, Operand(r6));
  __ add(r5, r5, Operand(from));
  from = no_reg;

  // r0: result string.
  // r1: first character of result.
  // r2: result length.
  // r5: first character of string to copy.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                           DEST_ALWAYS_ALIGNED);
  StringHelper::GenerateCopyCharactersLong(
      masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();
@@ -4379,8 +4428,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[4]: left string
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // left
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // right
  __ Ldrd(r0 , r1, MemOperand(sp));  // Load right in r0, left in r1.

  Label not_same;
  __ cmp(r0, r1);
@@ -4395,12 +4443,12 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
  __ bind(&not_same);

  // Check that both objects are sequential ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(r0, r1, r2, r3, &runtime);
  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);

  // Compare flat ascii strings natively. Remove arguments from stack first.
  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  GenerateCompareFlatAsciiStrings(masm, r0, r1, r2, r3, r4, r5);
  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
7 deps/v8/src/arm/codegen-arm.cc vendored
@@ -1651,7 +1651,7 @@ void CodeGenerator::Comparison(Condition cc,
  // Perform non-smi comparison by stub.
  // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
  // We call with 0 args because there are 0 on the stack.
  CompareStub stub(cc, strict, kBothCouldBeNaN, true, lhs, rhs);
  CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
  frame_->CallStub(&stub, 0);
  __ cmp(r0, Operand(0, RelocInfo::NONE));
  exit.Jump();
@@ -5985,6 +5985,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
      GenericUnaryOpStub stub(
          Token::SUB,
          overwrite,
          NO_UNARY_FLAGS,
          no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
      frame_->CallStub(&stub, 0);
      frame_->EmitPush(r0);  // r0 has result
@@ -6009,7 +6010,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
      not_smi_label.Bind();
      frame_->SpillAll();
      __ Move(r0, tos);
      GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
      GenericUnaryOpStub stub(Token::BIT_NOT,
                              overwrite,
                              NO_UNARY_SMI_CODE_IN_STUB);
      frame_->CallStub(&stub, 0);
      frame_->EmitPush(r0);
20 deps/v8/src/arm/codegen-arm.h vendored
@@ -271,10 +271,6 @@ class CodeGenerator: public AstVisitor {

  void AddDeferred(DeferredCode* code) { deferred_.Add(code); }

  // If the name is an inline runtime function call return the number of
  // expected arguments. Otherwise return -1.
  static int InlineRuntimeCallArgumentsCount(Handle<String> name);

  // Constants related to patching of inlined load/store.
  static int GetInlinedKeyedLoadInstructionsAfterPatch() {
    return FLAG_debug_code ? 32 : 13;
@@ -290,6 +286,12 @@ class CodeGenerator: public AstVisitor {
  }

 private:
  // Type of a member function that generates inline code for a native function.
  typedef void (CodeGenerator::*InlineFunctionGenerator)
      (ZoneList<Expression*>*);

  static const InlineFunctionGenerator kInlineFunctionGenerators[];

  // Construction/Destruction
  explicit CodeGenerator(MacroAssembler* masm);

@@ -447,13 +449,9 @@ class CodeGenerator: public AstVisitor {
  void Branch(bool if_true, JumpTarget* target);
  void CheckStack();

  struct InlineRuntimeLUT {
    void (CodeGenerator::*method)(ZoneList<Expression*>*);
    const char* name;
    int nargs;
  };
  static InlineFunctionGenerator FindInlineFunctionGenerator(
      Runtime::FunctionId function_id);

  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
  bool CheckForInlineRuntimeCall(CallRuntime* node);

  static Handle<Code> ComputeLazyCompile(int argc);
@@ -599,8 +597,6 @@ class CodeGenerator: public AstVisitor {
  // Size of inlined write barriers generated by EmitNamedStore.
  static int inlined_write_barrier_size_;

  static InlineRuntimeLUT kInlineRuntimeLUT[];

  friend class VirtualFrame;
  friend class JumpTarget;
  friend class Reference;
265 deps/v8/src/arm/full-codegen-arm.cc vendored
@@ -493,7 +493,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
      int context_chain_length =
          scope()->ContextChainLength(slot->var()->scope());
      __ LoadContext(scratch, context_chain_length);
      return CodeGenerator::ContextOperand(scratch, slot->index());
      return ContextOperand(scratch, slot->index());
    }
    case Slot::LOOKUP:
      UNREACHABLE();
@@ -557,19 +557,17 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
      if (FLAG_debug_code) {
        // Check if we have the correct context pointer.
        __ ldr(r1,
               CodeGenerator::ContextOperand(cp, Context::FCONTEXT_INDEX));
        __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
        __ cmp(r1, cp);
        __ Check(eq, "Unexpected declaration in current context.");
      }
      if (mode == Variable::CONST) {
        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
        __ str(ip, CodeGenerator::ContextOperand(cp, slot->index()));
        __ str(ip, ContextOperand(cp, slot->index()));
        // No write barrier since the_hole_value is in old space.
      } else if (function != NULL) {
        VisitForValue(function, kAccumulator);
        __ str(result_register(),
               CodeGenerator::ContextOperand(cp, slot->index()));
        __ str(result_register(), ContextOperand(cp, slot->index()));
        int offset = Context::SlotOffset(slot->index());
        // We know that we have written a function, which is not a smi.
        __ mov(r1, Operand(cp));
@@ -674,7 +672,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {

    // Perform the comparison as if via '==='.
    __ ldr(r1, MemOperand(sp, 0));  // Switch value.
    if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
    if (inline_smi_code) {
      Label slow_case;
      __ orr(r2, r1, r0);
      __ tst(r2, Operand(kSmiTagMask));
@@ -686,7 +685,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
      __ bind(&slow_case);
    }

    CompareStub stub(eq, true, kBothCouldBeNaN, true, r1, r0);
    CompareFlags flags = inline_smi_code
        ? NO_SMI_COMPARE_IN_STUB
        : NO_COMPARE_FLAGS;
    CompareStub stub(eq, true, flags, r1, r0);
    __ CallStub(&stub);
    __ cmp(r0, Operand(0, RelocInfo::NONE));
    __ b(ne, &next_test);
@@ -746,11 +748,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
  __ bind(&done_convert);
  __ push(r0);

  // TODO(kasperl): Check cache validity in generated code. This is a
  // fast case for the JSObject::IsSimpleEnum cache validity
  // checks. If we cannot guarantee cache validity, call the runtime
  // system to check cache validity or get the property names in a
  // fixed array.
  // BUG(867): Check cache validity in generated code. This is a fast
  // case for the JSObject::IsSimpleEnum cache validity checks. If we
  // cannot guarantee cache validity, call the runtime system to check
  // cache validity or get the property names in a fixed array.

  // Get the set of properties to enumerate.
  __ push(r0);  // Duplicate the enumerable object on the stack.
@@ -881,6 +882,150 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}


MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Label* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register current = cp;
  Register next = r3;
  Register temp = r4;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
        __ tst(temp, temp);
        __ b(ne, slow);
      }
      __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
      __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
      // Walk the rest of the chain without clobbering cp.
      current = next;
    }
  }
  // Check that last extension is NULL.
  __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
  __ tst(temp, temp);
  __ b(ne, slow);
  __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX));
  return ContextOperand(temp, slot->index());
}


void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
    Slot* slot,
    TypeofState typeof_state,
    Label* slow,
    Label* done) {
  // Generate fast-case code for variables that might be shadowed by
  // eval-introduced variables. Eval is used a lot without
  // introducing variables. In those cases, we do not want to
  // perform a runtime call for all variables in the scope
  // containing the eval.
  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
    __ jmp(done);
  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
    if (potential_slot != NULL) {
      // Generate fast case for locals that rewrite to slots.
      __ ldr(r0, ContextSlotOperandCheckExtensions(potential_slot, slow));
      if (potential_slot->var()->mode() == Variable::CONST) {
        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
        __ cmp(r0, ip);
        __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
      }
      __ jmp(done);
    } else if (rewrite != NULL) {
      // Generate fast case for calls of an argument function.
      Property* property = rewrite->AsProperty();
      if (property != NULL) {
        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
        Literal* key_literal = property->key()->AsLiteral();
        if (obj_proxy != NULL &&
            key_literal != NULL &&
            obj_proxy->IsArguments() &&
            key_literal->handle()->IsSmi()) {
          // Load arguments object if there are no eval-introduced
          // variables. Then load the argument from the arguments
          // object using keyed load.
          __ ldr(r1,
                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
                                                   slow));
          __ mov(r0, Operand(key_literal->handle()));
          Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
          __ Call(ic, RelocInfo::CODE_TARGET);
          __ jmp(done);
        }
      }
    }
  }
}


void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
    Slot* slot,
    TypeofState typeof_state,
    Label* slow) {
  Register current = cp;
  Register next = r1;
  Register temp = r2;

  Scope* s = scope();
  while (s != NULL) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
        __ tst(temp, temp);
        __ b(ne, slow);
      }
      // Load next context in chain.
      __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
      __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
      // Walk the rest of the chain without clobbering cp.
      current = next;
    }
    // If no outer scope calls eval, we do not need to check more
    // context extensions.
    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
    s = s->outer_scope();
  }

  if (s->is_eval_scope()) {
    Label loop, fast;
    if (!current.is(next)) {
      __ Move(next, current);
    }
    __ bind(&loop);
    // Terminate at global context.
    __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex);
    __ cmp(temp, ip);
    __ b(eq, &fast);
    // Check that extension is NULL.
    __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
    __ tst(temp, temp);
    __ b(ne, slow);
    // Load next context in chain.
    __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
    __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
    __ b(&loop);
    __ bind(&fast);
  }

  __ ldr(r0, CodeGenerator::GlobalObject());
  __ mov(r2, Operand(slot->var()->name()));
  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
      ? RelocInfo::CODE_TARGET
      : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
  __ Call(ic, mode);
}
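For orientation, the chain walk the emitted code performs can be modeled in a few lines of plain C++. The types below are illustrative stand-ins, not V8's: the real code walks Scope objects at compile time and emits loads over the runtime Context chain.

#include <cstddef>

struct Context { Context* next; void* extension; };
struct Scope  { Scope* outer; int heap_slots; bool calls_eval;
                bool outer_calls_eval; bool is_eval; };

// Returns false where the generated code would branch to the slow
// path: some context between here and the global has an extension
// object (introduced by eval) that could shadow the variable.
static bool FastGlobalLoadPossible(const Scope* s, const Context* current) {
  for (; s != NULL; s = s->outer) {
    if (s->heap_slots > 0) {
      if (s->calls_eval && current->extension != NULL) return false;
      current = current->next;  // load next context in chain
    }
    if (!s->outer_calls_eval || s->is_eval) break;
  }
  return true;
}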


void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                         Expression::Context context) {
  // Four cases: non-this global variables, lookup slots, all other
@@ -900,10 +1045,19 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
    Apply(context, r0);

  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
    Label done, slow;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);

    __ bind(&slow);
    Comment cmnt(masm_, "Lookup slot");
    __ mov(r1, Operand(var->name()));
    __ Push(cp, r1);  // Context and name.
    __ CallRuntime(Runtime::kLoadContextSlot, 2);
    __ bind(&done);

    Apply(context, r0);

  } else if (slot != NULL) {
@@ -913,14 +1067,11 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
    if (var->mode() == Variable::CONST) {
      // Constants may be the hole value if they have not been initialized.
      // Unhole them.
      Label done;
      MemOperand slot_operand = EmitSlotSearch(slot, r0);
      __ ldr(r0, slot_operand);
      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
      __ cmp(r0, ip);
      __ b(ne, &done);
      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
      __ bind(&done);
      __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
      Apply(context, r0);
    } else {
      Apply(context, slot);
@@ -1647,15 +1798,41 @@ void FullCodeGenerator::VisitCall(Call* expr) {
    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
  } else if (var != NULL && var->slot() != NULL &&
             var->slot()->type() == Slot::LOOKUP) {
    // Call to a lookup slot (dynamically introduced variable). Call the
    // runtime to find the function to call (returned in eax) and the object
    // holding it (returned in edx).
    // Call to a lookup slot (dynamically introduced variable).
    Label slow, done;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    EmitDynamicLoadFromSlotFastCase(var->slot(),
                                    NOT_INSIDE_TYPEOF,
                                    &slow,
                                    &done);

    __ bind(&slow);
    // Call the runtime to find the function to call (returned in eax)
    // and the object holding it (returned in edx).
    __ push(context_register());
    __ mov(r2, Operand(var->name()));
    __ push(r2);
    __ CallRuntime(Runtime::kLoadContextSlot, 2);
    __ push(r0);  // Function.
    __ push(r1);  // Receiver.
    __ Push(r0, r1);  // Function, receiver.

    // If fast case code has been generated, emit code to push the
    // function and receiver and have the slow path jump around this
    // code.
    if (done.is_linked()) {
      Label call;
      __ b(&call);
      __ bind(&done);
      // Push function.
      __ push(r0);
      // Push global receiver.
      __ ldr(r1, CodeGenerator::GlobalObject());
      __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
      __ push(r1);
      __ bind(&call);
    }

    EmitCallWithStub(expr);
  } else if (fun->AsProperty() != NULL) {
    // Call to an object property.
@@ -1678,12 +1855,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {

      Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
      __ Call(ic, RelocInfo::CODE_TARGET);
      // Push result (function).
      __ push(r0);
      // Push Global receiver.
      __ ldr(r1, CodeGenerator::GlobalObject());
      __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
      __ push(r1);
      __ Push(r0, r1);  // Function, receiver.
      EmitCallWithStub(expr);
    } else {
      EmitKeyedCallWithIC(expr, prop->key(), RelocInfo::CODE_TARGET);
@@ -2464,11 +2638,9 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {

  Register key = r0;
  Register cache = r1;
  __ ldr(cache, CodeGenerator::ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(cache, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ ldr(cache, FieldMemOperand(cache, GlobalObject::kGlobalContextOffset));
  __ ldr(cache,
         CodeGenerator::ContextOperand(
             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ ldr(cache,
         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));

@@ -2720,7 +2892,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
      UnaryOverwriteMode overwrite =
          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
      GenericUnaryOpStub stub(Token::SUB, overwrite);
      GenericUnaryOpStub stub(Token::SUB,
                              overwrite,
                              NO_UNARY_FLAGS);
      // GenericUnaryOpStub expects the argument to be in the
      // accumulator register r0.
      VisitForValue(expr->expression(), kAccumulator);
@@ -2735,7 +2909,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      // in the accumulator register r0.
      VisitForValue(expr->expression(), kAccumulator);
      Label done;
      if (ShouldInlineSmiCase(expr->op())) {
      bool inline_smi_code = ShouldInlineSmiCase(expr->op());
      if (inline_smi_code) {
        Label call_stub;
        __ BranchOnNotSmi(r0, &call_stub);
        __ mvn(r0, Operand(r0));
@@ -2745,9 +2920,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
        __ bind(&call_stub);
      }
      bool overwrite = expr->expression()->ResultOverwriteAllowed();
      UnaryOpFlags flags = inline_smi_code
          ? NO_UNARY_SMI_CODE_IN_STUB
          : NO_UNARY_FLAGS;
      UnaryOverwriteMode mode =
          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
      GenericUnaryOpStub stub(Token::BIT_NOT, mode);
      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
      __ CallStub(&stub);
      __ bind(&done);
      Apply(context_, r0);
@@ -2929,9 +3107,19 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
  } else if (proxy != NULL &&
             proxy->var()->slot() != NULL &&
             proxy->var()->slot()->type() == Slot::LOOKUP) {
    Label done, slow;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    Slot* slot = proxy->var()->slot();
    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);

    __ bind(&slow);
    __ mov(r0, Operand(proxy->name()));
    __ Push(cp, r0);
    __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
    __ bind(&done);

    if (where == kStack) __ push(r0);
  } else {
    // This expression cannot throw a reference error at the top level.
@@ -3114,7 +3302,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
          UNREACHABLE();
      }

      if (ShouldInlineSmiCase(op)) {
      bool inline_smi_code = ShouldInlineSmiCase(op);
      if (inline_smi_code) {
        Label slow_case;
        __ orr(r2, r0, Operand(r1));
        __ BranchOnNotSmi(r2, &slow_case);
@@ -3122,8 +3311,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
        Split(cc, if_true, if_false, NULL);
        __ bind(&slow_case);
      }

      CompareStub stub(cc, strict, kBothCouldBeNaN, true, r1, r0);
      CompareFlags flags = inline_smi_code
          ? NO_SMI_COMPARE_IN_STUB
          : NO_COMPARE_FLAGS;
      CompareStub stub(cc, strict, flags, r1, r0);
      __ CallStub(&stub);
      __ cmp(r0, Operand(0, RelocInfo::NONE));
      Split(cc, if_true, if_false, fall_through);
@@ -3187,7 +3378,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {


void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
  __ ldr(dst, CodeGenerator::ContextOperand(cp, context_index));
  __ ldr(dst, ContextOperand(cp, context_index));
}
9 deps/v8/src/arm/macro-assembler-arm.cc vendored
@@ -1242,15 +1242,6 @@ void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
}


void MacroAssembler::StubReturn(int argc, Condition cond) {
  ASSERT(argc >= 1 && generating_stub());
  if (argc > 1) {
    add(sp, sp, Operand((argc - 1) * kPointerSize), LeaveCC, cond);
  }
  Ret(cond);
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    add(sp, sp, Operand(num_arguments * kPointerSize));
3 deps/v8/src/arm/macro-assembler-arm.h vendored
@@ -531,9 +531,6 @@ class MacroAssembler: public Assembler {
  // Call a code stub.
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(Runtime::Function* f, int num_arguments);
229 deps/v8/src/arm/stub-cache-arm.cc vendored
@@ -1220,6 +1220,62 @@ void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
}


void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
                                                   JSObject* holder,
                                                   String* name,
                                                   Label* miss) {
  ASSERT(holder->IsGlobalObject());

  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ ldr(r0, MemOperand(sp, argc * kPointerSize));

  // If the object is the holder then we know that it's a global
  // object which can only happen for contextual calls. In this case,
  // the receiver cannot be a smi.
  if (object != holder) {
    __ tst(r0, Operand(kSmiTagMask));
    __ b(eq, miss);
  }

  // Check that the maps haven't changed.
  CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
}


void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    Label* miss) {
  // Get the value from the cell.
  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
  __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (Heap::InNewSpace(function)) {
    // We can't embed a pointer to a function in new space so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ tst(r1, Operand(kSmiTagMask));
    __ b(eq, miss);
    __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
    __ b(ne, miss);

    // Check the shared function info. Make sure it hasn't changed.
    __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
    __ cmp(r4, r3);
    __ b(ne, miss);
  } else {
    __ cmp(r1, Operand(Handle<JSFunction>(function)));
    __ b(ne, miss);
  }
}


Object* CallStubCompiler::GenerateMissBranch() {
  Object* obj = StubCache::ComputeCallMiss(arguments().immediate(), kind_);
  if (obj->IsFailure()) return obj;
@@ -1266,21 +1322,18 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,

Object* CallStubCompiler::CompileArrayPushCall(Object* object,
                                               JSObject* holder,
                                               JSGlobalPropertyCell* cell,
                                               JSFunction* function,
                                               String* name,
                                               CheckType check) {
                                               String* name) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray()) {
    return Heap::undefined_value();
  }

  // TODO(639): faster implementation.
  ASSERT(check == RECEIVER_MAP_CHECK);

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();

  Label miss;

@@ -1313,21 +1366,18 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,

Object* CallStubCompiler::CompileArrayPopCall(Object* object,
                                              JSObject* holder,
                                              JSGlobalPropertyCell* cell,
                                              JSFunction* function,
                                              String* name,
                                              CheckType check) {
                                              String* name) {
  // ----------- S t a t e -------------
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray()) {
    return Heap::undefined_value();
  }

  // TODO(642): faster implementation.
  ASSERT(check == RECEIVER_MAP_CHECK);

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();

  Label miss;

@@ -1358,11 +1408,12 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}


Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
                                                      JSObject* holder,
                                                      JSFunction* function,
                                                      String* name,
                                                      CheckType check) {
Object* CallStubCompiler::CompileStringCharCodeAtCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  // ----------- S t a t e -------------
  //  -- r2                     : function name
  //  -- lr                     : return address
@@ -1372,7 +1423,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
  // -----------------------------------

  // If object is not a string, bail out to regular call.
  if (!object->IsString()) return Heap::undefined_value();
  if (!object->IsString() || cell != NULL) return Heap::undefined_value();

  const int argc = arguments().immediate();

@@ -1430,9 +1481,9 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,

Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
                                                  JSObject* holder,
                                                  JSGlobalPropertyCell* cell,
                                                  JSFunction* function,
                                                  String* name,
                                                  CheckType check) {
                                                  String* name) {
  // ----------- S t a t e -------------
  //  -- r2                     : function name
  //  -- lr                     : return address
@@ -1442,7 +1493,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
  // -----------------------------------

  // If object is not a string, bail out to regular call.
  if (!object->IsString()) return Heap::undefined_value();
  if (!object->IsString() || cell != NULL) return Heap::undefined_value();

  const int argc = arguments().immediate();

@@ -1501,6 +1552,80 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
}


Object* CallStubCompiler::CompileStringFromCharCodeCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  // ----------- S t a t e -------------
  //  -- r2                     : function name
  //  -- lr                     : return address
  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
  //  -- ...
  //  -- sp[argc * 4]           : receiver
  // -----------------------------------

  const int argc = arguments().immediate();

  // If the object is not a JSObject or we got an unexpected number of
  // arguments, bail out to the regular call.
  if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();

  Label miss;
  GenerateNameCheck(name, &miss);

  if (cell == NULL) {
    __ ldr(r1, MemOperand(sp, 1 * kPointerSize));

    STATIC_ASSERT(kSmiTag == 0);
    __ tst(r1, Operand(kSmiTagMask));
    __ b(eq, &miss);

    CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
                    &miss);
  } else {
    ASSERT(cell->value() == function);
    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
    GenerateLoadFunctionFromCell(cell, function, &miss);
  }

  // Load the char code argument.
  Register code = r1;
  __ ldr(code, MemOperand(sp, 0 * kPointerSize));

  // Check the code is a smi.
  Label slow;
  STATIC_ASSERT(kSmiTag == 0);
  __ tst(code, Operand(kSmiTagMask));
  __ b(ne, &slow);

  // Convert the smi code to uint16.
  __ and_(code, code, Operand(Smi::FromInt(0xffff)));

  StringCharFromCodeGenerator char_from_code_generator(code, r0);
  char_from_code_generator.GenerateFast(masm());
  __ Drop(argc + 1);
  __ Ret();

  ICRuntimeCallHelper call_helper;
  char_from_code_generator.GenerateSlow(masm(), call_helper);

  // Tail call the full function. We do not have to patch the receiver
  // because the function makes no use of it.
  __ bind(&slow);
  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);

  __ bind(&miss);
  // r2: function name.
  Object* obj = GenerateMissBranch();
  if (obj->IsFailure()) return obj;

  // Return the generated code.
  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
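One detail worth noting above: the uint16 conversion is done without untagging, by masking the tagged char code with the tagged constant Smi::FromInt(0xffff). A hedged C sketch of why that works (helper names illustrative):

#include <assert.h>
#include <stdint.h>

static int32_t SmiTag(int32_t value) { return value << 1; }

// (v << 1) & (0xffff << 1) == (v & 0xffff) << 1, so masking a tagged
// smi with the tagged mask yields the tagged uint16 value directly.
static int32_t SmiToUint16(int32_t smi) { return smi & SmiTag(0xffff); }

int main(void) {
  assert(SmiToUint16(SmiTag(0x12345)) == SmiTag(0x2345));
  assert(SmiToUint16(SmiTag(65)) == SmiTag(65));
  return 0;
}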


Object* CallStubCompiler::CompileCallConstant(Object* object,
                                              JSObject* holder,
                                              JSFunction* function,
@@ -1513,8 +1638,8 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
  SharedFunctionInfo* function_info = function->shared();
  if (function_info->HasCustomCallGenerator()) {
    const int id = function_info->custom_call_generator_id();
    Object* result =
        CompileCustomCall(id, object, holder, function, name, check);
    Object* result = CompileCustomCall(
        id, object, holder, NULL, function, name);
    // undefined means bail out to regular compiler.
    if (!result->IsUndefined()) {
      return result;
@@ -1714,6 +1839,16 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  //  -- r2    : name
  //  -- lr    : return address
  // -----------------------------------

  SharedFunctionInfo* function_info = function->shared();
  if (function_info->HasCustomCallGenerator()) {
    const int id = function_info->custom_call_generator_id();
    Object* result = CompileCustomCall(
        id, object, holder, cell, function, name);
    // undefined means bail out to regular compiler.
    if (!result->IsUndefined()) return result;
  }

  Label miss;

  GenerateNameCheck(name, &miss);
@@ -1721,45 +1856,9 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ ldr(r0, MemOperand(sp, argc * kPointerSize));
  GenerateGlobalReceiverCheck(object, holder, name, &miss);

  // If the object is the holder then we know that it's a global
  // object which can only happen for contextual calls. In this case,
  // the receiver cannot be a smi.
  if (object != holder) {
    __ tst(r0, Operand(kSmiTagMask));
    __ b(eq, &miss);
  }

  // Check that the maps haven't changed.
  CheckPrototypes(object, r0, holder, r3, r1, r4, name, &miss);

  // Get the value from the cell.
  __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
  __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (Heap::InNewSpace(function)) {
    // We can't embed a pointer to a function in new space so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ tst(r1, Operand(kSmiTagMask));
    __ b(eq, &miss);
    __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
    __ b(ne, &miss);

    // Check the shared function info. Make sure it hasn't changed.
    __ mov(r3, Operand(Handle<SharedFunctionInfo>(function->shared())));
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
    __ cmp(r4, r3);
    __ b(ne, &miss);
  } else {
    __ cmp(r1, Operand(Handle<JSFunction>(function)));
    __ b(ne, &miss);
  }
  GenerateLoadFunctionFromCell(cell, function, &miss);

  // Patch the receiver on the stack with the global proxy if
  // necessary.
63 deps/v8/src/array.js vendored
@@ -957,14 +957,41 @@ function ArrayIndexOf(element, index) {
    // If index is still negative, search the entire array.
    if (index < 0) index = 0;
  }
  var min = index;
  var max = length;
  if (UseSparseVariant(this, length, true)) {
    var intervals = %GetArrayKeys(this, length);
    if (intervals.length == 2 && intervals[0] < 0) {
      // A single interval.
      var intervalMin = -(intervals[0] + 1);
      var intervalMax = intervalMin + intervals[1];
      min = MAX(min, intervalMin);
      max = intervalMax;  // Capped by length already.
      // Fall through to loop below.
    } else {
      if (intervals.length == 0) return -1;
      // Get all the keys in sorted order.
      var sortedKeys = GetSortedArrayKeys(this, intervals);
      var n = sortedKeys.length;
      var i = 0;
      while (i < n && sortedKeys[i] < index) i++;
      while (i < n) {
        var key = sortedKeys[i];
        if (!IS_UNDEFINED(key) && this[key] === element) return key;
        i++;
      }
      return -1;
    }
  }
  // Lookup through the array.
  if (!IS_UNDEFINED(element)) {
    for (var i = index; i < length; i++) {
    for (var i = min; i < max; i++) {
      if (this[i] === element) return i;
    }
    return -1;
  }
  for (var i = index; i < length; i++) {
  // Lookup through the array.
  for (var i = min; i < max; i++) {
    if (IS_UNDEFINED(this[i]) && i in this) {
      return i;
    }
|
||||
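For reference, the single-interval fast path above assumes %GetArrayKeys can describe a sparse array's keys as a two-element result [-(start + 1), count] for the dense range [start, start + count). A minimal C++ sketch of that decoding arithmetic (the values are hypothetical, not V8's runtime):

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed encoding: intervals[0] < 0 packs the start index s as -(s + 1);
  // intervals[1] is the element count.
  const int intervals[2] = {-(5 + 1), 10};  // hypothetical: indices 5..14
  if (intervals[0] < 0) {
    int interval_min = -(intervals[0] + 1);          // 5
    int interval_max = interval_min + intervals[1];  // 15 (exclusive)
    int search_from = 8;                             // caller-supplied fromIndex
    int min = std::max(search_from, interval_min);
    std::printf("scan [%d, %d)\n", min, interval_max);  // scan [8, 15)
  }
  return 0;
}

This is why the loop bounds switch from (index, length) to (min, max): the scan is clipped to the one range where elements can actually exist.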
@ -981,19 +1008,43 @@ function ArrayLastIndexOf(element, index) {
  } else {
    index = TO_INTEGER(index);
    // If index is negative, index from end of the array.
    if (index < 0) index = length + index;
    if (index < 0) index += length;
    // If index is still negative, do not search the array.
    if (index < 0) index = -1;
    if (index < 0) return -1;
    else if (index >= length) index = length - 1;
  }
  var min = 0;
  var max = index;
  if (UseSparseVariant(this, length, true)) {
    var intervals = %GetArrayKeys(this, index + 1);
    if (intervals.length == 2 && intervals[0] < 0) {
      // A single interval.
      var intervalMin = -(intervals[0] + 1);
      var intervalMax = intervalMin + intervals[1];
      min = MAX(min, intervalMin);
      max = intervalMax;  // Capped by index already.
      // Fall through to loop below.
    } else {
      if (intervals.length == 0) return -1;
      // Get all the keys in sorted order.
      var sortedKeys = GetSortedArrayKeys(this, intervals);
      var i = sortedKeys.length - 1;
      while (i >= 0) {
        var key = sortedKeys[i];
        if (!IS_UNDEFINED(key) && this[key] === element) return key;
        i--;
      }
      return -1;
    }
  }
  // Lookup through the array.
  if (!IS_UNDEFINED(element)) {
    for (var i = index; i >= 0; i--) {
    for (var i = max; i >= min; i--) {
      if (this[i] === element) return i;
    }
    return -1;
  }
  for (var i = index; i >= 0; i--) {
  for (var i = max; i >= min; i--) {
    if (IS_UNDEFINED(this[i]) && i in this) {
      return i;
    }
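The fromIndex normalization above also changes: a still-negative index now returns early instead of being clamped to -1, and an index past the end is clamped to the last element. A small sketch of the same arithmetic:

#include <cstdio>

// Sketch of the lastIndexOf fromIndex normalization in the hunk above.
int NormalizeLastIndex(int index, int length) {
  if (index < 0) index += length;       // count from the end
  if (index < 0) return -1;             // was: index = -1 (scan nothing)
  if (index >= length) index = length - 1;
  return index;
}

int main() {
  std::printf("%d %d %d\n",
              NormalizeLastIndex(-2, 5),   // 3
              NormalizeLastIndex(-9, 5),   // -1: no search
              NormalizeLastIndex(99, 5));  // 4
  return 0;
}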
24
deps/v8/src/bootstrapper.cc
vendored
@ -1344,23 +1344,33 @@ bool Genesis::InstallNatives() {
}


static void InstallCustomCallGenerator(Handle<JSFunction> holder_function,
                                       const char* function_name,
                                       int id) {
  Handle<JSObject> proto(JSObject::cast(holder_function->instance_prototype()));
static void InstallCustomCallGenerator(
    Handle<JSFunction> holder_function,
    CallStubCompiler::CustomGeneratorOwner owner_flag,
    const char* function_name,
    int id) {
  Handle<JSObject> owner;
  if (owner_flag == CallStubCompiler::FUNCTION) {
    owner = Handle<JSObject>::cast(holder_function);
  } else {
    ASSERT(owner_flag == CallStubCompiler::INSTANCE_PROTOTYPE);
    owner = Handle<JSObject>(
        JSObject::cast(holder_function->instance_prototype()));
  }
  Handle<String> name = Factory::LookupAsciiSymbol(function_name);
  Handle<JSFunction> function(JSFunction::cast(proto->GetProperty(*name)));
  Handle<JSFunction> function(JSFunction::cast(owner->GetProperty(*name)));
  function->shared()->set_function_data(Smi::FromInt(id));
}


void Genesis::InstallCustomCallGenerators() {
  HandleScope scope;
#define INSTALL_CALL_GENERATOR(holder_fun, fun_name, name)                \
#define INSTALL_CALL_GENERATOR(holder_fun, owner_flag, fun_name, name)    \
  {                                                                       \
    Handle<JSFunction> holder(global_context()->holder_fun##_function()); \
    const int id = CallStubCompiler::k##name##CallGenerator;              \
    InstallCustomCallGenerator(holder, #fun_name, id);                    \
    InstallCustomCallGenerator(holder, CallStubCompiler::owner_flag,      \
                               #fun_name, id);                            \
  }
  CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR
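INSTALL_CALL_GENERATOR follows the X-macro pattern this commit uses repeatedly (CUSTOM_CALL_IC_GENERATORS here, INLINE_FUNCTION_GENERATOR_ADDRESS below): one list macro is expanded with different per-entry macros. A toy sketch of the pattern, with a hypothetical list:

#include <cstdio>

enum CustomGeneratorOwner { FUNCTION, INSTANCE_PROTOTYPE };

// Hypothetical X-macro list in the style of CUSTOM_CALL_IC_GENERATORS:
// each entry names a holder, an owner flag, a function, and a generator.
#define DEMO_GENERATORS(V)                                    \
  V(string, INSTANCE_PROTOTYPE, charCodeAt, StringCharCodeAt) \
  V(math, FUNCTION, abs, MathAbs)

// One per-entry expansion; a real code base might expand the same list
// again elsewhere to declare ids or generator functions.
#define INSTALL(holder, owner_flag, fun_name, name)             \
  std::printf("install %s.%s (owner=%d)\n", #holder, #fun_name, \
              static_cast<int>(owner_flag));

int main() {
  DEMO_GENERATORS(INSTALL)
  return 0;
}
#undef INSTALL
#undef DEMO_GENERATORS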
73
deps/v8/src/code-stubs.h
vendored
@ -340,27 +340,40 @@ enum NegativeZeroHandling {
};


enum UnaryOpFlags {
  NO_UNARY_FLAGS = 0,
  NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
};


class GenericUnaryOpStub : public CodeStub {
 public:
  GenericUnaryOpStub(Token::Value op,
                     UnaryOverwriteMode overwrite,
                     UnaryOpFlags flags,
                     NegativeZeroHandling negative_zero = kStrictNegativeZero)
      : op_(op), overwrite_(overwrite), negative_zero_(negative_zero) { }
      : op_(op),
        overwrite_(overwrite),
        include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
        negative_zero_(negative_zero) { }

 private:
  Token::Value op_;
  UnaryOverwriteMode overwrite_;
  bool include_smi_code_;
  NegativeZeroHandling negative_zero_;

  class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
  class NegativeZeroField: public BitField<NegativeZeroHandling, 1, 1> {};
  class OpField: public BitField<Token::Value, 2, kMinorBits - 2> {};
  class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
  class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
  class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};

  Major MajorKey() { return GenericUnaryOp; }
  int MinorKey() {
    return OpField::encode(op_) |
           OverwriteField::encode(overwrite_) |
           NegativeZeroField::encode(negative_zero_);
           OverwriteField::encode(overwrite_) |
           IncludeSmiCodeField::encode(include_smi_code_) |
           NegativeZeroField::encode(negative_zero_);
  }

  void Generate(MacroAssembler* masm);
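The BitField classes above pack the stub's configuration into one integer minor key; adding IncludeSmiCodeField is why the other fields shift up a bit. A minimal, simplified sketch of the idea (assumed from the usage shown, not V8's actual template):

#include <cstdint>
#include <cstdio>

// Each field occupies <size> bits starting at bit <shift> of the key.
template <typename T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};

// Mirrors the new GenericUnaryOpStub layout: overwrite at bit 0,
// include_smi_code at bit 1, negative_zero at bit 2.
typedef BitField<bool, 0, 1> OverwriteField;
typedef BitField<bool, 1, 1> IncludeSmiCodeField;
typedef BitField<bool, 2, 1> NegativeZeroField;

int main() {
  uint32_t minor_key = OverwriteField::encode(true) |
                       IncludeSmiCodeField::encode(false) |
                       NegativeZeroField::encode(true);
  std::printf("key=%u smi=%d\n", minor_key,
              IncludeSmiCodeField::decode(minor_key));  // key=5 smi=0
  return 0;
}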
@ -375,22 +388,43 @@ enum NaNInformation {
};


// Flags that control the compare stub code generation.
enum CompareFlags {
  NO_COMPARE_FLAGS = 0,
  NO_SMI_COMPARE_IN_STUB = 1 << 0,
  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
  CANT_BOTH_BE_NAN = 1 << 2
};


class CompareStub: public CodeStub {
 public:
  CompareStub(Condition cc,
              bool strict,
              NaNInformation nan_info = kBothCouldBeNaN,
              bool include_number_compare = true,
              Register lhs = no_reg,
              Register rhs = no_reg) :
              CompareFlags flags,
              Register lhs,
              Register rhs) :
      cc_(cc),
      strict_(strict),
      never_nan_nan_(nan_info == kCantBothBeNaN),
      include_number_compare_(include_number_compare),
      never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
      include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
      include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
      lhs_(lhs),
      rhs_(rhs),
      name_(NULL) { }

  CompareStub(Condition cc,
              bool strict,
              CompareFlags flags) :
      cc_(cc),
      strict_(strict),
      never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
      include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
      include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
      lhs_(no_reg),
      rhs_(no_reg),
      name_(NULL) { }

  void Generate(MacroAssembler* masm);

 private:
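Note the inverted sense: each NO_X_IN_STUB flag, when set, clears the corresponding include bit in the constructor. A small self-contained sketch of composing and decoding such flags (plain C++ mirroring the enum above, not reusing V8's headers):

#include <cstdio>

enum CompareFlags {
  NO_COMPARE_FLAGS = 0,
  NO_SMI_COMPARE_IN_STUB = 1 << 0,
  NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
  CANT_BOTH_BE_NAN = 1 << 2
};

int main() {
  // Plain enums don't define operator|, so composition goes through int
  // and back via static_cast, just like ComputeCompareFlags later in
  // this commit.
  CompareFlags flags = static_cast<CompareFlags>(
      NO_SMI_COMPARE_IN_STUB | CANT_BOTH_BE_NAN);

  bool include_smi_compare = (flags & NO_SMI_COMPARE_IN_STUB) == 0;
  bool include_number_compare = (flags & NO_NUMBER_COMPARE_IN_STUB) == 0;
  bool never_nan_nan = (flags & CANT_BOTH_BE_NAN) != 0;
  std::printf("smi=%d num=%d nan=%d\n",
              include_smi_compare, include_number_compare, never_nan_nan);
  // Prints: smi=0 num=1 nan=1
  return 0;
}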
@ -406,6 +440,10 @@ class CompareStub: public CodeStub {
  // comparison code is used when the number comparison has been inlined, and
  // the stub will be called if one of the operands is not a number.
  bool include_number_compare_;

  // Generate the comparison code for two smi operands in the stub.
  bool include_smi_compare_;

  // Register holding the left hand side of the comparison if the stub gives
  // a choice, no_reg otherwise.
  Register lhs_;
@ -413,12 +451,13 @@ class CompareStub: public CodeStub {
  // a choice, no_reg otherwise.
  Register rhs_;

  // Encoding of the minor key CCCCCCCCCCCCRCNS.
  // Encoding of the minor key in 16 bits.
  class StrictField: public BitField<bool, 0, 1> {};
  class NeverNanNanField: public BitField<bool, 1, 1> {};
  class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
  class RegisterField: public BitField<bool, 3, 1> {};
  class ConditionField: public BitField<int, 4, 12> {};
  class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
  class RegisterField: public BitField<bool, 4, 1> {};
  class ConditionField: public BitField<int, 5, 11> {};

  Major MajorKey() { return Compare; }
@ -436,11 +475,13 @@ class CompareStub: public CodeStub {
  const char* GetName();
#ifdef DEBUG
  void Print() {
    PrintF("CompareStub (cc %d), (strict %s), "
           "(never_nan_nan %s), (number_compare %s) ",
    PrintF("CompareStub (minor %d) (cc %d), (strict %s), "
           "(never_nan_nan %s), (smi_compare %s) (number_compare %s) ",
           MinorKey(),
           static_cast<int>(cc_),
           strict_ ? "true" : "false",
           never_nan_nan_ ? "true" : "false",
           include_smi_compare_ ? "included" : "not included",
           include_number_compare_ ? "included" : "not included");

    if (!lhs_.is(no_reg) && !rhs_.is(no_reg)) {
53
deps/v8/src/codegen.cc
vendored
@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -344,40 +344,35 @@ void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
}


// List of special runtime calls which are generated inline. For some of these
// functions the code will be generated inline, and for others a call to a code
// stub will be inlined.
// Lookup table for code generators for special runtime calls which are
// generated inline.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
    &CodeGenerator::Generate##Name,

#define INLINE_RUNTIME_ENTRY(Name, argc, ressize) \
    {&CodeGenerator::Generate##Name, "_" #Name, argc}, \

CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
  INLINE_RUNTIME_FUNCTION_LIST(INLINE_RUNTIME_ENTRY)
const CodeGenerator::InlineFunctionGenerator
    CodeGenerator::kInlineFunctionGenerators[] = {
      INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
      INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS

#undef INLINE_RUNTIME_ENTRY

CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
    Handle<String> name) {
  const int entries_count =
      sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
  for (int i = 0; i < entries_count; i++) {
    InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i];
    if (name->IsEqualTo(CStrVector(entry->name))) {
      return entry;
    }
  }
  return NULL;
CodeGenerator::InlineFunctionGenerator
    CodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
  return kInlineFunctionGenerators[
      static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction)];
}


bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
  ZoneList<Expression*>* args = node->arguments();
  Handle<String> name = node->name();
  if (name->length() > 0 && name->Get(0) == '_') {
    InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
    if (entry != NULL) {
      ((*this).*(entry->method))(args);
  Runtime::Function* function = node->function();
  if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
    InlineFunctionGenerator generator =
        FindInlineFunctionGenerator(function->function_id);
    if (generator != NULL) {
      ((*this).*(generator))(args);
      return true;
    }
  }
@ -385,14 +380,6 @@ bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
}


int CodeGenerator::InlineRuntimeCallArgumentsCount(Handle<String> name) {
  CodeGenerator::InlineRuntimeLUT* f =
      CodeGenerator::FindInlineRuntimeLUT(name);
  if (f != NULL) return f->nargs;
  return -1;
}


// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
44
deps/v8/src/codegen.h
vendored
@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -71,48 +71,6 @@
// CodeForDoWhileConditionPosition
// CodeForSourcePosition


#define INLINE_RUNTIME_FUNCTION_LIST(F) \
  F(IsSmi, 1, 1) \
  F(IsNonNegativeSmi, 1, 1) \
  F(IsArray, 1, 1) \
  F(IsRegExp, 1, 1) \
  F(CallFunction, -1 /* receiver + n args + function */, 1) \
  F(IsConstructCall, 0, 1) \
  F(ArgumentsLength, 0, 1) \
  F(Arguments, 1, 1) \
  F(ClassOf, 1, 1) \
  F(ValueOf, 1, 1) \
  F(SetValueOf, 2, 1) \
  F(StringCharCodeAt, 2, 1) \
  F(StringCharFromCode, 1, 1) \
  F(StringCharAt, 2, 1) \
  F(ObjectEquals, 2, 1) \
  F(Log, 3, 1) \
  F(RandomHeapNumber, 0, 1) \
  F(IsObject, 1, 1) \
  F(IsFunction, 1, 1) \
  F(IsUndetectableObject, 1, 1) \
  F(IsSpecObject, 1, 1) \
  F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
  F(StringAdd, 2, 1) \
  F(SubString, 3, 1) \
  F(StringCompare, 2, 1) \
  F(RegExpExec, 4, 1) \
  F(RegExpConstructResult, 3, 1) \
  F(RegExpCloneResult, 1, 1) \
  F(GetFromCache, 2, 1) \
  F(NumberToString, 1, 1) \
  F(SwapElements, 3, 1) \
  F(MathPow, 2, 1) \
  F(MathSin, 1, 1) \
  F(MathCos, 1, 1) \
  F(MathSqrt, 1, 1) \
  F(IsRegExpEquivalent, 2, 1) \
  F(HasCachedArrayIndex, 1, 1) \
  F(GetCachedArrayIndex, 1, 1)


#if V8_TARGET_ARCH_IA32
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
15
deps/v8/src/compiler.cc
vendored
@ -269,10 +269,19 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
  }

  if (result.is_null()) {
    // No cache entry found. Do pre-parsing and compile the script.
    // No cache entry found. Do pre-parsing, if it makes sense, and compile
    // the script.
    // Building preparse data that is only used immediately after is only a
    // saving if we might skip building the AST for lazily compiled functions.
    // I.e., preparse data isn't relevant when the lazy flag is off, and
    // for small sources, odds are that there aren't many functions
    // that would be compiled lazily anyway, so we skip the preparse step
    // in that case too.
    ScriptDataImpl* pre_data = input_pre_data;
    if (pre_data == NULL && source_length >= FLAG_min_preparse_length) {
      pre_data = PreParse(source, NULL, extension);
    if (pre_data == NULL
        && FLAG_lazy
        && source_length >= FLAG_min_preparse_length) {
      pre_data = PartialPreParse(source, NULL, extension);
    }

    // Create a script object describing the script to be compiled.
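The decision the comment describes boils down to a simple predicate. A sketch, with assumed flag values (V8's actual defaults may differ):

#include <cstddef>
#include <cstdio>

// Preparsing only pays off when lazy compilation is on and the source is
// large enough that some function bodies are likely to be skipped.
const bool kFlagLazy = true;               // assumption: stands in for FLAG_lazy
const size_t kMinPreparseLength = 1024;    // assumption: FLAG_min_preparse_length

bool ShouldPreparse(bool have_cached_data, size_t source_length) {
  return !have_cached_data &&
         kFlagLazy &&
         source_length >= kMinPreparseLength;
}

int main() {
  std::printf("%d %d\n",
              ShouldPreparse(false, 10),     // 0: too small to bother
              ShouldPreparse(false, 4096));  // 1: worth preparsing
  return 0;
}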
6
deps/v8/src/dateparser-inl.h
vendored
@ -65,8 +65,10 @@ bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
      tz.SetAbsoluteMinute(n);
    } else if (time.IsExpecting(n)) {
      time.AddFinal(n);
      // Require end, white space or Z immediately after finalizing time.
      if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z')) return false;
      // Require end, white space, "Z", "+" or "-" immediately after
      // finalizing time.
      if (!in.IsEnd() && !in.SkipWhiteSpace() && !in.Is('Z') &&
          !in.IsAsciiSign()) return false;
    } else {
      if (!day.Add(n)) return false;
      in.Skip('-');  // Ignore suffix '-' for year, month, or day.
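This is the Date.parse fix for issue 857 listed in the ChangeLog: a '+' or '-' timezone offset may now directly follow the time. A simplified sketch of the relaxed check (the real parser works on a character stream, not single chars):

#include <cctype>
#include <cstdio>

// What may legally follow a finalized time component after this change.
bool ValidAfterTime(char c) {
  return c == '\0' ||                                  // end of input
         std::isspace(static_cast<unsigned char>(c)) ||
         c == 'Z' ||                                   // UTC designator
         c == '+' || c == '-';                         // TZ offset sign (new)
}

int main() {
  // "12:30-0800"-style suffixes were rejected before this change.
  std::printf("%d %d %d\n",
              ValidAfterTime('-'),   // 1
              ValidAfterTime('Z'),   // 1
              ValidAfterTime('x'));  // 0
  return 0;
}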
4
deps/v8/src/flag-definitions.h
vendored
@ -174,6 +174,10 @@ DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
DEFINE_int(max_stack_trace_source_length, 300,
           "maximum length of function source code printed in a stack trace.")

// full-codegen.cc
DEFINE_bool(always_inline_smi_code, false,
            "always inline smi code in non-opt code")

// heap.cc
DEFINE_int(max_new_space_size, 0, "max size of the new generation")
DEFINE_int(max_old_space_size, 0, "max size of the old generation")
55
deps/v8/src/full-codegen.cc
vendored
@ -298,6 +298,11 @@ Handle<Code> FullCodeGenerator::MakeCode(CompilationInfo* info) {
}


MemOperand FullCodeGenerator::ContextOperand(Register context, int index) {
  return CodeGenerator::ContextOperand(context, index);
}


int FullCodeGenerator::SlotOffset(Slot* slot) {
  ASSERT(slot != NULL);
  // Offset is negative because higher indexes are at lower addresses.
@ -319,15 +324,11 @@ int FullCodeGenerator::SlotOffset(Slot* slot) {


bool FullCodeGenerator::ShouldInlineSmiCase(Token::Value op) {
  // TODO(kasperl): Once the compare stub allows leaving out the
  // inlined smi case, we should get rid of this check.
  if (Token::IsCompareOp(op)) return true;
  // TODO(kasperl): Once the unary bit not stub allows leaving out
  // the inlined smi case, we should get rid of this check.
  if (op == Token::BIT_NOT) return true;
  // Inline smi case inside loops, but not division and modulo which
  // are too complicated and take up too much space.
  return (op != Token::DIV) && (op != Token::MOD) && (loop_depth_ > 0);
  if (op == Token::DIV || op == Token::MOD) return false;
  if (FLAG_always_inline_smi_code) return true;
  return loop_depth_ > 0;
}


@ -500,18 +501,36 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
}


void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
  Handle<String> name = expr->name();
  SmartPointer<char> cstring = name->ToCString();
// Lookup table for code generators for special runtime calls which are
// generated inline.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
    &FullCodeGenerator::Emit##Name,

#define CHECK_EMIT_INLINE_CALL(name, x, y) \
  if (strcmp("_"#name, *cstring) == 0) { \
    Emit##name(expr->arguments()); \
    return; \
  }
  INLINE_RUNTIME_FUNCTION_LIST(CHECK_EMIT_INLINE_CALL)
#undef CHECK_EMIT_INLINE_CALL
  UNREACHABLE();
const FullCodeGenerator::InlineFunctionGenerator
    FullCodeGenerator::kInlineFunctionGenerators[] = {
      INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
      INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS


FullCodeGenerator::InlineFunctionGenerator
    FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
  return kInlineFunctionGenerators[
      static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction)];
}


void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* node) {
  ZoneList<Expression*>* args = node->arguments();
  Handle<String> name = node->name();
  Runtime::Function* function = node->function();
  ASSERT(function != NULL);
  ASSERT(function->intrinsic_type == Runtime::INLINE);
  InlineFunctionGenerator generator =
      FindInlineFunctionGenerator(function->function_id);
  ASSERT(generator != NULL);
  ((*this).*(generator))(args);
}
20
deps/v8/src/full-codegen.h
vendored
@ -243,6 +243,12 @@ class FullCodeGenerator: public AstVisitor {
    kRightConstant
  };

  // Type of a member function that generates inline code for a native function.
  typedef void (FullCodeGenerator::*InlineFunctionGenerator)
      (ZoneList<Expression*>*);

  static const InlineFunctionGenerator kInlineFunctionGenerators[];

  // Compute the frame pointer relative offset for a given local or
  // parameter slot.
  int SlotOffset(Slot* slot);
@ -373,14 +379,25 @@ class FullCodeGenerator: public AstVisitor {
  void EmitKeyedCallWithIC(Call* expr, Expression* key, RelocInfo::Mode mode);

  // Platform-specific code for inline runtime calls.
  InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);

  void EmitInlineRuntimeCall(CallRuntime* expr);

#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
  void Emit##name(ZoneList<Expression*>* arguments);
  INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
  INLINE_RUNTIME_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
#undef EMIT_INLINE_RUNTIME_CALL

  // Platform-specific code for loading variables.
  void EmitLoadGlobalSlotCheckExtensions(Slot* slot,
                                         TypeofState typeof_state,
                                         Label* slow);
  MemOperand ContextSlotOperandCheckExtensions(Slot* slot, Label* slow);
  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
                                       TypeofState typeof_state,
                                       Label* slow,
                                       Label* done);
  void EmitVariableLoad(Variable* expr, Expression::Context context);

  // Platform-specific support for allocating a new closure based on
@ -500,6 +517,9 @@ class FullCodeGenerator: public AstVisitor {
  // in v8::internal::Context.
  void LoadContextField(Register dst, int context_index);

  // Create an operand for a context field.
  MemOperand ContextOperand(Register context, int context_index);

  // AST node visit functions.
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
  AST_NODE_LIST(DECLARE_VISIT)
5
deps/v8/src/global-handles.cc
vendored
@ -372,13 +372,14 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {

int post_gc_processing_count = 0;

void GlobalHandles::PostGarbageCollectionProcessing() {
bool GlobalHandles::PostGarbageCollectionProcessing() {
  // Process weak global handle callbacks. This must be done after the
  // GC is completely done, because the callbacks may invoke arbitrary
  // API functions.
  // At the same time deallocate all DESTROYED nodes.
  ASSERT(Heap::gc_state() == Heap::NOT_IN_GC);
  const int initial_post_gc_processing_count = ++post_gc_processing_count;
  bool weak_callback_invoked = false;
  Node** p = &head_;
  while (*p != NULL) {
    if ((*p)->PostGarbageCollectionProcessing()) {
@ -389,6 +390,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
        // restart the processing).
        break;
      }
      weak_callback_invoked = true;
    }
    if ((*p)->state_ == Node::DESTROYED) {
      // Delete the link.
@ -407,6 +409,7 @@ void GlobalHandles::PostGarbageCollectionProcessing() {
  if (first_deallocated()) {
    first_deallocated()->set_next(head());
  }
  return weak_callback_invoked;
}
5
deps/v8/src/global-handles.h
vendored
@ -95,8 +95,9 @@ class GlobalHandles : public AllStatic {
  // Tells whether global handle is weak.
  static bool IsWeak(Object** location);

  // Process pending weak handles.
  static void PostGarbageCollectionProcessing();
  // Process pending weak handles. Returns true if any weak handle
  // callback has been invoked.
  static bool PostGarbageCollectionProcessing();

  // Iterates over all strong handles.
  static void IterateStrongRoots(ObjectVisitor* v);
1
deps/v8/src/handles.cc
vendored
@ -31,7 +31,6 @@
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "execution.h"
12
deps/v8/src/heap-inl.h
vendored
@ -35,6 +35,16 @@
namespace v8 {
namespace internal {

void Heap::UpdateOldSpaceLimits() {
  int old_gen_size = PromotedSpaceSize();
  old_gen_promotion_limit_ =
      old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
  old_gen_allocation_limit_ =
      old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
  old_gen_exhausted_ = false;
}

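UpdateOldSpaceLimits() grows both limits with the promoted-space size, subject to fixed floors (declared as 2 MB and 8 MB in heap.h later in this commit). A worked sketch of the arithmetic with a hypothetical heap size:

#include <algorithm>
#include <cstdio>

const int MB = 1024 * 1024;
const int kMinimumPromotionLimit = 2 * MB;
const int kMinimumAllocationLimit = 8 * MB;

int main() {
  int old_gen_size = 30 * MB;  // hypothetical promoted-space size
  int promotion_limit =
      old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3);
  int allocation_limit =
      old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2);
  std::printf("promotion=%dMB allocation=%dMB\n",
              promotion_limit / MB, allocation_limit / MB);  // 40MB, 45MB
  return 0;
}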
int Heap::MaxObjectSizeInPagedSpace() {
  return Page::kMaxHeapObjectSize;
}
@ -403,7 +413,7 @@ void Heap::SetLastScriptId(Object* last_script_id) {
    } \
    if (!__object__->IsRetryAfterGC()) RETURN_EMPTY; \
    Counters::gc_last_resort_from_handles.Increment(); \
    Heap::CollectAllGarbage(false); \
    Heap::CollectAllAvailableGarbage(); \
    { \
      AlwaysAllocateScope __scope__; \
      __object__ = FUNCTION_CALL; \
96
deps/v8/src/heap.cc
vendored
@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -55,7 +55,6 @@ namespace internal {
String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
@ -64,9 +63,6 @@ MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const int kMinimumPromotionLimit = 2*MB;
static const int kMinimumAllocationLimit = 8*MB;

int Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
int Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

@ -405,17 +401,26 @@ void Heap::GarbageCollectionEpilogue() {
}


void Heap::CollectAllGarbage(bool force_compaction) {
void Heap::CollectAllGarbage(bool force_compaction,
                             CollectionPolicy collectionPolicy) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(0, OLD_POINTER_SPACE);
  CollectGarbage(0, OLD_POINTER_SPACE, collectionPolicy);
  MarkCompactCollector::SetForceCompaction(false);
}


bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
void Heap::CollectAllAvailableGarbage() {
  CompilationCache::Clear();
  CollectAllGarbage(true, AGGRESSIVE);
}


bool Heap::CollectGarbage(int requested_size,
                          AllocationSpace space,
                          CollectionPolicy collectionPolicy) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

@ -442,7 +447,7 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    PerformGarbageCollection(space, collector, &tracer);
    PerformGarbageCollection(collector, &tracer, collectionPolicy);
    rate->Stop();

    GarbageCollectionEpilogue();
@ -475,7 +480,7 @@ bool Heap::CollectGarbage(int requested_size, AllocationSpace space) {

void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(NEW_SPACE, SCAVENGER, &tracer);
  PerformGarbageCollection(SCAVENGER, &tracer, NORMAL);
}


@ -664,9 +669,9 @@ void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  survival_rate_ = survival_rate;
}

void Heap::PerformGarbageCollection(AllocationSpace space,
                                    GarbageCollector collector,
                                    GCTracer* tracer) {
void Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer,
                                    CollectionPolicy collectionPolicy) {
  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
@ -696,25 +701,45 @@ void Heap::PerformGarbageCollection(AllocationSpace space,
    UpdateSurvivalRateTrend(start_new_space_size);

    int old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
    UpdateOldSpaceLimits();

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    // Major GC would invoke weak handle callbacks on weakly reachable
    // handles, but won't collect weakly reachable objects until next
    // major GC. Therefore if we collect aggressively and weak handle callback
    // has been invoked, we rerun major GC to release objects which become
    // garbage.
    if (collectionPolicy == AGGRESSIVE) {
      // Note: as weak callbacks can execute arbitrary code, we cannot
      // hope that eventually there will be no weak callbacks invocations.
      // Therefore stop recollecting after several attempts.
      const int kMaxNumberOfAttempts = 7;
      for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
        { DisableAssertNoAllocation allow_allocation;
          GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
          if (!GlobalHandles::PostGarbageCollectionProcessing()) break;
        }
        MarkCompact(tracer);
        // Weak handle callbacks can allocate data, so keep limits correct.
        UpdateOldSpaceLimits();
      }
    } else {
      if (high_survival_rate_during_scavenges &&
          IsStableOrIncreasingSurvivalTrend()) {
        // Stable high survival rates of young objects both during partial and
        // full collection indicate that mutator is either building or modifying
        // a structure with a long lifetime.
        // In this case we aggressively raise old generation memory limits to
        // postpone subsequent mark-sweep collection and thus trade memory
        // space for the mutation speed.
        old_gen_promotion_limit_ *= 2;
        old_gen_allocation_limit_ *= 2;
      }
    }

    old_gen_exhausted_ = false;
    { DisableAssertNoAllocation allow_allocation;
      GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
      GlobalHandles::PostGarbageCollectionProcessing();
    }
  } else {
    tracer_ = tracer;
    Scavenge();
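This AGGRESSIVE loop is the "try harder to free up memory" fix from the ChangeLog: weak-handle callbacks can release references, but the freed objects only become collectible on the next major GC, so the loop alternates callbacks and collections with a hard cap. A standalone sketch of the retry pattern (the two callbacks are hypothetical stand-ins, not V8 functions):

#include <cstdio>

// Returns true if any weak callback actually ran this round; here a
// fake counter simulates three rounds of pending callbacks.
bool RunWeakCallbacks() {
  static int pending = 3;
  return pending-- > 0;
}

void MajorCollection() { std::puts("mark-compact"); }

int main() {
  // Callbacks can run arbitrary code and might never quiesce, so the
  // number of re-collections is bounded, mirroring kMaxNumberOfAttempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!RunWeakCallbacks()) break;  // nothing was freed, stop early
    MajorCollection();               // release newly unreachable objects
  }
  return 0;
}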
@ -725,12 +750,6 @@ void Heap::PerformGarbageCollection(AllocationSpace space,

    Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

@ -1834,6 +1853,13 @@ bool Heap::CreateInitialObjects() {

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  obj = StringDictionary::Allocate(Runtime::kNumFunctions);
  if (obj->IsFailure()) return false;
  obj = Runtime::InitializeIntrinsicFunctionNames(obj);
  if (obj->IsFailure()) return false;
  set_intrinsic_function_names(StringDictionary::cast(obj));

  if (InitializeNumberStringCache()->IsFailure()) return false;

  // Allocate cache for single character ASCII strings.
26
deps/v8/src/heap.h
vendored
@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -114,6 +114,7 @@ namespace internal {
  V(Object, last_script_id, LastScriptId) \
  V(Script, empty_script, EmptyScript) \
  V(Smi, real_stack_limit, RealStackLimit) \
  V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \

#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#define STRONG_ROOT_LIST(V) \
@ -686,13 +687,21 @@ class Heap : public AllStatic {
  static void GarbageCollectionPrologue();
  static void GarbageCollectionEpilogue();

  enum CollectionPolicy { NORMAL, AGGRESSIVE };

  // Performs garbage collection operation.
  // Returns whether required_space bytes are available after the collection.
  static bool CollectGarbage(int required_space, AllocationSpace space);
  static bool CollectGarbage(int required_space,
                             AllocationSpace space,
                             CollectionPolicy collectionPolicy = NORMAL);

  // Performs a full garbage collection. Force compaction if the
  // parameter is true.
  static void CollectAllGarbage(bool force_compaction);
  static void CollectAllGarbage(bool force_compaction,
                                CollectionPolicy collectionPolicy = NORMAL);

  // Last hope GC, should try to squeeze as much as possible.
  static void CollectAllAvailableGarbage();

  // Notify the heap that a context has been disposed.
  static int NotifyContextDisposed() { return ++contexts_disposed_; }
@ -1213,9 +1222,14 @@ class Heap : public AllStatic {
  static GarbageCollector SelectGarbageCollector(AllocationSpace space);

  // Performs garbage collection
  static void PerformGarbageCollection(AllocationSpace space,
                                       GarbageCollector collector,
                                       GCTracer* tracer);
  static void PerformGarbageCollection(GarbageCollector collector,
                                       GCTracer* tracer,
                                       CollectionPolicy collectionPolicy);

  static const int kMinimumPromotionLimit = 2 * MB;
  static const int kMinimumAllocationLimit = 8 * MB;

  inline static void UpdateOldSpaceLimits();

  // Allocate an uninitialized object in map space. The behavior is identical
  // to Heap::AllocateRaw(size_in_bytes, MAP_SPACE), except that (a) it doesn't
7
deps/v8/src/ia32/assembler-ia32.cc
vendored
@ -860,9 +860,14 @@ void Assembler::add(const Operand& dst, const Immediate& x) {


void Assembler::and_(Register dst, int32_t imm32) {
  and_(dst, Immediate(imm32));
}


void Assembler::and_(Register dst, const Immediate& x) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  emit_arith(4, Operand(dst), Immediate(imm32));
  emit_arith(4, Operand(dst), x);
}

1
deps/v8/src/ia32/assembler-ia32.h
vendored
@ -577,6 +577,7 @@ class Assembler : public Malloced {
  void add(const Operand& dst, const Immediate& x);

  void and_(Register dst, int32_t imm32);
  void and_(Register dst, const Immediate& x);
  void and_(Register dst, const Operand& src);
  void and_(const Operand& src, Register dst);
  void and_(const Operand& dst, const Immediate& x);
100
deps/v8/src/ia32/code-stubs-ia32.cc
vendored
@ -1879,36 +1879,36 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,


void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
  Label slow, done;
  Label slow, done, undo;

  if (op_ == Token::SUB) {
    // Check whether the value is a smi.
    Label try_float;
    __ test(eax, Immediate(kSmiTagMask));
    __ j(not_zero, &try_float, not_taken);
    if (include_smi_code_) {
      // Check whether the value is a smi.
      Label try_float;
      __ test(eax, Immediate(kSmiTagMask));
      __ j(not_zero, &try_float, not_taken);

    if (negative_zero_ == kStrictNegativeZero) {
      // Go slow case if the value of the expression is zero
      // to make sure that we switch between 0 and -0.
      __ test(eax, Operand(eax));
      __ j(zero, &slow, not_taken);
      if (negative_zero_ == kStrictNegativeZero) {
        // Go slow case if the value of the expression is zero
        // to make sure that we switch between 0 and -0.
        __ test(eax, Operand(eax));
        __ j(zero, &slow, not_taken);
      }

      // The value of the expression is a smi that is not zero. Try
      // optimistic subtraction '0 - value'.
      __ mov(edx, Operand(eax));
      __ Set(eax, Immediate(0));
      __ sub(eax, Operand(edx));
      __ j(overflow, &undo, not_taken);
      __ StubReturn(1);

      // Try floating point case.
      __ bind(&try_float);
    } else if (FLAG_debug_code) {
      __ AbortIfSmi(eax);
    }

    // The value of the expression is a smi that is not zero. Try
    // optimistic subtraction '0 - value'.
    Label undo;
    __ mov(edx, Operand(eax));
    __ Set(eax, Immediate(0));
    __ sub(eax, Operand(edx));
    __ j(no_overflow, &done, taken);

    // Restore eax and go slow case.
    __ bind(&undo);
    __ mov(eax, Operand(edx));
    __ jmp(&slow);

    // Try floating point case.
    __ bind(&try_float);
    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
    __ cmp(edx, Factory::heap_number_map());
    __ j(not_equal, &slow);
@ -1928,6 +1928,18 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
      __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
    }
  } else if (op_ == Token::BIT_NOT) {
    if (include_smi_code_) {
      Label non_smi;
      __ test(eax, Immediate(kSmiTagMask));
      __ j(not_zero, &non_smi);
      __ not_(eax);
      __ and_(eax, ~kSmiTagMask);  // Remove inverted smi-tag.
      __ ret(0);
      __ bind(&non_smi);
    } else if (FLAG_debug_code) {
      __ AbortIfSmi(eax);
    }

    // Check if the operand is a heap number.
    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
    __ cmp(edx, Factory::heap_number_map());
@ -1978,6 +1990,10 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
  __ bind(&done);
  __ StubReturn(1);

  // Restore eax and go slow case.
  __ bind(&undo);
  __ mov(eax, Operand(edx));

  // Handle the slow case by jumping to the JavaScript builtin.
  __ bind(&slow);
  __ pop(ecx);  // pop return address.
@ -2613,6 +2629,27 @@ void CompareStub::Generate(MacroAssembler* masm) {

  Label check_unequal_objects, done;

  // Compare two smis if required.
  if (include_smi_compare_) {
    Label non_smi, smi_done;
    __ mov(ecx, Operand(edx));
    __ or_(ecx, Operand(eax));
    __ test(ecx, Immediate(kSmiTagMask));
    __ j(not_zero, &non_smi, not_taken);
    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
    __ j(no_overflow, &smi_done);
    __ neg(edx);  // Correct sign in case of overflow.
    __ bind(&smi_done);
    __ mov(eax, edx);
    __ ret(0);
    __ bind(&non_smi);
  } else if (FLAG_debug_code) {
    __ mov(ecx, Operand(edx));
    __ or_(ecx, Operand(eax));
    __ test(ecx, Immediate(kSmiTagMask));
    __ Assert(not_zero, "Unexpected smi operands.");
  }

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.
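The smi fast path above relies on V8's pointer tagging on ia32: a smi is the integer value shifted left by one with tag bit 0, so OR-ing two values and testing the low bit checks both operands at once, and subtracting two smis yields their comparison result directly. A minimal sketch of the tagging trick (simplified; real V8 also handles the overflow fix-up the stub performs with neg):

#include <cstdint>
#include <cstdio>

const intptr_t kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // low bit

intptr_t ToSmi(intptr_t value) { return value << kSmiTagSize; }

// Mirrors "or ecx, eax; test ecx, kSmiTagMask": one test covers both.
bool BothSmis(intptr_t a, intptr_t b) { return ((a | b) & kSmiTagMask) == 0; }

int main() {
  intptr_t x = ToSmi(7);
  intptr_t y = ToSmi(3);
  if (BothSmis(x, y)) {
    // The difference of two smis is a smi whose sign is the comparison
    // result, which is why the stub can just return edx - eax.
    std::printf("cmp=%ld\n", static_cast<long>(x - y));  // positive: 7 > 3
  }
  return 0;
}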
@ -3501,7 +3538,8 @@ int CompareStub::MinorKey() {
         | RegisterField::encode(false)   // lhs_ and rhs_ are not used
         | StrictField::encode(strict_)
         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
         | IncludeNumberCompareField::encode(include_number_compare_);
         | IncludeNumberCompareField::encode(include_number_compare_)
         | IncludeSmiCompareField::encode(include_smi_compare_);
}


@ -3541,12 +3579,18 @@ const char* CompareStub::GetName() {
    include_number_compare_name = "_NO_NUMBER";
  }

  const char* include_smi_compare_name = "";
  if (!include_smi_compare_) {
    include_smi_compare_name = "_NO_SMI";
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "CompareStub_%s%s%s%s",
               "CompareStub_%s%s%s%s%s",
               cc_name,
               strict_name,
               never_nan_nan_name,
               include_number_compare_name);
               include_number_compare_name,
               include_smi_compare_name);
  return name_;
}

34
deps/v8/src/ia32/codegen-ia32.cc
vendored
@ -2646,6 +2646,19 @@ static Condition DoubleCondition(Condition cc) {
}


static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
                                        bool inline_number_compare) {
  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
  if (nan_info == kCantBothBeNaN) {
    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
  }
  if (inline_number_compare) {
    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
  }
  return flags;
}


void CodeGenerator::Comparison(AstNode* node,
                               Condition cc,
                               bool strict,
@ -2773,7 +2786,9 @@ void CodeGenerator::Comparison(AstNode* node,

      // Setup and call the compare stub.
      is_not_string.Bind(&left_side);
      CompareStub stub(cc, strict, kCantBothBeNaN);
      CompareFlags flags =
          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
      CompareStub stub(cc, strict, flags);
      Result result = frame_->CallStub(&stub, &left_side, &right_side);
      result.ToRegister();
      __ cmp(result.reg(), 0);
@ -2867,7 +2882,8 @@ void CodeGenerator::Comparison(AstNode* node,

      // End of in-line compare, call out to the compare stub. Don't include
      // number comparison in the stub if it was inlined.
      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
      CompareStub stub(cc, strict, flags);
      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
      __ test(answer.reg(), Operand(answer.reg()));
      answer.Unuse();
@ -2900,7 +2916,9 @@ void CodeGenerator::Comparison(AstNode* node,

      // End of in-line compare, call out to the compare stub. Don't include
      // number comparison in the stub if it was inlined.
      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
      CompareFlags flags =
          ComputeCompareFlags(nan_info, inline_number_compare);
      CompareStub stub(cc, strict, flags);
      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
      __ test(answer.reg(), Operand(answer.reg()));
      answer.Unuse();
@ -2994,7 +3012,6 @@ void CodeGenerator::ConstantSmiComparison(Condition cc,
        dest->false_target()->Branch(zero);
      } else {
        // Do the smi check, then the comparison.
        JumpTarget is_not_smi;
        __ test(left_reg, Immediate(kSmiTagMask));
        is_smi.Branch(zero, left_side, right_side);
      }
@ -3031,7 +3048,9 @@ void CodeGenerator::ConstantSmiComparison(Condition cc,
      }

      // Setup and call the compare stub.
      CompareStub stub(cc, strict, kCantBothBeNaN);
      CompareFlags flags =
          static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_COMPARE_IN_STUB);
      CompareStub stub(cc, strict, flags);
      Result result = frame_->CallStub(&stub, left_side, right_side);
      result.ToRegister();
      __ test(result.reg(), Operand(result.reg()));
@ -8146,6 +8165,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
      GenericUnaryOpStub stub(
          Token::SUB,
          overwrite,
          NO_UNARY_FLAGS,
          no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
      Result operand = frame_->Pop();
      Result answer = frame_->CallStub(&stub, &operand);
@ -8173,7 +8193,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
      __ test(operand.reg(), Immediate(kSmiTagMask));
      smi_label.Branch(zero, &operand, taken);

      GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
      GenericUnaryOpStub stub(Token::BIT_NOT,
                              overwrite,
                              NO_UNARY_SMI_CODE_IN_STUB);
      Result answer = frame_->CallStub(&stub, &operand);
      continue_label.Jump(&answer);

20
deps/v8/src/ia32/codegen-ia32.h
vendored
@ -345,10 +345,6 @@ class CodeGenerator: public AstVisitor {
  bool in_spilled_code() const { return in_spilled_code_; }
  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }

  // If the name is an inline runtime function call return the number of
  // expected arguments. Otherwise return -1.
  static int InlineRuntimeCallArgumentsCount(Handle<String> name);

  // Return a position of the element at |index_as_smi| + |additional_offset|
  // in FixedArray pointer to which is held in |array|. |index_as_smi| is Smi.
  static Operand FixedArrayElementOperand(Register array,
@ -363,6 +359,12 @@ class CodeGenerator: public AstVisitor {
  }

 private:
  // Type of a member function that generates inline code for a native function.
  typedef void (CodeGenerator::*InlineFunctionGenerator)
      (ZoneList<Expression*>*);

  static const InlineFunctionGenerator kInlineFunctionGenerators[];

  // Construction/Destruction
  explicit CodeGenerator(MacroAssembler* masm);

@ -624,13 +626,9 @@ class CodeGenerator: public AstVisitor {

  void CheckStack();

  struct InlineRuntimeLUT {
    void (CodeGenerator::*method)(ZoneList<Expression*>*);
    const char* name;
    int nargs;
  };
  static InlineFunctionGenerator FindInlineFunctionGenerator(
      Runtime::FunctionId function_id);

  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
  bool CheckForInlineRuntimeCall(CallRuntime* node);

  void ProcessDeclarations(ZoneList<Declaration*>* declarations);

@ -792,8 +790,6 @@ class CodeGenerator: public AstVisitor {
  // in a spilled state.
  bool in_spilled_code_;

  static InlineRuntimeLUT kInlineRuntimeLUT[];

  friend class VirtualFrame;
  friend class JumpTarget;
  friend class Reference;
299
deps/v8/src/ia32/full-codegen-ia32.cc
vendored
@ -514,7 +514,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
    int context_chain_length =
        scope()->ContextChainLength(slot->var()->scope());
    __ LoadContext(scratch, context_chain_length);
    return CodeGenerator::ContextOperand(scratch, slot->index());
    return ContextOperand(scratch, slot->index());
  }
  case Slot::LOOKUP:
    UNREACHABLE();
@ -574,19 +574,17 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
      if (FLAG_debug_code) {
        // Check if we have the correct context pointer.
        __ mov(ebx,
               CodeGenerator::ContextOperand(esi, Context::FCONTEXT_INDEX));
        __ mov(ebx, ContextOperand(esi, Context::FCONTEXT_INDEX));
        __ cmp(ebx, Operand(esi));
        __ Check(equal, "Unexpected declaration in current context.");
      }
      if (mode == Variable::CONST) {
        __ mov(CodeGenerator::ContextOperand(esi, slot->index()),
        __ mov(ContextOperand(esi, slot->index()),
               Immediate(Factory::the_hole_value()));
        // No write barrier since the hole value is in old space.
      } else if (function != NULL) {
        VisitForValue(function, kAccumulator);
        __ mov(CodeGenerator::ContextOperand(esi, slot->index()),
               result_register());
        __ mov(ContextOperand(esi, slot->index()), result_register());
        int offset = Context::SlotOffset(slot->index());
        __ mov(ebx, esi);
        __ RecordWrite(ebx, offset, result_register(), ecx);
@ -686,7 +684,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {

    // Perform the comparison as if via '==='.
    __ mov(edx, Operand(esp, 0));  // Switch value.
    if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
    if (inline_smi_code) {
      Label slow_case;
      __ mov(ecx, edx);
      __ or_(ecx, Operand(eax));
@ -699,7 +698,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
      __ bind(&slow_case);
    }

    CompareStub stub(equal, true);
    CompareFlags flags = inline_smi_code
        ? NO_SMI_COMPARE_IN_STUB
        : NO_COMPARE_FLAGS;
    CompareStub stub(equal, true, flags);
    __ CallStub(&stub);
    __ test(eax, Operand(eax));
    __ j(not_equal, &next_test);
@ -758,13 +760,57 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
  __ bind(&done_convert);
  __ push(eax);

  // TODO(kasperl): Check cache validity in generated code. This is a
  // fast case for the JSObject::IsSimpleEnum cache validity
  // checks. If we cannot guarantee cache validity, call the runtime
  // system to check cache validity or get the property names in a
  // fixed array.
  // Check cache validity in generated code. This is a fast case for
  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
  // guarantee cache validity, call the runtime system to check cache
  // validity or get the property names in a fixed array.
  Label next, call_runtime;
  __ mov(ecx, eax);
  __ bind(&next);

  // Check that there are no elements. Register ecx contains the
  // current JS object we've reached through the prototype chain.
  __ cmp(FieldOperand(ecx, JSObject::kElementsOffset),
         Factory::empty_fixed_array());
  __ j(not_equal, &call_runtime);

  // Check that instance descriptors are not empty so that we can
  // check for an enum cache. Leave the map in ebx for the subsequent
  // prototype load.
  __ mov(ebx, FieldOperand(ecx, HeapObject::kMapOffset));
  __ mov(edx, FieldOperand(ebx, Map::kInstanceDescriptorsOffset));
  __ cmp(edx, Factory::empty_descriptor_array());
  __ j(equal, &call_runtime);

  // Check that there is an enum cache in the non-empty instance
  // descriptors (edx). This is the case if the next enumeration
  // index field does not contain a smi.
  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
  __ test(edx, Immediate(kSmiTagMask));
  __ j(zero, &call_runtime);

  // For all objects but the receiver, check that the cache is empty.
  Label check_prototype;
  __ cmp(ecx, Operand(eax));
  __ j(equal, &check_prototype);
  __ mov(edx, FieldOperand(edx, DescriptorArray::kEnumCacheBridgeCacheOffset));
  __ cmp(edx, Factory::empty_fixed_array());
  __ j(not_equal, &call_runtime);

  // Load the prototype from the map and loop if non-null.
  __ bind(&check_prototype);
  __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
  __ cmp(ecx, Factory::null_value());
  __ j(not_equal, &next);

  // The enum cache is valid. Load the map of the object being
  // iterated over and use the cache for the iteration.
  Label use_cache;
  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);  // Duplicate the enumerable object on the stack.
  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);

@ -776,6 +822,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
|
||||
__ j(not_equal, &fixed_array);
|
||||
|
||||
// We got a map in register eax. Get the enumeration cache from it.
|
||||
__ bind(&use_cache);
|
||||
__ mov(ecx, FieldOperand(eax, Map::kInstanceDescriptorsOffset));
|
||||
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
|
||||
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
|
||||
@ -885,6 +932,152 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
|
||||
}
|
||||
|
||||
|
||||
void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
|
||||
Slot* slot,
|
||||
TypeofState typeof_state,
|
||||
Label* slow) {
|
||||
Register context = esi;
|
||||
Register temp = edx;
|
||||
|
||||
Scope* s = scope();
|
||||
while (s != NULL) {
|
||||
if (s->num_heap_slots() > 0) {
|
||||
if (s->calls_eval()) {
|
||||
// Check that extension is NULL.
|
||||
__ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
|
||||
Immediate(0));
|
||||
__ j(not_equal, slow);
|
||||
}
|
||||
// Load next context in chain.
|
||||
__ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
|
||||
__ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
|
||||
// Walk the rest of the chain without clobbering esi.
|
||||
context = temp;
|
||||
}
|
||||
// If no outer scope calls eval, we do not need to check more
|
||||
// context extensions. If we have reached an eval scope, we check
|
||||
// all extensions from this point.
|
||||
if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
|
||||
s = s->outer_scope();
|
||||
}
|
||||
|
||||
if (s != NULL && s->is_eval_scope()) {
|
||||
// Loop up the context chain. There is no frame effect so it is
|
||||
// safe to use raw labels here.
|
||||
Label next, fast;
|
||||
if (!context.is(temp)) {
|
||||
__ mov(temp, context);
|
||||
}
|
||||
__ bind(&next);
|
||||
// Terminate at global context.
|
||||
__ cmp(FieldOperand(temp, HeapObject::kMapOffset),
|
||||
Immediate(Factory::global_context_map()));
|
||||
__ j(equal, &fast);
|
||||
// Check that extension is NULL.
|
||||
__ cmp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
|
||||
__ j(not_equal, slow);
|
||||
// Load next context in chain.
|
||||
__ mov(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
|
||||
__ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
|
||||
__ jmp(&next);
|
||||
__ bind(&fast);
|
||||
}
|
||||
|
||||
// All extension objects were empty and it is safe to use a global
|
||||
// load IC call.
|
||||
__ mov(eax, CodeGenerator::GlobalObject());
|
||||
__ mov(ecx, slot->var()->name());
|
||||
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
|
||||
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
|
||||
? RelocInfo::CODE_TARGET
|
||||
: RelocInfo::CODE_TARGET_CONTEXT;
|
||||
__ call(ic, mode);
|
||||
__ nop(); // Signal no inlined code.
|
||||
}
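
The scope walk above emits one extension check per heap-allocated scope that calls eval, and stops walking statically as soon as no outer scope can introduce extensions. A minimal C++ sketch of that decision logic, using an illustrative Scope struct rather than V8's real scope tree (names and fields here are stand-ins, not the real API):

#include <cstdio>

// Illustrative stand-in for V8's Scope; the fields mirror the predicates
// used in the loop above.
struct Scope {
  Scope* outer;
  bool has_heap_slots;          // s->num_heap_slots() > 0
  bool calls_eval;              // s->calls_eval()
  bool outer_scope_calls_eval;  // s->outer_scope_calls_eval()
  bool is_eval_scope;           // s->is_eval_scope()
};

// Returns how many static extension checks the generated code would need,
// and whether it must fall back to the runtime walk of the context chain.
static int CountExtensionChecks(Scope* s, bool* needs_runtime_walk) {
  int checks = 0;
  while (s != nullptr) {
    if (s->has_heap_slots && s->calls_eval) checks++;  // one cmp/j pair
    if (!s->outer_scope_calls_eval || s->is_eval_scope) break;
    s = s->outer;
  }
  *needs_runtime_walk = (s != nullptr && s->is_eval_scope);
  return checks;
}

int main() {
  Scope global = { nullptr, true, false, false, false };
  Scope fn = { &global, true, true, false, false };  // function containing eval
  bool walk = false;
  std::printf("checks=%d runtime_walk=%d\n",
              CountExtensionChecks(&fn, &walk), walk ? 1 : 0);
  return 0;
}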


MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Label* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = esi;
  Register temp = ebx;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ cmp(ContextOperand(context, Context::EXTENSION_INDEX),
               Immediate(0));
        __ j(not_equal, slow);
      }
      __ mov(temp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ mov(temp, FieldOperand(temp, JSFunction::kContextOffset));
      // Walk the rest of the chain without clobbering esi.
      context = temp;
    }
  }
  // Check that last extension is NULL.
  __ cmp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
  __ j(not_equal, slow);
  __ mov(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(temp, slot->index());
}


void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
    Slot* slot,
    TypeofState typeof_state,
    Label* slow,
    Label* done) {
  // Generate fast-case code for variables that might be shadowed by
  // eval-introduced variables.  Eval is used a lot without
  // introducing variables.  In those cases, we do not want to
  // perform a runtime call for all variables in the scope
  // containing the eval.
  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
    __ jmp(done);
  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
    if (potential_slot != NULL) {
      // Generate fast case for locals that rewrite to slots.
      __ mov(eax,
             ContextSlotOperandCheckExtensions(potential_slot, slow));
      if (potential_slot->var()->mode() == Variable::CONST) {
        __ cmp(eax, Factory::the_hole_value());
        __ j(not_equal, done);
        __ mov(eax, Factory::undefined_value());
      }
      __ jmp(done);
    } else if (rewrite != NULL) {
      // Generate fast case for calls of an argument function.
      Property* property = rewrite->AsProperty();
      if (property != NULL) {
        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
        Literal* key_literal = property->key()->AsLiteral();
        if (obj_proxy != NULL &&
            key_literal != NULL &&
            obj_proxy->IsArguments() &&
            key_literal->handle()->IsSmi()) {
          // Load arguments object if there are no eval-introduced
          // variables.  Then load the argument from the arguments
          // object using keyed load.
          __ mov(edx,
                 ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
                                                   slow));
          __ mov(eax, Immediate(key_literal->handle()));
          Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
          __ call(ic, RelocInfo::CODE_TARGET);
          __ jmp(done);
        }
      }
    }
  }
}
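
For DYNAMIC_LOCAL slots, the fast case above also preserves const semantics: a const slot that still holds the hole sentinel must read as undefined. A sketch of that convention with ordinary C++ values (the sentinel enum and helper are illustrative, not V8 types):

#include <cassert>
#include <cstdio>

// Illustrative sentinel playing the role of Factory::the_hole_value().
enum Value { kTheHole, kUndefined, kSmi42 };

// Mirrors the cmp/j/mov sequence: a const cell still holding the hole
// reads as undefined; real values pass through untouched.
static Value LoadConstSlot(Value slot) {
  if (slot == kTheHole) return kUndefined;
  return slot;
}

int main() {
  assert(LoadConstSlot(kTheHole) == kUndefined);
  assert(LoadConstSlot(kSmi42) == kSmi42);
  std::printf("const-hole convention holds\n");
  return 0;
}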


void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                         Expression::Context context) {
  // Four cases: non-this global variables, lookup slots, all other
@ -909,10 +1102,19 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
    Apply(context, eax);

  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
    Label done, slow;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);

    __ bind(&slow);
    Comment cmnt(masm_, "Lookup slot");
    __ push(esi);  // Context.
    __ push(Immediate(var->name()));
    __ CallRuntime(Runtime::kLoadContextSlot, 2);
    __ bind(&done);

    Apply(context, eax);

  } else if (slot != NULL) {
@ -1953,14 +2155,40 @@ void FullCodeGenerator::VisitCall(Call* expr) {
    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
  } else if (var != NULL && var->slot() != NULL &&
             var->slot()->type() == Slot::LOOKUP) {
    // Call to a lookup slot (dynamically introduced variable).  Call the
    // runtime to find the function to call (returned in eax) and the object
    // holding it (returned in edx).
    // Call to a lookup slot (dynamically introduced variable).
    Label slow, done;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    EmitDynamicLoadFromSlotFastCase(var->slot(),
                                    NOT_INSIDE_TYPEOF,
                                    &slow,
                                    &done);

    __ bind(&slow);
    // Call the runtime to find the function to call (returned in eax)
    // and the object holding it (returned in edx).
    __ push(context_register());
    __ push(Immediate(var->name()));
    __ CallRuntime(Runtime::kLoadContextSlot, 2);
    __ push(eax);  // Function.
    __ push(edx);  // Receiver.

    // If fast case code has been generated, emit code to push the
    // function and receiver and have the slow path jump around this
    // code.
    if (done.is_linked()) {
      Label call;
      __ jmp(&call);
      __ bind(&done);
      // Push function.
      __ push(eax);
      // Push global receiver.
      __ mov(ebx, CodeGenerator::GlobalObject());
      __ push(FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
      __ bind(&call);
    }

    EmitCallWithStub(expr);
  } else if (fun->AsProperty() != NULL) {
    // Call to an object property.
@ -2781,12 +3009,10 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
  Register key = eax;
  Register cache = ebx;
  Register tmp = ecx;
  __ mov(cache, CodeGenerator::ContextOperand(esi, Context::GLOBAL_INDEX));
  __ mov(cache, ContextOperand(esi, Context::GLOBAL_INDEX));
  __ mov(cache,
         FieldOperand(cache, GlobalObject::kGlobalContextOffset));
  __ mov(cache,
         CodeGenerator::ContextOperand(
             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ mov(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ mov(cache,
         FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));

@ -2917,7 +3143,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
    InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
    Handle<Code> ic = CodeGenerator::ComputeCallInitialize(arg_count, in_loop);
    __ call(ic, RelocInfo::CODE_TARGET);
    // Restore context register.
    // Restore context register.
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // Call the C runtime function.
@ -3036,7 +3262,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
      UnaryOverwriteMode overwrite =
          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
      GenericUnaryOpStub stub(Token::SUB, overwrite);
      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
      // GenericUnaryOpStub expects the argument to be in the
      // accumulator register eax.
      VisitForValue(expr->expression(), kAccumulator);
@ -3051,7 +3277,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      // in the accumulator register eax.
      VisitForValue(expr->expression(), kAccumulator);
      Label done;
      if (ShouldInlineSmiCase(expr->op())) {
      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
      if (inline_smi_case) {
        Label call_stub;
        __ test(eax, Immediate(kSmiTagMask));
        __ j(not_zero, &call_stub);
@ -3063,7 +3290,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
        bool overwrite = expr->expression()->ResultOverwriteAllowed();
        UnaryOverwriteMode mode =
            overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
        GenericUnaryOpStub stub(Token::BIT_NOT, mode);
        UnaryOpFlags flags = inline_smi_case
            ? NO_UNARY_SMI_CODE_IN_STUB
            : NO_UNARY_FLAGS;
        GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
        __ CallStub(&stub);
        __ bind(&done);
        Apply(context_, eax);
@ -3262,13 +3492,24 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
    // Use a regular load, not a contextual load, to avoid a reference
    // error.
    __ call(ic, RelocInfo::CODE_TARGET);
    __ nop();  // Signal no inlined code.
    if (where == kStack) __ push(eax);
  } else if (proxy != NULL &&
             proxy->var()->slot() != NULL &&
             proxy->var()->slot()->type() == Slot::LOOKUP) {
    Label done, slow;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    Slot* slot = proxy->var()->slot();
    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);

    __ bind(&slow);
    __ push(esi);
    __ push(Immediate(proxy->name()));
    __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
    __ bind(&done);

    if (where == kStack) __ push(eax);
  } else {
    // This expression cannot throw a reference error at the top level.
@ -3441,7 +3682,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
      UNREACHABLE();
  }

  if (ShouldInlineSmiCase(op)) {
  bool inline_smi_code = ShouldInlineSmiCase(op);
  if (inline_smi_code) {
    Label slow_case;
    __ mov(ecx, Operand(edx));
    __ or_(ecx, Operand(eax));
@ -3452,7 +3694,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
    __ bind(&slow_case);
  }

  CompareStub stub(cc, strict);
  CompareFlags flags = inline_smi_code
      ? NO_SMI_COMPARE_IN_STUB
      : NO_COMPARE_FLAGS;
  CompareStub stub(cc, strict, flags);
  __ CallStub(&stub);
  __ test(eax, Operand(eax));
  Split(cc, if_true, if_false, fall_through);
@ -3512,7 +3757,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {


void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
  __ mov(dst, CodeGenerator::ContextOperand(esi, context_index));
  __ mov(dst, ContextOperand(esi, context_index));
}
222
deps/v8/src/ia32/stub-cache-ia32.cc
vendored
@ -1255,6 +1255,61 @@ void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
}


void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
                                                   JSObject* holder,
                                                   String* name,
                                                   Label* miss) {
  ASSERT(holder->IsGlobalObject());

  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));

  // If the object is the holder then we know that it's a global
  // object which can only happen for contextual calls. In this case,
  // the receiver cannot be a smi.
  if (object != holder) {
    __ test(edx, Immediate(kSmiTagMask));
    __ j(zero, miss, not_taken);
  }

  // Check that the maps haven't changed.
  CheckPrototypes(object, edx, holder, ebx, eax, edi, name, miss);
}
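
The receiver load above relies on the stub call layout: the return address sits at esp[0], argument n (zero-based) at esp[(argc - n) * 4], and the receiver one slot past the last argument. The offset arithmetic in isolation, as a plain C++ check rather than real machine state:

#include <cassert>

int main() {
  const int kPointerSize = 4;  // ia32 pointer width in bytes
  int argc = 2;
  // Slots above the return address: arg1 at 4, arg0 at 8, receiver at 12.
  assert((argc - 1) * kPointerSize == 4);   // last argument
  assert((argc - 0) * kPointerSize == 8);   // first argument
  assert((argc + 1) * kPointerSize == 12);  // receiver, as loaded into edx
  return 0;
}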


void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    Label* miss) {
  // Get the value from the cell.
  __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
  __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (Heap::InNewSpace(function)) {
    // We can't embed a pointer to a function in new space so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ test(edi, Immediate(kSmiTagMask));
    __ j(zero, miss, not_taken);
    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
    __ j(not_equal, miss, not_taken);

    // Check the shared function info. Make sure it hasn't changed.
    __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
           Immediate(Handle<SharedFunctionInfo>(function->shared())));
    __ j(not_equal, miss, not_taken);
  } else {
    __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
    __ j(not_equal, miss, not_taken);
  }
}
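
The split above matters because compiled code cannot embed a raw pointer into new space, so the stub falls back to comparing the SharedFunctionInfo; as the comment notes, that also lets every closure over the same function literal share one IC. A sketch of the two identity notions with plain structs (illustrative types, not V8's):

#include <cassert>

struct SharedFunctionInfo { int id; };
struct JSFunction { SharedFunctionInfo* shared; };

// Old-space check: same closure object.  New-space fallback: same shared
// info, which also matches sibling closures of the same function literal.
static bool SameClosure(JSFunction* a, JSFunction* b) { return a == b; }
static bool SameShared(JSFunction* a, JSFunction* b) {
  return a->shared == b->shared;
}

int main() {
  SharedFunctionInfo s = { 1 };
  JSFunction f1 = { &s };
  JSFunction f2 = { &s };  // two closures, one function
  assert(!SameClosure(&f1, &f2));
  assert(SameShared(&f1, &f2));
  return 0;
}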


Object* CallStubCompiler::GenerateMissBranch() {
  Object* obj = StubCache::ComputeCallMiss(arguments().immediate(), kind_);
  if (obj->IsFailure()) return obj;
@ -1320,9 +1375,9 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,

Object* CallStubCompiler::CompileArrayPushCall(Object* object,
                                               JSObject* holder,
                                               JSGlobalPropertyCell* cell,
                                               JSFunction* function,
                                               String* name,
                                               CheckType check) {
                                               String* name) {
  // ----------- S t a t e -------------
  //  -- ecx                 : name
  //  -- esp[0]              : return address
@ -1330,12 +1385,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
  //  -- ...
  //  -- esp[(argc + 1) * 4] : receiver
  // -----------------------------------
  ASSERT(check == RECEIVER_MAP_CHECK);

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray()) {
    return Heap::undefined_value();
  }
  if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();

  Label miss;

@ -1469,9 +1521,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,

Object* CallStubCompiler::CompileArrayPopCall(Object* object,
                                              JSObject* holder,
                                              JSGlobalPropertyCell* cell,
                                              JSFunction* function,
                                              String* name,
                                              CheckType check) {
                                              String* name) {
  // ----------- S t a t e -------------
  //  -- ecx                 : name
  //  -- esp[0]              : return address
@ -1479,12 +1531,9 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
  //  -- ...
  //  -- esp[(argc + 1) * 4] : receiver
  // -----------------------------------
  ASSERT(check == RECEIVER_MAP_CHECK);

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray()) {
    return Heap::undefined_value();
  }
  if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();

  Label miss, return_undefined, call_builtin;

@ -1551,11 +1600,12 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}


Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
                                                      JSObject* holder,
                                                      JSFunction* function,
                                                      String* name,
                                                      CheckType check) {
Object* CallStubCompiler::CompileStringCharCodeAtCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  // ----------- S t a t e -------------
  //  -- ecx                 : function name
  //  -- esp[0]              : return address
@ -1565,7 +1615,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
  // -----------------------------------

  // If object is not a string, bail out to regular call.
  if (!object->IsString()) return Heap::undefined_value();
  if (!object->IsString() || cell != NULL) return Heap::undefined_value();

  const int argc = arguments().immediate();

@ -1621,9 +1671,9 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,

Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
                                                  JSObject* holder,
                                                  JSGlobalPropertyCell* cell,
                                                  JSFunction* function,
                                                  String* name,
                                                  CheckType check) {
                                                  String* name) {
  // ----------- S t a t e -------------
  //  -- ecx                 : function name
  //  -- esp[0]              : return address
@ -1633,7 +1683,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
  // -----------------------------------

  // If object is not a string, bail out to regular call.
  if (!object->IsString()) return Heap::undefined_value();
  if (!object->IsString() || cell != NULL) return Heap::undefined_value();

  const int argc = arguments().immediate();

@ -1690,6 +1740,79 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
}


Object* CallStubCompiler::CompileStringFromCharCodeCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  // ----------- S t a t e -------------
  //  -- ecx                 : function name
  //  -- esp[0]              : return address
  //  -- esp[(argc - n) * 4] : arg[n] (zero-based)
  //  -- ...
  //  -- esp[(argc + 1) * 4] : receiver
  // -----------------------------------

  const int argc = arguments().immediate();

  // If the object is not a JSObject or we got an unexpected number of
  // arguments, bail out to the regular call.
  if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();

  Label miss;
  GenerateNameCheck(name, &miss);

  if (cell == NULL) {
    __ mov(edx, Operand(esp, 2 * kPointerSize));

    STATIC_ASSERT(kSmiTag == 0);
    __ test(edx, Immediate(kSmiTagMask));
    __ j(zero, &miss);

    CheckPrototypes(JSObject::cast(object), edx, holder, ebx, eax, edi, name,
                    &miss);
  } else {
    ASSERT(cell->value() == function);
    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
    GenerateLoadFunctionFromCell(cell, function, &miss);
  }

  // Load the char code argument.
  Register code = ebx;
  __ mov(code, Operand(esp, 1 * kPointerSize));

  // Check the code is a smi.
  Label slow;
  STATIC_ASSERT(kSmiTag == 0);
  __ test(code, Immediate(kSmiTagMask));
  __ j(not_zero, &slow);

  // Convert the smi code to uint16.
  __ and_(code, Immediate(Smi::FromInt(0xffff)));

  StringCharFromCodeGenerator char_from_code_generator(code, eax);
  char_from_code_generator.GenerateFast(masm());
  __ ret(2 * kPointerSize);

  ICRuntimeCallHelper call_helper;
  char_from_code_generator.GenerateSlow(masm(), call_helper);

  // Tail call the full function. We do not have to patch the receiver
  // because the function makes no use of it.
  __ bind(&slow);
  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);

  __ bind(&miss);
  // ecx: function name.
  Object* obj = GenerateMissBranch();
  if (obj->IsFailure()) return obj;

  // Return the generated code.
  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
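
The and_ with Smi::FromInt(0xffff) above is the ToUint16 step of String.fromCharCode: char codes are taken modulo 2^16. The same clamping in plain C++, with the smi tagging elided:

#include <cassert>
#include <cstdint>

static uint16_t ToUint16(int32_t code) {
  return static_cast<uint16_t>(code & 0xffff);  // matches the masked and_
}

int main() {
  assert(ToUint16(0x41) == 0x41);     // 'A'
  assert(ToUint16(0x10041) == 0x41);  // wraps modulo 65536
  return 0;
}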


Object* CallStubCompiler::CompileCallConstant(Object* object,
                                              JSObject* holder,
                                              JSFunction* function,
@ -1706,12 +1829,10 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
  SharedFunctionInfo* function_info = function->shared();
  if (function_info->HasCustomCallGenerator()) {
    const int id = function_info->custom_call_generator_id();
    Object* result =
        CompileCustomCall(id, object, holder, function, name, check);
    Object* result = CompileCustomCall(
        id, object, holder, NULL, function, name);
    // undefined means bail out to regular compiler.
    if (!result->IsUndefined()) {
      return result;
    }
    if (!result->IsUndefined()) return result;
  }

  Label miss_in_smi_check;
@ -1922,6 +2043,16 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  //  -- ...
  //  -- esp[(argc + 1) * 4] : receiver
  // -----------------------------------

  SharedFunctionInfo* function_info = function->shared();
  if (function_info->HasCustomCallGenerator()) {
    const int id = function_info->custom_call_generator_id();
    Object* result = CompileCustomCall(
        id, object, holder, cell, function, name);
    // undefined means bail out to regular compiler.
    if (!result->IsUndefined()) return result;
  }

  Label miss;

  GenerateNameCheck(name, &miss);
@ -1929,44 +2060,9 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
  GenerateGlobalReceiverCheck(object, holder, name, &miss);

  // If the object is the holder then we know that it's a global
  // object which can only happen for contextual calls. In this case,
  // the receiver cannot be a smi.
  if (object != holder) {
    __ test(edx, Immediate(kSmiTagMask));
    __ j(zero, &miss, not_taken);
  }

  // Check that the maps haven't changed.
  CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);

  // Get the value from the cell.
  __ mov(edi, Immediate(Handle<JSGlobalPropertyCell>(cell)));
  __ mov(edi, FieldOperand(edi, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (Heap::InNewSpace(function)) {
    // We can't embed a pointer to a function in new space so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ test(edi, Immediate(kSmiTagMask));
    __ j(zero, &miss, not_taken);
    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
    __ j(not_equal, &miss, not_taken);

    // Check the shared function info. Make sure it hasn't changed.
    __ cmp(FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset),
           Immediate(Handle<SharedFunctionInfo>(function->shared())));
    __ j(not_equal, &miss, not_taken);
  } else {
    __ cmp(Operand(edi), Immediate(Handle<JSFunction>(function)));
    __ j(not_equal, &miss, not_taken);
  }
  GenerateLoadFunctionFromCell(cell, function, &miss);

  // Patch the receiver on the stack with the global proxy.
  if (object->IsGlobalObject()) {
60
deps/v8/src/liveedit.cc
vendored
@ -617,9 +617,33 @@ class FunctionInfoListener {
    current_parent_index_ = info.GetParentIndex();
  }

  // TODO(LiveEdit): Move private method below.
  // This private section was created here to avoid moving the function
  // to keep already complex diff simpler.
 public:
  // Saves only function code, because for a script function we
  // may never create a SharedFunctionInfo object.
  void FunctionCode(Handle<Code> function_code) {
    FunctionInfoWrapper info =
        FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
    info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
  }

  // Saves full information about a function: its code, its scope info
  // and a SharedFunctionInfo object.
  void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope) {
    if (!shared->IsSharedFunctionInfo()) {
      return;
    }
    FunctionInfoWrapper info =
        FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
    info.SetFunctionCode(Handle<Code>(shared->code()),
                         Handle<Object>(shared->scope_info()));
    info.SetSharedFunctionInfo(shared);

    Handle<Object> scope_info_list(SerializeFunctionScope(scope));
    info.SetOuterScopeInfo(scope_info_list);
  }

  Handle<JSArray> GetResult() { return result_; }

 private:
  Object* SerializeFunctionScope(Scope* scope) {
    HandleScope handle_scope;
@ -676,36 +700,6 @@ class FunctionInfoListener {
    return *scope_info_list;
  }

 public:
  // Saves only function code, because for a script function we
  // may never create a SharedFunctionInfo object.
  void FunctionCode(Handle<Code> function_code) {
    FunctionInfoWrapper info =
        FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
    info.SetFunctionCode(function_code, Handle<Object>(Heap::null_value()));
  }

  // Saves full information about a function: its code, its scope info
  // and a SharedFunctionInfo object.
  void FunctionInfo(Handle<SharedFunctionInfo> shared, Scope* scope) {
    if (!shared->IsSharedFunctionInfo()) {
      return;
    }
    FunctionInfoWrapper info =
        FunctionInfoWrapper::cast(result_->GetElement(current_parent_index_));
    info.SetFunctionCode(Handle<Code>(shared->code()),
                         Handle<Object>(shared->scope_info()));
    info.SetSharedFunctionInfo(shared);

    Handle<Object> scope_info_list(SerializeFunctionScope(scope));
    info.SetOuterScopeInfo(scope_info_list);
  }

  Handle<JSArray> GetResult() {
    return result_;
  }

 private:
  Handle<JSArray> result_;
  int len_;
  int current_parent_index_;
359
deps/v8/src/parser.cc
vendored
@ -872,11 +872,14 @@ class ParserLog BASE_EMBEDDED {
  // Records the occurrence of a function.
  virtual FunctionEntry LogFunction(int start) { return FunctionEntry(); }
  virtual void LogSymbol(int start, Vector<const char> symbol) {}
  virtual void LogError() { }
  // Return the current position in the function entry log.
  virtual int function_position() { return 0; }
  virtual int symbol_position() { return 0; }
  virtual int symbol_ids() { return 0; }
  virtual void LogError() { }
  virtual Vector<unsigned> ExtractData() {
    return Vector<unsigned>();
  };
};


@ -889,9 +892,14 @@ class AstBuildingParserFactory : public ParserFactory {

  virtual Handle<String> LookupSymbol(int symbol_id,
                                      Vector<const char> string) {
    // If there is no preparse data, we have no simpler way to identify similar
    // symbols.
    if (symbol_id < 0) return Factory::LookupSymbol(string);
    // Length of symbol cache is the number of identified symbols.
    // If we are larger than that, or negative, it's not a cached symbol.
    // This might also happen if there is no preparser symbol data, even
    // if there is some preparser data.
    if (static_cast<unsigned>(symbol_id)
        >= static_cast<unsigned>(symbol_cache_.length())) {
      return Factory::LookupSymbol(string);
    }
    return LookupCachedSymbol(symbol_id, string);
  }
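
The cast in the new LookupSymbol folds the "symbol_id < 0" and "symbol_id >= length" tests into a single unsigned comparison, because any negative id wraps to a huge unsigned value. The idiom in isolation:

#include <cassert>

static bool InRange(int index, int length) {
  // One compare instead of two: negative indices wrap to large values.
  return static_cast<unsigned>(index) < static_cast<unsigned>(length);
}

int main() {
  assert(InRange(0, 16));
  assert(!InRange(16, 16));
  assert(!InRange(-1, 16));  // would need a second test without the cast
  return 0;
}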

@ -933,34 +941,78 @@
};


class ParserRecorder: public ParserLog {
// Record only functions.
class PartialParserRecorder: public ParserLog {
 public:
  ParserRecorder();
  PartialParserRecorder();
  virtual FunctionEntry LogFunction(int start);

  virtual int function_position() { return function_store_.size(); }

  virtual void LogError() { }

  virtual void LogMessage(Scanner::Location loc,
                          const char* message,
                          Vector<const char*> args);

  virtual Vector<unsigned> ExtractData() {
    int function_size = function_store_.size();
    int total_size = ScriptDataImpl::kHeaderSize + function_size;
    Vector<unsigned> data = Vector<unsigned>::New(total_size);
    preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size;
    preamble_[ScriptDataImpl::kSymbolCountOffset] = 0;
    memcpy(data.start(), preamble_, sizeof(preamble_));
    int symbol_start = ScriptDataImpl::kHeaderSize + function_size;
    if (function_size > 0) {
      function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize,
                                             symbol_start));
    }
    return data;
  }

 protected:
  bool has_error() {
    return static_cast<bool>(preamble_[ScriptDataImpl::kHasErrorOffset]);
  }

  void WriteString(Vector<const char> str);

  Collector<unsigned> function_store_;
  unsigned preamble_[ScriptDataImpl::kHeaderSize];
#ifdef DEBUG
  int prev_start;
#endif
};


// Record both functions and symbols.
class CompleteParserRecorder: public PartialParserRecorder {
 public:
  CompleteParserRecorder();

  virtual void LogSymbol(int start, Vector<const char> literal) {
    int hash = vector_hash(literal);
    HashMap::Entry* entry = symbol_table_.Lookup(&literal, hash, true);
    int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
    if (id == 0) {
      // Put (symbol_id_ + 1) into entry and increment it.
      symbol_id_++;
      entry->value = reinterpret_cast<void*>(symbol_id_);
      id = ++symbol_id_;
      entry->value = reinterpret_cast<void*>(id);
      Vector<Vector<const char> > symbol = symbol_entries_.AddBlock(1, literal);
      entry->key = &symbol[0];
    } else {
      // Log a reuse of an earlier seen symbol.
      symbol_store_.Add(start);
      symbol_store_.Add(id - 1);
    }
    symbol_store_.Add(id - 1);
  }
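
LogSymbol interns each literal in a hash table keyed by its characters and hands out dense ids (stored offset by one so that zero can mean "not seen", then emitted as id - 1). A sketch of the same interning scheme using the standard library in place of V8's HashMap and Collector; the SymbolLog class here is a hypothetical helper, not the real recorder:

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

class SymbolLog {
 public:
  // Returns the dense id for a literal, assigning one on first sight,
  // and appends the id to the symbol stream, like symbol_store_.Add().
  int Log(const std::string& literal) {
    auto it = table_.find(literal);
    int id;
    if (it == table_.end()) {
      id = static_cast<int>(table_.size());
      table_.emplace(literal, id);
    } else {
      id = it->second;
    }
    stream_.push_back(id);
    return id;
  }
  const std::vector<int>& stream() const { return stream_; }

 private:
  std::unordered_map<std::string, int> table_;
  std::vector<int> stream_;
};

int main() {
  SymbolLog log;
  log.Log("foo");
  log.Log("bar");
  log.Log("foo");  // reuse of an earlier seen symbol
  assert((log.stream() == std::vector<int>{0, 1, 0}));
  return 0;
}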
  virtual void LogError() { }
  virtual void LogMessage(Scanner::Location loc,
                          const char* message,
                          Vector<const char*> args);
  Vector<unsigned> ExtractData() {

  virtual Vector<unsigned> ExtractData() {
    int function_size = function_store_.size();
    // Add terminator to symbols, then pad to unsigned size.
    int symbol_size = symbol_store_.size();
    int total_size = ScriptDataImpl::kHeaderSize + function_size + symbol_size;
    int padding = sizeof(unsigned) - (symbol_size % sizeof(unsigned));
    symbol_store_.AddBlock(padding, ScriptDataImpl::kNumberTerminator);
    symbol_size += padding;
    int total_size = ScriptDataImpl::kHeaderSize + function_size
        + (symbol_size / sizeof(unsigned));
    Vector<unsigned> data = Vector<unsigned>::New(total_size);
    preamble_[ScriptDataImpl::kFunctionsSizeOffset] = function_size;
    preamble_[ScriptDataImpl::kSymbolCountOffset] = symbol_id_;
@ -970,23 +1022,17 @@ class ParserRecorder: public ParserLog {
      function_store_.WriteTo(data.SubVector(ScriptDataImpl::kHeaderSize,
                                             symbol_start));
    }
    if (symbol_size > 0) {
      symbol_store_.WriteTo(data.SubVector(symbol_start, total_size));
    if (!has_error()) {
      symbol_store_.WriteTo(
          Vector<byte>::cast(data.SubVector(symbol_start, total_size)));
    }
    return data;
  }

  virtual int function_position() { return function_store_.size(); }
  virtual int symbol_position() { return symbol_store_.size(); }
  virtual int symbol_ids() { return symbol_id_; }
 private:
  Collector<unsigned> function_store_;
  Collector<unsigned> symbol_store_;
  Collector<Vector<const char> > symbol_entries_;
  HashMap symbol_table_;
  int symbol_id_;

  static int vector_hash(Vector<const char> string) {
  static int vector_hash(Vector<const char> string) {
    int hash = 0;
    for (int i = 0; i < string.length(); i++) {
      int c = string[i];
@ -1005,15 +1051,13 @@ class ParserRecorder: public ParserLog {
    return memcmp(string1->start(), string2->start(), length) == 0;
  }

  unsigned preamble_[ScriptDataImpl::kHeaderSize];
#ifdef DEBUG
  int prev_start;
#endif
  // Write a non-negative number to the symbol store.
  void WriteNumber(int number);

  bool has_error() {
    return static_cast<bool>(preamble_[ScriptDataImpl::kHasErrorOffset]);
  }
  void WriteString(Vector<const char> str);
  Collector<byte> symbol_store_;
  Collector<Vector<const char> > symbol_entries_;
  HashMap symbol_table_;
  int symbol_id_;
};


@ -1038,18 +1082,11 @@ FunctionEntry ScriptDataImpl::GetFunctionEntry(int start) {
}


int ScriptDataImpl::GetSymbolIdentifier(int start) {
  int next = symbol_index_ + 2;
  if (next <= store_.length()
      && static_cast<int>(store_[symbol_index_]) == start) {
    symbol_index_ = next;
    return store_[next - 1];
  }
  return symbol_id_++;
int ScriptDataImpl::GetSymbolIdentifier() {
  return ReadNumber(&symbol_data_);
}


bool ScriptDataImpl::SanityCheck() {
  // Check that the header data is valid and doesn't specify
  // point to positions outside the store.
@ -1080,7 +1117,7 @@ bool ScriptDataImpl::SanityCheck() {
  int symbol_count =
      static_cast<int>(store_[ScriptDataImpl::kSymbolCountOffset]);
  if (symbol_count < 0) return false;
  // Check that the total size has room both function entries.
  // Check that the total size has room for header and function entries.
  int minimum_size =
      ScriptDataImpl::kHeaderSize + functions_size;
  if (store_.length() < minimum_size) return false;
@ -1088,15 +1125,8 @@ bool ScriptDataImpl::SanityCheck() {
}


ParserRecorder::ParserRecorder()
    : function_store_(0),
      symbol_store_(0),
      symbol_entries_(0),
      symbol_table_(vector_compare),
      symbol_id_(0) {
#ifdef DEBUG
  prev_start = -1;
#endif

PartialParserRecorder::PartialParserRecorder() : function_store_(0) {
  preamble_[ScriptDataImpl::kMagicOffset] = ScriptDataImpl::kMagicNumber;
  preamble_[ScriptDataImpl::kVersionOffset] = ScriptDataImpl::kCurrentVersion;
  preamble_[ScriptDataImpl::kHasErrorOffset] = false;
@ -1104,10 +1134,22 @@ ParserRecorder::ParserRecorder()
  preamble_[ScriptDataImpl::kSymbolCountOffset] = 0;
  preamble_[ScriptDataImpl::kSizeOffset] = 0;
  ASSERT_EQ(6, ScriptDataImpl::kHeaderSize);
#ifdef DEBUG
  prev_start = -1;
#endif
}


void ParserRecorder::WriteString(Vector<const char> str) {
CompleteParserRecorder::CompleteParserRecorder()
    : PartialParserRecorder(),
      symbol_store_(0),
      symbol_entries_(0),
      symbol_table_(vector_compare),
      symbol_id_(0) {
}


void PartialParserRecorder::WriteString(Vector<const char> str) {
  function_store_.Add(str.length());
  for (int i = 0; i < str.length(); i++) {
    function_store_.Add(str[i]);
@ -1115,6 +1157,22 @@ void ParserRecorder::WriteString(Vector<const char> str) {
}


void CompleteParserRecorder::WriteNumber(int number) {
  ASSERT(number >= 0);

  int mask = (1 << 28) - 1;
  for (int i = 28; i > 0; i -= 7) {
    if (number > mask) {
      symbol_store_.Add(static_cast<byte>(number >> i) | 0x80u);
      number &= mask;
    }
    mask >>= 7;
  }
  symbol_store_.Add(static_cast<byte>(number));
}
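
WriteNumber emits a big-endian base-128 varint: 7 payload bits per byte, most significant group first, with the high bit set on every byte except the last. A standalone encoder following the same byte layout (the buffer is an ordinary std::vector here, not V8's Collector):

#include <cassert>
#include <cstdint>
#include <vector>

typedef uint8_t byte;

// Same layout as the loop above: most significant 7-bit group first,
// high bit set on every byte except the final one.
static void WriteNumber(std::vector<byte>* out, int number) {
  assert(number >= 0);
  int mask = (1 << 28) - 1;
  for (int i = 28; i > 0; i -= 7) {
    if (number > mask) {
      out->push_back(static_cast<byte>(number >> i) | 0x80u);
      number &= mask;
    }
    mask >>= 7;
  }
  out->push_back(static_cast<byte>(number));
}

int main() {
  std::vector<byte> out;
  WriteNumber(&out, 300);  // 300 = 0b1_0010_1100 -> 0x82, 0x2c
  assert(out.size() == 2 && out[0] == 0x82 && out[1] == 0x2c);
  return 0;
}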


const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
  int length = start[0];
  char* result = NewArray<char>(length + 1);
@ -1127,8 +1185,9 @@ const char* ScriptDataImpl::ReadString(unsigned* start, int* chars) {
}


void ParserRecorder::LogMessage(Scanner::Location loc, const char* message,
                                Vector<const char*> args) {
void PartialParserRecorder::LogMessage(Scanner::Location loc,
                                       const char* message,
                                       Vector<const char*> args) {
  if (has_error()) return;
  preamble_[ScriptDataImpl::kHasErrorOffset] = true;
  function_store_.Reset();
@ -1162,7 +1221,8 @@ const char* ScriptDataImpl::BuildMessage() {
Vector<const char*> ScriptDataImpl::BuildArgs() {
  int arg_count = Read(kMessageArgCountPos);
  const char** array = NewArray<const char*>(arg_count);
  // Position after the string starting at position 3.
  // Position after text found by skipping past length field and
  // length field content words.
  int pos = kMessageTextPos + 1 + Read(kMessageTextPos);
  for (int i = 0; i < arg_count; i++) {
    int count = 0;
@ -1183,7 +1243,7 @@ unsigned* ScriptDataImpl::ReadAddress(int position) {
}


FunctionEntry ParserRecorder::LogFunction(int start) {
FunctionEntry PartialParserRecorder::LogFunction(int start) {
#ifdef DEBUG
  ASSERT(start > prev_start);
  prev_start = start;
@ -1206,7 +1266,7 @@ class AstBuildingParser : public Parser {
               factory(),
               log(),
               pre_data),
        factory_(pre_data ? pre_data->symbol_count() : 16) { }
        factory_(pre_data ? pre_data->symbol_count() : 0) { }
  virtual void ReportMessageAt(Scanner::Location loc, const char* message,
                               Vector<const char*> args);
  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
@ -1223,23 +1283,46 @@ class AstBuildingParser : public Parser {
class PreParser : public Parser {
 public:
  PreParser(Handle<Script> script, bool allow_natives_syntax,
            v8::Extension* extension)
            v8::Extension* extension, ParserLog* recorder)
      : Parser(script, allow_natives_syntax, extension, PREPARSE,
               factory(), recorder(), NULL),
               factory(), recorder, NULL),
        factory_(true) { }
  virtual void ReportMessageAt(Scanner::Location loc, const char* message,
                               Vector<const char*> args);
  virtual VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
                                 FunctionLiteral* fun, bool resolve, bool* ok);
  ParserFactory* factory() { return &factory_; }
  ParserRecorder* recorder() { return &recorder_; }
  virtual PartialParserRecorder* recorder() = 0;

 private:
  ParserRecorder recorder_;
  ParserFactory factory_;
};


class CompletePreParser : public PreParser {
 public:
  CompletePreParser(Handle<Script> script, bool allow_natives_syntax,
                    v8::Extension* extension)
      : PreParser(script, allow_natives_syntax, extension, &recorder_),
        recorder_() { }
  virtual PartialParserRecorder* recorder() { return &recorder_; }
 private:
  CompleteParserRecorder recorder_;
};


class PartialPreParser : public PreParser {
 public:
  PartialPreParser(Handle<Script> script, bool allow_natives_syntax,
                   v8::Extension* extension)
      : PreParser(script, allow_natives_syntax, extension, &recorder_),
        recorder_() { }
  virtual PartialParserRecorder* recorder() { return &recorder_; }
 private:
  PartialParserRecorder recorder_;
};


Scope* AstBuildingParserFactory::NewScope(Scope* parent, Scope::Type type,
                                          bool inside_with) {
  Scope* result = new Scope(parent, type);
@ -1574,17 +1657,12 @@ void Parser::ReportMessage(const char* type, Vector<const char*> args) {


Handle<String> Parser::GetSymbol(bool* ok) {
  if (pre_data() != NULL) {
    int symbol_id =
        pre_data()->GetSymbolIdentifier(scanner_.location().beg_pos);
    if (symbol_id < 0) {
      ReportInvalidPreparseData(Factory::empty_symbol(), ok);
      return Handle<String>::null();
    }
    return factory()->LookupSymbol(symbol_id, scanner_.literal());
  }
  log()->LogSymbol(scanner_.location().beg_pos, scanner_.literal());
  return factory()->LookupSymbol(-1, scanner_.literal());
  int symbol_id = -1;
  if (pre_data() != NULL) {
    symbol_id = pre_data()->GetSymbolIdentifier();
  }
  return factory()->LookupSymbol(symbol_id, scanner_.literal());
}


@ -4111,8 +4189,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
      Counters::total_preparse_skipped.Increment(end_pos - function_block_pos);
      scanner_.SeekForward(end_pos);
      pre_data()->Skip(entry.predata_function_skip(),
                       entry.predata_symbol_skip(),
                       entry.symbol_id_skip());
                       entry.predata_symbol_skip());
      materialized_literal_count = entry.literal_count();
      expected_property_count = entry.property_count();
      only_simple_this_property_assignments = false;
@ -4126,7 +4203,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
      FunctionEntry entry = log()->LogFunction(function_block_pos);
      int predata_function_position_before = log()->function_position();
      int predata_symbol_position_before = log()->symbol_position();
      int symbol_ids_before = log()->symbol_ids();
      ParseSourceElements(&body, Token::RBRACE, CHECK_OK);
      materialized_literal_count = temp_scope.materialized_literal_count();
      expected_property_count = temp_scope.expected_property_count();
@ -4144,8 +4220,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
            log()->function_position() - predata_function_position_before);
        entry.set_predata_symbol_skip(
            log()->symbol_position() - predata_symbol_position_before);
        entry.set_symbol_id_skip(
            log()->symbol_ids() - symbol_ids_before);
      }
    }

@ -4178,58 +4252,43 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {

  Expect(Token::MOD, CHECK_OK);
  Handle<String> name = ParseIdentifier(CHECK_OK);
  Runtime::Function* function =
      Runtime::FunctionForName(scanner_.literal());
  ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
  if (function == NULL && extension_ != NULL) {
  if (is_pre_parsing_) return NULL;

  if (extension_ != NULL) {
    // The extension structures are only accessible while parsing the
    // very first time not when reparsing because of lazy compilation.
    top_scope_->ForceEagerCompilation();
  }

  // Check for built-in macros.
  if (!is_pre_parsing_) {
    if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
      // %IS_VAR(x)
      //   evaluates to x if x is a variable,
      //   leads to a parse error otherwise
      if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
        return args->at(0);
      }
      *ok = false;
    // Check here for other macros.
    // } else if (function == Runtime::FunctionForId(Runtime::kIS_VAR)) {
    //   ...
    }
  Runtime::Function* function = Runtime::FunctionForSymbol(name);

    if (!*ok) {
      // We found a macro but it failed.
  // Check for built-in IS_VAR macro.
  if (function != NULL &&
      function->intrinsic_type == Runtime::RUNTIME &&
      function->function_id == Runtime::kIS_VAR) {
    // %IS_VAR(x) evaluates to x if x is a variable,
    // leads to a parse error otherwise.  Could be implemented as an
    // inline function %_IS_VAR(x) to eliminate this special case.
    if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
      return args->at(0);
    } else {
      ReportMessage("unable_to_parse", Vector<const char*>::empty());
      return NULL;
    }
  }

  // Check that the expected number arguments are passed to runtime functions.
  if (!is_pre_parsing_) {
    if (function != NULL
        && function->nargs != -1
        && function->nargs != args->length()) {
      ReportMessage("illegal_access", Vector<const char*>::empty());
      *ok = false;
      return NULL;
    } else if (function == NULL && !name.is_null()) {
      // If this is not a runtime function implemented in C++ it might be an
      // inlined runtime function.
      int argc = CodeGenerator::InlineRuntimeCallArgumentsCount(name);
      if (argc != -1 && argc != args->length()) {
        ReportMessage("illegal_access", Vector<const char*>::empty());
        *ok = false;
        return NULL;
      }
    }
  }

  // Otherwise we have a valid runtime call.
  // Check that the expected number of arguments are being passed.
  if (function != NULL &&
      function->nargs != -1 &&
      function->nargs != args->length()) {
    ReportMessage("illegal_access", Vector<const char*>::empty());
    *ok = false;
    return NULL;
  }

  // We have a valid intrinsics call or a call to a builtin.
  return NEW(CallRuntime(name, function, args));
}

@ -5413,6 +5472,66 @@ bool ScriptDataImpl::HasError() {
}


// Preparse, but only collect data that is immediately useful,
// even if the preparser data is only used once.
ScriptDataImpl* PartialPreParse(Handle<String> source,
                                unibrow::CharacterStream* stream,
                                v8::Extension* extension) {
  Handle<Script> no_script;
  bool allow_natives_syntax =
      always_allow_natives_syntax ||
      FLAG_allow_natives_syntax ||
      Bootstrapper::IsActive();
  PartialPreParser parser(no_script, allow_natives_syntax, extension);
  if (!parser.PreParseProgram(source, stream)) return NULL;
  // Extract the accumulated data from the recorder as a single
  // contiguous vector that we are responsible for disposing.
  Vector<unsigned> store = parser.recorder()->ExtractData();
  return new ScriptDataImpl(store);
}


void ScriptDataImpl::Initialize() {
  if (store_.length() >= kHeaderSize) {
    int symbol_data_offset = kHeaderSize + store_[kFunctionsSizeOffset];
    if (store_.length() > symbol_data_offset) {
      symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
    } else {
      // Partial preparse causes no symbol information.
      symbol_data_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
    }
    symbol_data_end_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
  }
}


int ScriptDataImpl::ReadNumber(byte** source) {
  // Reads a number from symbol_data_ in base 128. The most significant
  // bit marks that there are more digits.
  // If the first byte is 0x80 (kNumberTerminator), it would normally
  // represent a leading zero. Since that is useless, and therefore won't
  // appear as the first digit of any actual value, it is used to
  // mark the end of the input stream.
  byte* data = *source;
  if (data >= symbol_data_end_) return -1;
  byte input = *data;
  if (input == kNumberTerminator) {
    // End of stream marker.
    return -1;
  }
  int result = input & 0x7f;
  data++;
  while ((input & 0x80u) != 0) {
    if (data >= symbol_data_end_) return -1;
    input = *data;
    result = (result << 7) | (input & 0x7f);
    data++;
  }
  *source = data;
  return result;
}
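
ReadNumber is the matching decoder for WriteNumber's varint format, with 0x80 doubling as an end-of-stream mark since a leading zero group never occurs in real data. A standalone version over a plain byte range (the buffer handling is simplified relative to ScriptDataImpl):

#include <cassert>
#include <cstdint>

typedef uint8_t byte;
const byte kNumberTerminator = 0x80;

// Decodes one varint and advances *source; returns -1 on the terminator
// or on truncated input, mirroring the loop above.
static int ReadNumber(const byte** source, const byte* end) {
  const byte* data = *source;
  if (data >= end) return -1;
  byte input = *data;
  if (input == kNumberTerminator) return -1;  // end-of-stream marker
  int result = input & 0x7f;
  data++;
  while ((input & 0x80) != 0) {
    if (data >= end) return -1;
    input = *data;
    result = (result << 7) | (input & 0x7f);
    data++;
  }
  *source = data;
  return result;
}

int main() {
  const byte encoded[] = { 0x82, 0x2c, kNumberTerminator };
  const byte* p = encoded;
  const byte* end = encoded + sizeof(encoded);
  assert(ReadNumber(&p, end) == 300);
  assert(ReadNumber(&p, end) == -1);  // hits the terminator
  return 0;
}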


ScriptDataImpl* PreParse(Handle<String> source,
                         unibrow::CharacterStream* stream,
                         v8::Extension* extension) {
@ -5421,7 +5540,7 @@ ScriptDataImpl* PreParse(Handle<String> source,
      always_allow_natives_syntax ||
      FLAG_allow_natives_syntax ||
      Bootstrapper::IsActive();
  PreParser parser(no_script, allow_natives_syntax, extension);
  CompletePreParser parser(no_script, allow_natives_syntax, extension);
  if (!parser.PreParseProgram(source, stream)) return NULL;
  // Extract the accumulated data from the recorder as a single
  // contiguous vector that we are responsible for disposing.
60
deps/v8/src/parser.h
vendored
@ -82,15 +82,9 @@ class FunctionEntry BASE_EMBEDDED {
    backing_[kPredataSymbolSkipOffset] = value;
  }

  int symbol_id_skip() { return backing_[kSymbolIdSkipOffset]; }
  void set_symbol_id_skip(int value) {
    backing_[kSymbolIdSkipOffset] = value;
  }


  bool is_valid() { return backing_.length() > 0; }

  static const int kSize = 7;
  static const int kSize = 6;

 private:
  Vector<unsigned> backing_;
@ -100,7 +94,6 @@ class FunctionEntry BASE_EMBEDDED {
  static const int kPropertyCountOffset = 3;
  static const int kPredataFunctionSkipOffset = 4;
  static const int kPredataSymbolSkipOffset = 5;
  static const int kSymbolIdSkipOffset = 6;
};


@ -109,18 +102,10 @@ class ScriptDataImpl : public ScriptData {
  explicit ScriptDataImpl(Vector<unsigned> store)
      : store_(store),
        function_index_(kHeaderSize),
        symbol_id_(0),
        owns_store_(true) {
    Initialize();
  }

  void Initialize() {
    if (store_.length() >= kHeaderSize) {
      // Otherwise we won't satisfy the SanityCheck.
      symbol_index_ = kHeaderSize + store_[kFunctionsSizeOffset];
    }
  }

  // Create an empty ScriptDataImpl that is guaranteed to not satisfy
  // a SanityCheck.
  ScriptDataImpl() : store_(Vector<unsigned>()), owns_store_(false) { }
@ -130,8 +115,11 @@ class ScriptDataImpl : public ScriptData {
  virtual const char* Data();
  virtual bool HasError();

  void Initialize();
  void ReadNextSymbolPosition();

  FunctionEntry GetFunctionEntry(int start);
  int GetSymbolIdentifier(int start);
  int GetSymbolIdentifier();
  void SkipFunctionEntry(int start);
  bool SanityCheck();

@ -149,19 +137,27 @@ class ScriptDataImpl : public ScriptData {
  unsigned version() { return store_[kVersionOffset]; }

  // Skip forward in the preparser data by the given number
  // of unsigned ints.
  virtual void Skip(int function_entries, int symbol_entries, int symbol_ids) {
  // of unsigned ints of function entries and the given number of bytes of
  // symbol id encoding.
  void Skip(int function_entries, int symbol_entries) {
    ASSERT(function_entries >= 0);
    ASSERT(function_entries
           <= (static_cast<int>(store_[kFunctionsSizeOffset])
               - (function_index_ - kHeaderSize)));
    function_index_ += function_entries;
    symbol_index_ += symbol_entries;
    symbol_id_ += symbol_ids;
    ASSERT(symbol_entries >= 0);
    ASSERT(symbol_entries <= symbol_data_end_ - symbol_data_);

    unsigned max_function_skip = store_[kFunctionsSizeOffset] -
        static_cast<unsigned>(function_index_ - kHeaderSize);
    function_index_ +=
        Min(static_cast<unsigned>(function_entries), max_function_skip);
    symbol_data_ +=
        Min(static_cast<unsigned>(symbol_entries),
            static_cast<unsigned>(symbol_data_end_ - symbol_data_));
  }
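
The rewritten Skip clamps both skip distances with Min, so corrupt or stale preparse data can no longer push the cursors past their buffers. The guard pattern by itself, as a small sketch:

#include <algorithm>
#include <cassert>

// Advance a cursor by at most the space that is actually left.
static int ClampedSkip(int pos, int want, int limit) {
  return pos + std::min(want, limit - pos);
}

int main() {
  assert(ClampedSkip(4, 3, 16) == 7);     // normal skip
  assert(ClampedSkip(4, 100, 16) == 16);  // clamped at the buffer end
  return 0;
}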

  static const unsigned kMagicNumber = 0xBadDead;
  static const unsigned kCurrentVersion = 2;
  static const unsigned kCurrentVersion = 3;

  static const int kMagicOffset = 0;
  static const int kVersionOffset = 1;
@ -171,26 +167,30 @@ class ScriptDataImpl : public ScriptData {
  static const int kSizeOffset = 5;
  static const int kHeaderSize = 6;

  // If encoding a message, the following positions are fixed.
  static const int kMessageStartPos = 0;
  static const int kMessageEndPos = 1;
  static const int kMessageArgCountPos = 2;
  static const int kMessageTextPos = 3;

  static const byte kNumberTerminator = 0x80u;

 private:
  Vector<unsigned> store_;
  unsigned char* symbol_data_;
  unsigned char* symbol_data_end_;
  int function_index_;
  int symbol_index_;
  int symbol_id_;
  bool owns_store_;

  unsigned Read(int position);
  unsigned* ReadAddress(int position);
  // Reads a number from the current symbols
  int ReadNumber(byte** source);

  ScriptDataImpl(const char* backing_store, int length)
      : store_(reinterpret_cast<unsigned*>(const_cast<char*>(backing_store)),
               length / sizeof(unsigned)),
        function_index_(kHeaderSize),
        symbol_id_(0),
        owns_store_(false) {
    ASSERT_EQ(0, reinterpret_cast<intptr_t>(backing_store) % sizeof(unsigned));
    Initialize();
@ -212,11 +212,17 @@ FunctionLiteral* MakeAST(bool compile_in_global_context,
                         ScriptDataImpl* pre_data,
                         bool is_json = false);


// Generic preparser generating full preparse data.
ScriptDataImpl* PreParse(Handle<String> source,
                         unibrow::CharacterStream* stream,
                         v8::Extension* extension);

// Preparser that only does preprocessing that makes sense if only used
// immediately after.
ScriptDataImpl* PartialPreParse(Handle<String> source,
                                unibrow::CharacterStream* stream,
                                v8::Extension* extension);


bool ParseRegExp(FlatStringReader* input,
                 bool multiline,
352
deps/v8/src/profile-generator.cc
vendored
352
deps/v8/src/profile-generator.cc
vendored
@ -31,6 +31,7 @@
|
||||
#include "global-handles.h"
|
||||
#include "scopeinfo.h"
|
||||
#include "top.h"
|
||||
#include "unicode.h"
|
||||
#include "zone-inl.h"
|
||||
|
||||
#include "profile-generator-inl.h"
|
||||
@@ -2132,6 +2133,357 @@ HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
  return diff;
}


class OutputStreamWriter {
 public:
  explicit OutputStreamWriter(v8::OutputStream* stream)
      : stream_(stream),
        chunk_size_(stream->GetChunkSize()),
        chunk_(chunk_size_),
        chunk_pos_(0),
        aborted_(false) {
    ASSERT(chunk_size_ > 0);
  }
  bool aborted() { return aborted_; }
  void AddCharacter(char c) {
    ASSERT(c != '\0');
    ASSERT(chunk_pos_ < chunk_size_);
    chunk_[chunk_pos_++] = c;
    MaybeWriteChunk();
  }
  void AddString(const char* s) {
    AddSubstring(s, StrLength(s));
  }
  void AddSubstring(const char* s, int n) {
    if (n <= 0) return;
    ASSERT(static_cast<size_t>(n) <= strlen(s));
    const char* s_end = s + n;
    while (s < s_end) {
      int s_chunk_size = Min(
          chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
      ASSERT(s_chunk_size > 0);
      memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
      s += s_chunk_size;
      chunk_pos_ += s_chunk_size;
      MaybeWriteChunk();
    }
  }
  void AddNumber(int n) { AddNumberImpl<int>(n, "%d"); }
  void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
  void AddNumber(uint64_t n) { AddNumberImpl<uint64_t>(n, "%llu"); }
  void Finalize() {
    if (aborted_) return;
    ASSERT(chunk_pos_ < chunk_size_);
    if (chunk_pos_ != 0) {
      WriteChunk();
    }
    stream_->EndOfStream();
  }

 private:
  template<typename T>
  void AddNumberImpl(T n, const char* format) {
    ScopedVector<char> buffer(32);
    int result = OS::SNPrintF(buffer, format, n);
    USE(result);
    ASSERT(result != -1);
    AddString(buffer.start());
  }
  void MaybeWriteChunk() {
    ASSERT(chunk_pos_ <= chunk_size_);
    if (chunk_pos_ == chunk_size_) {
      WriteChunk();
      chunk_pos_ = 0;
    }
  }
  void WriteChunk() {
    if (aborted_) return;
    if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
        v8::OutputStream::kAbort) aborted_ = true;
  }

  v8::OutputStream* stream_;
  int chunk_size_;
  ScopedVector<char> chunk_;
  int chunk_pos_;
  bool aborted_;
};
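
For embedders consuming this new API, a minimal sketch of the other side of the contract. The v8::OutputStream member names are taken from the calls above (GetChunkSize, WriteAsciiChunk, EndOfStream, kContinue/kAbort); the collector class itself is hypothetical and assumes <string> is available.

// #include <string>
class StringOutputStream : public v8::OutputStream {
 public:
  virtual int GetChunkSize() { return 1024; }  // OutputStreamWriter buffers this much.
  virtual void EndOfStream() {}                // Invoked once, from Finalize().
  virtual WriteResult WriteAsciiChunk(char* data, int size) {
    buffer_.append(data, size);
    return kContinue;  // Returning kAbort sets aborted_ in the writer.
  }
  const std::string& result() const { return buffer_; }
 private:
  std::string buffer_;  // Accumulates the serialized JSON.
};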


void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
  ASSERT(writer_ == NULL);
  writer_ = new OutputStreamWriter(stream);

  // Since the nodes graph is cyclic, we need a first pass to enumerate
  // them. Strings can be serialized in one pass.
  EnumerateNodes();
  SerializeImpl();

  delete writer_;
  writer_ = NULL;
}


void HeapSnapshotJSONSerializer::SerializeImpl() {
  writer_->AddCharacter('{');
  writer_->AddString("\"snapshot\":{");
  SerializeSnapshot();
  if (writer_->aborted()) return;
  writer_->AddString("},\n");
  writer_->AddString("\"nodes\":[");
  SerializeNodes();
  if (writer_->aborted()) return;
  writer_->AddString("],\n");
  writer_->AddString("\"strings\":[");
  SerializeStrings();
  if (writer_->aborted()) return;
  writer_->AddCharacter(']');
  writer_->AddCharacter('}');
  writer_->Finalize();
}
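
Taken together with SerializeSnapshot, SerializeNodes, and SerializeStrings below, the emitted document has this overall shape (illustrative only, payloads elided):

// {"snapshot":{"title":"...","uid":1},
//  "nodes":[<layout descriptor>,<node and edge tuples...>],
//  "strings":["<dummy>","<string 1>",...]}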


class HeapSnapshotJSONSerializerEnumerator {
 public:
  explicit HeapSnapshotJSONSerializerEnumerator(HeapSnapshotJSONSerializer* s)
      : s_(s) {
  }
  void Apply(HeapEntry** entry) {
    s_->GetNodeId(*entry);
  }
 private:
  HeapSnapshotJSONSerializer* s_;
};

void HeapSnapshotJSONSerializer::EnumerateNodes() {
  GetNodeId(snapshot_->root());  // Make sure root gets the first id.
  HeapSnapshotJSONSerializerEnumerator iter(this);
  snapshot_->IterateEntries(&iter);
}


int HeapSnapshotJSONSerializer::GetNodeId(HeapEntry* entry) {
  HashMap::Entry* cache_entry = nodes_.Lookup(entry, ObjectHash(entry), true);
  if (cache_entry->value == NULL) {
    cache_entry->value = reinterpret_cast<void*>(next_node_id_++);
  }
  return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
}


int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
  HashMap::Entry* cache_entry = strings_.Lookup(
      const_cast<char*>(s), ObjectHash(s), true);
  if (cache_entry->value == NULL) {
    cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
  }
  return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
}


void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
  writer_->AddCharacter(',');
  writer_->AddNumber(edge->type());
  writer_->AddCharacter(',');
  if (edge->type() == HeapGraphEdge::kElement) {
    writer_->AddNumber(edge->index());
  } else {
    writer_->AddNumber(GetStringId(edge->name()));
  }
  writer_->AddCharacter(',');
  writer_->AddNumber(GetNodeId(edge->to()));
}


void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
  writer_->AddCharacter('\n');
  writer_->AddCharacter(',');
  writer_->AddNumber(entry->type());
  writer_->AddCharacter(',');
  writer_->AddNumber(GetStringId(entry->name()));
  writer_->AddCharacter(',');
  writer_->AddNumber(entry->id());
  writer_->AddCharacter(',');
  writer_->AddNumber(entry->self_size());
  Vector<HeapGraphEdge> children = entry->children();
  writer_->AddCharacter(',');
  writer_->AddNumber(children.length());
  for (int i = 0; i < children.length(); ++i) {
    SerializeEdge(&children[i]);
    if (writer_->aborted()) return;
  }
}


void HeapSnapshotJSONSerializer::SerializeNodes() {
  // The first (zero) item of nodes array is a JSON-ified object
  // describing node serialization layout.
  // We use a set of macros to improve readability.
#define JSON_A(s) "["s"]"
#define JSON_O(s) "{"s"}"
#define JSON_S(s) "\\\""s"\\\""
  writer_->AddString("\"" JSON_O(
      JSON_S("fields") ":" JSON_A(
          JSON_S("type")
          "," JSON_S("name")
          "," JSON_S("id")
          "," JSON_S("self_size")
          "," JSON_S("children_count")
          "," JSON_S("children"))
      "," JSON_S("types") ":" JSON_A(
          JSON_A(
              JSON_S("internal")
              "," JSON_S("array")
              "," JSON_S("string")
              "," JSON_S("object")
              "," JSON_S("code")
              "," JSON_S("closure"))
          "," JSON_S("string")
          "," JSON_S("number")
          "," JSON_S("number")
          "," JSON_S("number")
          "," JSON_O(
              JSON_S("fields") ":" JSON_A(
                  JSON_S("type")
                  "," JSON_S("name_or_index")
                  "," JSON_S("to_node"))
              "," JSON_S("types") ":" JSON_A(
                  JSON_A(
                      JSON_S("context")
                      "," JSON_S("element")
                      "," JSON_S("property")
                      "," JSON_S("internal"))
                  "," JSON_S("string_or_number")
                  "," JSON_S("node"))))) "\"");
#undef JSON_S
#undef JSON_O
#undef JSON_A

  const int node_fields_count = 5;  // type,name,id,self_size,children_count.
  const int edge_fields_count = 3;  // type,name|index,to_node.
  List<HashMap::Entry*> sorted_nodes;
  SortHashMap(&nodes_, &sorted_nodes);
  // Rewrite node ids, so they refer to actual array positions.
  if (sorted_nodes.length() > 1) {
    // Nodes start from array index 1.
    int prev_value = 1;
    sorted_nodes[0]->value = reinterpret_cast<void*>(prev_value);
    for (int i = 1; i < sorted_nodes.length(); ++i) {
      HeapEntry* prev_heap_entry =
          reinterpret_cast<HeapEntry*>(sorted_nodes[i-1]->key);
      prev_value += node_fields_count +
          prev_heap_entry->children().length() * edge_fields_count;
      sorted_nodes[i]->value = reinterpret_cast<void*>(prev_value);
    }
  }
  for (int i = 0; i < sorted_nodes.length(); ++i) {
    SerializeNode(reinterpret_cast<HeapEntry*>(sorted_nodes[i]->key));
    if (writer_->aborted()) return;
  }
}
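
To make the id-rewriting arithmetic concrete, a small worked example (the node shapes are hypothetical):

// Suppose the sorted nodes have 2, 0, and 1 children, respectively.
// Node 0 is placed at array index 1 (index 0 holds the layout descriptor).
// Node 1 is placed at 1 + 5 + 2 * 3 = 12 (5 node fields, 3 fields per edge).
// Node 2 is placed at 12 + 5 + 0 * 3 = 17.
// An edge's to_node field can then index directly into the flat nodes array.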


void HeapSnapshotJSONSerializer::SerializeSnapshot() {
  writer_->AddString("\"title\":\"");
  writer_->AddString(snapshot_->title());
  writer_->AddString("\"");
  writer_->AddString(",\"uid\":");
  writer_->AddNumber(snapshot_->uid());
}


static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
  static const char hex_chars[] = "0123456789ABCDEF";
  w->AddString("\\u");
  w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
  w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
  w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
  w->AddCharacter(hex_chars[u & 0xf]);
}
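
As a quick illustration of the escaping helper (the value is chosen arbitrarily):

// WriteUChar(writer_, 0x263A) emits the six ASCII characters: \u263A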

void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
  writer_->AddCharacter('\n');
  writer_->AddCharacter('\"');
  for ( ; *s != '\0'; ++s) {
    switch (*s) {
      case '\b':
        writer_->AddString("\\b");
        continue;
      case '\f':
        writer_->AddString("\\f");
        continue;
      case '\n':
        writer_->AddString("\\n");
        continue;
      case '\r':
        writer_->AddString("\\r");
        continue;
      case '\t':
        writer_->AddString("\\t");
        continue;
      case '\"':
      case '\\':
        writer_->AddCharacter('\\');
        writer_->AddCharacter(*s);
        continue;
      default:
        if (*s > 31 && *s < 128) {
          writer_->AddCharacter(*s);
        } else if (*s <= 31) {
          // Special character with no dedicated literal.
          WriteUChar(writer_, *s);
        } else {
          // Convert UTF-8 into \u UTF-16 literal.
          unsigned length = 1, cursor = 0;
          for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
          unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
          if (c != unibrow::Utf8::kBadChar) {
            WriteUChar(writer_, c);
            ASSERT(cursor != 0);
            s += cursor - 1;
          } else {
            writer_->AddCharacter('?');
          }
        }
    }
  }
  writer_->AddCharacter('\"');
}


void HeapSnapshotJSONSerializer::SerializeStrings() {
  List<HashMap::Entry*> sorted_strings;
  SortHashMap(&strings_, &sorted_strings);
  writer_->AddString("\"<dummy>\"");
  for (int i = 0; i < sorted_strings.length(); ++i) {
    writer_->AddCharacter(',');
    SerializeString(
        reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
    if (writer_->aborted()) return;
  }
}


template<typename T>
inline static int SortUsingEntryValue(const T* x, const T* y) {
  uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
  uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
  if (x_uint > y_uint) {
    return 1;
  } else if (x_uint == y_uint) {
    return 0;
  } else {
    return -1;
  }
}


void HeapSnapshotJSONSerializer::SortHashMap(
    HashMap* map, List<HashMap::Entry*>* sorted_entries) {
  for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
    sorted_entries->Add(p);
  sorted_entries->Sort(SortUsingEntryValue);
}

} }  // namespace v8::internal

#endif  // ENABLE_LOGGING_AND_PROFILING

48
deps/v8/src/profile-generator.h
vendored
@@ -976,6 +976,54 @@ class HeapSnapshotGenerator {
  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
};

class OutputStreamWriter;

class HeapSnapshotJSONSerializer {
 public:
  explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
      : snapshot_(snapshot),
        nodes_(ObjectsMatch),
        strings_(ObjectsMatch),
        next_node_id_(1),
        next_string_id_(1),
        writer_(NULL) {
  }
  void Serialize(v8::OutputStream* stream);

 private:
  INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
    return key1 == key2;
  }

  INLINE(static uint32_t ObjectHash(const void* key)) {
    return static_cast<int32_t>(reinterpret_cast<intptr_t>(key));
  }

  void EnumerateNodes();
  int GetNodeId(HeapEntry* entry);
  int GetStringId(const char* s);
  void SerializeEdge(HeapGraphEdge* edge);
  void SerializeImpl();
  void SerializeNode(HeapEntry* entry);
  void SerializeNodes();
  void SerializeSnapshot();
  void SerializeString(const unsigned char* s);
  void SerializeStrings();
  void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);

  HeapSnapshot* snapshot_;
  HashMap nodes_;
  HashMap strings_;
  int next_node_id_;
  int next_string_id_;
  OutputStreamWriter* writer_;

  friend class HeapSnapshotJSONSerializerEnumerator;
  friend class HeapSnapshotJSONSerializerIterator;

  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
};

} }  // namespace v8::internal

#endif  // ENABLE_LOGGING_AND_PROFILING

512
deps/v8/src/runtime.cc
vendored
@@ -47,6 +47,7 @@
#include "smart-pointer.h"
#include "stub-cache.h"
#include "v8threads.h"
#include "string-search.h"

namespace v8 {
namespace internal {
@@ -2571,418 +2572,6 @@ static Object* Runtime_StringReplaceRegExpWithString(Arguments args) {
}


// Cap on the maximal shift in the Boyer-Moore implementation. By setting a
// limit, we can fix the size of tables.
static const int kBMMaxShift = 0xff;
// Reduce alphabet to this size.
static const int kBMAlphabetSize = 0x100;
// For patterns below this length, the skip length of Boyer-Moore is too short
// to compensate for the algorithmic overhead compared to simple brute force.
static const int kBMMinPatternLength = 5;

// Holds the two buffers used by Boyer-Moore string search's Good Suffix
// shift. Only allows the last kBMMaxShift characters of the needle
// to be indexed.
class BMGoodSuffixBuffers {
 public:
  BMGoodSuffixBuffers() {}
  inline void init(int needle_length) {
    ASSERT(needle_length > 1);
    int start = needle_length < kBMMaxShift ? 0 : needle_length - kBMMaxShift;
    int len = needle_length - start;
    biased_suffixes_ = suffixes_ - start;
    biased_good_suffix_shift_ = good_suffix_shift_ - start;
    for (int i = 0; i <= len; i++) {
      good_suffix_shift_[i] = len;
    }
  }
  inline int& suffix(int index) {
    ASSERT(biased_suffixes_ + index >= suffixes_);
    return biased_suffixes_[index];
  }
  inline int& shift(int index) {
    ASSERT(biased_good_suffix_shift_ + index >= good_suffix_shift_);
    return biased_good_suffix_shift_[index];
  }
 private:
  int suffixes_[kBMMaxShift + 1];
  int good_suffix_shift_[kBMMaxShift + 1];
  int* biased_suffixes_;
  int* biased_good_suffix_shift_;
  DISALLOW_COPY_AND_ASSIGN(BMGoodSuffixBuffers);
};

// Buffers reused by BoyerMoore.
static int bad_char_occurrence[kBMAlphabetSize];
static BMGoodSuffixBuffers bmgs_buffers;

// State of the string match tables.
// SIMPLE: No usable content in the buffers.
// BOYER_MOORE_HORSPOOL: The bad_char_occurrence table has been populated.
// BOYER_MOORE: The bmgs_buffers tables have also been populated.
// Whenever starting with a new needle, one should call InitializeStringSearch
// to determine which search strategy to use, and in the case of a long-needle
// strategy, the call also initializes the algorithm to SIMPLE.
enum StringSearchAlgorithm { SIMPLE_SEARCH, BOYER_MOORE_HORSPOOL, BOYER_MOORE };
static StringSearchAlgorithm algorithm;


// Compute the bad-char table for Boyer-Moore in the static buffer.
template <typename pchar>
static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern) {
  // Only preprocess at most kBMMaxShift last characters of pattern.
  int start = pattern.length() < kBMMaxShift ? 0
                                             : pattern.length() - kBMMaxShift;
  // Run forwards to populate bad_char_table, so that *last* instance
  // of character equivalence class is the one registered.
  // Notice: Doesn't include the last character.
  int table_size = (sizeof(pchar) == 1) ? String::kMaxAsciiCharCode + 1
                                        : kBMAlphabetSize;
  if (start == 0) {  // All patterns less than kBMMaxShift in length.
    memset(bad_char_occurrence, -1, table_size * sizeof(*bad_char_occurrence));
  } else {
    for (int i = 0; i < table_size; i++) {
      bad_char_occurrence[i] = start - 1;
    }
  }
  for (int i = start; i < pattern.length() - 1; i++) {
    pchar c = pattern[i];
    int bucket = (sizeof(pchar) == 1) ? c : c % kBMAlphabetSize;
    bad_char_occurrence[bucket] = i;
  }
}


template <typename pchar>
static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern) {
  int m = pattern.length();
  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
  int len = m - start;
  // Compute Good Suffix tables.
  bmgs_buffers.init(m);

  bmgs_buffers.shift(m-1) = 1;
  bmgs_buffers.suffix(m) = m + 1;
  pchar last_char = pattern[m - 1];
  int suffix = m + 1;
  for (int i = m; i > start;) {
    for (pchar c = pattern[i - 1]; suffix <= m && c != pattern[suffix - 1];) {
      if (bmgs_buffers.shift(suffix) == len) {
        bmgs_buffers.shift(suffix) = suffix - i;
      }
      suffix = bmgs_buffers.suffix(suffix);
    }
    i--;
    suffix--;
    bmgs_buffers.suffix(i) = suffix;
    if (suffix == m) {
      // No suffix to extend, so we check against last_char only.
      while (i > start && pattern[i - 1] != last_char) {
        if (bmgs_buffers.shift(m) == len) {
          bmgs_buffers.shift(m) = m - i;
        }
        i--;
        bmgs_buffers.suffix(i) = m;
      }
      if (i > start) {
        i--;
        suffix--;
        bmgs_buffers.suffix(i) = suffix;
      }
    }
  }
  if (suffix < m) {
    for (int i = start; i <= m; i++) {
      if (bmgs_buffers.shift(i) == len) {
        bmgs_buffers.shift(i) = suffix - start;
      }
      if (i == suffix) {
        suffix = bmgs_buffers.suffix(suffix);
      }
    }
  }
}


template <typename schar, typename pchar>
static inline int CharOccurrence(int char_code) {
  if (sizeof(schar) == 1) {
    return bad_char_occurrence[char_code];
  }
  if (sizeof(pchar) == 1) {
    if (char_code > String::kMaxAsciiCharCode) {
      return -1;
    }
    return bad_char_occurrence[char_code];
  }
  return bad_char_occurrence[char_code % kBMAlphabetSize];
}


// Restricted simplified Boyer-Moore string matching.
// Uses only the bad-shift table of Boyer-Moore and only uses it
// for the character compared to the last character of the needle.
template <typename schar, typename pchar>
static int BoyerMooreHorspool(Vector<const schar> subject,
                              Vector<const pchar> pattern,
                              int start_index,
                              bool* complete) {
  ASSERT(algorithm <= BOYER_MOORE_HORSPOOL);
  int n = subject.length();
  int m = pattern.length();

  int badness = -m;

  // How bad we are doing without a good-suffix table.
  int idx;  // No matches found prior to this index.
  pchar last_char = pattern[m - 1];
  int last_char_shift = m - 1 - CharOccurrence<schar, pchar>(last_char);
  // Perform search
  for (idx = start_index; idx <= n - m;) {
    int j = m - 1;
    int c;
    while (last_char != (c = subject[idx + j])) {
      int bc_occ = CharOccurrence<schar, pchar>(c);
      int shift = j - bc_occ;
      idx += shift;
      badness += 1 - shift;  // at most zero, so badness cannot increase.
      if (idx > n - m) {
        *complete = true;
        return -1;
      }
    }
    j--;
    while (j >= 0 && pattern[j] == (subject[idx + j])) j--;
    if (j < 0) {
      *complete = true;
      return idx;
    } else {
      idx += last_char_shift;
      // Badness increases by the number of characters we have
      // checked, and decreases by the number of characters we
      // can skip by shifting. It's a measure of how we are doing
      // compared to reading each character exactly once.
      badness += (m - j) - last_char_shift;
      if (badness > 0) {
        *complete = false;
        return idx;
      }
    }
  }
  *complete = true;
  return -1;
}


template <typename schar, typename pchar>
static int BoyerMooreIndexOf(Vector<const schar> subject,
                             Vector<const pchar> pattern,
                             int idx) {
  ASSERT(algorithm <= BOYER_MOORE);
  int n = subject.length();
  int m = pattern.length();
  // Only preprocess at most kBMMaxShift last characters of pattern.
  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;

  pchar last_char = pattern[m - 1];
  // Continue search from idx.
  while (idx <= n - m) {
    int j = m - 1;
    schar c;
    while (last_char != (c = subject[idx + j])) {
      int shift = j - CharOccurrence<schar, pchar>(c);
      idx += shift;
      if (idx > n - m) {
        return -1;
      }
    }
    while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
    if (j < 0) {
      return idx;
    } else if (j < start) {
      // We have matched more than our tables allow us to be smart about.
      // Fall back on BMH shift.
      idx += m - 1 - CharOccurrence<schar, pchar>(last_char);
    } else {
      int gs_shift = bmgs_buffers.shift(j + 1);  // Good suffix shift.
      int bc_occ = CharOccurrence<schar, pchar>(c);
      int shift = j - bc_occ;                    // Bad-char shift.
      if (gs_shift > shift) {
        shift = gs_shift;
      }
      idx += shift;
    }
  }

  return -1;
}


// Trivial string search for shorter strings.
// On return, if "complete" is set to true, the return value is the
// final result of searching for the pattern in the subject.
// If "complete" is set to false, the return value is the index where
// further checking should start, i.e., it's guaranteed that the pattern
// does not occur at a position prior to the returned index.
template <typename pchar, typename schar>
static int SimpleIndexOf(Vector<const schar> subject,
                         Vector<const pchar> pattern,
                         int idx,
                         bool* complete) {
  ASSERT(pattern.length() > 1);
  // Badness is a count of how much work we have done. When we have
  // done enough work we decide it's probably worth switching to a better
  // algorithm.
  int badness = -10 - (pattern.length() << 2);

  // We know our pattern is at least 2 characters, we cache the first so
  // the common case of the first character not matching is faster.
  pchar pattern_first_char = pattern[0];
  for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
    badness++;
    if (badness > 0) {
      *complete = false;
      return i;
    }
    if (sizeof(schar) == 1 && sizeof(pchar) == 1) {
      const schar* pos = reinterpret_cast<const schar*>(
          memchr(subject.start() + i,
                 pattern_first_char,
                 n - i + 1));
      if (pos == NULL) {
        *complete = true;
        return -1;
      }
      i = static_cast<int>(pos - subject.start());
    } else {
      if (subject[i] != pattern_first_char) continue;
    }
    int j = 1;
    do {
      if (pattern[j] != subject[i+j]) {
        break;
      }
      j++;
    } while (j < pattern.length());
    if (j == pattern.length()) {
      *complete = true;
      return i;
    }
    badness += j;
  }
  *complete = true;
  return -1;
}

// Simple indexOf that never bails out. For short patterns only.
template <typename pchar, typename schar>
static int SimpleIndexOf(Vector<const schar> subject,
                         Vector<const pchar> pattern,
                         int idx) {
  pchar pattern_first_char = pattern[0];
  for (int i = idx, n = subject.length() - pattern.length(); i <= n; i++) {
    if (sizeof(schar) == 1 && sizeof(pchar) == 1) {
      const schar* pos = reinterpret_cast<const schar*>(
          memchr(subject.start() + i,
                 pattern_first_char,
                 n - i + 1));
      if (pos == NULL) return -1;
      i = static_cast<int>(pos - subject.start());
    } else {
      if (subject[i] != pattern_first_char) continue;
    }
    int j = 1;
    while (j < pattern.length()) {
      if (pattern[j] != subject[i+j]) {
        break;
      }
      j++;
    }
    if (j == pattern.length()) {
      return i;
    }
  }
  return -1;
}


// Strategy for searching for a string in another string.
enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };


template <typename pchar>
static inline StringSearchStrategy InitializeStringSearch(
    Vector<const pchar> pat, bool ascii_subject) {
  // We have an ASCII haystack and a non-ASCII needle. Check if there
  // really is a non-ASCII character in the needle and bail out if there
  // is.
  if (ascii_subject && sizeof(pchar) > 1) {
    for (int i = 0; i < pat.length(); i++) {
      uc16 c = pat[i];
      if (c > String::kMaxAsciiCharCode) {
        return SEARCH_FAIL;
      }
    }
  }
  if (pat.length() < kBMMinPatternLength) {
    return SEARCH_SHORT;
  }
  algorithm = SIMPLE_SEARCH;
  return SEARCH_LONG;
}


// Dispatch long needle searches to different algorithms.
template <typename schar, typename pchar>
static int ComplexIndexOf(Vector<const schar> sub,
                          Vector<const pchar> pat,
                          int start_index) {
  ASSERT(pat.length() >= kBMMinPatternLength);
  // Try algorithms in order of increasing setup cost and expected performance.
  bool complete;
  int idx = start_index;
  switch (algorithm) {
    case SIMPLE_SEARCH:
      idx = SimpleIndexOf(sub, pat, idx, &complete);
      if (complete) return idx;
      BoyerMoorePopulateBadCharTable(pat);
      algorithm = BOYER_MOORE_HORSPOOL;
      // FALLTHROUGH.
    case BOYER_MOORE_HORSPOOL:
      idx = BoyerMooreHorspool(sub, pat, idx, &complete);
      if (complete) return idx;
      // Build the Good Suffix table and continue searching.
      BoyerMoorePopulateGoodSuffixTable(pat);
      algorithm = BOYER_MOORE;
      // FALLTHROUGH.
    case BOYER_MOORE:
      return BoyerMooreIndexOf(sub, pat, idx);
  }
  UNREACHABLE();
  return -1;
}


// Dispatch to different search strategies for a single search.
// If searching multiple times on the same needle, the search
// strategy should only be computed once and then dispatch to different
// loops.
template <typename schar, typename pchar>
static int StringSearch(Vector<const schar> sub,
                        Vector<const pchar> pat,
                        int start_index) {
  bool ascii_subject = (sizeof(schar) == 1);
  StringSearchStrategy strategy = InitializeStringSearch(pat, ascii_subject);
  switch (strategy) {
    case SEARCH_FAIL: return -1;
    case SEARCH_SHORT: return SimpleIndexOf(sub, pat, start_index);
    case SEARCH_LONG: return ComplexIndexOf(sub, pat, start_index);
  }
  UNREACHABLE();
  return -1;
}


// Perform string match of pattern on subject, starting at start index.
// Caller must ensure that 0 <= start_index <= sub->length(),
// and should check that pat->length() + start_index <= sub->length()
@@ -3042,32 +2631,33 @@ static Object* Runtime_StringIndexOf(Arguments args) {


template <typename schar, typename pchar>
static int StringMatchBackwards(Vector<const schar> sub,
                                Vector<const pchar> pat,
static int StringMatchBackwards(Vector<const schar> subject,
                                Vector<const pchar> pattern,
                                int idx) {
  ASSERT(pat.length() >= 1);
  ASSERT(idx + pat.length() <= sub.length());
  int pattern_length = pattern.length();
  ASSERT(pattern_length >= 1);
  ASSERT(idx + pattern_length <= subject.length());

  if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
    for (int i = 0; i < pat.length(); i++) {
      uc16 c = pat[i];
    for (int i = 0; i < pattern_length; i++) {
      uc16 c = pattern[i];
      if (c > String::kMaxAsciiCharCode) {
        return -1;
      }
    }
  }

  pchar pattern_first_char = pat[0];
  pchar pattern_first_char = pattern[0];
  for (int i = idx; i >= 0; i--) {
    if (sub[i] != pattern_first_char) continue;
    if (subject[i] != pattern_first_char) continue;
    int j = 1;
    while (j < pat.length()) {
      if (pat[j] != sub[i+j]) {
    while (j < pattern_length) {
      if (pattern[j] != subject[i+j]) {
        break;
      }
      j++;
    }
    if (j == pat.length()) {
    if (j == pattern_length) {
      return i;
    }
  }
@@ -4929,7 +4519,6 @@ static Object* Runtime_StringParseInt(Arguments args) {
  RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
  double value = StringToInt(s, radix);
  return Heap::NumberFromDouble(value);
  return Heap::nan_value();
}


@@ -5167,6 +4756,12 @@ static Object* Runtime_StringTrim(Arguments args) {
}


// Define storage for buffers declared in header file.
// TODO(lrn): Remove these when rewriting search code.
int BMBuffers::bad_char_occurrence[kBMAlphabetSize];
BMGoodSuffixBuffers BMBuffers::bmgs_buffers;


template <typename schar, typename pchar>
void FindStringIndices(Vector<const schar> subject,
                       Vector<const pchar> pattern,
@@ -7979,15 +7574,17 @@ static Object* Runtime_MoveArrayContents(Arguments args) {
}


// How many elements does this array have?
// How many elements does this object/array have?
static Object* Runtime_EstimateNumberOfElements(Arguments args) {
  ASSERT(args.length() == 1);
  CONVERT_CHECKED(JSArray, array, args[0]);
  HeapObject* elements = array->elements();
  CONVERT_CHECKED(JSObject, object, args[0]);
  HeapObject* elements = object->elements();
  if (elements->IsDictionary()) {
    return Smi::FromInt(NumberDictionary::cast(elements)->NumberOfElements());
  } else if (object->IsJSArray()) {
    return JSArray::cast(object)->length();
  } else {
    return array->length();
    return Smi::FromInt(FixedArray::cast(elements)->length());
  }
}
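
The effect of the widened check, with hypothetical values:

// A JSArray with fast elements, e.g. [1, 2, 3], reports its length (3).
// An object whose elements are in dictionary mode, e.g. a[0] = 1; a[1000000] = 2;,
// reports the dictionary's NumberOfElements() (here 2) instead of 1000001.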


@@ -8019,8 +7616,10 @@ static Object* Runtime_SwapElements(Arguments args) {


// Returns an array that tells you where in the [0, length) interval an array
// might have elements. Can either return keys or intervals. Keys can have
// gaps in (undefined). Intervals can also span over some undefined keys.
// might have elements. Can either return keys (positive integers) or
// intervals (pair of a negative integer (-start-1) followed by a
// positive (length)) or undefined values.
// Intervals can span over some keys that are not in the object.
static Object* Runtime_GetArrayKeys(Arguments args) {
  ASSERT(args.length() == 2);
  HandleScope scope;
@@ -10464,6 +10063,7 @@ static Object* Runtime_ListNatives(Arguments args) {
  inline_runtime_functions = false;
  RUNTIME_FUNCTION_LIST(ADD_ENTRY)
  inline_runtime_functions = true;
  INLINE_FUNCTION_LIST(ADD_ENTRY)
  INLINE_RUNTIME_FUNCTION_LIST(ADD_ENTRY)
#undef ADD_ENTRY
  return *result;
@@ -10490,35 +10090,55 @@ static Object* Runtime_IS_VAR(Arguments args) {
// ----------------------------------------------------------------------------
// Implementation of Runtime

#define F(name, nargs, ressize)                           \
  { #name, FUNCTION_ADDR(Runtime_##name), nargs,          \
    static_cast<int>(Runtime::k##name), ressize },
#define F(name, number_of_args, result_size)              \
  { Runtime::k##name, Runtime::RUNTIME, #name,            \
    FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },

static Runtime::Function Runtime_functions[] = {

#define I(name, number_of_args, result_size)              \
  { Runtime::kInline##name, Runtime::INLINE,              \
    "_" #name, NULL, number_of_args, result_size },

Runtime::Function kIntrinsicFunctions[] = {
  RUNTIME_FUNCTION_LIST(F)
  { NULL, NULL, 0, -1, 0 }
  INLINE_FUNCTION_LIST(I)
  INLINE_RUNTIME_FUNCTION_LIST(I)
};

#undef F


Runtime::Function* Runtime::FunctionForId(FunctionId fid) {
  ASSERT(0 <= fid && fid < kNofFunctions);
  return &Runtime_functions[fid];
Object* Runtime::InitializeIntrinsicFunctionNames(Object* dictionary) {
  ASSERT(dictionary != NULL);
  ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
  for (int i = 0; i < kNumFunctions; ++i) {
    Object* name_symbol = Heap::LookupAsciiSymbol(kIntrinsicFunctions[i].name);
    if (name_symbol->IsFailure()) return name_symbol;
    StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
    dictionary = string_dictionary->Add(String::cast(name_symbol),
                                        Smi::FromInt(i),
                                        PropertyDetails(NONE, NORMAL));
    // Non-recoverable failure. Calling code must restart heap initialization.
    if (dictionary->IsFailure()) return dictionary;
  }
  return dictionary;
}


Runtime::Function* Runtime::FunctionForName(Vector<const char> name) {
  for (Function* f = Runtime_functions; f->name != NULL; f++) {
    if (strncmp(f->name, name.start(), name.length()) == 0
        && f->name[name.length()] == 0) {
      return f;
    }
Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
  int entry = Heap::intrinsic_function_names()->FindEntry(*name);
  if (entry != kNotFound) {
    Object* smi_index = Heap::intrinsic_function_names()->ValueAt(entry);
    int function_index = Smi::cast(smi_index)->value();
    return &(kIntrinsicFunctions[function_index]);
  }
  return NULL;
}


Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
  return &(kIntrinsicFunctions[static_cast<int>(id)]);
}


void Runtime::PerformGC(Object* result) {
  Failure* failure = Failure::cast(result);
  if (failure->IsRetryAfterGC()) {
98
deps/v8/src/runtime.h
vendored
@@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -389,6 +389,59 @@ namespace internal {
  RUNTIME_FUNCTION_LIST_PROFILER_SUPPORT(F)

// ----------------------------------------------------------------------------
// INLINE_FUNCTION_LIST defines all inlined functions accessed
// with a native call of the form %_name from within JS code.
// Entries have the form F(name, number of arguments, number of return values).
#define INLINE_FUNCTION_LIST(F) \
  F(IsSmi, 1, 1)                                                             \
  F(IsNonNegativeSmi, 1, 1)                                                  \
  F(IsArray, 1, 1)                                                           \
  F(IsRegExp, 1, 1)                                                          \
  F(CallFunction, -1 /* receiver + n args + function */, 1)                  \
  F(ArgumentsLength, 0, 1)                                                   \
  F(Arguments, 1, 1)                                                         \
  F(ValueOf, 1, 1)                                                           \
  F(SetValueOf, 2, 1)                                                        \
  F(StringCharFromCode, 1, 1)                                                \
  F(StringCharAt, 2, 1)                                                      \
  F(ObjectEquals, 2, 1)                                                      \
  F(RandomHeapNumber, 0, 1)                                                  \
  F(IsObject, 1, 1)                                                          \
  F(IsFunction, 1, 1)                                                        \
  F(IsUndetectableObject, 1, 1)                                              \
  F(IsSpecObject, 1, 1)                                                      \
  F(IsStringWrapperSafeForDefaultValueOf, 1, 1)                              \
  F(MathPow, 2, 1)                                                           \
  F(MathSin, 1, 1)                                                           \
  F(MathCos, 1, 1)                                                           \
  F(MathSqrt, 1, 1)                                                          \
  F(IsRegExpEquivalent, 2, 1)                                                \
  F(HasCachedArrayIndex, 1, 1)                                               \
  F(GetCachedArrayIndex, 1, 1)


// ----------------------------------------------------------------------------
// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
// with a native call of the form %_name from within JS code that also have
// a corresponding runtime function that is called for slow cases.
// Entries have the form F(name, number of arguments, number of return values).
#define INLINE_RUNTIME_FUNCTION_LIST(F) \
  F(IsConstructCall, 0, 1)                                                   \
  F(ClassOf, 1, 1)                                                           \
  F(StringCharCodeAt, 2, 1)                                                  \
  F(Log, 3, 1)                                                               \
  F(StringAdd, 2, 1)                                                         \
  F(SubString, 3, 1)                                                         \
  F(StringCompare, 2, 1)                                                     \
  F(RegExpExec, 4, 1)                                                        \
  F(RegExpConstructResult, 3, 1)                                             \
  F(RegExpCloneResult, 1, 1)                                                 \
  F(GetFromCache, 2, 1)                                                      \
  F(NumberToString, 1, 1)                                                    \
  F(SwapElements, 3, 1)
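
For orientation, these entries name intrinsics callable from JavaScript when natives syntax is enabled; a hedged illustration (the d8 flag is the commonly documented one, not part of this diff):

// Run with: d8 --allow-natives-syntax
//   %_IsSmi(1)           // inlined fast path, from INLINE_FUNCTION_LIST
//   %SubString(s, 0, 3)  // runtime (slow) path of an INLINE_RUNTIME_FUNCTION_LIST entry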


//---------------------------------------------------------------------------
// Runtime provides access to all C++ runtime functions.

class Runtime : public AllStatic {
@@ -396,33 +449,52 @@ class Runtime : public AllStatic {
  enum FunctionId {
#define F(name, nargs, ressize) k##name,
    RUNTIME_FUNCTION_LIST(F)
    kNofFunctions
#undef F
#define F(name, nargs, ressize) kInline##name,
    INLINE_FUNCTION_LIST(F)
    INLINE_RUNTIME_FUNCTION_LIST(F)
#undef F
    kNumFunctions,
    kFirstInlineFunction = kInlineIsSmi
  };

  // Runtime function descriptor.
  enum IntrinsicType {
    RUNTIME,
    INLINE
  };

  // Intrinsic function descriptor.
  struct Function {
    FunctionId function_id;
    IntrinsicType intrinsic_type;
    // The JS name of the function.
    const char* name;

    // The C++ (native) entry point.
    // The C++ (native) entry point. NULL if the function is inlined.
    byte* entry;

    // The number of arguments expected; nargs < 0 if variable no. of
    // arguments.
    // The number of arguments expected. nargs is -1 if the function takes
    // a variable number of arguments.
    int nargs;
    int stub_id;
    // Size of result, if complex (larger than a single pointer),
    // otherwise zero.
    // Size of result. Most functions return a single pointer, size 1.
    int result_size;
  };

  // Get the runtime function with the given function id.
  static Function* FunctionForId(FunctionId fid);
  static const int kNotFound = -1;

  // Get the runtime function with the given name.
  static Function* FunctionForName(Vector<const char> name);
  // Add symbols for all the intrinsic function names to a StringDictionary.
  // Returns failure if an allocation fails. In this case, it must be
  // retried with a new, empty StringDictionary, not with the same one.
  // Alternatively, heap initialization can be completely restarted.
  static Object* InitializeIntrinsicFunctionNames(Object* dictionary);

  // Get the intrinsic function with the given name, which must be a symbol.
  static Function* FunctionForSymbol(Handle<String> name);

  // Get the intrinsic function with the given FunctionId.
  static Function* FunctionForId(FunctionId id);

  // General-purpose helper functions for runtime system.
  static int StringMatch(Handle<String> sub, Handle<String> pat, int index);

  static bool IsUpperCaseChar(uint16_t ch);
463
deps/v8/src/string-search.h
vendored
Normal file
@@ -0,0 +1,463 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_STRING_SEARCH_H_
#define V8_STRING_SEARCH_H_

namespace v8 {
namespace internal {

// Cap on the maximal shift in the Boyer-Moore implementation. By setting a
// limit, we can fix the size of tables. For a needle longer than this limit,
// search will not be optimal, since we only build tables for a smaller suffix
// of the string, which is a safe approximation.
static const int kBMMaxShift = 250;
// Reduce alphabet to this size.
// One of the tables used by Boyer-Moore and Boyer-Moore-Horspool has size
// proportional to the input alphabet. We reduce the alphabet size by
// equating input characters modulo a smaller alphabet size. This gives
// a potentially less efficient searching, but is a safe approximation.
// For needles using only characters in the same Unicode 256-code point page,
// there is no search speed degradation.
static const int kBMAlphabetSize = 256;
// For patterns below this length, the skip length of Boyer-Moore is too short
// to compensate for the algorithmic overhead compared to simple brute force.
static const int kBMMinPatternLength = 7;

// Holds the two buffers used by Boyer-Moore string search's Good Suffix
// shift. Only allows the last kBMMaxShift characters of the needle
// to be indexed.
class BMGoodSuffixBuffers {
 public:
  BMGoodSuffixBuffers() {}
  inline void Initialize(int needle_length) {
    ASSERT(needle_length > 1);
    int start = needle_length < kBMMaxShift ? 0 : needle_length - kBMMaxShift;
    int len = needle_length - start;
    biased_suffixes_ = suffixes_ - start;
    biased_good_suffix_shift_ = good_suffix_shift_ - start;
    for (int i = 0; i <= len; i++) {
      good_suffix_shift_[i] = len;
    }
  }
  inline int& suffix(int index) {
    ASSERT(biased_suffixes_ + index >= suffixes_);
    return biased_suffixes_[index];
  }
  inline int& shift(int index) {
    ASSERT(biased_good_suffix_shift_ + index >= good_suffix_shift_);
    return biased_good_suffix_shift_[index];
  }
 private:
  int suffixes_[kBMMaxShift + 1];
  int good_suffix_shift_[kBMMaxShift + 1];
  int* biased_suffixes_;
  int* biased_good_suffix_shift_;
  DISALLOW_COPY_AND_ASSIGN(BMGoodSuffixBuffers);
};

// Buffers reused by BoyerMoore.
struct BMBuffers {
 public:
  static int bad_char_occurrence[kBMAlphabetSize];
  static BMGoodSuffixBuffers bmgs_buffers;
};

// State of the string match tables.
// SIMPLE: No usable content in the buffers.
// BOYER_MOORE_HORSPOOL: The bad_char_occurrence table has been populated.
// BOYER_MOORE: The bmgs_buffers tables have also been populated.
// Whenever starting with a new needle, one should call InitializeStringSearch
// to determine which search strategy to use, and in the case of a long-needle
// strategy, the call also initializes the algorithm to SIMPLE.
enum StringSearchAlgorithm { SIMPLE_SEARCH, BOYER_MOORE_HORSPOOL, BOYER_MOORE };
static StringSearchAlgorithm algorithm;


// Compute the bad-char table for Boyer-Moore in the static buffer.
template <typename PatternChar>
static void BoyerMoorePopulateBadCharTable(Vector<const PatternChar> pattern) {
  // Only preprocess at most kBMMaxShift last characters of pattern.
  int start = Max(pattern.length() - kBMMaxShift, 0);
  // Run forwards to populate bad_char_table, so that *last* instance
  // of character equivalence class is the one registered.
  // Notice: Doesn't include the last character.
  int table_size = (sizeof(PatternChar) == 1) ? String::kMaxAsciiCharCode + 1
                                              : kBMAlphabetSize;
  if (start == 0) {  // All patterns less than kBMMaxShift in length.
    memset(BMBuffers::bad_char_occurrence,
           -1,
           table_size * sizeof(*BMBuffers::bad_char_occurrence));
  } else {
    for (int i = 0; i < table_size; i++) {
      BMBuffers::bad_char_occurrence[i] = start - 1;
    }
  }
  for (int i = start; i < pattern.length() - 1; i++) {
    PatternChar c = pattern[i];
    int bucket = (sizeof(PatternChar) == 1) ? c : c % kBMAlphabetSize;
    BMBuffers::bad_char_occurrence[bucket] = i;
  }
}
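
A worked example of the table contents (pattern chosen arbitrarily): for the ASCII needle "ababc" (length 5, so start == 0), the loop visits indices 0..3 and records the last occurrence of each character, excluding the final 'c'.

// After populating the table for the pattern "ababc":
//   BMBuffers::bad_char_occurrence['a'] == 2  // last 'a' before the final char
//   BMBuffers::bad_char_occurrence['b'] == 3  // last 'b' before the final char
//   all other entries                  == -1  // memset path, since start == 0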


template <typename PatternChar>
static void BoyerMoorePopulateGoodSuffixTable(
    Vector<const PatternChar> pattern) {
  int m = pattern.length();
  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
  int len = m - start;
  // Compute Good Suffix tables.
  BMBuffers::bmgs_buffers.Initialize(m);

  BMBuffers::bmgs_buffers.shift(m-1) = 1;
  BMBuffers::bmgs_buffers.suffix(m) = m + 1;
  PatternChar last_char = pattern[m - 1];
  int suffix = m + 1;
  {
    int i = m;
    while (i > start) {
      PatternChar c = pattern[i - 1];
      while (suffix <= m && c != pattern[suffix - 1]) {
        if (BMBuffers::bmgs_buffers.shift(suffix) == len) {
          BMBuffers::bmgs_buffers.shift(suffix) = suffix - i;
        }
        suffix = BMBuffers::bmgs_buffers.suffix(suffix);
      }
      BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
      if (suffix == m) {
        // No suffix to extend, so we check against last_char only.
        while ((i > start) && (pattern[i - 1] != last_char)) {
          if (BMBuffers::bmgs_buffers.shift(m) == len) {
            BMBuffers::bmgs_buffers.shift(m) = m - i;
          }
          BMBuffers::bmgs_buffers.suffix(--i) = m;
        }
        if (i > start) {
          BMBuffers::bmgs_buffers.suffix(--i) = --suffix;
        }
      }
    }
  }
  if (suffix < m) {
    for (int i = start; i <= m; i++) {
      if (BMBuffers::bmgs_buffers.shift(i) == len) {
        BMBuffers::bmgs_buffers.shift(i) = suffix - start;
      }
      if (i == suffix) {
        suffix = BMBuffers::bmgs_buffers.suffix(suffix);
      }
    }
  }
}


template <typename SubjectChar, typename PatternChar>
static inline int CharOccurrence(int char_code) {
  if (sizeof(SubjectChar) == 1) {
    return BMBuffers::bad_char_occurrence[char_code];
  }
  if (sizeof(PatternChar) == 1) {
    if (char_code > String::kMaxAsciiCharCode) {
      return -1;
    }
    return BMBuffers::bad_char_occurrence[char_code];
  }
  return BMBuffers::bad_char_occurrence[char_code % kBMAlphabetSize];
}


// Restricted simplified Boyer-Moore string matching.
// Uses only the bad-shift table of Boyer-Moore and only uses it
// for the character compared to the last character of the needle.
template <typename SubjectChar, typename PatternChar>
static int BoyerMooreHorspool(Vector<const SubjectChar> subject,
                              Vector<const PatternChar> pattern,
                              int start_index,
                              bool* complete) {
  ASSERT(algorithm <= BOYER_MOORE_HORSPOOL);
  int n = subject.length();
  int m = pattern.length();

  int badness = -m;

  // How bad we are doing without a good-suffix table.
  int idx;  // No matches found prior to this index.
  PatternChar last_char = pattern[m - 1];
  int last_char_shift =
      m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
  // Perform search
  for (idx = start_index; idx <= n - m;) {
    int j = m - 1;
    int c;
    while (last_char != (c = subject[idx + j])) {
      int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
      int shift = j - bc_occ;
      idx += shift;
      badness += 1 - shift;  // at most zero, so badness cannot increase.
      if (idx > n - m) {
        *complete = true;
        return -1;
      }
    }
    j--;
    while (j >= 0 && pattern[j] == (subject[idx + j])) j--;
    if (j < 0) {
      *complete = true;
      return idx;
    } else {
      idx += last_char_shift;
      // Badness increases by the number of characters we have
      // checked, and decreases by the number of characters we
      // can skip by shifting. It's a measure of how we are doing
      // compared to reading each character exactly once.
      badness += (m - j) - last_char_shift;
      if (badness > 0) {
        *complete = false;
        return idx;
      }
    }
  }
  *complete = true;
  return -1;
}


template <typename SubjectChar, typename PatternChar>
static int BoyerMooreIndexOf(Vector<const SubjectChar> subject,
                             Vector<const PatternChar> pattern,
                             int idx) {
  ASSERT(algorithm <= BOYER_MOORE);
  int n = subject.length();
  int m = pattern.length();
  // Only preprocess at most kBMMaxShift last characters of pattern.
  int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;

  PatternChar last_char = pattern[m - 1];
  // Continue search from idx.
  while (idx <= n - m) {
    int j = m - 1;
    SubjectChar c;
    while (last_char != (c = subject[idx + j])) {
      int shift = j - CharOccurrence<SubjectChar, PatternChar>(c);
      idx += shift;
      if (idx > n - m) {
        return -1;
      }
    }
    while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
    if (j < 0) {
      return idx;
    } else if (j < start) {
      // We have matched more than our tables allow us to be smart about.
      // Fall back on BMH shift.
      idx += m - 1 - CharOccurrence<SubjectChar, PatternChar>(last_char);
    } else {
      int gs_shift = BMBuffers::bmgs_buffers.shift(j + 1);
      int bc_occ = CharOccurrence<SubjectChar, PatternChar>(c);
      int shift = j - bc_occ;
      if (gs_shift > shift) {
        shift = gs_shift;
      }
      idx += shift;
    }
  }

  return -1;
}


// Trivial string search for shorter strings.
// On return, if "complete" is set to true, the return value is the
// final result of searching for the pattern in the subject.
// If "complete" is set to false, the return value is the index where
// further checking should start, i.e., it's guaranteed that the pattern
// does not occur at a position prior to the returned index.
template <typename PatternChar, typename SubjectChar>
static int SimpleIndexOf(Vector<const SubjectChar> subject,
                         Vector<const PatternChar> pattern,
                         int idx,
                         bool* complete) {
  ASSERT(pattern.length() > 1);
  int pattern_length = pattern.length();
  // Badness is a count of how much work we have done. When we have
  // done enough work we decide it's probably worth switching to a better
  // algorithm.
  int badness = -10 - (pattern_length << 2);

  // We know our pattern is at least 2 characters, we cache the first so
  // the common case of the first character not matching is faster.
  PatternChar pattern_first_char = pattern[0];
  for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
    badness++;
    if (badness > 0) {
      *complete = false;
      return i;
    }
    if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
      const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
          memchr(subject.start() + i,
                 pattern_first_char,
                 n - i + 1));
      if (pos == NULL) {
        *complete = true;
        return -1;
      }
      i = static_cast<int>(pos - subject.start());
    } else {
      if (subject[i] != pattern_first_char) continue;
    }
    int j = 1;
    do {
      if (pattern[j] != subject[i+j]) {
        break;
      }
      j++;
    } while (j < pattern_length);
    if (j == pattern_length) {
      *complete = true;
      return i;
    }
    badness += j;
  }
  *complete = true;
  return -1;
}

// Simple indexOf that never bails out. For short patterns only.
template <typename PatternChar, typename SubjectChar>
static int SimpleIndexOf(Vector<const SubjectChar> subject,
                         Vector<const PatternChar> pattern,
                         int idx) {
  int pattern_length = pattern.length();
  PatternChar pattern_first_char = pattern[0];
  for (int i = idx, n = subject.length() - pattern_length; i <= n; i++) {
    if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
      const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
          memchr(subject.start() + i,
                 pattern_first_char,
                 n - i + 1));
      if (pos == NULL) return -1;
      i = static_cast<int>(pos - subject.start());
    } else {
      if (subject[i] != pattern_first_char) continue;
    }
    int j = 1;
    while (j < pattern_length) {
      if (pattern[j] != subject[i+j]) {
        break;
      }
      j++;
    }
    if (j == pattern_length) {
      return i;
    }
  }
  return -1;
}


// Strategy for searching for a string in another string.
enum StringSearchStrategy { SEARCH_FAIL, SEARCH_SHORT, SEARCH_LONG };


template <typename PatternChar>
static inline StringSearchStrategy InitializeStringSearch(
    Vector<const PatternChar> pat, bool ascii_subject) {
  // We have an ASCII haystack and a non-ASCII needle. Check if there
  // really is a non-ASCII character in the needle and bail out if there
  // is.
  if (ascii_subject && sizeof(PatternChar) > 1) {
    for (int i = 0; i < pat.length(); i++) {
      uc16 c = pat[i];
      if (c > String::kMaxAsciiCharCode) {
        return SEARCH_FAIL;
      }
    }
  }
  if (pat.length() < kBMMinPatternLength) {
    return SEARCH_SHORT;
  }
  algorithm = SIMPLE_SEARCH;
  return SEARCH_LONG;
}


// Dispatch long needle searches to different algorithms.
template <typename SubjectChar, typename PatternChar>
static int ComplexIndexOf(Vector<const SubjectChar> sub,
                          Vector<const PatternChar> pat,
                          int start_index) {
  ASSERT(pat.length() >= kBMMinPatternLength);
  // Try algorithms in order of increasing setup cost and expected performance.
  bool complete;
  int idx = start_index;
  switch (algorithm) {
    case SIMPLE_SEARCH:
      idx = SimpleIndexOf(sub, pat, idx, &complete);
      if (complete) return idx;
      BoyerMoorePopulateBadCharTable(pat);
      algorithm = BOYER_MOORE_HORSPOOL;
      // FALLTHROUGH.
    case BOYER_MOORE_HORSPOOL:
      idx = BoyerMooreHorspool(sub, pat, idx, &complete);
      if (complete) return idx;
      // Build the Good Suffix table and continue searching.
      BoyerMoorePopulateGoodSuffixTable(pat);
      algorithm = BOYER_MOORE;
      // FALLTHROUGH.
    case BOYER_MOORE:
      return BoyerMooreIndexOf(sub, pat, idx);
  }
  UNREACHABLE();
  return -1;
}


// Dispatch to different search strategies for a single search.
// If searching multiple times on the same needle, the search
// strategy should only be computed once and then dispatch to different
// loops.
template <typename SubjectChar, typename PatternChar>
static int StringSearch(Vector<const SubjectChar> sub,
                        Vector<const PatternChar> pat,
                        int start_index) {
  bool ascii_subject = (sizeof(SubjectChar) == 1);
  StringSearchStrategy strategy = InitializeStringSearch(pat, ascii_subject);
  switch (strategy) {
    case SEARCH_FAIL: return -1;
    case SEARCH_SHORT: return SimpleIndexOf(sub, pat, start_index);
    case SEARCH_LONG: return ComplexIndexOf(sub, pat, start_index);
  }
  UNREACHABLE();
  return -1;
}
|
||||
|
||||
}} // namespace v8::internal
|
||||
|
||||
#endif // V8_STRING_SEARCH_H_
|
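The header above implements a self-tuning search: short needles go straight to the never-bailing SimpleIndexOf, while long needles start with the cheap linear scan and only pay the table-building cost of Boyer-Moore-Horspool, then full Boyer-Moore, once the cheaper stage has accumulated enough wasted work ("badness"). A minimal standalone sketch of that escalation idea follows; the function and budget constants are hypothetical, not V8's API:

#include <string>

// Sketch of a two-stage adaptive find: run a naive scan until its wasted
// work exceeds a budget, then switch to a more sophisticated algorithm
// (here std::string::find stands in for the Boyer-Moore stages).
int AdaptiveFind(const std::string& haystack, const std::string& needle) {
  // Stage 1: naive scan, with a budget proportional to the needle length.
  long budget = 256 + 16 * static_cast<long>(needle.size());
  for (size_t i = 0; i + needle.size() <= haystack.size(); i++) {
    size_t j = 0;
    while (j < needle.size() && haystack[i + j] == needle[j]) j++;
    if (j == needle.size()) return static_cast<int>(i);
    budget -= static_cast<long>(j);  // Mirrors "badness += j" above.
    if (budget < 0) {
      // Stage 2: hand the remainder to the library search.
      size_t pos = haystack.find(needle, i);
      return pos == std::string::npos ? -1 : static_cast<int>(pos);
    }
  }
  return -1;
}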
30
deps/v8/src/stub-cache.cc
vendored
@@ -1222,23 +1222,23 @@ CallStubCompiler::CallStubCompiler(int argc,
Object* CallStubCompiler::CompileCustomCall(int generator_id,
                                            Object* object,
                                            JSObject* holder,
                                            JSGlobalPropertyCell* cell,
                                            JSFunction* function,
                                            String* fname,
                                            CheckType check) {
  ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
  switch (generator_id) {
#define CALL_GENERATOR_CASE(ignored1, ignored2, name)           \
    case k##name##CallGenerator:                                \
      return CallStubCompiler::Compile##name##Call(object,      \
                                                   holder,      \
                                                   function,    \
                                                   fname,       \
                                                   check);
    CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
                                            String* fname) {
  ASSERT(generator_id >= 0 && generator_id < kNumCallGenerators);
  switch (generator_id) {
#define CALL_GENERATOR_CASE(ignored1, ignored2, ignored3, name)  \
    case k##name##CallGenerator:                                 \
      return CallStubCompiler::Compile##name##Call(object,       \
                                                   holder,       \
                                                   cell,         \
                                                   function,     \
                                                   fname);
    CUSTOM_CALL_IC_GENERATORS(CALL_GENERATOR_CASE)
#undef CALL_GENERATOR_CASE
  }
  UNREACHABLE();
  return Heap::undefined_value();
}
  UNREACHABLE();
  return Heap::undefined_value();
}

58
deps/v8/src/stub-cache.h
vendored
@@ -612,21 +612,29 @@ class KeyedStoreStubCompiler: public StubCompiler {
// Installation of custom call generators for the selected builtins is
// handled by the bootstrapper.
//
// Each entry has a name of a global function (lowercased), a name of
// a builtin function on its instance prototype (the one the generator
// is set for), and a name of a generator itself (used to build ids
// and generator function names).
#define CUSTOM_CALL_IC_GENERATORS(V)        \
  V(array, push, ArrayPush)                 \
  V(array, pop, ArrayPop)                   \
  V(string, charCodeAt, StringCharCodeAt)   \
  V(string, charAt, StringCharAt)
// Each entry has a name of a global function (lowercased), a flag
// controlling whether the generator is set on the function itself or
// on its instance prototype, a name of a builtin function on the
// function or its instance prototype (the one the generator is set
// for), and a name of a generator itself (used to build ids and
// generator function names).
#define CUSTOM_CALL_IC_GENERATORS(V)                          \
  V(array, INSTANCE_PROTOTYPE, push, ArrayPush)               \
  V(array, INSTANCE_PROTOTYPE, pop, ArrayPop)                 \
  V(string, INSTANCE_PROTOTYPE, charCodeAt, StringCharCodeAt) \
  V(string, INSTANCE_PROTOTYPE, charAt, StringCharAt)         \
  V(string, FUNCTION, fromCharCode, StringFromCharCode)


class CallStubCompiler: public StubCompiler {
 public:
  enum CustomGeneratorOwner {
    FUNCTION,
    INSTANCE_PROTOTYPE
  };

  enum {
#define DECLARE_CALL_GENERATOR_ID(ignored1, ignored2, name) \
#define DECLARE_CALL_GENERATOR_ID(ignored1, ignore2, ignored3, name) \
    k##name##CallGenerator,
    CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR_ID)
#undef DECLARE_CALL_GENERATOR_ID
@@ -656,20 +664,21 @@ class CallStubCompiler: public StubCompiler {
                              JSFunction* function,
                              String* name);

  // Compiles a custom call constant IC using the generator with given id.
  // Compiles a custom call constant/global IC using the generator
  // with given id. For constant calls cell is NULL.
  Object* CompileCustomCall(int generator_id,
                            Object* object,
                            JSObject* holder,
                            JSGlobalPropertyCell* cell,
                            JSFunction* function,
                            String* name,
                            CheckType check);
                            String* name);

#define DECLARE_CALL_GENERATOR(ignored1, ignored2, name) \
  Object* Compile##name##Call(Object* object,            \
                              JSObject* holder,          \
                              JSFunction* function,      \
                              String* fname,             \
                              CheckType check);
#define DECLARE_CALL_GENERATOR(ignored1, ignored2, ignored3, name) \
  Object* Compile##name##Call(Object* object,                      \
                              JSObject* holder,                    \
                              JSGlobalPropertyCell* cell,          \
                              JSFunction* function,                \
                              String* fname);
  CUSTOM_CALL_IC_GENERATORS(DECLARE_CALL_GENERATOR)
#undef DECLARE_CALL_GENERATOR

@@ -689,6 +698,17 @@ class CallStubCompiler: public StubCompiler {

  void GenerateNameCheck(String* name, Label* miss);

  void GenerateGlobalReceiverCheck(JSObject* object,
                                   JSObject* holder,
                                   String* name,
                                   Label* miss);

  // Generates code to load the function from the cell checking that
  // it still contains the same function.
  void GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                    JSFunction* function,
                                    Label* miss);

  // Generates a jump to CallIC miss stub. Returns Failure if the jump cannot
  // be generated.
  Object* GenerateMissBranch();
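CUSTOM_CALL_IC_GENERATORS is an X-macro: one list of entries expanded by several consumers (case labels in stub-cache.cc, enum ids and method declarations here), which is why every consumer macro gains an extra ignored parameter when the list grows a column. A small self-contained illustration of the pattern, with a hypothetical list rather than V8's:

#include <cstdio>

#define FRUIT_LIST(V) \
  V(apple, 10)        \
  V(banana, 20)

// Consumer 1: build an enum from the list.
#define DECLARE_ID(name, price) k_##name,
enum FruitId { FRUIT_LIST(DECLARE_ID) kNumFruits };
#undef DECLARE_ID

// Consumer 2: build a parallel price table from the same list, so the
// two definitions can never fall out of sync.
#define DECLARE_PRICE(name, price) price,
static const int kPrices[] = { FRUIT_LIST(DECLARE_PRICE) };
#undef DECLARE_PRICE

int main() {
  std::printf("%d fruits, banana costs %d\n", kNumFruits, kPrices[k_banana]);
  return 0;
}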
6
deps/v8/src/unicode.h
vendored
@@ -120,6 +120,9 @@ class Utf8 {
  static inline unsigned Encode(char* out, uchar c);
  static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
      unsigned capacity, unsigned* chars_read, unsigned* offset);
  static uchar CalculateValue(const byte* str,
                              unsigned length,
                              unsigned* cursor);
  static const uchar kBadChar = 0xFFFD;
  static const unsigned kMaxEncodedSize = 4;
  static const unsigned kMaxOneByteChar = 0x7f;
@@ -133,9 +136,6 @@ class Utf8 {
  static inline uchar ValueOf(const byte* str,
                              unsigned length,
                              unsigned* cursor);
  static uchar CalculateValue(const byte* str,
                              unsigned length,
                              unsigned* cursor);
};

// --- C h a r a c t e r   S t r e a m ---

6
deps/v8/src/utils.h
vendored
@@ -391,6 +391,12 @@ class Vector {
  // Factory method for creating empty vectors.
  static Vector<T> empty() { return Vector<T>(NULL, 0); }

  template<typename S>
  static Vector<T> cast(Vector<S> input) {
    return Vector<T>(reinterpret_cast<T*>(input.start()),
                     input.length() * sizeof(S) / sizeof(T));
  }

 protected:
  void set_start(T* start) { start_ = start; }

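The new Vector<T>::cast reinterprets the backing store and rescales the length by sizeof(S)/sizeof(T), so a vector of two 32-bit words becomes a vector of eight bytes. A standalone miniature showing just that length rescaling (V8's real Vector has many more members):

#include <cassert>
#include <cstdint>

template <typename T>
struct MiniVector {
  T* start;
  int length;
  template <typename S>
  static MiniVector<T> cast(MiniVector<S> input) {
    // New length = byte length of the input divided by sizeof(T).
    return MiniVector<T>{
        reinterpret_cast<T*>(input.start),
        static_cast<int>(input.length * sizeof(S) / sizeof(T))};
  }
};

int main() {
  static uint32_t words[2] = {0x11223344u, 0x55667788u};
  MiniVector<uint32_t> v32{words, 2};
  MiniVector<uint8_t> bytes = MiniVector<uint8_t>::cast(v32);
  assert(bytes.length == 8);  // 2 words * 4 bytes, reinterpreted.
  return 0;
}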
962
deps/v8/src/utils.h.orig
vendored
Normal file
@@ -0,0 +1,962 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_UTILS_H_
#define V8_UTILS_H_

#include <stdlib.h>
#include <string.h>

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// General helper functions

#define IS_POWER_OF_TWO(x) (((x) & ((x) - 1)) == 0)

// Returns true iff x is a power of 2 (or zero). Cannot be used with the
// maximally negative value of the type T (the -1 overflows).
template <typename T>
static inline bool IsPowerOf2(T x) {
  return IS_POWER_OF_TWO(x);
}


// X must be a power of 2. Returns the number of trailing zeros.
template <typename T>
static inline int WhichPowerOf2(T x) {
  ASSERT(IsPowerOf2(x));
  ASSERT(x != 0);
  if (x < 0) return 31;
  int bits = 0;
#ifdef DEBUG
  int original_x = x;
#endif
  if (x >= 0x10000) {
    bits += 16;
    x >>= 16;
  }
  if (x >= 0x100) {
    bits += 8;
    x >>= 8;
  }
  if (x >= 0x10) {
    bits += 4;
    x >>= 4;
  }
  switch (x) {
    default: UNREACHABLE();
    case 8: bits++;  // Fall through.
    case 4: bits++;  // Fall through.
    case 2: bits++;  // Fall through.
    case 1: break;
  }
  ASSERT_EQ(1 << bits, original_x);
  return bits;
  return 0;
}


// The C++ standard leaves the semantics of '>>' undefined for
// negative signed operands. Most implementations do the right thing,
// though.
static inline int ArithmeticShiftRight(int x, int s) {
  return x >> s;
}


// Compute the 0-relative offset of some absolute value x of type T.
// This allows conversion of Addresses and integral types into
// 0-relative int offsets.
template <typename T>
static inline intptr_t OffsetFrom(T x) {
  return x - static_cast<T>(0);
}


// Compute the absolute value of type T for some 0-relative offset x.
// This allows conversion of 0-relative int offsets into Addresses and
// integral types.
template <typename T>
static inline T AddressFrom(intptr_t x) {
  return static_cast<T>(static_cast<T>(0) + x);
}


// Return the largest multiple of m which is <= x.
template <typename T>
static inline T RoundDown(T x, int m) {
  ASSERT(IsPowerOf2(m));
  return AddressFrom<T>(OffsetFrom(x) & -m);
}


// Return the smallest multiple of m which is >= x.
template <typename T>
static inline T RoundUp(T x, int m) {
  return RoundDown(x + m - 1, m);
}


template <typename T>
static int Compare(const T& a, const T& b) {
  if (a == b)
    return 0;
  else if (a < b)
    return -1;
  else
    return 1;
}


template <typename T>
static int PointerValueCompare(const T* a, const T* b) {
  return Compare<T>(*a, *b);
}


// Returns the smallest power of two which is >= x. If you pass in a
// number that is already a power of two, it is returned as is.
uint32_t RoundUpToPowerOf2(uint32_t x);


template <typename T>
static inline bool IsAligned(T value, T alignment) {
  ASSERT(IsPowerOf2(alignment));
  return (value & (alignment - 1)) == 0;
}


// Returns true if (addr + offset) is aligned.
static inline bool IsAddressAligned(Address addr,
                                    intptr_t alignment,
                                    int offset) {
  intptr_t offs = OffsetFrom(addr + offset);
  return IsAligned(offs, alignment);
}


// Returns the maximum of the two parameters.
template <typename T>
static T Max(T a, T b) {
  return a < b ? b : a;
}


// Returns the minimum of the two parameters.
template <typename T>
static T Min(T a, T b) {
  return a < b ? a : b;
}


inline int StrLength(const char* string) {
  size_t length = strlen(string);
  ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
  return static_cast<int>(length);
}


// ----------------------------------------------------------------------------
// BitField is a help template for encoding and decode bitfield with
// unsigned content.
template<class T, int shift, int size>
class BitField {
 public:
  // Tells whether the provided value fits into the bit field.
  static bool is_valid(T value) {
    return (static_cast<uint32_t>(value) & ~((1U << (size)) - 1)) == 0;
  }

  // Returns a uint32_t mask of bit field.
  static uint32_t mask() {
    // To use all bits of a uint32 in a bitfield without compiler warnings we
    // have to compute 2^32 without using a shift count of 32.
    return ((1U << shift) << size) - (1U << shift);
  }

  // Returns a uint32_t with the bit field value encoded.
  static uint32_t encode(T value) {
    ASSERT(is_valid(value));
    return static_cast<uint32_t>(value) << shift;
  }

  // Extracts the bit field from the value.
  static T decode(uint32_t value) {
    return static_cast<T>((value & mask()) >> shift);
  }
};
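BitField packs a value into bits [shift, shift + size) of a uint32_t: encode shifts into position, decode masks and shifts back, and mask() is written as ((1U << shift) << size) - (1U << shift) so that a field spanning all 32 bits never needs an undefined shift count of 32. A short worked example with two hypothetical fields (not fields V8 defines):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct MiniBitField {
  static uint32_t mask() { return ((1U << shift) << size) - (1U << shift); }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t v) {
    return static_cast<T>((v & mask()) >> shift);
  }
};

typedef MiniBitField<int, 0, 4> LowNibble;  // bits 0..3
typedef MiniBitField<int, 4, 6> MidBits;    // bits 4..9

int main() {
  // Pack two independent values into one word, then unpack them.
  uint32_t packed = LowNibble::encode(9) | MidBits::encode(33);
  assert(LowNibble::decode(packed) == 9);
  assert(MidBits::decode(packed) == 33);
  return 0;
}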

// ----------------------------------------------------------------------------
// Hash function.

uint32_t ComputeIntegerHash(uint32_t key);


// ----------------------------------------------------------------------------
// I/O support.

// Our version of printf(). Avoids compilation errors that we get
// with standard printf when attempting to print pointers, etc.
// (the errors are due to the extra compilation flags, which we
// want elsewhere).
void PrintF(const char* format, ...);

// Our version of fflush.
void Flush();


// Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed off with DeleteArray by the caller.
char* ReadLine(const char* prompt);


// Read and return the raw bytes in a file. the size of the buffer is returned
// in size.
// The returned buffer must be freed by the caller.
byte* ReadBytes(const char* filename, int* size, bool verbose = true);


// Write size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int WriteChars(const char* filename,
               const char* str,
               int size,
               bool verbose = true);


// Write size bytes to the file given by filename.
// The file is overwritten. Returns the number of bytes written.
int WriteBytes(const char* filename,
               const byte* bytes,
               int size,
               bool verbose = true);


// Write the C code
// const char* <varname> = "<str>";
// const int <varname>_len = <len>;
// to the file given by filename. Only the first len chars are written.
int WriteAsCFile(const char* filename, const char* varname,
                 const char* str, int size, bool verbose = true);


// ----------------------------------------------------------------------------
// Miscellaneous

// A static resource holds a static instance that can be reserved in
// a local scope using an instance of Access. Attempts to re-reserve
// the instance will cause an error.
template <typename T>
class StaticResource {
 public:
  StaticResource() : is_reserved_(false) {}

 private:
  template <typename S> friend class Access;
  T instance_;
  bool is_reserved_;
};


// Locally scoped access to a static resource.
template <typename T>
class Access {
 public:
  explicit Access(StaticResource<T>* resource)
    : resource_(resource)
    , instance_(&resource->instance_) {
    ASSERT(!resource->is_reserved_);
    resource->is_reserved_ = true;
  }

  ~Access() {
    resource_->is_reserved_ = false;
    resource_ = NULL;
    instance_ = NULL;
  }

  T* value() { return instance_; }
  T* operator -> () { return instance_; }

 private:
  StaticResource<T>* resource_;
  T* instance_;
};


template <typename T>
class Vector {
 public:
  Vector() : start_(NULL), length_(0) {}
  Vector(T* data, int length) : start_(data), length_(length) {
    ASSERT(length == 0 || (length > 0 && data != NULL));
  }

  static Vector<T> New(int length) {
    return Vector<T>(NewArray<T>(length), length);
  }

  // Returns a vector using the same backing storage as this one,
  // spanning from and including 'from', to but not including 'to'.
  Vector<T> SubVector(int from, int to) {
    ASSERT(to <= length_);
    ASSERT(from < to);
    ASSERT(0 <= from);
    return Vector<T>(start() + from, to - from);
  }

  // Returns the length of the vector.
  int length() const { return length_; }

  // Returns whether or not the vector is empty.
  bool is_empty() const { return length_ == 0; }

  // Returns the pointer to the start of the data in the vector.
  T* start() const { return start_; }

  // Access individual vector elements - checks bounds in debug mode.
  T& operator[](int index) const {
    ASSERT(0 <= index && index < length_);
    return start_[index];
  }

  T& first() { return start_[0]; }

  T& last() { return start_[length_ - 1]; }

  // Returns a clone of this vector with a new backing store.
  Vector<T> Clone() const {
    T* result = NewArray<T>(length_);
    for (int i = 0; i < length_; i++) result[i] = start_[i];
    return Vector<T>(result, length_);
  }

  void Sort(int (*cmp)(const T*, const T*)) {
    typedef int (*RawComparer)(const void*, const void*);
    qsort(start(),
          length(),
          sizeof(T),
          reinterpret_cast<RawComparer>(cmp));
  }

  void Sort() {
    Sort(PointerValueCompare<T>);
  }

  void Truncate(int length) {
    ASSERT(length <= length_);
    length_ = length;
  }

  // Releases the array underlying this vector. Once disposed the
  // vector is empty.
  void Dispose() {
    DeleteArray(start_);
    start_ = NULL;
    length_ = 0;
  }

  inline Vector<T> operator+(int offset) {
    ASSERT(offset < length_);
    return Vector<T>(start_ + offset, length_ - offset);
  }

  // Factory method for creating empty vectors.
  static Vector<T> empty() { return Vector<T>(NULL, 0); }

  template<typename S>
  static Vector<T> cast(Vector<S> input) {
    return Vector<T>(reinterpret_cast<T*>(input.start()),
                     input.length() * sizeof(S) / sizeof(T));
  }

 protected:
  void set_start(T* start) { start_ = start; }

 private:
  T* start_;
  int length_;
};


// A temporary assignment sets a (non-local) variable to a value on
// construction and resets it the value on destruction.
template <typename T>
class TempAssign {
 public:
  TempAssign(T* var, T value): var_(var), old_value_(*var) {
    *var = value;
  }

  ~TempAssign() { *var_ = old_value_; }

 private:
  T* var_;
  T old_value_;
};


template <typename T, int kSize>
class EmbeddedVector : public Vector<T> {
 public:
  EmbeddedVector() : Vector<T>(buffer_, kSize) { }

  // When copying, make underlying Vector to reference our buffer.
  EmbeddedVector(const EmbeddedVector& rhs)
      : Vector<T>(rhs) {
    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    set_start(buffer_);
  }

  EmbeddedVector& operator=(const EmbeddedVector& rhs) {
    if (this == &rhs) return *this;
    Vector<T>::operator=(rhs);
    memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
    this->set_start(buffer_);
    return *this;
  }

 private:
  T buffer_[kSize];
};


template <typename T>
class ScopedVector : public Vector<T> {
 public:
  explicit ScopedVector(int length) : Vector<T>(NewArray<T>(length), length) { }
  ~ScopedVector() {
    DeleteArray(this->start());
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};


inline Vector<const char> CStrVector(const char* data) {
  return Vector<const char>(data, StrLength(data));
}

inline Vector<char> MutableCStrVector(char* data) {
  return Vector<char>(data, StrLength(data));
}

inline Vector<char> MutableCStrVector(char* data, int max) {
  int length = StrLength(data);
  return Vector<char>(data, (length < max) ? length : max);
}

template <typename T>
inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
                                             int length) {
  return Vector< Handle<Object> >(
      reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
}


/*
 * A class that collects values into a backing store.
 * Specialized versions of the class can allow access to the backing store
 * in different ways.
 * There is no guarantee that the backing store is contiguous (and, as a
 * consequence, no guarantees that consecutively added elements are adjacent
 * in memory). The collector may move elements unless it has guaranteed not
 * to.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class Collector {
 public:
  explicit Collector(int initial_capacity = kMinCapacity)
      : index_(0), size_(0) {
    if (initial_capacity < kMinCapacity) {
      initial_capacity = kMinCapacity;
    }
    current_chunk_ = Vector<T>::New(initial_capacity);
  }

  virtual ~Collector() {
    // Free backing store (in reverse allocation order).
    current_chunk_.Dispose();
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
  }

  // Add a single element.
  inline void Add(T value) {
    if (index_ >= current_chunk_.length()) {
      Grow(1);
    }
    current_chunk_[index_] = value;
    index_++;
    size_++;
  }

  // Add a block of contiguous elements and return a Vector backed by the
  // memory area.
  // A basic Collector will keep this vector valid as long as the Collector
  // is alive.
  inline Vector<T> AddBlock(int size, T initial_value) {
    ASSERT(size > 0);
    if (size > current_chunk_.length() - index_) {
      Grow(size);
    }
    T* position = current_chunk_.start() + index_;
    index_ += size;
    size_ += size;
    for (int i = 0; i < size; i++) {
      position[i] = initial_value;
    }
    return Vector<T>(position, size);
  }


  // Write the contents of the collector into the provided vector.
  void WriteTo(Vector<T> destination) {
    ASSERT(size_ <= destination.length());
    int position = 0;
    for (int i = 0; i < chunks_.length(); i++) {
      Vector<T> chunk = chunks_.at(i);
      for (int j = 0; j < chunk.length(); j++) {
        destination[position] = chunk[j];
        position++;
      }
    }
    for (int i = 0; i < index_; i++) {
      destination[position] = current_chunk_[i];
      position++;
    }
  }

  // Allocate a single contiguous vector, copy all the collected
  // elements to the vector, and return it.
  // The caller is responsible for freeing the memory of the returned
  // vector (e.g., using Vector::Dispose).
  Vector<T> ToVector() {
    Vector<T> new_store = Vector<T>::New(size_);
    WriteTo(new_store);
    return new_store;
  }

  // Resets the collector to be empty.
  virtual void Reset() {
    for (int i = chunks_.length() - 1; i >= 0; i--) {
      chunks_.at(i).Dispose();
    }
    chunks_.Rewind(0);
    index_ = 0;
    size_ = 0;
  }

  // Total number of elements added to collector so far.
  inline int size() { return size_; }

 protected:
  static const int kMinCapacity = 16;
  List<Vector<T> > chunks_;
  Vector<T> current_chunk_;  // Block of memory currently being written into.
  int index_;  // Current index in current chunk.
  int size_;  // Total number of elements in collector.

  // Creates a new current chunk, and stores the old chunk in the chunks_ list.
  void Grow(int min_capacity) {
    ASSERT(growth_factor > 1);
    int growth = current_chunk_.length() * (growth_factor - 1);
    if (growth > max_growth) {
      growth = max_growth;
    }
    int new_capacity = current_chunk_.length() + growth;
    if (new_capacity < min_capacity) {
      new_capacity = min_capacity + growth;
    }
    Vector<T> new_chunk = Vector<T>::New(new_capacity);
    int new_index = PrepareGrow(new_chunk);
    if (index_ > 0) {
      chunks_.Add(current_chunk_.SubVector(0, index_));
    } else {
      // Can happen if the call to PrepareGrow moves everything into
      // the new chunk.
      current_chunk_.Dispose();
    }
    current_chunk_ = new_chunk;
    index_ = new_index;
    ASSERT(index_ + min_capacity <= current_chunk_.length());
  }

  // Before replacing the current chunk, give a subclass the option to move
  // some of the current data into the new chunk. The function may update
  // the current index_ value to represent data no longer in the current chunk.
  // Returns the initial index of the new chunk (after copied data).
  virtual int PrepareGrow(Vector<T> new_chunk) {
    return 0;
  }
};


/*
 * A collector that allows sequences of values to be guaranteed to
 * stay consecutive.
 * If the backing store grows while a sequence is active, the current
 * sequence might be moved, but after the sequence is ended, it will
 * not move again.
 * NOTICE: Blocks allocated using Collector::AddBlock(int) can move
 * as well, if inside an active sequence where another element is added.
 */
template <typename T, int growth_factor = 2, int max_growth = 1 * MB>
class SequenceCollector : public Collector<T, growth_factor, max_growth> {
 public:
  explicit SequenceCollector(int initial_capacity)
      : Collector<T, growth_factor, max_growth>(initial_capacity),
        sequence_start_(kNoSequence) { }

  virtual ~SequenceCollector() {}

  void StartSequence() {
    ASSERT(sequence_start_ == kNoSequence);
    sequence_start_ = this->index_;
  }

  Vector<T> EndSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_start = sequence_start_;
    sequence_start_ = kNoSequence;
    if (sequence_start == this->index_) return Vector<T>();
    return this->current_chunk_.SubVector(sequence_start, this->index_);
  }

  // Drops the currently added sequence, and all collected elements in it.
  void DropSequence() {
    ASSERT(sequence_start_ != kNoSequence);
    int sequence_length = this->index_ - sequence_start_;
    this->index_ = sequence_start_;
    this->size_ -= sequence_length;
    sequence_start_ = kNoSequence;
  }

  virtual void Reset() {
    sequence_start_ = kNoSequence;
    this->Collector<T, growth_factor, max_growth>::Reset();
  }

 private:
  static const int kNoSequence = -1;
  int sequence_start_;

  // Move the currently active sequence to the new chunk.
  virtual int PrepareGrow(Vector<T> new_chunk) {
    if (sequence_start_ != kNoSequence) {
      int sequence_length = this->index_ - sequence_start_;
      // The new chunk is always larger than the current chunk, so there
      // is room for the copy.
      ASSERT(sequence_length < new_chunk.length());
      for (int i = 0; i < sequence_length; i++) {
        new_chunk[i] = this->current_chunk_[sequence_start_ + i];
      }
      this->index_ = sequence_start_;
      sequence_start_ = 0;
      return sequence_length;
    }
    return 0;
  }
};
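Collector grows by sealing the current chunk and allocating a larger one, so elements are generally not contiguous; WriteTo/ToVector flatten the sealed chunks plus the open chunk in order. A simplified stand-in using std::vector for the chunk list, mirroring only the Add/ToVector shape (none of V8's Vector/List memory rules):

#include <cassert>
#include <vector>

class MiniCollector {
 public:
  explicit MiniCollector(int chunk_capacity) : capacity_(chunk_capacity) {}

  void Add(int value) {
    if (current_.size() == static_cast<size_t>(capacity_)) {
      chunks_.push_back(current_);  // Seal the full chunk...
      current_.clear();             // ...and start a fresh one.
      capacity_ *= 2;               // Grow like Collector's growth_factor.
    }
    current_.push_back(value);
  }

  // Copy all sealed chunks, then the open chunk, into one flat vector.
  std::vector<int> ToVector() const {
    std::vector<int> out;
    for (size_t i = 0; i < chunks_.size(); i++)
      out.insert(out.end(), chunks_[i].begin(), chunks_[i].end());
    out.insert(out.end(), current_.begin(), current_.end());
    return out;
  }

 private:
  int capacity_;
  std::vector<std::vector<int> > chunks_;
  std::vector<int> current_;
};

int main() {
  MiniCollector c(4);
  for (int i = 0; i < 100; i++) c.Add(i);
  std::vector<int> flat = c.ToVector();
  assert(flat.size() == 100 && flat[99] == 99);
  return 0;
}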

// Simple support to read a file into a 0-terminated C-string.
// The returned buffer must be freed by the caller.
// On return, *exits tells whether the file existed.
Vector<const char> ReadFile(const char* filename,
                            bool* exists,
                            bool verbose = true);


// Simple wrapper that allows an ExternalString to refer to a
// Vector<const char>. Doesn't assume ownership of the data.
class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
 public:
  explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}

  virtual const char* data() const { return data_.start(); }

  virtual size_t length() const { return data_.length(); }

 private:
  Vector<const char> data_;
};


// Helper class for building result strings in a character buffer. The
// purpose of the class is to use safe operations that checks the
// buffer bounds on all operations in debug mode.
class StringBuilder {
 public:
  // Create a string builder with a buffer of the given size. The
  // buffer is allocated through NewArray<char> and must be
  // deallocated by the caller of Finalize().
  explicit StringBuilder(int size);

  StringBuilder(char* buffer, int size)
      : buffer_(buffer, size), position_(0) { }

  ~StringBuilder() { if (!is_finalized()) Finalize(); }

  int size() const { return buffer_.length(); }

  // Get the current position in the builder.
  int position() const {
    ASSERT(!is_finalized());
    return position_;
  }

  // Reset the position.
  void Reset() { position_ = 0; }

  // Add a single character to the builder. It is not allowed to add
  // 0-characters; use the Finalize() method to terminate the string
  // instead.
  void AddCharacter(char c) {
    ASSERT(c != '\0');
    ASSERT(!is_finalized() && position_ < buffer_.length());
    buffer_[position_++] = c;
  }

  // Add an entire string to the builder. Uses strlen() internally to
  // compute the length of the input string.
  void AddString(const char* s);

  // Add the first 'n' characters of the given string 's' to the
  // builder. The input string must have enough characters.
  void AddSubstring(const char* s, int n);

  // Add formatted contents to the builder just like printf().
  void AddFormatted(const char* format, ...);

  // Add character padding to the builder. If count is non-positive,
  // nothing is added to the builder.
  void AddPadding(char c, int count);

  // Finalize the string by 0-terminating it and returning the buffer.
  char* Finalize();

 private:
  Vector<char> buffer_;
  int position_;

  bool is_finalized() const { return position_ < 0; }

  DISALLOW_IMPLICIT_CONSTRUCTORS(StringBuilder);
};


// Custom memcpy implementation for platforms where the standard version
// may not be good enough.
// TODO(lrn): Check whether some IA32 platforms should be excluded.
#if defined(V8_TARGET_ARCH_IA32)

// TODO(lrn): Extend to other platforms as needed.

typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);

// Implemented in codegen-<arch>.cc.
MemCopyFunction CreateMemCopyFunction();

// Copy memory area to disjoint memory area.
static inline void MemCopy(void* dest, const void* src, size_t size) {
  static MemCopyFunction memcopy = CreateMemCopyFunction();
  (*memcopy)(dest, src, size);
#ifdef DEBUG
  CHECK_EQ(0, memcmp(dest, src, size));
#endif
}


// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
// TODO(lrn): Try to find a more precise value.
static const int kMinComplexMemCopy = 64;

#else  // V8_TARGET_ARCH_IA32

static inline void MemCopy(void* dest, const void* src, size_t size) {
  memcpy(dest, src, size);
}

static const int kMinComplexMemCopy = 256;

#endif  // V8_TARGET_ARCH_IA32


// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
static inline void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
  sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
  if (sizeof(*dest) == sizeof(*src)) {
    if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
      MemCopy(dest, src, chars * sizeof(*dest));
      return;
    }
    // Number of characters in a uintptr_t.
    static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest);  // NOLINT
    while (dest <= limit - kStepSize) {
      *reinterpret_cast<uintptr_t*>(dest) =
          *reinterpret_cast<const uintptr_t*>(src);
      dest += kStepSize;
      src += kStepSize;
    }
  }
#endif
  while (dest < limit) {
    *dest++ = static_cast<sinkchar>(*src++);
  }
}
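CopyChars copies one uintptr_t-sized block at a time when source and sink have the same character width, then finishes with a byte tail; CompareChars below uses the same word-at-a-time trick. A minimal standalone version of the copy, using memcpy for each word load/store since, unlike code guarded by V8_HOST_CAN_READ_UNALIGNED, portable C++ cannot assume unaligned pointer casts are safe:

#include <cassert>
#include <cstdint>
#include <cstring>

static void WordCopy(char* dest, const char* src, int chars) {
  char* limit = dest + chars;
  const int kStep = static_cast<int>(sizeof(uintptr_t));
  while (dest <= limit - kStep) {
    uintptr_t word;
    std::memcpy(&word, src, sizeof(word));   // Word-sized load.
    std::memcpy(dest, &word, sizeof(word));  // Word-sized store.
    dest += kStep;
    src += kStep;
  }
  while (dest < limit) *dest++ = *src++;     // Byte-by-byte tail.
}

int main() {
  const char msg[] = "word-at-a-time copying";
  char buf[sizeof(msg)];
  WordCopy(buf, msg, static_cast<int>(sizeof(msg)));
  assert(std::memcmp(buf, msg, sizeof(msg)) == 0);
  return 0;
}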

// Compare ASCII/16bit chars to ASCII/16bit chars.
template <typename lchar, typename rchar>
static inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
  const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
  if (sizeof(*lhs) == sizeof(*rhs)) {
    // Number of characters in a uintptr_t.
    static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs);  // NOLINT
    while (lhs <= limit - kStepSize) {
      if (*reinterpret_cast<const uintptr_t*>(lhs) !=
          *reinterpret_cast<const uintptr_t*>(rhs)) {
        break;
      }
      lhs += kStepSize;
      rhs += kStepSize;
    }
  }
#endif
  while (lhs < limit) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
    ++lhs;
    ++rhs;
  }
  return 0;
}


template <typename T>
static inline void MemsetPointer(T** dest, T* value, int counter) {
#if defined(V8_HOST_ARCH_IA32)
#define STOS "stosl"
#elif defined(V8_HOST_ARCH_X64)
#define STOS "stosq"
#endif

#if defined(__GNUC__) && defined(STOS)
  asm volatile(
      "cld;"
      "rep ; " STOS
      : "+&c" (counter), "+&D" (dest)
      : "a" (value)
      : "memory", "cc");
#else
  for (int i = 0; i < counter; i++) {
    dest[i] = value;
  }
#endif

#undef STOS
}


// Copies data from |src| to |dst|. The data spans MUST not overlap.
inline void CopyWords(Object** dst, Object** src, int num_words) {
  ASSERT(Min(dst, src) + num_words <= Max(dst, src));
  ASSERT(num_words > 0);

  // Use block copying memcpy if the segment we're copying is
  // enough to justify the extra call/setup overhead.
  static const int kBlockCopyLimit = 16;

  if (num_words >= kBlockCopyLimit) {
    memcpy(dst, src, num_words * kPointerSize);
  } else {
    int remaining = num_words;
    do {
      remaining--;
      *dst++ = *src++;
    } while (remaining > 0);
  }
}


// Calculate 10^exponent.
int TenToThe(int exponent);


// The type-based aliasing rule allows the compiler to assume that pointers of
// different types (for some definition of different) never alias each other.
// Thus the following code does not work:
//
// float f = foo();
// int fbits = *(int*)(&f);
//
// The compiler 'knows' that the int pointer can't refer to f since the types
// don't match, so the compiler may cache f in a register, leaving random data
// in fbits. Using C++ style casts makes no difference, however a pointer to
// char data is assumed to alias any other pointer. This is the 'memcpy
// exception'.
//
// Bit_cast uses the memcpy exception to move the bits from a variable of one
// type of a variable of another type. Of course the end result is likely to
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize BitCast away.
//
// There is an additional use for BitCast.
// Recent gccs will warn when they see casts that may result in breakage due to
// the type-based aliasing rule. If you have checked that there is no breakage
// you can use BitCast to cast one pointer type to another. This confuses gcc
// enough that it can no longer see that you have cast one pointer type to
// another thus avoiding the warning.
template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  // Compile time assertion: sizeof(Dest) == sizeof(Source)
  // A compile error here means your Dest and Source have different sizes.
  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];

  Dest dest;
  memcpy(&dest, &source, sizeof(dest));
  return dest;
}

template <class Dest, class Source>
inline Dest BitCast(Source* source) {
  return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}
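The classic use of the memcpy exception described above is reading out a floating-point value's bit pattern without aliasing violations. A short usage sketch of the BitCast idiom:

#include <cassert>
#include <cstdint>
#include <cstring>

template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  // Compile-time size check, as in the header above.
  typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1];
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));  // The 'memcpy exception'.
  return dest;
}

int main() {
  uint64_t bits = BitCast<uint64_t>(1.0);
  assert(bits == 0x3FF0000000000000ULL);  // IEEE-754 encoding of 1.0.
  return 0;
}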

} }  // namespace v8::internal

#endif  // V8_UTILS_H_
2
deps/v8/src/version.cc
vendored
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 4
#define BUILD_NUMBER 2
#define BUILD_NUMBER 4
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

1
deps/v8/src/x64/assembler-x64.cc
vendored
@@ -829,6 +829,7 @@ void Assembler::call(Label* L) {


void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode) {
  WriteRecordedPositions();
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;
  // 1110 1000 #32-bit disp.
92
deps/v8/src/x64/code-stubs-x64.cc
vendored
@@ -1404,33 +1404,35 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
  Label slow, done;

  if (op_ == Token::SUB) {
    // Check whether the value is a smi.
    Label try_float;
    __ JumpIfNotSmi(rax, &try_float);
    if (include_smi_code_) {
      // Check whether the value is a smi.
      Label try_float;
      __ JumpIfNotSmi(rax, &try_float);
      if (negative_zero_ == kIgnoreNegativeZero) {
        __ SmiCompare(rax, Smi::FromInt(0));
        __ j(equal, &done);
      }
      __ SmiNeg(rax, rax, &done);

    if (negative_zero_ == kIgnoreNegativeZero) {
      __ SmiCompare(rax, Smi::FromInt(0));
      __ j(equal, &done);
      // Either zero or Smi::kMinValue, neither of which become a smi when
      // negated. We handle negative zero here if required. We always enter
      // the runtime system if we have Smi::kMinValue.
      if (negative_zero_ == kStrictNegativeZero) {
        __ SmiCompare(rax, Smi::FromInt(0));
        __ j(not_equal, &slow);
        __ Move(rax, Factory::minus_zero_value());
        __ jmp(&done);
      } else {
        __ SmiCompare(rax, Smi::FromInt(Smi::kMinValue));
        __ j(equal, &slow);
        __ jmp(&done);
      }
      // Try floating point case.
      __ bind(&try_float);
    } else if (FLAG_debug_code) {
      __ AbortIfSmi(rax);
    }

    // Enter runtime system if the value of the smi is zero
    // to make sure that we switch between 0 and -0.
    // Also enter it if the value of the smi is Smi::kMinValue.
    __ SmiNeg(rax, rax, &done);

    // Either zero or Smi::kMinValue, neither of which become a smi when
    // negated.
    if (negative_zero_ == kStrictNegativeZero) {
      __ SmiCompare(rax, Smi::FromInt(0));
      __ j(not_equal, &slow);
      __ Move(rax, Factory::minus_zero_value());
      __ jmp(&done);
    } else {
      __ jmp(&slow);
    }

    // Try floating point case.
    __ bind(&try_float);
    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &slow);
@@ -1449,6 +1451,17 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
      __ movq(rax, rcx);
    }
  } else if (op_ == Token::BIT_NOT) {
    if (include_smi_code_) {
      Label try_float;
      __ JumpIfNotSmi(rax, &try_float);
      __ SmiNot(rax, rax);
      __ jmp(&done);
      // Try floating point case.
      __ bind(&try_float);
    } else if (FLAG_debug_code) {
      __ AbortIfSmi(rax);
    }

    // Check if the operand is a heap number.
    __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
    __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
@@ -2115,6 +2128,26 @@ void CompareStub::Generate(MacroAssembler* masm) {
  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));

  Label check_unequal_objects, done;

  // Compare two smis if required.
  if (include_smi_compare_) {
    Label non_smi, smi_done;
    __ JumpIfNotBothSmi(rax, rdx, &non_smi);
    __ subq(rdx, rax);
    __ j(no_overflow, &smi_done);
    __ neg(rdx);  // Correct sign in case of overflow.
    __ bind(&smi_done);
    __ movq(rax, rdx);
    __ ret(0);
    __ bind(&non_smi);
  } else if (FLAG_debug_code) {
    Label ok;
    __ JumpIfNotSmi(rdx, &ok);
    __ JumpIfNotSmi(rax, &ok);
    __ Abort("CompareStub: smi operands");
    __ bind(&ok);
  }

  // The compare stub returns a positive, negative, or zero 64-bit integer
  // value in rax, corresponding to result of comparing the two inputs.
  // NOTICE! This code is only reached after a smi-fast-case check, so
@@ -3001,7 +3034,8 @@ int CompareStub::MinorKey() {
         | RegisterField::encode(false)  // lhs_ and rhs_ are not used
         | StrictField::encode(strict_)
         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
         | IncludeNumberCompareField::encode(include_number_compare_);
         | IncludeNumberCompareField::encode(include_number_compare_)
         | IncludeSmiCompareField::encode(include_smi_compare_);
}


@@ -3041,12 +3075,18 @@ const char* CompareStub::GetName() {
    include_number_compare_name = "_NO_NUMBER";
  }

  const char* include_smi_compare_name = "";
  if (!include_smi_compare_) {
    include_smi_compare_name = "_NO_SMI";
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "CompareStub_%s%s%s%s",
               cc_name,
               strict_name,
               never_nan_nan_name,
               include_number_compare_name);
               include_number_compare_name,
               include_smi_compare_name);
  return name_;
}

33
deps/v8/src/x64/codegen-x64.cc
vendored
@@ -1940,6 +1940,19 @@ static Condition DoubleCondition(Condition cc) {
}


static CompareFlags ComputeCompareFlags(NaNInformation nan_info,
                                        bool inline_number_compare) {
  CompareFlags flags = NO_SMI_COMPARE_IN_STUB;
  if (nan_info == kCantBothBeNaN) {
    flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN);
  }
  if (inline_number_compare) {
    flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB);
  }
  return flags;
}


void CodeGenerator::Comparison(AstNode* node,
                               Condition cc,
                               bool strict,
@@ -2070,7 +2083,9 @@ void CodeGenerator::Comparison(AstNode* node,

    // Setup and call the compare stub.
    is_not_string.Bind(&left_side);
    CompareStub stub(cc, strict, kCantBothBeNaN);
    CompareFlags flags =
        static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
    CompareStub stub(cc, strict, flags);
    Result result = frame_->CallStub(&stub, &left_side, &right_side);
    result.ToRegister();
    __ testq(result.reg(), result.reg());
@@ -2174,7 +2189,8 @@ void CodeGenerator::Comparison(AstNode* node,

      // End of in-line compare, call out to the compare stub. Don't include
      // number comparison in the stub if it was inlined.
      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
      CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare);
      CompareStub stub(cc, strict, flags);
      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flag.
      answer.Unuse();
@@ -2207,7 +2223,9 @@ void CodeGenerator::Comparison(AstNode* node,

      // End of in-line compare, call out to the compare stub. Don't include
      // number comparison in the stub if it was inlined.
      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
      CompareFlags flags =
          ComputeCompareFlags(nan_info, inline_number_compare);
      CompareStub stub(cc, strict, flags);
      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
      answer.Unuse();
@@ -2332,7 +2350,9 @@ void CodeGenerator::ConstantSmiComparison(Condition cc,
    }

    // Setup and call the compare stub.
    CompareStub stub(cc, strict, kCantBothBeNaN);
    CompareFlags flags =
        static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB);
    CompareStub stub(cc, strict, flags);
    Result result = frame_->CallStub(&stub, left_side, right_side);
    result.ToRegister();
    __ testq(result.reg(), result.reg());
@@ -7395,6 +7415,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
      GenericUnaryOpStub stub(
          Token::SUB,
          overwrite,
          NO_UNARY_FLAGS,
          no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
      Result operand = frame_->Pop();
      Result answer = frame_->CallStub(&stub, &operand);
@@ -7413,7 +7434,9 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
      Condition is_smi = masm_->CheckSmi(operand.reg());
      smi_label.Branch(is_smi, &operand);

      GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
      GenericUnaryOpStub stub(Token::BIT_NOT,
                              overwrite,
                              NO_UNARY_SMI_CODE_IN_STUB);
      Result answer = frame_->CallStub(&stub, &operand);
      continue_label.Jump(&answer);

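The pattern above replaces CompareStub's separate bool/enum constructor arguments with a single CompareFlags bitmask, built up in ComputeCompareFlags via static_cast after each union. A tiny illustration of why those casts are needed when OR-ing enum flags; the enum here is hypothetical, mirroring the shape of V8's CompareFlags:

#include <cassert>

enum DemoFlags {
  NO_FLAGS = 0,
  CANT_BE_NAN = 1 << 0,
  SKIP_NUMBER_COMPARE = 1 << 1,
  SKIP_SMI_COMPARE = 1 << 2
};

static DemoFlags Combine(bool cant_be_nan, bool inlined_number_compare) {
  DemoFlags flags = SKIP_SMI_COMPARE;
  // operator| on enum values yields int, so each union is cast back
  // to the enum type, as ComputeCompareFlags does above.
  if (cant_be_nan)
    flags = static_cast<DemoFlags>(flags | CANT_BE_NAN);
  if (inlined_number_compare)
    flags = static_cast<DemoFlags>(flags | SKIP_NUMBER_COMPARE);
  return flags;
}

int main() {
  assert(Combine(true, false) == (SKIP_SMI_COMPARE | CANT_BE_NAN));
  return 0;
}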
21
deps/v8/src/x64/codegen-x64.h
vendored
@@ -343,15 +343,17 @@ class CodeGenerator: public AstVisitor {
  bool in_spilled_code() const { return in_spilled_code_; }
  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }

  // If the name is an inline runtime function call return the number of
  // expected arguments. Otherwise return -1.
  static int InlineRuntimeCallArgumentsCount(Handle<String> name);

  static Operand ContextOperand(Register context, int index) {
    return Operand(context, Context::SlotOffset(index));
  }

 private:
  // Type of a member function that generates inline code for a native function.
  typedef void (CodeGenerator::*InlineFunctionGenerator)
      (ZoneList<Expression*>*);

  static const InlineFunctionGenerator kInlineFunctionGenerators[];

  // Construction/Destruction
  explicit CodeGenerator(MacroAssembler* masm);

@@ -584,12 +586,9 @@ class CodeGenerator: public AstVisitor {

  void CheckStack();

  struct InlineRuntimeLUT {
    void (CodeGenerator::*method)(ZoneList<Expression*>*);
    const char* name;
    int nargs;
  };
  static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
  static InlineFunctionGenerator FindInlineFunctionGenerator(
      Runtime::FunctionId function_id);

  bool CheckForInlineRuntimeCall(CallRuntime* node);

  void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@@ -742,8 +741,6 @@ class CodeGenerator: public AstVisitor {
  // in a spilled state.
  bool in_spilled_code_;

  static InlineRuntimeLUT kInlineRuntimeLUT[];

  friend class VirtualFrame;
  friend class JumpTarget;
  friend class Reference;
255
deps/v8/src/x64/full-codegen-x64.cc
vendored
@@ -507,7 +507,7 @@ MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
      int context_chain_length =
          scope()->ContextChainLength(slot->var()->scope());
      __ LoadContext(scratch, context_chain_length);
      return CodeGenerator::ContextOperand(scratch, slot->index());
      return ContextOperand(scratch, slot->index());
    }
    case Slot::LOOKUP:
      UNREACHABLE();
@@ -568,20 +568,17 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
      ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
      if (FLAG_debug_code) {
        // Check if we have the correct context pointer.
        __ movq(rbx,
                CodeGenerator::ContextOperand(rsi, Context::FCONTEXT_INDEX));
        __ movq(rbx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
        __ cmpq(rbx, rsi);
        __ Check(equal, "Unexpected declaration in current context.");
      }
      if (mode == Variable::CONST) {
        __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
        __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
                kScratchRegister);
        __ movq(ContextOperand(rsi, slot->index()), kScratchRegister);
        // No write barrier since the hole value is in old space.
      } else if (function != NULL) {
        VisitForValue(function, kAccumulator);
        __ movq(CodeGenerator::ContextOperand(rsi, slot->index()),
                result_register());
        __ movq(ContextOperand(rsi, slot->index()), result_register());
        int offset = Context::SlotOffset(slot->index());
        __ movq(rbx, rsi);
        __ RecordWrite(rbx, offset, result_register(), rcx);
@@ -680,9 +677,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
    VisitForValue(clause->label(), kAccumulator);

    // Perform the comparison as if via '==='.
    if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
      __ movq(rdx, Operand(rsp, 0));  // Switch value.
    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
    if (inline_smi_code) {
      Label slow_case;
      __ movq(rdx, Operand(rsp, 0));  // Switch value.
      __ JumpIfNotBothSmi(rdx, rax, &slow_case);
      __ SmiCompare(rdx, rax);
      __ j(not_equal, &next_test);
@@ -691,7 +689,10 @@
      __ bind(&slow_case);
    }

    CompareStub stub(equal, true);
    CompareFlags flags = inline_smi_code
        ? NO_SMI_COMPARE_IN_STUB
        : NO_COMPARE_FLAGS;
    CompareStub stub(equal, true, flags);
    __ CallStub(&stub);
    __ testq(rax, rax);
    __ j(not_equal, &next_test);
@@ -749,11 +750,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
  __ bind(&done_convert);
  __ push(rax);

  // TODO(kasperl): Check cache validity in generated code. This is a
  // fast case for the JSObject::IsSimpleEnum cache validity
  // checks. If we cannot guarantee cache validity, call the runtime
  // system to check cache validity or get the property names in a
  // fixed array.
  // BUG(867): Check cache validity in generated code. This is a fast
  // case for the JSObject::IsSimpleEnum cache validity checks. If we
  // cannot guarantee cache validity, call the runtime system to check
  // cache validity or get the property names in a fixed array.

  // Get the set of properties to enumerate.
  __ push(rax);  // Duplicate the enumerable object on the stack.
@@ -881,6 +881,153 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}


void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
    Slot* slot,
    TypeofState typeof_state,
    Label* slow) {
  Register context = rsi;
  Register temp = rdx;

  Scope* s = scope();
  while (s != NULL) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
        __ j(not_equal, slow);
      }
      // Load next context in chain.
      __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
      // Walk the rest of the chain without clobbering rsi.
      context = temp;
    }
    // If no outer scope calls eval, we do not need to check more
    // context extensions. If we have reached an eval scope, we check
    // all extensions from this point.
    if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
    s = s->outer_scope();
  }

  if (s != NULL && s->is_eval_scope()) {
    // Loop up the context chain. There is no frame effect so it is
    // safe to use raw labels here.
    Label next, fast;
    if (!context.is(temp)) {
      __ movq(temp, context);
    }
    // Load map for comparison into register, outside loop.
    __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
    __ bind(&next);
    // Terminate at global context.
    __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
    __ j(equal, &fast);
    // Check that extension is NULL.
    __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
    __ j(not_equal, slow);
    // Load next context in chain.
    __ movq(temp, ContextOperand(temp, Context::CLOSURE_INDEX));
    __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
    __ jmp(&next);
    __ bind(&fast);
  }

  // All extension objects were empty and it is safe to use a global
  // load IC call.
  __ movq(rax, CodeGenerator::GlobalObject());
  __ Move(rcx, slot->var()->name());
  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
  RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
      ? RelocInfo::CODE_TARGET
      : RelocInfo::CODE_TARGET_CONTEXT;
  __ call(ic, mode);
  __ nop();  // Signal no inlined code.
}


MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
    Slot* slot,
    Label* slow) {
  ASSERT(slot->type() == Slot::CONTEXT);
  Register context = rsi;
  Register temp = rbx;

  for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
    if (s->num_heap_slots() > 0) {
      if (s->calls_eval()) {
        // Check that extension is NULL.
        __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
                Immediate(0));
        __ j(not_equal, slow);
      }
      __ movq(temp, ContextOperand(context, Context::CLOSURE_INDEX));
      __ movq(temp, FieldOperand(temp, JSFunction::kContextOffset));
      // Walk the rest of the chain without clobbering rsi.
      context = temp;
    }
  }
  // Check that last extension is NULL.
  __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
  __ j(not_equal, slow);
  __ movq(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
  return ContextOperand(temp, slot->index());
}


void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
    Slot* slot,
    TypeofState typeof_state,
    Label* slow,
    Label* done) {
  // Generate fast-case code for variables that might be shadowed by
  // eval-introduced variables. Eval is used a lot without
  // introducing variables. In those cases, we do not want to
  // perform a runtime call for all variables in the scope
  // containing the eval.
  if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
    EmitLoadGlobalSlotCheckExtensions(slot, typeof_state, slow);
    __ jmp(done);
  } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
    Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
    Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
    if (potential_slot != NULL) {
      // Generate fast case for locals that rewrite to slots.
      __ movq(rax,
              ContextSlotOperandCheckExtensions(potential_slot, slow));
      if (potential_slot->var()->mode() == Variable::CONST) {
        __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
        __ j(not_equal, done);
        __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
      }
      __ jmp(done);
    } else if (rewrite != NULL) {
      // Generate fast case for calls of an argument function.
      Property* property = rewrite->AsProperty();
      if (property != NULL) {
        VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
        Literal* key_literal = property->key()->AsLiteral();
        if (obj_proxy != NULL &&
            key_literal != NULL &&
            obj_proxy->IsArguments() &&
            key_literal->handle()->IsSmi()) {
          // Load arguments object if there are no eval-introduced
// variables. Then load the argument from the arguments
|
||||
// object using keyed load.
|
||||
__ movq(rdx,
|
||||
ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
|
||||
slow));
|
||||
__ Move(rax, key_literal->handle());
|
||||
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
|
||||
__ call(ic, RelocInfo::CODE_TARGET);
|
||||
__ jmp(done);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
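
The three helpers above all guard against the same hazard: eval can add bindings to a context extension object at runtime, shadowing the variable the generated fast path wants to load. A minimal JavaScript illustration of the shadowing that the NULL-extension checks rule out (the names are invented for the example):

var x = "global";
function f() {
  eval("var x = 'eval-introduced';");  // extends f's context object
  return x;  // must resolve to the eval-introduced binding
}
// f() returns "eval-introduced", so a load of x inside f may only take
// the fast global-load path after verifying that every context
// extension on the chain is still NULL.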


void FullCodeGenerator::EmitVariableLoad(Variable* var,
                                         Expression::Context context) {
  // Four cases: non-this global variables, lookup slots, all other
@@ -904,10 +1051,19 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
    Apply(context, rax);

  } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
    Label done, slow;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    EmitDynamicLoadFromSlotFastCase(slot, NOT_INSIDE_TYPEOF, &slow, &done);

    __ bind(&slow);
    Comment cmnt(masm_, "Lookup slot");
    __ push(rsi);  // Context.
    __ Push(var->name());
    __ CallRuntime(Runtime::kLoadContextSlot, 2);
    __ bind(&done);

    Apply(context, rax);

  } else if (slot != NULL) {
@@ -1713,15 +1869,42 @@ void FullCodeGenerator::VisitCall(Call* expr) {
    EmitCallWithIC(expr, var->name(), RelocInfo::CODE_TARGET_CONTEXT);
  } else if (var != NULL && var->slot() != NULL &&
             var->slot()->type() == Slot::LOOKUP) {
    // Call to a lookup slot (dynamically introduced variable). Call
    // the runtime to find the function to call (returned in rax) and
    // the object holding it (returned in rdx).
    // Call to a lookup slot (dynamically introduced variable).
    Label slow, done;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    EmitDynamicLoadFromSlotFastCase(var->slot(),
                                    NOT_INSIDE_TYPEOF,
                                    &slow,
                                    &done);

    __ bind(&slow);
    // Call the runtime to find the function to call (returned in rax)
    // and the object holding it (returned in rdx).
    __ push(context_register());
    __ Push(var->name());
    __ CallRuntime(Runtime::kLoadContextSlot, 2);
    __ push(rax);  // Function.
    __ push(rdx);  // Receiver.

    // If fast case code has been generated, emit code to push the
    // function and receiver and have the slow path jump around this
    // code.
    if (done.is_linked()) {
      Label call;
      __ jmp(&call);
      __ bind(&done);
      // Push function.
      __ push(rax);
      // Push global receiver.
      __ movq(rbx, CodeGenerator::GlobalObject());
      __ push(FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
      __ bind(&call);
    }

    EmitCallWithStub(expr);

  } else if (fun->AsProperty() != NULL) {
    // Call to an object property.
    Property* prop = fun->AsProperty();
@@ -2522,12 +2705,11 @@ void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
  Register key = rax;
  Register cache = rbx;
  Register tmp = rcx;
  __ movq(cache, CodeGenerator::ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(cache, ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(cache,
          FieldOperand(cache, GlobalObject::kGlobalContextOffset));
  __ movq(cache,
          CodeGenerator::ContextOperand(
              cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
          ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
  __ movq(cache,
          FieldOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));

@@ -2777,7 +2959,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      bool can_overwrite = expr->expression()->ResultOverwriteAllowed();
      UnaryOverwriteMode overwrite =
          can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
      GenericUnaryOpStub stub(Token::SUB, overwrite);
      GenericUnaryOpStub stub(Token::SUB, overwrite, NO_UNARY_FLAGS);
      // GenericUnaryOpStub expects the argument to be in the
      // accumulator register rax.
      VisitForValue(expr->expression(), kAccumulator);
@@ -2792,7 +2974,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      // in the accumulator register rax.
      VisitForValue(expr->expression(), kAccumulator);
      Label done;
      if (ShouldInlineSmiCase(expr->op())) {
      bool inline_smi_case = ShouldInlineSmiCase(expr->op());
      if (inline_smi_case) {
        Label call_stub;
        __ JumpIfNotSmi(rax, &call_stub);
        __ SmiNot(rax, rax);
@@ -2802,7 +2985,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
      bool overwrite = expr->expression()->ResultOverwriteAllowed();
      UnaryOverwriteMode mode =
          overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
      GenericUnaryOpStub stub(Token::BIT_NOT, mode);
      UnaryOpFlags flags = inline_smi_case
          ? NO_UNARY_SMI_CODE_IN_STUB
          : NO_UNARY_FLAGS;
      GenericUnaryOpStub stub(Token::BIT_NOT, mode, flags);
      __ CallStub(&stub);
      __ bind(&done);
      Apply(context_, rax);
@@ -2997,13 +3183,24 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr, Location where) {
    // Use a regular load, not a contextual load, to avoid a reference
    // error.
    __ Call(ic, RelocInfo::CODE_TARGET);
    __ nop();  // Signal no inlined code.
    if (where == kStack) __ push(rax);
  } else if (proxy != NULL &&
             proxy->var()->slot() != NULL &&
             proxy->var()->slot()->type() == Slot::LOOKUP) {
    Label done, slow;

    // Generate code for loading from variables potentially shadowed
    // by eval-introduced variables.
    Slot* slot = proxy->var()->slot();
    EmitDynamicLoadFromSlotFastCase(slot, INSIDE_TYPEOF, &slow, &done);

    __ bind(&slow);
    __ push(rsi);
    __ Push(proxy->name());
    __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
    __ bind(&done);

    if (where == kStack) __ push(rax);
  } else {
    // This expression cannot throw a reference error at the top level.
@@ -3174,7 +3371,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
      UNREACHABLE();
  }

  if (ShouldInlineSmiCase(op)) {
  bool inline_smi_code = ShouldInlineSmiCase(op);
  if (inline_smi_code) {
    Label slow_case;
    __ JumpIfNotBothSmi(rax, rdx, &slow_case);
    __ SmiCompare(rdx, rax);
@@ -3182,7 +3380,10 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
    __ bind(&slow_case);
  }

  CompareStub stub(cc, strict);
  CompareFlags flags = inline_smi_code
      ? NO_SMI_COMPARE_IN_STUB
      : NO_COMPARE_FLAGS;
  CompareStub stub(cc, strict, flags);
  __ CallStub(&stub);
  __ testq(rax, rax);
  Split(cc, if_true, if_false, fall_through);
@@ -3243,7 +3444,7 @@ void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {


void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
  __ movq(dst, CodeGenerator::ContextOperand(rsi, context_index));
  __ movq(dst, ContextOperand(rsi, context_index));
}


216
deps/v8/src/x64/stub-cache-x64.cc
vendored
@@ -821,6 +821,59 @@ void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
}


void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
                                                   JSObject* holder,
                                                   String* name,
                                                   Label* miss) {
  ASSERT(holder->IsGlobalObject());

  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));

  // If the object is the holder then we know that it's a global
  // object which can only happen for contextual calls. In this case,
  // the receiver cannot be a smi.
  if (object != holder) {
    __ JumpIfSmi(rdx, miss);
  }

  // Check that the maps haven't changed.
  CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, miss);
}


void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
                                                    JSFunction* function,
                                                    Label* miss) {
  // Get the value from the cell.
  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
  __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (Heap::InNewSpace(function)) {
    // We can't embed a pointer to a function in new space so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ JumpIfSmi(rdi, miss);
    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
    __ j(not_equal, miss);

    // Check the shared function info. Make sure it hasn't changed.
    __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
    __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
    __ j(not_equal, miss);
  } else {
    __ Cmp(rdi, Handle<JSFunction>(function));
    __ j(not_equal, miss);
  }
}
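
The new-space branch compares shared function infos rather than function objects; as the comment notes, that lets every closure created from the same function literal hit the same call IC. A small JavaScript sketch of the situation the comment describes (the helper names are illustrative only):

function makeAdder(n) {
  return function(x) { return x + n; };  // one literal, many closures
}
var add1 = makeAdder(1);
var add2 = makeAdder(2);
// add1 !== add2, but both closures come from the same function literal
// and therefore share one SharedFunctionInfo; a stub keyed on that
// info can serve calls through either closure.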


Object* CallStubCompiler::GenerateMissBranch() {
  Object* obj = StubCache::ComputeCallMiss(arguments().immediate(), kind_);
  if (obj->IsFailure()) return obj;
@@ -847,12 +900,10 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
  SharedFunctionInfo* function_info = function->shared();
  if (function_info->HasCustomCallGenerator()) {
    const int id = function_info->custom_call_generator_id();
    Object* result =
        CompileCustomCall(id, object, holder, function, name, check);
    Object* result = CompileCustomCall(
        id, object, holder, NULL, function, name);
    // undefined means bail out to regular compiler.
    if (!result->IsUndefined()) {
      return result;
    }
    if (!result->IsUndefined()) return result;
  }

  Label miss_in_smi_check;
@@ -1043,9 +1094,9 @@ Object* CallStubCompiler::CompileCallField(JSObject* object,

Object* CallStubCompiler::CompileArrayPushCall(Object* object,
                                               JSObject* holder,
                                               JSGlobalPropertyCell* cell,
                                               JSFunction* function,
                                               String* name,
                                               CheckType check) {
                                               String* name) {
  // ----------- S t a t e -------------
  //  -- rcx                 : name
  //  -- rsp[0]              : return address
@@ -1053,12 +1104,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
  //  -- ...
  //  -- rsp[(argc + 1) * 8] : receiver
  // -----------------------------------
  ASSERT(check == RECEIVER_MAP_CHECK);

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray()) {
    return Heap::undefined_value();
  }
  if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();

  Label miss;

@@ -1204,9 +1252,9 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,

Object* CallStubCompiler::CompileArrayPopCall(Object* object,
                                              JSObject* holder,
                                              JSGlobalPropertyCell* cell,
                                              JSFunction* function,
                                              String* name,
                                              CheckType check) {
                                              String* name) {
  // ----------- S t a t e -------------
  //  -- rcx                 : name
  //  -- rsp[0]              : return address
@@ -1214,12 +1262,9 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
  //  -- ...
  //  -- rsp[(argc + 1) * 8] : receiver
  // -----------------------------------
  ASSERT(check == RECEIVER_MAP_CHECK);

  // If object is not an array, bail out to regular call.
  if (!object->IsJSArray()) {
    return Heap::undefined_value();
  }
  if (!object->IsJSArray() || cell != NULL) return Heap::undefined_value();

  Label miss, return_undefined, call_builtin;

@@ -1289,9 +1334,9 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,

Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
                                                  JSObject* holder,
                                                  JSGlobalPropertyCell* cell,
                                                  JSFunction* function,
                                                  String* name,
                                                  CheckType check) {
                                                  String* name) {
  // ----------- S t a t e -------------
  //  -- rcx                 : function name
  //  -- rsp[0]              : return address
@@ -1301,7 +1346,7 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
  // -----------------------------------

  // If object is not a string, bail out to regular call.
  if (!object->IsString()) return Heap::undefined_value();
  if (!object->IsString() || cell != NULL) return Heap::undefined_value();

  const int argc = arguments().immediate();

@@ -1358,11 +1403,12 @@ Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
}


Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
                                                      JSObject* holder,
                                                      JSFunction* function,
                                                      String* name,
                                                      CheckType check) {
Object* CallStubCompiler::CompileStringCharCodeAtCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  // ----------- S t a t e -------------
  //  -- rcx                 : function name
  //  -- rsp[0]              : return address
@@ -1372,7 +1418,7 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
  // -----------------------------------

  // If object is not a string, bail out to regular call.
  if (!object->IsString()) return Heap::undefined_value();
  if (!object->IsString() || cell != NULL) return Heap::undefined_value();

  const int argc = arguments().immediate();

@@ -1426,6 +1472,75 @@ Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
}


Object* CallStubCompiler::CompileStringFromCharCodeCall(
    Object* object,
    JSObject* holder,
    JSGlobalPropertyCell* cell,
    JSFunction* function,
    String* name) {
  // ----------- S t a t e -------------
  //  -- rcx                 : function name
  //  -- rsp[0]              : return address
  //  -- rsp[(argc - n) * 8] : arg[n] (zero-based)
  //  -- ...
  //  -- rsp[(argc + 1) * 8] : receiver
  // -----------------------------------

  const int argc = arguments().immediate();

  // If the object is not a JSObject or we got an unexpected number of
  // arguments, bail out to the regular call.
  if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();

  Label miss;
  GenerateNameCheck(name, &miss);

  if (cell == NULL) {
    __ movq(rdx, Operand(rsp, 2 * kPointerSize));

    __ JumpIfSmi(rdx, &miss);

    CheckPrototypes(JSObject::cast(object), rdx, holder, rbx, rax, rdi, name,
                    &miss);
  } else {
    ASSERT(cell->value() == function);
    GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
    GenerateLoadFunctionFromCell(cell, function, &miss);
  }

  // Load the char code argument.
  Register code = rbx;
  __ movq(code, Operand(rsp, 1 * kPointerSize));

  // Check the code is a smi.
  Label slow;
  __ JumpIfNotSmi(code, &slow);

  // Convert the smi code to uint16.
  __ SmiAndConstant(code, code, Smi::FromInt(0xffff));

  StringCharFromCodeGenerator char_from_code_generator(code, rax);
  char_from_code_generator.GenerateFast(masm());
  __ ret(2 * kPointerSize);

  ICRuntimeCallHelper call_helper;
  char_from_code_generator.GenerateSlow(masm(), call_helper);

  // Tail call the full function. We do not have to patch the receiver
  // because the function makes no use of it.
  __ bind(&slow);
  __ InvokeFunction(function, arguments(), JUMP_FUNCTION);

  __ bind(&miss);
  // rcx: function name.
  Object* obj = GenerateMissBranch();
  if (obj->IsFailure()) return obj;

  // Return the generated code.
  return (cell == NULL) ? GetCode(function) : GetCode(NORMAL, name);
}
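
The SmiAndConstant masking above implements, for the smi fast path, the ToUint16 coercion that String.fromCharCode applies to each argument (ES5 15.5.3.2). A quick JavaScript cross-check of the observable behavior:

var code = 0x1234 + 0x10000;  // outside the uint16 range
var masked = code & 0xffff;   // 0x1234
// Both calls produce "\u1234", matching the masking done in the stub:
// String.fromCharCode(code) === String.fromCharCode(masked)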


Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
                                                 JSObject* holder,
                                                 String* name) {
@@ -1498,7 +1613,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
                                            JSFunction* function,
                                            String* name) {
  // ----------- S t a t e -------------
  // -----------------------------------
  // rcx                 : function name
  // rsp[0]              : return address
  // rsp[8]              : argument argc
@@ -1506,6 +1620,17 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  // ...
  // rsp[argc * 8]       : argument 1
  // rsp[(argc + 1) * 8] : argument 0 = receiver
  // -----------------------------------

  SharedFunctionInfo* function_info = function->shared();
  if (function_info->HasCustomCallGenerator()) {
    const int id = function_info->custom_call_generator_id();
    Object* result = CompileCustomCall(
        id, object, holder, cell, function, name);
    // undefined means bail out to regular compiler.
    if (!result->IsUndefined()) return result;
  }

  Label miss;

  GenerateNameCheck(name, &miss);
@@ -1513,42 +1638,9 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  // Get the number of arguments.
  const int argc = arguments().immediate();

  // Get the receiver from the stack.
  __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
  GenerateGlobalReceiverCheck(object, holder, name, &miss);

  // If the object is the holder then we know that it's a global
  // object which can only happen for contextual calls. In this case,
  // the receiver cannot be a smi.
  if (object != holder) {
    __ JumpIfSmi(rdx, &miss);
  }

  // Check that the maps haven't changed.
  CheckPrototypes(object, rdx, holder, rbx, rax, rdi, name, &miss);

  // Get the value from the cell.
  __ Move(rdi, Handle<JSGlobalPropertyCell>(cell));
  __ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));

  // Check that the cell contains the same function.
  if (Heap::InNewSpace(function)) {
    // We can't embed a pointer to a function in new space so we have
    // to verify that the shared function info is unchanged. This has
    // the nice side effect that multiple closures based on the same
    // function can all use this call IC. Before we load through the
    // function, we have to verify that it still is a function.
    __ JumpIfSmi(rdi, &miss);
    __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rax);
    __ j(not_equal, &miss);

    // Check the shared function info. Make sure it hasn't changed.
    __ Move(rax, Handle<SharedFunctionInfo>(function->shared()));
    __ cmpq(FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset), rax);
    __ j(not_equal, &miss);
  } else {
    __ Cmp(rdi, Handle<JSFunction>(function));
    __ j(not_equal, &miss);
  }
  GenerateLoadFunctionFromCell(cell, function, &miss);

  // Patch the receiver on the stack with the global proxy.
  if (object->IsGlobalObject()) {
152
deps/v8/test/cctest/test-heap-profiler.cc
vendored
@@ -989,4 +989,156 @@ TEST(AggregatedHeapSnapshot) {
  CHECK(IsNodeRetainedAs(a_from_b, 1));  // B has 1 ref to A.
}

namespace {

class TestJSONStream : public v8::OutputStream {
 public:
  TestJSONStream() : eos_signaled_(0), abort_countdown_(-1) {}
  explicit TestJSONStream(int abort_countdown)
      : eos_signaled_(0), abort_countdown_(abort_countdown) {}
  virtual ~TestJSONStream() {}
  virtual void EndOfStream() { ++eos_signaled_; }
  virtual WriteResult WriteAsciiChunk(char* buffer, int chars_written) {
    if (abort_countdown_ > 0) --abort_countdown_;
    if (abort_countdown_ == 0) return kAbort;
    CHECK_GT(chars_written, 0);
    i::Vector<char> chunk = buffer_.AddBlock(chars_written, '\0');
    memcpy(chunk.start(), buffer, chars_written);
    return kContinue;
  }
  void WriteTo(i::Vector<char> dest) { buffer_.WriteTo(dest); }
  int eos_signaled() { return eos_signaled_; }
  int size() { return buffer_.size(); }
 private:
  i::Collector<char> buffer_;
  int eos_signaled_;
  int abort_countdown_;
};

class AsciiResource: public v8::String::ExternalAsciiStringResource {
 public:
  explicit AsciiResource(i::Vector<char> string): data_(string.start()) {
    length_ = string.length();
  }
  virtual const char* data() const { return data_; }
  virtual size_t length() const { return length_; }
 private:
  const char* data_;
  size_t length_;
};

}  // namespace

TEST(HeapSnapshotJSONSerialization) {
  v8::HandleScope scope;
  LocalContext env;

#define STRING_LITERAL_FOR_TEST \
  "\"String \\n\\r\\u0008\\u0081\\u0101\\u0801\\u8001\""
  CompileAndRunScript(
      "function A(s) { this.s = s; }\n"
      "function B(x) { this.x = x; }\n"
      "var a = new A(" STRING_LITERAL_FOR_TEST ");\n"
      "var b = new B(a);");
  const v8::HeapSnapshot* snapshot =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("json"));
  TestJSONStream stream;
  snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
  CHECK_GT(stream.size(), 0);
  CHECK_EQ(1, stream.eos_signaled());
  i::ScopedVector<char> json(stream.size());
  stream.WriteTo(json);

  // Verify that snapshot string is valid JSON.
  AsciiResource json_res(json);
  v8::Local<v8::String> json_string = v8::String::NewExternal(&json_res);
  env->Global()->Set(v8::String::New("json_snapshot"), json_string);
  v8::Local<v8::Value> snapshot_parse_result = CompileRun(
      "var parsed = JSON.parse(json_snapshot); true;");
  CHECK(!snapshot_parse_result.IsEmpty());

  // Verify that snapshot object has required fields.
  v8::Local<v8::Object> parsed_snapshot =
      env->Global()->Get(v8::String::New("parsed"))->ToObject();
  CHECK(parsed_snapshot->Has(v8::String::New("snapshot")));
  CHECK(parsed_snapshot->Has(v8::String::New("nodes")));
  CHECK(parsed_snapshot->Has(v8::String::New("strings")));

  // Verify that nodes meta-info is valid JSON.
  v8::Local<v8::Value> nodes_meta_parse_result = CompileRun(
      "var parsed_meta = JSON.parse(parsed.nodes[0]); true;");
  CHECK(!nodes_meta_parse_result.IsEmpty());

  // Get node and edge "member" offsets.
  v8::Local<v8::Value> meta_analysis_result = CompileRun(
      "var children_count_offset ="
      "    parsed_meta.fields.indexOf('children_count');\n"
      "var children_offset ="
      "    parsed_meta.fields.indexOf('children');\n"
      "var children_meta ="
      "    parsed_meta.types[children_offset];\n"
      "var child_fields_count = children_meta.fields.length;\n"
      "var child_type_offset ="
      "    children_meta.fields.indexOf('type');\n"
      "var child_name_offset ="
      "    children_meta.fields.indexOf('name_or_index');\n"
      "var child_to_node_offset ="
      "    children_meta.fields.indexOf('to_node');\n"
      "var property_type ="
      "    children_meta.types[child_type_offset].indexOf('property');");
  CHECK(!meta_analysis_result.IsEmpty());

  // A helper function for processing encoded nodes.
  CompileRun(
      "function GetChildPosByProperty(pos, prop_name) {\n"
      "  var nodes = parsed.nodes;\n"
      "  var strings = parsed.strings;\n"
      "  for (var i = 0,\n"
      "      count = nodes[pos + children_count_offset] * child_fields_count;\n"
      "      i < count; i += child_fields_count) {\n"
      "    var child_pos = pos + children_offset + i;\n"
      "    if (nodes[child_pos + child_type_offset] === property_type\n"
      "       && strings[nodes[child_pos + child_name_offset]] === prop_name)\n"
      "        return nodes[child_pos + child_to_node_offset];\n"
      "  }\n"
      "  return null;\n"
      "}\n");
  // Get the string index using the path: <root> -> <global>.b.x.s
  v8::Local<v8::Value> string_obj_pos_val = CompileRun(
      "GetChildPosByProperty(\n"
      "  GetChildPosByProperty(\n"
      "    GetChildPosByProperty("
      "      parsed.nodes[1 + children_offset + child_to_node_offset],\"b\"),\n"
      "  \"x\"),"
      "  \"s\")");
  CHECK(!string_obj_pos_val.IsEmpty());
  int string_obj_pos =
      static_cast<int>(string_obj_pos_val->ToNumber()->Value());
  v8::Local<v8::Object> nodes_array =
      parsed_snapshot->Get(v8::String::New("nodes"))->ToObject();
  int string_index = static_cast<int>(
      nodes_array->Get(string_obj_pos + 1)->ToNumber()->Value());
  CHECK_GT(string_index, 0);
  v8::Local<v8::Object> strings_array =
      parsed_snapshot->Get(v8::String::New("strings"))->ToObject();
  v8::Local<v8::String> string = strings_array->Get(string_index)->ToString();
  v8::Local<v8::String> ref_string =
      CompileRun(STRING_LITERAL_FOR_TEST)->ToString();
#undef STRING_LITERAL_FOR_TEST
  CHECK_EQ(*v8::String::Utf8Value(ref_string),
           *v8::String::Utf8Value(string));
}
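
Beyond validating the serializer, the test doubles as documentation of the JSON layout: the top-level object carries "snapshot", "nodes" and "strings", and nodes[0] is itself a JSON string describing the field layout of the flat numeric node data that follows. A minimal consumer sketch built only on the layout exercised above (the variable names are hypothetical):

// 'json' is the serialized snapshot text, as produced by Serialize().
var snapshot = JSON.parse(json);
var meta = JSON.parse(snapshot.nodes[0]);  // field names and types
var childrenCountOffset = meta.fields.indexOf('children_count');
// String-valued node fields hold indices into snapshot.strings.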


TEST(HeapSnapshotJSONSerializationAborting) {
  v8::HandleScope scope;
  LocalContext env;
  const v8::HeapSnapshot* snapshot =
      v8::HeapProfiler::TakeSnapshot(v8::String::New("abort"));
  TestJSONStream stream(5);
  snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
  CHECK_GT(stream.size(), 0);
  CHECK_EQ(0, stream.eos_signaled());
}

#endif  // ENABLE_LOGGING_AND_PROFILING
154
deps/v8/test/mjsunit/array-indexing.js
vendored
@@ -26,41 +26,161 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

var array = [1,2,3,1,2,3,1,2,3,1,2,3];
var undef_array = [0,,2,undefined,4,,6,undefined,8,,10];
// Sparse arrays with length 42000.
var sparse_array = [];
sparse_array[100] = 3;
sparse_array[200] = undefined;
sparse_array[300] = 4;
sparse_array[400] = 5;
sparse_array[500] = 6;
sparse_array[600] = 5;
sparse_array[700] = 4;
sparse_array[800] = undefined;
sparse_array[900] = 3;
sparse_array[41999] = "filler";

var dense_object = { 0: 42, 1: 37, length: 2 };
var sparse_object = { 0: 42, 100000: 37, length: 200000 };
var funky_object = { 10: 42, 100000: 42, 100001: 37, length: 50000 };
var infinite_object = { 10: 42, 100000: 37, length: Infinity };

// ----------------------------------------------------------------------
// Array.prototype.indexOf.
// ----------------------------------------------------------------------

// Negative cases.
assertEquals([].indexOf(1), -1);
assertEquals(array.indexOf(4), -1);
assertEquals(array.indexOf(3, array.length), -1);
assertEquals(-1, [].indexOf(1));
assertEquals(-1, array.indexOf(4));
assertEquals(-1, array.indexOf(3, array.length));

assertEquals(array.indexOf(3), 2);
assertEquals(2, array.indexOf(3));
// Negative index out of range.
assertEquals(array.indexOf(1, -17), 0);
assertEquals(0, array.indexOf(1, -17));
// Negative index in range.
assertEquals(array.indexOf(1, -11), 3);
assertEquals(3, array.indexOf(1, -11));
// Index in range.
assertEquals(array.indexOf(1, 1), 3);
assertEquals(array.indexOf(1, 3), 3);
assertEquals(array.indexOf(1, 4), 6);
assertEquals(3, array.indexOf(1, 1));
assertEquals(3, array.indexOf(1, 3));
assertEquals(6, array.indexOf(1, 4));

// Find undefined, not holes.
assertEquals(3, undef_array.indexOf(undefined));
assertEquals(3, undef_array.indexOf(undefined, 3));
assertEquals(7, undef_array.indexOf(undefined, 4));
assertEquals(7, undef_array.indexOf(undefined, 7));
assertEquals(-1, undef_array.indexOf(undefined, 8));
assertEquals(3, undef_array.indexOf(undefined, -11));
assertEquals(3, undef_array.indexOf(undefined, -8));
assertEquals(7, undef_array.indexOf(undefined, -7));
assertEquals(7, undef_array.indexOf(undefined, -4));
assertEquals(-1, undef_array.indexOf(undefined, -3));

// Find in sparse array.
assertEquals(100, sparse_array.indexOf(3));
assertEquals(900, sparse_array.indexOf(3, 101));
assertEquals(-1, sparse_array.indexOf(3, 901));
assertEquals(100, sparse_array.indexOf(3, -42000));
assertEquals(900, sparse_array.indexOf(3, 101 - 42000));
assertEquals(-1, sparse_array.indexOf(3, 901 - 42000));

assertEquals(300, sparse_array.indexOf(4));
assertEquals(700, sparse_array.indexOf(4, 301));
assertEquals(-1, sparse_array.indexOf(4, 701));
assertEquals(300, sparse_array.indexOf(4, -42000));
assertEquals(700, sparse_array.indexOf(4, 301 - 42000));
assertEquals(-1, sparse_array.indexOf(4, 701 - 42000));

assertEquals(200, sparse_array.indexOf(undefined));
assertEquals(800, sparse_array.indexOf(undefined, 201));
assertEquals(-1, sparse_array.indexOf(undefined, 801));
assertEquals(200, sparse_array.indexOf(undefined, -42000));
assertEquals(800, sparse_array.indexOf(undefined, 201 - 42000));
assertEquals(-1, sparse_array.indexOf(undefined, 801 - 42000));

// Find in non-arrays.
assertEquals(0, Array.prototype.indexOf.call(dense_object, 42));
assertEquals(1, Array.prototype.indexOf.call(dense_object, 37));
assertEquals(-1, Array.prototype.indexOf.call(dense_object, 87));

assertEquals(0, Array.prototype.indexOf.call(sparse_object, 42));
assertEquals(100000, Array.prototype.indexOf.call(sparse_object, 37));
assertEquals(-1, Array.prototype.indexOf.call(sparse_object, 87));

assertEquals(10, Array.prototype.indexOf.call(funky_object, 42));
assertEquals(-1, Array.prototype.indexOf.call(funky_object, 42, 15));
assertEquals(-1, Array.prototype.indexOf.call(funky_object, 37));

assertEquals(-1, Array.prototype.indexOf.call(infinite_object, 42));
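
The fromIndex cases above follow ES5 15.4.4.14: a negative fromIndex is added to the length and clamped at zero. Worked out for the 12-element 'array' defined at the top of the file:

var len = array.length;           // 12
Math.max(0, len + (-17));         // 0 -> indexOf(1, -17) === 0
len + (-11);                      // 1 -> indexOf(1, -11) === 3,
                                  //      the first 1 at or after index 1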

// ----------------------------------------------------------------------
// Array.prototype.lastIndexOf.
// ----------------------------------------------------------------------

// Negative cases.
assertEquals([].lastIndexOf(1), -1);
assertEquals(array.lastIndexOf(1, -17), -1);
assertEquals(-1, [].lastIndexOf(1));
assertEquals(-1, array.lastIndexOf(1, -17));

assertEquals(array.lastIndexOf(1), 9);
assertEquals(9, array.lastIndexOf(1));
// Index out of range.
assertEquals(array.lastIndexOf(1, array.length), 9);
assertEquals(9, array.lastIndexOf(1, array.length));
// Index in range.
assertEquals(array.lastIndexOf(1, 2), 0);
assertEquals(array.lastIndexOf(1, 4), 3);
assertEquals(array.lastIndexOf(1, 3), 3);
assertEquals(0, array.lastIndexOf(1, 2));
assertEquals(3, array.lastIndexOf(1, 4));
assertEquals(3, array.lastIndexOf(1, 3));
// Negative index in range.
assertEquals(array.lastIndexOf(1, -11), 0);
assertEquals(0, array.lastIndexOf(1, -11));

// Find undefined, not holes.
assertEquals(7, undef_array.lastIndexOf(undefined));
assertEquals(-1, undef_array.lastIndexOf(undefined, 2));
assertEquals(3, undef_array.lastIndexOf(undefined, 3));
assertEquals(3, undef_array.lastIndexOf(undefined, 6));
assertEquals(7, undef_array.lastIndexOf(undefined, 7));
assertEquals(7, undef_array.lastIndexOf(undefined, -1));
assertEquals(-1, undef_array.lastIndexOf(undefined, -9));
assertEquals(3, undef_array.lastIndexOf(undefined, -8));
assertEquals(3, undef_array.lastIndexOf(undefined, -5));
assertEquals(7, undef_array.lastIndexOf(undefined, -4));

// Find in sparse array.
assertEquals(900, sparse_array.lastIndexOf(3));
assertEquals(100, sparse_array.lastIndexOf(3, 899));
assertEquals(-1, sparse_array.lastIndexOf(3, 99));
assertEquals(900, sparse_array.lastIndexOf(3, -1));
assertEquals(100, sparse_array.lastIndexOf(3, 899 - 42000));
assertEquals(-1, sparse_array.lastIndexOf(3, 99 - 42000));

assertEquals(700, sparse_array.lastIndexOf(4));
assertEquals(300, sparse_array.lastIndexOf(4, 699));
assertEquals(-1, sparse_array.lastIndexOf(4, 299));
assertEquals(700, sparse_array.lastIndexOf(4, -1));
assertEquals(300, sparse_array.lastIndexOf(4, 699 - 42000));
assertEquals(-1, sparse_array.lastIndexOf(4, 299 - 42000));

assertEquals(800, sparse_array.lastIndexOf(undefined));
assertEquals(200, sparse_array.lastIndexOf(undefined, 799));
assertEquals(-1, sparse_array.lastIndexOf(undefined, 199));
assertEquals(800, sparse_array.lastIndexOf(undefined, -1));
assertEquals(200, sparse_array.lastIndexOf(undefined, 799 - 42000));
assertEquals(-1, sparse_array.lastIndexOf(undefined, 199 - 42000));

assertEquals(0, Array.prototype.lastIndexOf.call(dense_object, 42));
assertEquals(1, Array.prototype.lastIndexOf.call(dense_object, 37));
assertEquals(0, Array.prototype.lastIndexOf.call(sparse_object, 42));
assertEquals(100000, Array.prototype.lastIndexOf.call(sparse_object, 37));

// Find in non-arrays.
assertEquals(0, Array.prototype.lastIndexOf.call(dense_object, 42));
assertEquals(1, Array.prototype.lastIndexOf.call(dense_object, 37));
assertEquals(-1, Array.prototype.lastIndexOf.call(dense_object, 87));

assertEquals(0, Array.prototype.lastIndexOf.call(sparse_object, 42));
assertEquals(100000, Array.prototype.lastIndexOf.call(sparse_object, 37));
assertEquals(-1, Array.prototype.lastIndexOf.call(sparse_object, 87));

assertEquals(10, Array.prototype.lastIndexOf.call(funky_object, 42, 15));
assertEquals(10, Array.prototype.lastIndexOf.call(funky_object, 42));
assertEquals(-1, Array.prototype.lastIndexOf.call(funky_object, 37));

assertEquals(-1, Array.prototype.lastIndexOf.call(infinite_object, 42));
37
deps/v8/test/mjsunit/regress/regress-857.js
vendored
Normal file
@@ -0,0 +1,37 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Make sure ES5 15.9.1.15 (ISO 8601 / RFC 3339) time zone offsets of
// the form "+09:00" & "-09:00" get parsed as expected.
assertEquals(1283326536000, Date.parse("2010-08-31T22:35:36-09:00"));
assertEquals(1283261736000, Date.parse("2010-08-31T22:35:36+09:00"));
assertEquals(1283326536000, Date.parse("2010-08-31T22:35:36.0-09:00"));
assertEquals(1283261736000, Date.parse("2010-08-31T22:35:36.0+09:00"));
// Colon-less time expressions in time zone offsets are not conformant
// with ES5 15.9.1.15 but are nonetheless supported in V8.
assertEquals(1283326536000, Date.parse("2010-08-31T22:35:36-0900"));
assertEquals(1283261736000, Date.parse("2010-08-31T22:35:36+0900"));
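
The expected millisecond values follow from normalizing each timestamp to UTC; a quick cross-check with Date.UTC (months are zero-based), equivalent to the assertions above:

// 22:35:36-09:00 on Aug 31 is 07:35:36 UTC on Sep 1.
assertEquals(1283326536000, Date.UTC(2010, 8, 1, 7, 35, 36));
// 22:35:36+09:00 on Aug 31 is 13:35:36 UTC the same day.
assertEquals(1283261736000, Date.UTC(2010, 7, 31, 13, 35, 36));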
89
deps/v8/test/mjsunit/string-fromcharcode.js
vendored
Normal file
@@ -0,0 +1,89 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Test String.fromCharCode.


// Test various receivers and arguments passed to String.fromCharCode.

Object.prototype.fromCharCode = function(x) { return this; };

var fcc = String.fromCharCode;
var fcc2 = fcc;

function constFun(x) { return function(y) { return x; }; }

function test(num) {
  assertEquals(" ", String.fromCharCode(0x20));
  assertEquals(" ", String.fromCharCode(0x20 + 0x10000));
  assertEquals(" ", String.fromCharCode(0x20 - 0x10000));
  assertEquals(" ", String.fromCharCode(0x20 + 0.5));

  assertEquals("\u1234", String.fromCharCode(0x1234));
  assertEquals("\u1234", String.fromCharCode(0x1234 + 0x10000));
  assertEquals("\u1234", String.fromCharCode(0x1234 - 0x10000));
  assertEquals("\u1234", String.fromCharCode(0x1234 + 0.5));

  assertEquals("  ", String.fromCharCode(0x20, 0x20));
  assertEquals("  ", String.fromCharCode(0x20 + 0.5, 0x20));

  assertEquals(" ", fcc(0x20));
  assertEquals(" ", fcc(0x20 + 0x10000));
  assertEquals(" ", fcc(0x20 - 0x10000));
  assertEquals(" ", fcc(0x20 + 0.5));

  assertEquals("\u1234", fcc(0x1234));
  assertEquals("\u1234", fcc(0x1234 + 0x10000));
  assertEquals("\u1234", fcc(0x1234 - 0x10000));
  assertEquals("\u1234", fcc(0x1234 + 0.5));

  assertEquals("  ", fcc(0x20, 0x20));
  assertEquals("  ", fcc(0x20 + 0.5, 0x20));

  var receiver = (num < 5) ? String : (num < 9) ? "dummy" : 42;
  fcc2 = (num < 5) ? fcc : (num < 9) ? constFun("dummy") : constFun(42);
  var expected = (num < 5) ? " " : (num < 9) ? "dummy" : 42;
  assertEquals(expected, receiver.fromCharCode(0x20));
  assertEquals(expected, receiver.fromCharCode(0x20 - 0x10000));
  assertEquals(expected, receiver.fromCharCode(0x20 + 0.5));
  assertEquals(expected, fcc2(0x20));
  assertEquals(expected, fcc2(0x20 - 0x10000));
  assertEquals(expected, fcc2(0x20 + 0.5));
}

// Use loop to test the custom IC.
for (var i = 0; i < 10; i++) {
  test(i);
}


// Test the custom IC works correctly when the map changes.
for (var i = 0; i < 10; i++) {
  var expected = (i < 5) ? " " : 42;
  if (i == 5) String.fromCharCode = function() { return 42; };
  assertEquals(expected, String.fromCharCode(0x20));
}