Upgrade V8 to 1.3.1
This commit is contained in:
parent 5373c6869a, commit 2ebd692151
deps/v8/ChangeLog (vendored): 47 changes

@@ -1,3 +1,50 @@
2009-07-30: Version 1.3.1

        Speed improvements to accessors and interceptors.

        Added support for capturing stack information on custom errors.

        Added support for morphing an object into a pixel array where its
        indexed properties are stored in an external byte array. Values written
        are always clamped to the 0..255 interval.

        Profiler on x64 now handles C/C++ functions from shared libraries.

        Changed the debugger to avoid stepping into function.call/apply if the
        function is a built-in.

        Initial implementation of constructor heap profile for JS objects.

        More fine grained control of profiling aspects through the API.

        Optimized the called as constructor check for API calls.


2009-07-27: Version 1.3.0

        Allowed RegExp objects to be called as functions (issue 132).

        Fixed issue where global property cells would escape after
        detaching the global object; see http://crbug.com/16276.

        Added support for stepping into setters and getters in the
        debugger.

        Changed the debugger to avoid stopping in its own JavaScript code
        and in the code of built-in functions.

        Fixed issue 345 by avoiding duplicate escaping labels.

        Fixed ARM code generator crash in short-circuited boolean
        expressions and added regression tests.

        Added an external allocation limit to avoid issues where small V8
        objects would hold on to large amounts of external memory without
        causing garbage collections.

        Finished more of the inline caching stubs for x64 targets.


2009-07-13: Version 1.2.14

        Added separate paged heap space for global property cells and
deps/v8/SConstruct (vendored): 75 changes

@@ -149,31 +149,22 @@ LIBRARY_FLAGS = {
                   '-Wstrict-aliasing=2'],
      'CPPPATH': ANDROID_INCLUDES,
    },
    'wordsize:32': {
      'arch:x64': {
        'CCFLAGS': ['-m64'],
        'LINKFLAGS': ['-m64']
      }
    },
    'wordsize:64': {
      'arch:ia32': {
        'CCFLAGS': ['-m32'],
        'LINKFLAGS': ['-m32']
      },
      'arch:arm': {
        'CCFLAGS': ['-m32'],
        'LINKFLAGS': ['-m32']
      }
    },
    'arch:ia32': {
      'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
      'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
      'CCFLAGS': ['-m32'],
      'LINKFLAGS': ['-m32']
    },
    'arch:arm': {
      'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
    },
    'simulator:arm': {
      'CCFLAGS': ['-m32'],
      'LINKFLAGS': ['-m32']
    },
    'arch:x64': {
      'CCFLAGS': ['-fno-strict-aliasing'],
      'CPPDEFINES': ['V8_TARGET_ARCH_X64']
      'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
      'CCFLAGS': ['-fno-strict-aliasing', '-m64'],
      'LINKFLAGS': ['-m64'],
    },
    'prof:oprofile': {
      'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
@@ -341,22 +332,6 @@ CCTEST_EXTRA_FLAGS = {
      'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
    }
  },
  'wordsize:32': {
    'arch:x64': {
      'CCFLAGS': ['-m64'],
      'LINKFLAGS': ['-m64']
    }
  },
  'wordsize:64': {
    'arch:ia32': {
      'CCFLAGS': ['-m32'],
      'LINKFLAGS': ['-m32']
    },
    'arch:arm': {
      'CCFLAGS': ['-m32'],
      'LINKFLAGS': ['-m32']
    }
  }
},
'msvc': {
  'all': {
@@ -408,21 +383,17 @@ SAMPLE_FLAGS = {
      'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
    }
  },
  'wordsize:32': {
    'arch:x64': {
      'CCFLAGS': ['-m64'],
      'LINKFLAGS': ['-m64']
    }
  'arch:ia32': {
    'CCFLAGS': ['-m32'],
    'LINKFLAGS': ['-m32']
  },
  'wordsize:64': {
    'arch:ia32': {
      'CCFLAGS': ['-m32'],
      'LINKFLAGS': ['-m32']
    },
    'arch:arm': {
      'CCFLAGS': ['-m32'],
      'LINKFLAGS': ['-m32']
    }
  'arch:x64': {
    'CCFLAGS': ['-m64'],
    'LINKFLAGS': ['-m64']
  },
  'simulator:arm': {
    'CCFLAGS': ['-m32'],
    'LINKFLAGS': ['-m32']
  },
  'mode:release': {
    'CCFLAGS': ['-O2']
@@ -533,7 +504,6 @@ def GuessToolchain(os):
OS_GUESS = utils.GuessOS()
TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
ARCH_GUESS = utils.GuessArchitecture()
WORDSIZE_GUESS = utils.GuessWordsize()


SIMPLE_OPTIONS = {
@@ -587,11 +557,6 @@ SIMPLE_OPTIONS = {
    'default': 'on',
    'help': 'use Microsoft Visual C++ link-time code generation'
  },
  'wordsize': {
    'values': ['64', '32'],
    'default': WORDSIZE_GUESS,
    'help': 'the word size'
  },
  'simulator': {
    'values': ['arm', 'none'],
    'default': 'none',
deps/v8/benchmarks/run.html (vendored, Executable file → Normal file): 32 changes

@@ -55,9 +55,35 @@ function Run() {
                       NotifyScore: AddScore });
}

function ShowWarningIfObsolete() {
  // If anything goes wrong we will just catch the exception and no
  // warning is shown, i.e., no harm is done.
  try {
    var xmlhttp;
    var next_version = parseInt(BenchmarkSuite.version) + 1;
    var next_version_url = "../v" + next_version + "/run.html";
    if (window.XMLHttpRequest) {
      xmlhttp = new window.XMLHttpRequest();
    } else if (window.ActiveXObject) {
      xmlhttp = new window.ActiveXObject("Microsoft.XMLHTTP");
    }
    xmlhttp.open('GET', next_version_url, true);
    xmlhttp.onreadystatechange = function() {
      if (xmlhttp.readyState == 4 && xmlhttp.status == 200) {
        document.getElementById('obsolete').style.display="block";
      }
    };
    xmlhttp.send(null);
  } catch(e) {
    // Ignore exception if check for next version fails.
    // Hence no warning is displayed.
  }
}

function Load() {
  var version = BenchmarkSuite.version;
  document.getElementById("version").innerHTML = version;
  ShowWarningIfObsolete();
  setTimeout(Run, 200);
}
</script>
@@ -65,6 +91,12 @@ function Load() {
<body onload="Load()">
  <div>
    <div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
    <div class="warning" id="obsolete">
      Warning! This is not the latest version of the V8 benchmark
      suite. Consider running the
      <a href="http://v8.googlecode.com/svn/data/benchmarks/current/run.html">
        latest version</a>.
    </div>
    <table>
      <tr>
        <td class="contents">
deps/v8/benchmarks/style.css (vendored, Executable file → Normal file): 9 changes

@@ -55,6 +55,15 @@ div.run {
  border: 1px solid rgb(51, 102, 204);
}

div.warning {
  background: #ffffd9;
  border: 1px solid #d2d26a;
  display: none;
  margin: 1em 0 2em;
  padding: 8px;
  text-align: center;
}

#status {
  text-align: center;
  margin-top: 50px;
deps/v8/include/v8.h (vendored): 76 changes

@@ -180,7 +180,7 @@ template <class T> class V8EXPORT_INLINE Handle {
  /**
   * Creates an empty handle.
   */
  Handle();
  inline Handle();

  /**
   * Creates a new handle for the specified value.
@@ -264,7 +264,7 @@ template <class T> class V8EXPORT_INLINE Handle {
 */
template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
 public:
  Local();
  inline Local();
  template <class S> inline Local(Local<S> that)
      : Handle<T>(reinterpret_cast<T*>(*that)) {
  /**
@@ -284,7 +284,7 @@ template <class T> class V8EXPORT_INLINE Local : public Handle<T> {
   * The referee is kept alive by the local handle even when
   * the original handle is destroyed/disposed.
   */
  static Local<T> New(Handle<T> that);
  inline static Local<T> New(Handle<T> that);
};


@@ -312,7 +312,7 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
   * Creates an empty persistent handle that doesn't point to any
   * storage cell.
   */
  Persistent();
  inline Persistent();

  /**
   * Creates a persistent handle for the same storage cell as the
@@ -353,7 +353,7 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
   * Creates a new persistent handle for an existing local or
   * persistent handle.
   */
  static Persistent<T> New(Handle<T> that);
  inline static Persistent<T> New(Handle<T> that);

  /**
   * Releases the storage cell referenced by this persistent handle.
@@ -361,7 +361,7 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
   * This handle's reference, and any other references to the storage
   * cell remain and IsEmpty will still return false.
   */
  void Dispose();
  inline void Dispose();

  /**
   * Make the reference to this object weak. When only weak handles
@@ -369,20 +369,20 @@ template <class T> class V8EXPORT_INLINE Persistent : public Handle<T> {
   * callback to the given V8::WeakReferenceCallback function, passing
   * it the object reference and the given parameters.
   */
  void MakeWeak(void* parameters, WeakReferenceCallback callback);
  inline void MakeWeak(void* parameters, WeakReferenceCallback callback);

  /** Clears the weak reference to this object. */
  void ClearWeak();
  inline void ClearWeak();

  /**
   * Checks if the handle holds the only reference to an object.
   */
  bool IsNearDeath() const;
  inline bool IsNearDeath() const;

  /**
   * Returns true if the handle's reference is weak.
   */
  bool IsWeak() const;
  inline bool IsWeak() const;

 private:
  friend class ImplementationUtilities;
@@ -1113,6 +1113,13 @@ class V8EXPORT Object : public Value {
  /** Sets the value in an internal field. */
  void SetInternalField(int index, Handle<Value> value);

  // The two functions below do not perform index bounds checks and
  // they do not check that the VM is still running. Use with caution.
  /** Gets a native pointer from an internal field. */
  void* GetPointerFromInternalField(int index);
  /** Sets a native pointer in an internal field. */
  void SetPointerInInternalField(int index, void* value);

  // Testers for local properties.
  bool HasRealNamedProperty(Handle<String> key);
  bool HasRealIndexedProperty(uint32_t index);
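A minimal sketch of how an embedder might pair these two new accessors to wrap
a native object (the NativeThing type and the template setup are hypothetical,
not part of this diff; error handling and HandleScope management are omitted):

    // Hypothetical embedder code against the v8.h API above (V8 1.3.x era).
    #include <v8.h>

    struct NativeThing { int payload; };

    v8::Local<v8::Object> Wrap(NativeThing* thing,
                               v8::Handle<v8::ObjectTemplate> templ) {
      // Assumes templ->SetInternalFieldCount(1) was called when the
      // template was configured.
      v8::Local<v8::Object> wrapper = templ->NewInstance();
      // No bounds or VM-liveness checks are performed on this store.
      wrapper->SetPointerInInternalField(0, thing);
      return wrapper;
    }

    NativeThing* Unwrap(v8::Handle<v8::Object> wrapper) {
      return static_cast<NativeThing*>(
          wrapper->GetPointerFromInternalField(0));
    }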
@@ -1162,6 +1169,15 @@ class V8EXPORT Object : public Value {
   */
  Local<Object> Clone();

  /**
   * Set the backing store of the indexed properties to be managed by the
   * embedding layer. Access to the indexed properties will follow the rules
   * spelled out in CanvasPixelArray.
   * Note: The embedding program still owns the data and needs to ensure that
   * the backing store is preserved while V8 has a reference.
   */
  void SetIndexedPropertiesToPixelData(uint8_t* data, int length);

  static Local<Object> New();
  static Object* Cast(Value* obj);
 private:
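A hedged sketch of the pixel-array feature from the embedder's side: after the
call, indexed reads and writes on the object go straight to the embedder-owned
byte buffer, with stores clamped to 0..255 (the buffer and its lifetime
management here are assumptions of the sketch, not part of the diff):

    // Hypothetical embedder code against the declaration above.
    #include <v8.h>
    #include <stdint.h>

    void AttachPixels(v8::Handle<v8::Object> obj) {
      static uint8_t pixels[256];  // Must stay alive while V8 references it.
      obj->SetIndexedPropertiesToPixelData(pixels, 256);
      // From script: obj[0] = 300 stores 255 (clamped), obj[1] = -5 stores 0,
      // and reads come straight back out of 'pixels'.
    }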
@@ -1950,6 +1966,20 @@ typedef void (*GCCallback)();
typedef Persistent<Context> (*ContextGenerator)();


/**
 * Profiler modules.
 *
 * In V8, profiler consists of several modules: CPU profiler, and different
 * kinds of heap profiling. Each can be turned on / off independently.
 */
enum ProfilerModules {
  PROFILER_MODULE_NONE = 0,
  PROFILER_MODULE_CPU = 1,
  PROFILER_MODULE_HEAP_STATS = 1 << 1,
  PROFILER_MODULE_JS_CONSTRUCTORS = 1 << 2
};


/**
 * Container class for static utility functions.
 */
@@ -2103,6 +2133,32 @@ class V8EXPORT V8 {
   */
  static bool IsProfilerPaused();

  /**
   * Resumes specified profiler modules.
   * "ResumeProfiler" is equivalent to "ResumeProfilerEx(PROFILER_MODULE_CPU)".
   * See ProfilerModules enum.
   *
   * \param flags Flags specifying profiler modules.
   */
  static void ResumeProfilerEx(int flags);

  /**
   * Pauses specified profiler modules.
   * "PauseProfiler" is equivalent to "PauseProfilerEx(PROFILER_MODULE_CPU)".
   * See ProfilerModules enum.
   *
   * \param flags Flags specifying profiler modules.
   */
  static void PauseProfilerEx(int flags);

  /**
   * Returns active (resumed) profiler modules.
   * See ProfilerModules enum.
   *
   * \returns active profiler modules.
   */
  static int GetActiveProfilerModules();

  /**
   * If logging is performed into a memory buffer (via --logfile=*), allows to
   * retrieve previously written messages. This can be used for retrieving
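A short sketch of the finer-grained profiler control these declarations give an
embedder; the flag values come from the ProfilerModules enum above, while the
surrounding setup is assumed:

    // Hypothetical embedder snippet using the V8 1.3.1 profiler API.
    #include <v8.h>

    void ProfileHeapOnly() {
      // Stop CPU sampling but keep collecting heap statistics and the
      // JS-constructor heap profile.
      v8::V8::PauseProfilerEx(v8::PROFILER_MODULE_CPU);
      v8::V8::ResumeProfilerEx(v8::PROFILER_MODULE_HEAP_STATS |
                               v8::PROFILER_MODULE_JS_CONSTRUCTORS);

      int active = v8::V8::GetActiveProfilerModules();
      if (active & v8::PROFILER_MODULE_HEAP_STATS) {
        // Heap-stats logging is on; entries show up in the V8 log.
      }
    }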
deps/v8/src/SConscript (vendored): 2 changes

@@ -40,7 +40,7 @@ SOURCES = {
  'codegen.cc', 'compilation-cache.cc', 'compiler.cc', 'contexts.cc',
  'conversions.cc', 'counters.cc', 'dateparser.cc', 'debug.cc',
  'debug-agent.cc', 'disassembler.cc', 'execution.cc', 'factory.cc',
  'flags.cc', 'frames.cc', 'func-name-inferrer.cc',
  'flags.cc', 'frame-element.cc', 'frames.cc', 'func-name-inferrer.cc',
  'global-handles.cc', 'handles.cc', 'hashmap.cc',
  'heap.cc', 'ic.cc', 'interpreter-irregexp.cc', 'jsregexp.cc',
  'jump-target.cc', 'log.cc', 'log-utils.cc', 'mark-compact.cc', 'messages.cc',
deps/v8/src/api.cc (vendored): 102 changes

@@ -1085,8 +1085,9 @@ Local<Script> Script::Compile(v8::Handle<String> source,
  // handle it if it turns out not to be in release mode.
  ASSERT(pre_data == NULL || pre_data->SanityCheck());
  // If the pre-data isn't sane we simply ignore it
  if (pre_data != NULL && !pre_data->SanityCheck())
  if (pre_data != NULL && !pre_data->SanityCheck()) {
    pre_data = NULL;
  }
  i::Handle<i::JSFunction> boilerplate = i::Compiler::Compile(str,
                                                              name_obj,
                                                              line_offset,
@@ -2193,6 +2194,25 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
}


void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
  ON_BAILOUT("v8::SetElementsToPixelData()", return);
  ENTER_V8;
  if (!ApiCheck(i::Smi::IsValid(length),
                "v8::Object::SetIndexedPropertiesToPixelData()",
                "length exceeds max acceptable value")) {
    return;
  }
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  if (!ApiCheck(!self->IsJSArray(),
                "v8::Object::SetIndexedPropertiesToPixelData()",
                "JSArray is not supported")) {
    return;
  }
  i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(length, data);
  self->set_elements(*pixels);
}


Local<v8::Object> Function::NewInstance() const {
  return NewInstance(0, NULL);
}
@@ -2464,6 +2484,44 @@ void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
}


void* v8::Object::GetPointerFromInternalField(int index) {
  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
  i::Object* pointer = obj->GetInternalField(index);
  if (pointer->IsSmi()) {
    // Fast case, aligned native pointer.
    return pointer;
  }

  // Read from uninitialized field.
  if (!pointer->IsProxy()) {
    // Play safe even if it's something unexpected.
    ASSERT(pointer->IsUndefined());
    return NULL;
  }

  // Unaligned native pointer.
  return reinterpret_cast<void*>(i::Proxy::cast(pointer)->proxy());
}


void v8::Object::SetPointerInInternalField(int index, void* value) {
  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
  i::Object* as_object = reinterpret_cast<i::Object*>(value);
  if (as_object->IsSmi()) {
    // Aligned pointer, store as is.
    obj->SetInternalField(index, as_object);
  } else {
    // Currently internal fields are used by DOM wrappers which only
    // get garbage collected by the mark-sweep collector, so we
    // pretenure the proxy.
    HandleScope scope;
    i::Handle<i::Proxy> proxy =
        i::Factory::NewProxy(reinterpret_cast<i::Address>(value), i::TENURED);
    if (!proxy.is_null()) obj->SetInternalField(index, *proxy);
  }
}


// --- E n v i r o n m e n t ---

bool v8::V8::Initialize() {
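The fast path above leans on V8's pointer tagging: a small integer (smi) is a
word whose low tag bit is clear, so a sufficiently aligned native pointer
already "is" a smi and can sit in an internal field without allocating a
Proxy. A hedged illustration of that invariant (the 1-bit tag mirrors 32-bit
V8 of this era; the helper below is illustrative, not V8 code):

    // Why an even (2-byte-aligned) pointer survives the IsSmi() fast path.
    #include <stdint.h>
    #include <assert.h>

    bool LooksLikeSmi(const void* p) {
      // IsSmi() tests the low tag bit; aligned data pointers have it clear
      // and therefore round-trip through the internal field unchanged.
      return (reinterpret_cast<uintptr_t>(p) & 1) == 0;
    }

    int main() {
      int x = 0;
      assert(LooksLikeSmi(&x));      // ints are at least 2-byte aligned
      char buf[2] = {0, 0};
      (void) LooksLikeSmi(buf + 1);  // may be odd: would take the Proxy path
      return x;
    }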
@@ -3018,7 +3076,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) {
  if (!self->HasFastElements()) {
    return Local<Object>();
  }
  i::FixedArray* elms = self->elements();
  i::FixedArray* elms = i::FixedArray::cast(self->elements());
  i::Object* paragon = elms->get(index);
  if (!paragon->IsJSObject()) {
    return Local<Object>();
@@ -3177,6 +3235,46 @@ bool V8::IsProfilerPaused() {
}


void V8::ResumeProfilerEx(int flags) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (flags & PROFILER_MODULE_CPU) {
    i::Logger::ResumeProfiler();
  }
  if (flags & (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
    i::FLAG_log_gc = true;
  }
#endif
}


void V8::PauseProfilerEx(int flags) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (flags & PROFILER_MODULE_CPU) {
    i::Logger::PauseProfiler();
  }
  if (flags & (PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS)) {
    i::FLAG_log_gc = false;
  }
#endif
}


int V8::GetActiveProfilerModules() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  int result = PROFILER_MODULE_NONE;
  if (!i::Logger::IsProfilerPaused()) {
    result |= PROFILER_MODULE_CPU;
  }
  if (i::FLAG_log_gc) {
    result |= PROFILER_MODULE_HEAP_STATS | PROFILER_MODULE_JS_CONSTRUCTORS;
  }
  return result;
#else
  return PROFILER_MODULE_NONE;
#endif
}


int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
deps/v8/src/arm/codegen-arm.cc (vendored): 70 changes

@@ -2897,7 +2897,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
    __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));

    // Write to the indexed properties array.
    int offset = i * kPointerSize + Array::kHeaderSize;
    int offset = i * kPointerSize + FixedArray::kHeaderSize;
    __ str(r0, FieldMemOperand(r1, offset));

    // Update the write barrier for the array address.
@@ -3737,7 +3737,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
    }
    frame_->EmitPush(r0);  // r0 has result
  }
  ASSERT((has_cc() && frame_->height() == original_height) ||
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}

@@ -3871,22 +3872,12 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
                          &is_true,
                          false_target(),
                          false);
    if (has_cc()) {
      Branch(false, false_target());

      // Evaluate right side expression.
      is_true.Bind();
      LoadConditionAndSpill(node->right(),
                            NOT_INSIDE_TYPEOF,
                            true_target(),
                            false_target(),
                            false);

    } else {
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

      __ ldr(r0, frame_->Top());  // dup the stack top
      __ ldr(r0, frame_->Top());  // Duplicate the stack top.
      frame_->EmitPush(r0);
      // Avoid popping the result if it converts to 'false' using the
      // standard ToBoolean() conversion as described in ECMA-262,
@@ -3904,6 +3895,22 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_true.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly true.
      if (has_cc()) {
        Branch(false, false_target());
      }
      is_true.Bind();
      LoadConditionAndSpill(node->right(),
                            NOT_INSIDE_TYPEOF,
                            true_target(),
                            false_target(),
                            false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
    }

  } else if (op == Token::OR) {
@@ -3913,18 +3920,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
                          true_target(),
                          &is_false,
                          false);
    if (has_cc()) {
      Branch(true, true_target());

      // Evaluate right side expression.
      is_false.Bind();
      LoadConditionAndSpill(node->right(),
                            NOT_INSIDE_TYPEOF,
                            true_target(),
                            false_target(),
                            false);

    } else {
    if (has_valid_frame() && !has_cc()) {
      // The left-hand side result is on top of the virtual frame.
      JumpTarget pop_and_continue;
      JumpTarget exit;

@@ -3946,6 +3943,22 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {

      // Exit (always with a materialized value).
      exit.Bind();
    } else if (has_cc() || is_false.is_linked()) {
      // The left-hand side is either (a) partially compiled to
      // control flow with a final branch left to emit or (b) fully
      // compiled to control flow and possibly false.
      if (has_cc()) {
        Branch(true, true_target());
      }
      is_false.Bind();
      LoadConditionAndSpill(node->right(),
                            NOT_INSIDE_TYPEOF,
                            true_target(),
                            false_target(),
                            false);
    } else {
      // Nothing to do.
      ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
    }

  } else {
@@ -3989,7 +4002,8 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
    }
    frame_->EmitPush(r0);
  }
  ASSERT((has_cc() && frame_->height() == original_height) ||
  ASSERT(!has_valid_frame() ||
         (has_cc() && frame_->height() == original_height) ||
         (!has_cc() && frame_->height() == original_height + 1));
}
deps/v8/src/arm/codegen-arm.h (vendored): 4 changes

@@ -215,7 +215,7 @@ class CodeGenerator: public AstVisitor {

#define DEF_VISIT(type) \
  void Visit##type(type* node);
  NODE_LIST(DEF_VISIT)
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

  // Visit a statement and then spill the virtual frame if control flow can
@@ -374,7 +374,7 @@ class CodeGenerator: public AstVisitor {
  // information.
  void CodeForFunctionPosition(FunctionLiteral* fun);
  void CodeForReturnPosition(FunctionLiteral* fun);
  void CodeForStatementPosition(Node* node);
  void CodeForStatementPosition(AstNode* node);
  void CodeForSourcePosition(int pos);

#ifdef DEBUG
deps/v8/src/arm/ic-arm.cc (vendored): 37 changes

@@ -77,6 +77,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
  __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, miss);

  // Possible work-around for http://crbug.com/16276.
  // See also: http://codereview.chromium.org/155418.
  __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, miss);
  __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, miss);

  // Check that the properties array is a dictionary.
  __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
  __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
@@ -84,14 +91,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
  __ b(ne, miss);

  // Compute the capacity mask.
  const int kCapacityOffset =
      Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
  const int kCapacityOffset = StringDictionary::kHeaderSize +
      StringDictionary::kCapacityIndex * kPointerSize;
  __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
  __ mov(r3, Operand(r3, ASR, kSmiTagSize));  // convert smi to int
  __ sub(r3, r3, Operand(1));

  const int kElementsStartOffset =
      Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
  const int kElementsStartOffset = StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
@@ -575,8 +582,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
  // Check that the object is in fast mode (not dictionary).
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ cmp(r3, Operand(Factory::hash_table_map()));
  __ b(eq, &slow);
  __ cmp(r3, Operand(Factory::fixed_array_map()));
  __ b(ne, &slow);
  // Check that the key (index) is within bounds.
  __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
  __ cmp(r0, Operand(r3));
@@ -592,7 +599,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {

  // Fast case: Do the load.
  __ bind(&fast);
  __ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
  __ cmp(r0, Operand(Factory::the_hole_value()));
  // In case the loaded value is the_hole we have to consult GetProperty
@@ -654,14 +661,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
  // Check that the object is in fast mode (not dictionary).
  __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ cmp(r2, Operand(Factory::hash_table_map()));
  __ b(eq, &slow);
  __ cmp(r2, Operand(Factory::fixed_array_map()));
  __ b(ne, &slow);
  // Untag the key (for checking against untagged length in the fixed array).
  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
  // Compute address to store into and check array bounds.
  __ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag));
  __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
  __ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset));
  __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset));
  __ cmp(r1, Operand(ip));
  __ b(lo, &fast);

@@ -689,7 +696,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  __ mov(r3, Operand(r2));
  // NOTE: Computing the address to store into must take the fact
  // that the key has been incremented into account.
  int displacement = Array::kHeaderSize - kHeapObjectTag -
  int displacement = FixedArray::kHeaderSize - kHeapObjectTag -
      ((1 << kSmiTagSize) * 2);
  __ add(r2, r2, Operand(displacement));
  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
@@ -703,8 +710,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  __ bind(&array);
  __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
  __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ cmp(r1, Operand(Factory::hash_table_map()));
  __ b(eq, &slow);
  __ cmp(r1, Operand(Factory::fixed_array_map()));
  __ b(ne, &slow);

  // Check the key against the length in the array, compute the
  // address to store into and fall through to fast case.
@@ -714,7 +721,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  __ cmp(r1, Operand(ip));
  __ b(hs, &extra);
  __ mov(r3, Operand(r2));
  __ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
  __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
deps/v8/src/arm/macro-assembler-arm.cc (vendored): 2 changes

@@ -226,7 +226,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
  // Add the page header (including remembered set), array header, and array
  // body size to the page address.
  add(object, object, Operand(Page::kObjectStartOffset
                              + Array::kHeaderSize));
                              + FixedArray::kHeaderSize));
  add(object, object, Operand(scratch));

  bind(&fast);
deps/v8/src/arm/register-allocator-arm-inl.h (vendored): 8 changes

@@ -60,7 +60,7 @@ bool RegisterAllocator::IsReserved(Register reg) {

int RegisterAllocator::ToNumber(Register reg) {
  ASSERT(reg.is_valid() && !IsReserved(reg));
  static int numbers[] = {
  const int kNumbers[] = {
    0,   // r0
    1,   // r1
    2,   // r2
@@ -78,15 +78,15 @@ int RegisterAllocator::ToNumber(Register reg) {
    11,  // lr
    -1   // pc
  };
  return numbers[reg.code()];
  return kNumbers[reg.code()];
}


Register RegisterAllocator::ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  static Register registers[] =
  const Register kRegisters[] =
      { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
  return registers[num];
  return kRegisters[num];
}
deps/v8/src/arm/stub-cache-arm.cc (vendored): 51 changes

@@ -164,7 +164,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
    __ ldr(dst, FieldMemOperand(src, offset));
  } else {
    // Calculate the offset into the properties array.
    int offset = index * kPointerSize + Array::kHeaderSize;
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
    __ ldr(dst, FieldMemOperand(dst, offset));
  }
@@ -330,7 +330,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
    __ RecordWrite(receiver_reg, name_reg, scratch);
  } else {
    // Write to the properties array.
    int offset = index * kPointerSize + Array::kHeaderSize;
    int offset = index * kPointerSize + FixedArray::kHeaderSize;
    // Get the properties array
    __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
    __ str(r0, FieldMemOperand(scratch, offset));
@@ -467,21 +467,23 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,

  // Push the arguments on the JS stack of the caller.
  __ push(receiver);  // receiver
  __ push(reg);  // holder
  __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback data
  __ push(ip);
  __ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset));
  __ push(reg);
  __ push(name_reg);  // name
  __ push(reg);  // holder

  // Do tail-call to the runtime system.
  ExternalReference load_callback_property =
      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
  __ TailCallRuntime(load_callback_property, 4);
  __ TailCallRuntime(load_callback_property, 5);
}


void StubCompiler::GenerateLoadInterceptor(JSObject* object,
                                           JSObject* holder,
                                           Smi* lookup_hint,
                                           LookupResult* lookup,
                                           Register receiver,
                                           Register name_reg,
                                           Register scratch1,
@@ -500,13 +502,18 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
  __ push(receiver);  // receiver
  __ push(reg);  // holder
  __ push(name_reg);  // name
  __ mov(scratch1, Operand(lookup_hint));

  InterceptorInfo* interceptor = holder->GetNamedInterceptor();
  ASSERT(!Heap::InNewSpace(interceptor));
  __ mov(scratch1, Operand(Handle<Object>(interceptor)));
  __ push(scratch1);
  __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
  __ push(scratch2);

  // Do tail-call to the runtime system.
  ExternalReference load_ic_property =
      ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
  __ TailCallRuntime(load_ic_property, 4);
      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
  __ TailCallRuntime(load_ic_property, 5);
}


@@ -676,13 +683,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,

    case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
      CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
      // Make sure object->elements()->map() != Heap::hash_table_map()
      // Make sure object->HasFastElements().
      // Get the elements array of the object.
      __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
      // Check that the object is in fast mode (not dictionary).
      __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
      __ cmp(r2, Operand(Factory::hash_table_map()));
      __ b(eq, &miss);
      __ cmp(r2, Operand(Factory::fixed_array_map()));
      __ b(ne, &miss);
      break;

    default:
@@ -744,8 +751,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  // -----------------------------------
  Label miss;

  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);

  // Get the number of arguments.
  const int argc = arguments().immediate();

@@ -782,6 +787,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Jump to the cached code (tail call).
  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
  ASSERT(function->is_compiled());
  Handle<Code> code(function->code());
  ParameterCount expected(function->shared()->formal_parameter_count());
@@ -790,7 +796,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,

  // Handle call cache miss.
  __ bind(&miss);
  __ DecrementCounter(&Counters::call_global_inline, 1, r1, r3);
  __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
  Handle<Code> ic = ComputeCallMiss(arguments().immediate());
  __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -951,8 +956,6 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
  // -----------------------------------
  Label miss;

  __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);

  // Check that the map of the global has not changed.
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -963,11 +966,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
  __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
  __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));

  __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
  __ Ret();

  // Handle store cache miss.
  __ bind(&miss);
  __ DecrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
  __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
  __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1054,9 +1057,11 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,

  __ ldr(r0, MemOperand(sp, 0));

  LookupResult lookup;
  holder->LocalLookupRealNamedProperty(name, &lookup);
  GenerateLoadInterceptor(object,
                          holder,
                          holder->InterceptorPropertyLookupHint(name),
                          &lookup,
                          r0,
                          r2,
                          r3,
@@ -1083,8 +1088,6 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
  // -----------------------------------
  Label miss;

  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);

  // Get the receiver from the stack.
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));

@@ -1109,10 +1112,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
    __ b(eq, &miss);
  }

  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
  __ Ret();

  __ bind(&miss);
  __ DecrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
  __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
  GenerateLoadMiss(masm(), Code::LOAD_IC);

@@ -1121,8 +1124,6 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}


// TODO(1224671): IC stubs for keyed loads have not been implemented
// for ARM.
Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
                                                JSObject* receiver,
                                                JSObject* holder,
@@ -1217,9 +1218,11 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
  __ cmp(r2, Operand(Handle<String>(name)));
  __ b(ne, &miss);

  LookupResult lookup;
  holder->LocalLookupRealNamedProperty(name, &lookup);
  GenerateLoadInterceptor(receiver,
                          holder,
                          Smi::FromInt(JSObject::kLookupInHolder),
                          &lookup,
                          r0,
                          r2,
                          r3,
deps/v8/src/ast.cc (vendored): 2 changes

@@ -51,7 +51,7 @@ CallEval CallEval::sentinel_(NULL, NULL, 0);
    if (v->CheckStackOverflow()) return;  \
    v->Visit##type(this);                 \
  }
NODE_LIST(DECL_ACCEPT)
AST_NODE_LIST(DECL_ACCEPT)
#undef DECL_ACCEPT
deps/v8/src/ast.h (vendored): 31 changes

@@ -53,9 +53,8 @@ namespace internal {
// Nodes of the abstract syntax tree. Only concrete classes are
// enumerated here.

#define NODE_LIST(V)            \
#define STATEMENT_NODE_LIST(V)  \
  V(Block)                      \
  V(Declaration)                \
  V(ExpressionStatement)        \
  V(EmptyStatement)             \
  V(IfStatement)                \
@@ -69,7 +68,9 @@ namespace internal {
  V(ForInStatement)             \
  V(TryCatch)                   \
  V(TryFinally)                 \
  V(DebuggerStatement)          \
  V(DebuggerStatement)

#define EXPRESSION_NODE_LIST(V) \
  V(FunctionLiteral)            \
  V(FunctionBoilerplateLiteral) \
  V(Conditional)                \
@@ -93,13 +94,17 @@ namespace internal {
  V(CompareOperation)           \
  V(ThisFunction)

#define AST_NODE_LIST(V)        \
  V(Declaration)                \
  STATEMENT_NODE_LIST(V)        \
  EXPRESSION_NODE_LIST(V)

// Forward declarations
class TargetCollector;
class MaterializedLiteral;

#define DEF_FORWARD_DECLARATION(type) class type;
NODE_LIST(DEF_FORWARD_DECLARATION)
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION


@@ -108,10 +113,10 @@ NODE_LIST(DEF_FORWARD_DECLARATION)
typedef ZoneList<Handle<String> > ZoneStringList;


class Node: public ZoneObject {
class AstNode: public ZoneObject {
 public:
  Node(): statement_pos_(RelocInfo::kNoPosition) { }
  virtual ~Node() { }
  AstNode(): statement_pos_(RelocInfo::kNoPosition) { }
  virtual ~AstNode() { }
  virtual void Accept(AstVisitor* v) = 0;

  // Type testing & conversion.
@@ -143,7 +148,7 @@ class Node: public ZoneObject {
};


class Statement: public Node {
class Statement: public AstNode {
 public:
  virtual Statement* AsStatement() { return this; }
  virtual ReturnStatement* AsReturnStatement() { return NULL; }
@@ -152,7 +157,7 @@ class Statement: public Node {
};


class Expression: public Node {
class Expression: public AstNode {
 public:
  virtual Expression* AsExpression() { return this; }

@@ -240,7 +245,7 @@ class Block: public BreakableStatement {
};


class Declaration: public Node {
class Declaration: public AstNode {
 public:
  Declaration(VariableProxy* proxy, Variable::Mode mode, FunctionLiteral* fun)
      : proxy_(proxy),
@@ -523,7 +528,7 @@ class IfStatement: public Statement {

// NOTE: TargetCollectors are represented as nodes to fit in the target
// stack in the compiler; this should probably be reworked.
class TargetCollector: public Node {
class TargetCollector: public AstNode {
 public:
  explicit TargetCollector(ZoneList<BreakTarget*>* targets)
      : targets_(targets) {
@@ -1678,7 +1683,7 @@ class AstVisitor BASE_EMBEDDED {
  virtual ~AstVisitor() { }

  // Dispatch
  void Visit(Node* node) { node->Accept(this); }
  void Visit(AstNode* node) { node->Accept(this); }

  // Iteration
  virtual void VisitStatements(ZoneList<Statement*>* statements);
@@ -1702,7 +1707,7 @@ class AstVisitor BASE_EMBEDDED {
  // Individual nodes
#define DEF_VISIT(type) \
  virtual void Visit##type(type* node) = 0;
  NODE_LIST(DEF_VISIT)
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

 private:
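Much of this refactor rides on the X-macro pattern visible above:
AST_NODE_LIST(V) applies a caller-supplied macro V to every node type, so one
list drives the forward declarations, the pure-virtual visitor methods, and the
Accept definitions in ast.cc. A self-contained C++ sketch of the idiom (names
here are illustrative, not V8's):

    #include <iostream>

    #define SHAPE_LIST(V) \
      V(Circle)           \
      V(Square)

    // One expansion produces forward declarations...
    #define DEF_FORWARD(type) class type;
    SHAPE_LIST(DEF_FORWARD)
    #undef DEF_FORWARD

    // ...another produces a visitor interface with one method per type,
    // mirroring how AST_NODE_LIST(DEF_VISIT) expands in ast.h.
    class Visitor {
     public:
      virtual ~Visitor() {}
    #define DEF_VISIT(type) virtual void Visit##type(type* node) = 0;
      SHAPE_LIST(DEF_VISIT)
    #undef DEF_VISIT
    };

    class Circle {};
    class Square {};

    class Printer : public Visitor {
     public:
      void VisitCircle(Circle*) { std::cout << "circle\n"; }
      void VisitSquare(Square*) { std::cout << "square\n"; }
    };

    int main() {
      Circle c;
      Printer p;
      p.VisitCircle(&c);  // Adding a type to SHAPE_LIST updates every expansion.
      return 0;
    }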
deps/v8/src/bootstrapper.cc (vendored): 14 changes

@@ -47,14 +47,10 @@ namespace internal {
// generate an index for each native JS file.
class SourceCodeCache BASE_EMBEDDED {
 public:
  explicit SourceCodeCache(Script::Type type): type_(type) { }
  explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }

  void Initialize(bool create_heap_objects) {
    if (create_heap_objects) {
      cache_ = Heap::empty_fixed_array();
    } else {
      cache_ = NULL;
    }
    cache_ = create_heap_objects ? Heap::empty_fixed_array() : NULL;
  }

  void Iterate(ObjectVisitor* v) {
@@ -1107,12 +1103,6 @@ bool Genesis::InstallNatives() {
    global_context()->set_empty_script(*script);
  }

#ifdef V8_HOST_ARCH_64_BIT
  // TODO(X64): Remove this when inline caches work.
  FLAG_use_ic = false;
#endif  // V8_HOST_ARCH_64_BIT

  if (FLAG_natives_file == NULL) {
    // Without natives file, install default natives.
    for (int i = Natives::GetDelayCount();
deps/v8/src/builtins.cc (vendored): 26 changes

@@ -87,18 +87,34 @@ static inline Object* __builtin_arg__(int n, int argc, Object** argv) {
}


// TODO(1238487): Get rid of this function that determines if the
// builtin is called as a constructor. This may be a somewhat slow
// operation due to the stack frame iteration.
static inline bool CalledAsConstructor() {
#ifdef DEBUG
  // Calculate the result using a full stack frame iterator and check
  // that the state of the stack is as we assume it to be in the
  // code below.
  StackFrameIterator it;
  ASSERT(it.frame()->is_exit());
  it.Advance();
  StackFrame* frame = it.frame();
  return frame->is_construct();
  bool reference_result = frame->is_construct();
#endif
  Address fp = Top::c_entry_fp(Top::GetCurrentThread());
  // Because we know fp points to an exit frame we can use the relevant
  // part of ExitFrame::ComputeCallerState directly.
  const int kCallerOffset = ExitFrameConstants::kCallerFPOffset;
  Address caller_fp = Memory::Address_at(fp + kCallerOffset);
  // This inlines the part of StackFrame::ComputeType that grabs the
  // type of the current frame. Note that StackFrame::ComputeType
  // has been specialized for each architecture so if any one of them
  // changes this code has to be changed as well.
  const int kMarkerOffset = StandardFrameConstants::kMarkerOffset;
  const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
  Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
  bool result = (marker == kConstructMarker);
  ASSERT_EQ(result, reference_result);
  return result;
}


// ----------------------------------------------------------------------------
deps/v8/src/codegen.cc (vendored): 2 changes

@@ -496,7 +496,7 @@ void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
}


void CodeGenerator::CodeForStatementPosition(Node* node) {
void CodeGenerator::CodeForStatementPosition(AstNode* node) {
  if (FLAG_debug_info) {
    int pos = node->statement_pos();
    if (pos != RelocInfo::kNoPosition) {
deps/v8/src/compilation-cache.cc (vendored): 9 changes

@@ -37,10 +37,17 @@ namespace internal {
static const int kSubCacheCount = 4;

// The number of generations for each sub cache.
#if defined(ANDROID)
static const int kScriptGenerations = 1;
static const int kEvalGlobalGenerations = 1;
static const int kEvalContextualGenerations = 1;
static const int kRegExpGenerations = 1;
#else
static const int kScriptGenerations = 5;
static const int kEvalGlobalGenerations = 2;
static const int kEvalContextualGenerations = 2;
static const int kRegExpGenerations = 2;
#endif

// Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
@@ -56,6 +63,8 @@ class CompilationSubCache {
    tables_ = NewArray<Object*>(generations);
  }

  ~CompilationSubCache() { DeleteArray(tables_); }

  // Get the compilation cache tables for a specific generation.
  Handle<CompilationCacheTable> GetTable(int generation);
deps/v8/src/debug-agent.cc (vendored): 4 changes

@@ -254,8 +254,8 @@ SmartPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {

    // Check that key is Content-Length.
    if (strcmp(key, kContentLength) == 0) {
      // Get the content length value if within a sensible range.
      if (strlen(value) > 7) {
      // Get the content length value if present and within a sensible range.
      if (value == NULL || strlen(value) > 7) {
        return SmartPointer<char>();
      }
      for (int i = 0; value[i] != '\0'; i++) {
deps/v8/src/debug.cc (vendored): 39 changes

@@ -334,8 +334,11 @@ void BreakLocationIterator::PrepareStepIn() {
      rinfo()->set_target_address(stub->entry());
    }
  } else {
    // Step in through constructs call requires no changes to the running code.
    ASSERT(RelocInfo::IsConstructCall(rmode()));
    // Step in through construct call requires no changes to the running code.
    // Step in through getters/setters should already be prepared as well
    // because caller of this function (Debug::PrepareStep) is expected to
    // flood the top frame's function with one shot breakpoints.
    ASSERT(RelocInfo::IsConstructCall(rmode()) || code->is_inline_cache_stub());
  }
}

@@ -1087,10 +1090,18 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {

  // Compute whether or not the target is a call target.
  bool is_call_target = false;
  bool is_load_or_store = false;
  bool is_inline_cache_stub = false;
  if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
    Address target = it.rinfo()->target_address();
    Code* code = Code::GetCodeFromTargetAddress(target);
    if (code->is_call_stub()) is_call_target = true;
    if (code->is_call_stub()) {
      is_call_target = true;
    }
    if (code->is_inline_cache_stub()) {
      is_inline_cache_stub = true;
      is_load_or_store = !is_call_target;
    }
  }

  // If this is the last break code target step out is the only possibility.
@@ -1103,8 +1114,8 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
      JSFunction* function = JSFunction::cast(frames_it.frame()->function());
      FloodWithOneShot(Handle<SharedFunctionInfo>(function->shared()));
    }
  } else if (!(is_call_target || RelocInfo::IsConstructCall(it.rmode())) ||
             step_action == StepNext || step_action == StepMin) {
  } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()))
             || step_action == StepNext || step_action == StepMin) {
    // Step next or step min.

    // Fill the current function with one-shot break points.
@@ -1117,9 +1128,20 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
  } else {
    // Fill the current function with one-shot break points even for step in on
    // a call target as the function called might be a native function for
    // which step in will not stop.
    // which step in will not stop. It also prepares for stepping in
    // getters/setters.
    FloodWithOneShot(shared);

    if (is_load_or_store) {
      // Remember source position and frame to handle step in getter/setter. If
      // there is a custom getter/setter it will be handled in
      // Object::Get/SetPropertyWithCallback, otherwise the step action will be
      // propagated on the next Debug::Break.
      thread_local_.last_statement_position_ =
          debug_info->code()->SourceStatementPosition(frame->pc());
      thread_local_.last_fp_ = frame->fp();
    }

    // Step in or Step in min
    it.PrepareStepIn();
    ActivateStepIn(frame);
@@ -1279,7 +1301,7 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
  // step into was requested.
  if (fp == Debug::step_in_fp()) {
    // Don't allow step into functions in the native context.
    if (function->context()->global() != Top::context()->builtins()) {
    if (!function->IsBuiltin()) {
      if (function->shared()->code() ==
          Builtins::builtin(Builtins::FunctionApply) ||
          function->shared()->code() ==
@@ -1288,7 +1310,8 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
        // function to be called and not the code for Builtins::FunctionApply or
        // Builtins::FunctionCall. The receiver of call/apply is the target
        // function.
        if (!holder.is_null() && holder->IsJSFunction()) {
        if (!holder.is_null() && holder->IsJSFunction() &&
            !JSFunction::cast(*holder)->IsBuiltin()) {
          Handle<SharedFunctionInfo> shared_info(
              JSFunction::cast(*holder)->shared());
          Debug::FloodWithOneShot(shared_info);
deps/v8/src/execution.cc (vendored): 38 changes

@@ -164,19 +164,16 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
  // If you return a function from here, it will be called when an
  // attempt is made to call the given object as a function.

  // The regular expression code here is really meant more as an
  // example than anything else. KJS does not support calling regular
  // expressions as functions, but SpiderMonkey does.
  if (FLAG_call_regexp) {
    bool is_regexp =
        object->IsHeapObject() &&
        (HeapObject::cast(*object)->map()->constructor() ==
         *Top::regexp_function());
  // Regular expressions can be called as functions in both Firefox
  // and Safari so we allow it too.
  bool is_regexp =
      object->IsHeapObject() &&
      (HeapObject::cast(*object)->map()->constructor() ==
       *Top::regexp_function());

    if (is_regexp) {
      Handle<String> exec = Factory::exec_symbol();
      return Handle<Object>(object->GetProperty(*exec));
    }
  if (is_regexp) {
    Handle<String> exec = Factory::exec_symbol();
    return Handle<Object>(object->GetProperty(*exec));
  }

  // Objects created through the API can have an instance-call handler
@@ -590,6 +587,23 @@ Object* Execution::DebugBreakHelper() {
    return Heap::undefined_value();
  }

  {
    JavaScriptFrameIterator it;
    ASSERT(!it.done());
    Object* fun = it.frame()->function();
    if (fun && fun->IsJSFunction()) {
      // Don't stop in builtin functions.
      if (JSFunction::cast(fun)->IsBuiltin()) {
        return Heap::undefined_value();
      }
      GlobalObject* global = JSFunction::cast(fun)->context()->global();
      // Don't stop in debugger functions.
      if (Debug::IsDebugGlobal(global)) {
        return Heap::undefined_value();
      }
    }
  }

  // Collect the break state before clearing the flags.
  bool debug_command_only =
      StackGuard::IsDebugCommand() && !StackGuard::IsDebugBreak();
deps/v8/src/factory.cc (vendored): 10 changes

@@ -210,6 +210,16 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
}


Handle<PixelArray> Factory::NewPixelArray(int length,
                                          uint8_t* external_pointer,
                                          PretenureFlag pretenure) {
  ASSERT(0 <= length);
  CALL_HEAP_FUNCTION(Heap::AllocatePixelArray(length,
                                              external_pointer,
                                              pretenure), PixelArray);
}


Handle<Map> Factory::NewMap(InstanceType type, int instance_size) {
  CALL_HEAP_FUNCTION(Heap::AllocateMap(type, instance_size), Map);
}
deps/v8/src/factory.h (vendored): 4 changes

@@ -154,6 +154,10 @@ class Factory : public AllStatic {
  static Handle<ByteArray> NewByteArray(int length,
                                        PretenureFlag pretenure = NOT_TENURED);

  static Handle<PixelArray> NewPixelArray(int length,
                                          uint8_t* external_pointer,
                                          PretenureFlag pretenure = NOT_TENURED);

  static Handle<Map> NewMap(InstanceType type, int instance_size);

  static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
deps/v8/src/flag-definitions.h (vendored): 5 changes

@@ -144,9 +144,6 @@ DEFINE_bool(debugger_auto_break, false,
            "automatically set the debug break flag when debugger commands are "
            "in the queue (experimental)")

// execution.cc
DEFINE_bool(call_regexp, false, "allow calls to RegExp objects")

// frames.cc
DEFINE_int(max_stack_trace_source_length, 300,
           "maximum length of function source code printed in a stack trace.")
@@ -158,6 +155,8 @@ DEFINE_bool(gc_global, false, "always perform global GCs")
DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_bool(trace_gc, false,
            "print one trace line following each garbage collection")
DEFINE_bool(trace_gc_verbose, false,
            "print more details following each garbage collection")
DEFINE_bool(collect_maps, true,
            "garbage collect maps from which no objects can be reached")
45
deps/v8/src/frame-element.cc
vendored
Normal file
@ -0,0 +1,45 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "frame-element.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// FrameElement implementation.

FrameElement::ZoneObjectList* FrameElement::ConstantList() {
static ZoneObjectList list(10);
return &list;
}

} } // namespace v8::internal
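An editorial aside on the move above: defining ConstantList() out of line gives the function-local static exactly one home, whereas a body defined in the header can end up with one copy of the static per shared object on some toolchains. The same pattern in a generic, self-contained form with hypothetical names:

#include <vector>

// registry.h (sketch): declaration only; no static state in the header.
struct Registry {
  static std::vector<int>* List();
};

// registry.cc (sketch): the single definition; the function-local static
// now lives in exactly one translation unit.
std::vector<int>* Registry::List() {
  static std::vector<int> list;
  return &list;
}
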
5
deps/v8/src/frame-element.h
vendored
@ -91,10 +91,7 @@ class FrameElement BASE_EMBEDDED {
// this table of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;

static ZoneObjectList* ConstantList() {
static ZoneObjectList list(10);
return &list;
}
static ZoneObjectList* ConstantList();

// Clear the constants indirection table.
static void ClearConstantList() {

1
deps/v8/src/globals.h
vendored
@ -207,6 +207,7 @@ class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
class Array;
class JSArray;
class JSFunction;
class JSObject;

23
deps/v8/src/handles.cc
vendored
@ -164,8 +164,11 @@ void SetExpectedNofPropertiesFromEstimate(Handle<JSFunction> func,

void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode) {
CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(mode));
PropertyNormalizationMode mode,
int expected_additional_properties) {
CALL_HEAP_FUNCTION_VOID(object->NormalizeProperties(
mode,
expected_additional_properties));
}

@ -341,6 +344,14 @@ Handle<String> SubString(Handle<String> str, int start, int end) {
Handle<Object> SetElement(Handle<JSObject> object,
uint32_t index,
Handle<Object> value) {
if (object->HasPixelElements()) {
if (!value->IsSmi() && !value->IsHeapNumber() && !value->IsUndefined()) {
bool has_exception;
Handle<Object> number = Execution::ToNumber(value, &has_exception);
if (has_exception) return Handle<Object>();
value = number;
}
}
CALL_HEAP_FUNCTION(object->SetElement(index, *value), Object);
}

@ -643,13 +654,17 @@ bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag) {

OptimizedObjectForAddingMultipleProperties::
OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
int expected_additional_properties,
bool condition) {
object_ = object;
if (condition && object_->HasFastProperties()) {
// Normalize the properties of object to avoid n^2 behavior
// when extending the object multiple properties.
// when extending the object with multiple properties. Indicate the
// number of properties to be added.
unused_property_fields_ = object->map()->unused_property_fields();
NormalizeProperties(object_, KEEP_INOBJECT_PROPERTIES);
NormalizeProperties(object_,
KEEP_INOBJECT_PROPERTIES,
expected_additional_properties);
has_been_transformed_ = true;

} else {

4
deps/v8/src/handles.h
vendored
@ -181,7 +181,8 @@ class HandleScope {
// of space or encountering an internal error.

void NormalizeProperties(Handle<JSObject> object,
PropertyNormalizationMode mode);
PropertyNormalizationMode mode,
int expected_additional_properties);
void NormalizeElements(Handle<JSObject> object);
void TransformToFastProperties(Handle<JSObject> object,
int unused_property_fields);
@ -336,6 +337,7 @@ class NoHandleAllocation BASE_EMBEDDED {
class OptimizedObjectForAddingMultipleProperties BASE_EMBEDDED {
public:
OptimizedObjectForAddingMultipleProperties(Handle<JSObject> object,
int expected_property_count,
bool condition = true);
~OptimizedObjectForAddingMultipleProperties();
private:

5
deps/v8/src/hashmap.cc
vendored
@ -194,7 +194,10 @@ HashMap::Entry* HashMap::Probe(void* key, uint32_t hash) {
void HashMap::Initialize(uint32_t capacity) {
ASSERT(IsPowerOf2(capacity));
map_ = reinterpret_cast<Entry*>(allocator_->New(capacity * sizeof(Entry)));
if (map_ == NULL) V8::FatalProcessOutOfMemory("HashMap::Initialize");
if (map_ == NULL) {
V8::FatalProcessOutOfMemory("HashMap::Initialize");
return;
}
capacity_ = capacity;
Clear();
}

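The new early return reads as a guard for the case where the embedder-installed fatal-error handler returns instead of terminating; without it, Clear() would run against a NULL map_. The same shape in isolation (ReportFatalError is a hypothetical stand-in):

#include <cstdlib>
#include <cstring>

void ReportFatalError(const char* message);  // hypothetical handler; may return

void InitBuffer(char** out, size_t size) {
  char* block = static_cast<char*>(std::malloc(size));
  if (block == NULL) {
    ReportFatalError("out of memory");
    return;  // never touch 'block' if allocation failed
  }
  std::memset(block, 0, size);
  *out = block;
}
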
25
deps/v8/src/heap-inl.h
vendored
@ -228,6 +228,31 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}

int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
ASSERT(HasBeenSetup());
int amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes >= 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
}
int amount_since_last_global_gc =
amount_of_external_allocated_memory_ -
amount_of_external_allocated_memory_at_last_global_gc_;
if (amount_since_last_global_gc > external_allocation_limit_) {
CollectAllGarbage();
}
} else {
// Avoid underflow.
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
}
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
return amount_of_external_allocated_memory_;
}
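This is the bookkeeping behind the external allocation limit mentioned in the ChangeLog: once more external memory than the limit has been registered since the last global GC, a global GC is forced. A hedged embedder-side sketch, assuming the public v8::V8 wrapper that forwards to this method:

#include <cstdlib>
#include <v8.h>

// Hypothetical embedder code keeping a large malloc'ed buffer alive from a
// small JS wrapper object.
static const int kBufferSize = 4 * 1024 * 1024;

void* AttachExternalBuffer() {
  void* buffer = std::malloc(kBufferSize);
  // Tell V8 the wrapper now retains 4 MB of external memory, so small JS
  // objects cannot pin large C++ buffers without ever triggering a GC.
  v8::V8::AdjustAmountOfExternalAllocatedMemory(kBufferSize);
  return buffer;
}

void ReleaseExternalBuffer(void* buffer) {
  std::free(buffer);
  v8::V8::AdjustAmountOfExternalAllocatedMemory(-kBufferSize);
}
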

void Heap::SetLastScriptId(Object* last_script_id) {
roots_[kLastScriptIdRootIndex] = last_script_id;
}

240
deps/v8/src/heap.cc
vendored
@ -69,7 +69,7 @@ int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if V8_TARGET_ARCH_ARM
#if defined(ANDROID)
int Heap::semispace_size_ = 512*KB;
int Heap::old_generation_size_ = 128*MB;
int Heap::initial_semispace_size_ = 128*KB;
@ -85,8 +85,8 @@ GCCallback Heap::global_gc_epilogue_callback_ = NULL;
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.

int Heap::survived_since_last_expansion_ = 0;
int Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

@ -207,6 +207,27 @@ void Heap::ReportStatisticsBeforeGC() {
}

#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintF("Memory allocator, used: %8d, available: %8d\n",
MemoryAllocator::Size(), MemoryAllocator::Available());
PrintF("New space, used: %8d, available: %8d\n",
Heap::new_space_.Size(), new_space_.Available());
PrintF("Old pointers, used: %8d, available: %8d\n",
old_pointer_space_->Size(), old_pointer_space_->Available());
PrintF("Old data space, used: %8d, available: %8d\n",
old_data_space_->Size(), old_data_space_->Available());
PrintF("Code space, used: %8d, available: %8d\n",
code_space_->Size(), code_space_->Available());
PrintF("Map space, used: %8d, available: %8d\n",
map_space_->Size(), map_space_->Available());
PrintF("Large object space, used: %8d, avaialble: %8d\n",
|
||||
lo_space_->Size(), lo_space_->Available());
}
#endif

// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
@ -1166,10 +1187,14 @@ bool Heap::CreateInitialMaps() {
set_undetectable_long_ascii_string_map(Map::cast(obj));
Map::cast(obj)->set_is_undetectable();

obj = AllocateMap(BYTE_ARRAY_TYPE, Array::kAlignedSize);
obj = AllocateMap(BYTE_ARRAY_TYPE, ByteArray::kAlignedSize);
if (obj->IsFailure()) return false;
set_byte_array_map(Map::cast(obj));

obj = AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
if (obj->IsFailure()) return false;
set_pixel_array_map(Map::cast(obj));

obj = AllocateMap(CODE_TYPE, Code::kHeaderSize);
if (obj->IsFailure()) return false;
set_code_map(Map::cast(obj));
@ -1386,6 +1411,12 @@ bool Heap::CreateInitialObjects() {
if (obj->IsFailure()) return false;
set_the_hole_value(obj);

obj = CreateOddball(
oddball_map(), "no_interceptor_result_sentinel", Smi::FromInt(-2));
if (obj->IsFailure()) return false;
set_no_interceptor_result_sentinel(obj);

// Allocate the empty string.
obj = AllocateRawAsciiString(0, TENURED);
if (obj->IsFailure()) return false;
@ -1412,13 +1443,15 @@ bool Heap::CreateInitialObjects() {
if (obj->IsFailure()) return false;
set_prototype_accessors(Proxy::cast(obj));

// Allocate the code_stubs dictionary.
obj = NumberDictionary::Allocate(4);
// Allocate the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
obj = NumberDictionary::Allocate(128);
if (obj->IsFailure()) return false;
set_code_stubs(NumberDictionary::cast(obj));

// Allocate the non_monomorphic_cache used in stub-cache.cc
obj = NumberDictionary::Allocate(4);
// Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
// is set to avoid expanding the dictionary during bootstrapping.
obj = NumberDictionary::Allocate(64);
if (obj->IsFailure()) return false;
set_non_monomorphic_cache(NumberDictionary::cast(obj));

@ -1555,8 +1588,7 @@ Object* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space =
(pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;

@ -1838,6 +1870,23 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
}

Object* Heap::AllocatePixelArray(int length,
uint8_t* external_pointer,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);

if (result->IsFailure()) return result;

reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
reinterpret_cast<PixelArray*>(result)->set_length(length);
reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);

return result;
}
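Per the ChangeLog, values written to a pixel array's indexed properties are always clamped to the 0..255 interval. The ia32 stubs later in this commit implement exactly the saturation rule below; this helper is only a reference model, not code from the patch:

#include <cstdint>

// Reference semantics for pixel-array stores: saturate to the byte range.
static inline uint8_t ClampToByte(int value) {
  if (value < 0) return 0;      // negative values saturate to 0
  if (value > 255) return 255;  // large values saturate to 255
  return static_cast<uint8_t>(value);
}
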

Object* Heap::CreateCode(const CodeDesc& desc,
ZoneScopeInfo* sinfo,
Code::Flags flags,
@ -2056,6 +2105,11 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// properly initialized.
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

// Both types of global objects should be allocated using
// AllocateGlobalObject to be properly initialized.
ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

// Allocate the backing storage for the properties.
int prop_size = map->unused_property_fields() - map->inobject_properties();
Object* properties = AllocateFixedArray(prop_size, pretenure);
@ -2096,24 +2150,62 @@ Object* Heap::AllocateJSObject(JSFunction* constructor,

Object* Heap::AllocateGlobalObject(JSFunction* constructor) {
ASSERT(constructor->has_initial_map());
Map* map = constructor->initial_map();

// Make sure no field properties are described in the initial map.
// This guarantees us that normalizing the properties does not
// require us to change property values to JSGlobalPropertyCells.
ASSERT(constructor->initial_map()->NextFreePropertyIndex() == 0);
ASSERT(map->NextFreePropertyIndex() == 0);

// Make sure we don't have a ton of pre-allocated slots in the
// global objects. They will be unused once we normalize the object.
ASSERT(constructor->initial_map()->unused_property_fields() == 0);
ASSERT(constructor->initial_map()->inobject_properties() == 0);
ASSERT(map->unused_property_fields() == 0);
ASSERT(map->inobject_properties() == 0);

// Allocate the object based on the constructors initial map.
Object* result = AllocateJSObjectFromMap(constructor->initial_map(), TENURED);
if (result->IsFailure()) return result;
// Initial size of the backing store to avoid resize of the storage during
// bootstrapping. The size differs between the JS global object and the
// builtins object.
int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;

// Normalize the result.
JSObject* global = JSObject::cast(result);
result = global->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
if (result->IsFailure()) return result;
// Allocate a dictionary object for backing storage.
Object* obj =
StringDictionary::Allocate(
map->NumberOfDescribedProperties() * 2 + initial_size);
if (obj->IsFailure()) return obj;
StringDictionary* dictionary = StringDictionary::cast(obj);

// The global object might be created from an object template with accessors.
// Fill these accessors into the dictionary.
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < descs->number_of_descriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
PropertyDetails d =
PropertyDetails(details.attributes(), CALLBACKS, details.index());
Object* value = descs->GetCallbacksObject(i);
value = Heap::AllocateJSGlobalPropertyCell(value);
if (value->IsFailure()) return value;

Object* result = dictionary->Add(descs->GetKey(i), value, d);
if (result->IsFailure()) return result;
dictionary = StringDictionary::cast(result);
}

// Allocate the global object and initialize it with the backing store.
obj = Allocate(map, OLD_POINTER_SPACE);
if (obj->IsFailure()) return obj;
JSObject* global = JSObject::cast(obj);
InitializeJSObjectFromMap(global, dictionary, map);

// Create a new map for the global object.
obj = map->CopyDropDescriptors();
if (obj->IsFailure()) return obj;
Map* new_map = Map::cast(obj);

// Setup the global object as a normalized object.
global->set_map(new_map);
global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
global->set_properties(dictionary);

// Make sure result is a global object with properties in dictionary.
ASSERT(global->IsGlobalObject());
@ -2967,6 +3059,7 @@ bool Heap::ConfigureHeap(int semispace_size, int old_gen_size) {
semispace_size_ = RoundUpToPowerOf2(semispace_size_);
initial_semispace_size_ = Min(initial_semispace_size_, semispace_size_);
young_generation_size_ = 2 * semispace_size_;
external_allocation_limit_ = 10 * semispace_size_;

// The old generation is paged.
old_generation_size_ = RoundUp(old_generation_size_, Page::kPageSize);
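For a concrete feel of the new limit, using the Android figures visible at the top of this file:

// external_allocation_limit_ = 10 * semispace_size_
//                            = 10 * 512 KB = 5 MB
// i.e. roughly 5 MB of newly registered external memory between global
// GCs before AdjustAmountOfExternalAllocatedMemory forces a collection.
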
@ -3369,6 +3462,100 @@ void HeapIterator::reset() {
}

#ifdef ENABLE_LOGGING_AND_PROFILING
namespace {

// JSConstructorProfile is responsible for gathering and logging
// the "constructor profile" of JS objects allocated on the heap.
// It is run during the garbage collection cycle, thus it doesn't need
// to use handles.
class JSConstructorProfile BASE_EMBEDDED {
public:
JSConstructorProfile() : zscope_(DELETE_ON_EXIT) {}
void CollectStats(JSObject* obj);
void PrintStats();
// Used by ZoneSplayTree::ForEach.
void Call(String* name, const NumberAndSizeInfo& number_and_size);
private:
struct TreeConfig {
typedef String* Key;
typedef NumberAndSizeInfo Value;
static const Key kNoKey;
static const Value kNoValue;
// Strings are unique, so it is sufficient to compare their pointers.
static int Compare(const Key& a, const Key& b) {
return a == b ? 0 : (a < b ? -1 : 1);
}
};

typedef ZoneSplayTree<TreeConfig> JSObjectsInfoTree;
static int CalculateJSObjectNetworkSize(JSObject* obj);

ZoneScope zscope_;
JSObjectsInfoTree js_objects_info_tree_;
};

const JSConstructorProfile::TreeConfig::Key
JSConstructorProfile::TreeConfig::kNoKey = NULL;
const JSConstructorProfile::TreeConfig::Value
JSConstructorProfile::TreeConfig::kNoValue;

int JSConstructorProfile::CalculateJSObjectNetworkSize(JSObject* obj) {
int size = obj->Size();
// If 'properties' and 'elements' are non-empty (thus, non-shared),
// take their size into account.
if (FixedArray::cast(obj->properties())->length() != 0) {
size += obj->properties()->Size();
}
if (FixedArray::cast(obj->elements())->length() != 0) {
size += obj->elements()->Size();
}
return size;
}

void JSConstructorProfile::Call(String* name,
const NumberAndSizeInfo& number_and_size) {
SmartPointer<char> s_name;
if (name != NULL) {
s_name = name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
}
LOG(HeapSampleJSConstructorEvent(*s_name,
number_and_size.number(),
number_and_size.bytes()));
}

void JSConstructorProfile::CollectStats(JSObject* obj) {
String* constructor_func = NULL;
if (obj->map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
SharedFunctionInfo* sfi = constructor->shared();
String* name = String::cast(sfi->name());
constructor_func = name->length() > 0 ? name : sfi->inferred_name();
} else if (obj->IsJSFunction()) {
constructor_func = Heap::function_class_symbol();
}
JSObjectsInfoTree::Locator loc;
if (!js_objects_info_tree_.Find(constructor_func, &loc)) {
js_objects_info_tree_.Insert(constructor_func, &loc);
}
NumberAndSizeInfo number_and_size = loc.value();
number_and_size.increment_number(1);
number_and_size.increment_bytes(CalculateJSObjectNetworkSize(obj));
loc.set_value(number_and_size);
}

void JSConstructorProfile::PrintStats() {
js_objects_info_tree_.ForEach(this);
}

}  // namespace
#endif

//
// HeapProfiler class implementation.
//
@ -3385,15 +3572,22 @@ void HeapProfiler::CollectStats(HeapObject* obj, HistogramInfo* info) {
#ifdef ENABLE_LOGGING_AND_PROFILING
void HeapProfiler::WriteSample() {
LOG(HeapSampleBeginEvent("Heap", "allocated"));
LOG(HeapSampleStats(
"Heap", "allocated", Heap::Capacity(), Heap::SizeOfObjects()));

HistogramInfo info[LAST_TYPE+1];
#define DEF_TYPE_NAME(name) info[name].set_name(#name);
INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

JSConstructorProfile js_cons_profile;
HeapIterator iterator;
while (iterator.has_next()) {
CollectStats(iterator.next(), info);
HeapObject* obj = iterator.next();
CollectStats(obj, info);
if (obj->IsJSObject()) {
js_cons_profile.CollectStats(JSObject::cast(obj));
}
}

// Lump all the string types together.
@ -3415,6 +3609,8 @@ void HeapProfiler::WriteSample() {
}
}

js_cons_profile.PrintStats();

LOG(HeapSampleEndEvent("Heap", "allocated"));
}

@ -3620,6 +3816,10 @@ GCTracer::~GCTracer() {
CollectorString(),
start_size_, SizeOfHeapObjects(),
static_cast<int>(OS::TimeCurrentMillis() - start_time_));

#if defined(ENABLE_LOGGING_AND_PROFILING)
Heap::PrintShortHeapStatistics();
#endif
}

40
deps/v8/src/heap.h
vendored
@ -94,6 +94,7 @@ namespace internal {
UndetectableMediumAsciiStringMap) \
V(Map, undetectable_long_ascii_string_map, UndetectableLongAsciiStringMap) \
V(Map, byte_array_map, ByteArrayMap) \
V(Map, pixel_array_map, PixelArrayMap) \
V(Map, fixed_array_map, FixedArrayMap) \
V(Map, hash_table_map, HashTableMap) \
V(Map, context_map, ContextMap) \
@ -109,6 +110,7 @@ namespace internal {
V(Map, two_pointer_filler_map, TwoPointerFillerMap) \
V(Object, nan_value, NanValue) \
V(Object, undefined_value, UndefinedValue) \
V(Object, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
V(Object, minus_zero_value, MinusZeroValue) \
V(Object, null_value, NullValue) \
V(Object, true_value, TrueValue) \
@ -418,6 +420,14 @@ class Heap : public AllStatic {
// Please note this does not perform a garbage collection.
static Object* AllocateByteArray(int length);

// Allocate a pixel array of the specified length.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
static Object* AllocatePixelArray(int length,
uint8_t* external_pointer,
PretenureFlag pretenure);

// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@ -733,6 +743,11 @@ class Heap : public AllStatic {
static void ZapFromSpace();
#endif

#if defined(ENABLE_LOGGING_AND_PROFILING)
// Print short heap statistics.
static void PrintShortHeapStatistics();
#endif

// Makes a new symbol object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@ -741,7 +756,7 @@ class Heap : public AllStatic {
static Object* CreateSymbol(String* str);

// Write barrier support for address[offset] = o.
inline static void RecordWrite(Address address, int offset);
static inline void RecordWrite(Address address, int offset);

// Given an address occupied by a live code object, return that object.
static Object* FindCodeObject(Address a);
@ -797,22 +812,7 @@ class Heap : public AllStatic {

// Adjusts the amount of registered external memory.
// Returns the adjusted value.
static int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
int amount = amount_of_external_allocated_memory_ + change_in_bytes;
if (change_in_bytes >= 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
}
} else {
// Avoid underflow.
if (amount >= 0) {
amount_of_external_allocated_memory_ = amount;
}
}
ASSERT(amount_of_external_allocated_memory_ >= 0);
return amount_of_external_allocated_memory_;
}
static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);

// Allocate uninitialized fixed array (pretenure == NON_TENURE).
static Object* AllocateRawFixedArray(int length);
@ -896,6 +896,10 @@ class Heap : public AllStatic {
// every allocation in large object space.
static int old_gen_allocation_limit_;

// Limit on the amount of externally allocated memory allowed
// between global GCs. If reached a global GC is forced.
static int external_allocation_limit_;

// The amount of external memory registered through the API kept alive
// by global handles.
static int amount_of_external_allocated_memory_;
@ -1225,7 +1229,7 @@ class KeyedLookupCache {
// Clear the cache.
static void Clear();
private:
inline static int Hash(Map* map, String* name);
static inline int Hash(Map* map, String* name);
static const int kLength = 64;
struct Key {
Map* map;

22
deps/v8/src/ia32/codegen-ia32.cc
vendored
@ -3857,7 +3857,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
s = s->outer_scope();
}

if (s->is_eval_scope()) {
if (s != NULL && s->is_eval_scope()) {
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
Label next, fast;
@ -4351,7 +4351,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
FieldOperand(elements.reg(), JSObject::kElementsOffset));

// Write to the indexed properties array.
int offset = i * kPointerSize + Array::kHeaderSize;
int offset = i * kPointerSize + FixedArray::kHeaderSize;
__ mov(FieldOperand(elements.reg(), offset), prop_value.reg());

// Update the write barrier for the array address.
@ -5388,12 +5388,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else {
Load(node->expression());
switch (op) {
case Token::NOT:
case Token::DELETE:
case Token::TYPEOF:
UNREACHABLE();  // handled above
break;

case Token::SUB: {
bool overwrite =
(node->AsBinaryOperation() != NULL &&
@ -5448,6 +5442,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}

default:
// NOT, DELETE, TYPEOF, and VOID are handled outside the
// switch.
UNREACHABLE();
}
}
@ -6301,15 +6297,15 @@ void Reference::GetValue(TypeofState typeof_state) {
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
deferred->Branch(equal);
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);

// Shift the key to get the actual index value and check that
// it is within bounds.
__ mov(index.reg(), key.reg());
__ sar(index.reg(), kSmiTagSize);
__ cmp(index.reg(),
FieldOperand(elements.reg(), Array::kLengthOffset));
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);

// Load and check that the result is not the hole. We could
@ -6323,7 +6319,7 @@ void Reference::GetValue(TypeofState typeof_state) {
__ mov(value.reg(), Operand(elements.reg(),
index.reg(),
times_4,
Array::kHeaderSize - kHeapObjectTag));
FixedArray::kHeaderSize - kHeapObjectTag));
elements.Unuse();
index.Unuse();
__ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
@ -6495,7 +6491,7 @@ void Reference::SetValue(InitState init_state) {
__ mov(Operand(tmp.reg(),
key.reg(),
times_2,
Array::kHeaderSize - kHeapObjectTag),
FixedArray::kHeaderSize - kHeapObjectTag),
value.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);

4
deps/v8/src/ia32/codegen-ia32.h
vendored
@ -359,7 +359,7 @@ class CodeGenerator: public AstVisitor {

#define DEF_VISIT(type) \
void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

// Visit a statement and then spill the virtual frame if control flow can
@ -558,7 +558,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Node* node);
void CodeForStatementPosition(AstNode* node);
void CodeForSourcePosition(int pos);

#ifdef DEBUG

156
deps/v8/src/ia32/ic-ia32.cc
vendored
@ -43,6 +43,10 @@ namespace internal {

// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
Register r0, Register r1, Register r2,
Register name) {
@ -56,7 +60,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
//
// r2 - used to hold the capacity of the property dictionary.
//
// name - holds the name of the property and is unchanges.
// name - holds the name of the property and is unchanged.

Label done;

@ -89,7 +93,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,

// Compute the capacity mask.
const int kCapacityOffset =
Array::kHeaderSize + StringDictionary::kCapacityIndex * kPointerSize;
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
__ mov(r2, FieldOperand(r0, kCapacityOffset));
__ shr(r2, kSmiTagSize);  // convert smi to int
__ dec(r2);
@ -99,7 +104,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// cover ~93% of loads from dictionaries.
static const int kProbes = 4;
const int kElementsStartOffset =
Array::kHeaderSize + StringDictionary::kElementsStartIndex * kPointerSize;
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(r1, FieldOperand(name, String::kLengthOffset));
@ -153,6 +159,9 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
}

// The offset from the inlined patch site to the start of the
// inlined load instruction. It is 7 bytes (test eax, imm) plus
// 6 bytes (jne slow_label).
const int LoadIC::kOffsetToLoadInstruction = 13;

@ -225,11 +234,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : name
// -- esp[8] : receiver
// -----------------------------------
Label slow, fast, check_string, index_int, index_string;
Label slow, check_string, index_int, index_string, check_pixel_array;

// Load name and receiver.
__ mov(eax, (Operand(esp, kPointerSize)));
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
__ mov(eax, Operand(esp, kPointerSize));
__ mov(ecx, Operand(esp, 2 * kPointerSize));

// Check that the object isn't a smi.
__ test(ecx, Immediate(kSmiTagMask));
@ -260,24 +269,56 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(equal, &slow, not_taken);
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmp(eax, FieldOperand(ecx, Array::kLengthOffset));
__ j(below, &fast, taken);
__ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Fast case: Do the load.
__ mov(eax,
Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
__ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);

// Check whether the elements is a pixel array.
// eax: untagged index
// ecx: elements array
__ bind(&check_pixel_array);
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ j(not_equal, &slow);
__ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
__ movzx_b(eax, Operand(ecx, eax, times_1, 0));
__ shl(eax, kSmiTagSize);
__ ret(0);

// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
// Check if the key is a symbol that is not an array index.

__ bind(&check_string);
// The key is not a smi.
// Is it a string?
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &slow);
// Is the string an array index, with cached numeric value?
__ mov(ebx, FieldOperand(eax, String::kLengthOffset));
__ test(ebx, Immediate(String::kIsArrayIndexMask));
__ j(not_zero, &index_string, not_taken);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));

// If the string is a symbol, do a quick inline probe of the receiver's
// dictionary, if it exists.
__ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
__ test(ebx, Immediate(kIsSymbolMask));
__ j(not_zero, &slow, not_taken);
__ j(zero, &slow, not_taken);
// Probe the dictionary leaving result in ecx.
GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
@ -299,15 +340,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ and_(eax, (1 << String::kShortLengthShift) - 1);
__ shr(eax, String::kLongLengthShift);
__ jmp(&index_int);
// Fast case: Do the load.
__ bind(&fast);
__ mov(eax, Operand(ecx, eax, times_4, Array::kHeaderSize - kHeapObjectTag));
__ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ j(equal, &slow, not_taken);
__ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
__ ret(0);
}
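A plain C++ model of the new pixel-array branch above, as a reading aid for the assembly; the function and its signature are hypothetical:

#include <stdint.h>

// Model of the keyed-load fast path for pixel arrays: the map check has
// already passed and the index is untagged. Returns false when the
// generic slow case must handle the load instead.
static bool PixelArrayLoad(const uint8_t* external_pointer, int length,
                           int index, int* result_smi) {
  if (index < 0 || index >= length) return false;  // jump to &slow
  *result_smi = external_pointer[index] << 1;      // re-tag as a smi
  return true;
}
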

@ -318,7 +350,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
Label slow, fast, array, extra;
Label slow, fast, array, extra, check_pixel_array;

// Get the receiver from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));  // 2 ~ return address, key
@ -353,8 +385,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(equal, &slow, not_taken);
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &check_pixel_array, not_taken);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(edx, Operand(ebx));
__ sar(edx, kSmiTagSize);  // untag the index and use it for the comparison
@ -364,7 +396,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: index (as a smi)
__ j(below, &fast, taken);

// Slow case: Push extra copies of the arguments (3).
__ bind(&slow);
__ pop(ecx);
@ -375,6 +406,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);

// Check whether the elements is a pixel array.
// eax: value
// ecx: elements array
// ebx: index (as a smi)
__ bind(&check_pixel_array);
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::pixel_array_map()));
__ j(not_equal, &slow);
// Check that the value is a smi. If a conversion is needed call into the
// runtime to convert and clamp.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &slow);
__ sar(ebx, kSmiTagSize);  // Untag the index.
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ sar(eax, kSmiTagSize);  // Untag the value.
{  // Clamp the value to [0..255].
Label done, check_255;
__ cmp(eax, 0);
__ j(greater_equal, &check_255);
__ mov(eax, Immediate(0));
__ jmp(&done);
__ bind(&check_255);
__ cmp(eax, 255);
__ j(less_equal, &done);
__ mov(eax, Immediate(255));
__ bind(&done);
}
__ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
__ mov_b(Operand(ecx, ebx, times_1, 0), eax);
__ ret(0);

// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@ -405,21 +467,21 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: index (as a smi)
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
Immediate(Factory::hash_table_map()));
__ j(equal, &slow, not_taken);
Immediate(Factory::fixed_array_map()));
__ j(not_equal, &check_pixel_array);

// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
__ j(above_equal, &extra, not_taken);

// Fast case: Do the store.
__ bind(&fast);
// eax: value
// ecx: FixedArray
// ebx: index (as a smi)
__ mov(Operand(ecx, ebx, times_2, Array::kHeaderSize - kHeapObjectTag), eax);
__ mov(Operand(ecx, ebx, times_2, FixedArray::kHeaderSize - kHeapObjectTag),
eax);
// Update write barrier for the elements array address.
__ mov(edx, Operand(eax));
__ RecordWrite(ecx, 0, edx, ebx);
@ -731,12 +793,10 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -----------------------------------

__ mov(eax, Operand(esp, kPointerSize));

// Move the return address below the arguments.
__ pop(ebx);
__ push(eax);
__ push(ecx);
__ push(ebx);
__ push(eax);  // receiver
__ push(ecx);  // name
__ push(ebx);  // return address

// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@ -779,7 +839,8 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {

bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address = address + 4;
Address test_instruction_address =
address + Assembler::kTargetAddrToReturnAddrDist;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
@ -805,7 +866,8 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {

static bool PatchInlinedMapCheck(Address address, Object* map) {
Address test_instruction_address = address + 4;  // 4 = stub address
Address test_instruction_address =
address + Assembler::kTargetAddrToReturnAddrDist;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address != kTestEaxByte) return false;
@ -859,12 +921,10 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {

__ mov(eax, Operand(esp, kPointerSize));
__ mov(ecx, Operand(esp, 2 * kPointerSize));

// Move the return address below the arguments.
__ pop(ebx);
__ push(ecx);
__ push(eax);
__ push(ebx);
__ push(ecx);  // receiver
__ push(eax);  // name
__ push(ebx);  // return address

// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@ -899,12 +959,12 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// -- esp[4] : receiver
// -----------------------------------

// Move the return address below the arguments.
__ pop(ebx);
__ push(Operand(esp, 0));
__ push(ecx);
__ push(eax);
__ push(ebx);
__ push(Operand(esp, 0));  // receiver
__ push(ecx);  // transition map
__ push(eax);  // value
__ push(ebx);  // return address

// Perform tail call to the entry.
__ TailCallRuntime(
ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);

9
deps/v8/src/ia32/macro-assembler-ia32.cc
vendored
@ -79,7 +79,7 @@ static void RecordWriteHelper(MacroAssembler* masm,
// Add the page header, array header, and array body size to the page
// address.
masm->add(Operand(object), Immediate(Page::kObjectStartOffset
+ Array::kHeaderSize));
+ FixedArray::kHeaderSize));
masm->add(object, Operand(scratch));

@ -199,9 +199,10 @@ void MacroAssembler::RecordWrite(Register object, int offset,
lea(dst, Operand(object, offset));
} else {
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric
lea(dst,
Operand(object, dst, times_2, Array::kHeaderSize - kHeapObjectTag));
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
lea(dst, Operand(object, dst, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
// record write code isn't going to save us any memory.

25
deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
vendored
@ -634,11 +634,9 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(Immediate(0));  // Make room for "input start - 1" constant.

// Check if we have space on the stack for registers.
Label retry_stack_check;
Label stack_limit_hit;
Label stack_ok;

__ bind(&retry_stack_check);
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ mov(ecx, esp);
@ -658,10 +656,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CallCheckStackGuardState(ebx);
__ or_(eax, Operand(eax));
// If returned value is non-zero, we exit with the returned value as result.
// Otherwise it was a preemption and we just check the limit again.
__ j(equal, &retry_stack_check);
// Return value was non-zero. Exit with exception or retry.
__ jmp(&exit_label_);
__ j(not_zero, &exit_label_);

__ bind(&stack_ok);

@ -762,19 +757,11 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(backtrack_stackpointer());
__ push(edi);

Label retry;

__ bind(&retry);
CallCheckStackGuardState(ebx);
__ or_(eax, Operand(eax));
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &exit_label_);
// Check if we are still preempted.
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ cmp(esp, Operand::StaticVariable(stack_guard_limit));
__ j(below_equal, &retry);

__ pop(edi);
__ pop(backtrack_stackpointer());
@ -1073,10 +1060,12 @@ int RegExpMacroAssemblerIA32::CaseInsensitiveCompareUC16(Address byte_offset1,
unibrow::uchar c1 = substring1[i];
unibrow::uchar c2 = substring2[i];
if (c1 != c2) {
canonicalize.get(c1, '\0', &c1);
if (c1 != c2) {
canonicalize.get(c2, '\0', &c2);
if (c1 != c2) {
unibrow::uchar s1[1] = { c1 };
canonicalize.get(c1, '\0', s1);
if (s1[0] != c2) {
unibrow::uchar s2[1] = { c2 };
canonicalize.get(c2, '\0', s2);
if (s1[0] != s2[0]) {
return 0;
}
}
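The fix here: the old code used the character variables themselves as output buffers for canonicalize.get(), so later comparisons saw overwritten values. A self-contained sketch of the corrected logic, with a trivial ASCII fold standing in for unibrow's canonicalization table:

#include <stdint.h>

typedef uint32_t uchar;

// Stand-in for unibrow::Mapping<Canonicalize>::get(); the real table
// handles full Unicode. Writes the canonical form into 'out'.
static int Canonicalize(uchar c, uchar* out) {
  out[0] = (c >= 'a' && c <= 'z') ? c - ('a' - 'A') : c;
  return 1;
}

static bool CharsMatchIgnoringCase(uchar c1, uchar c2) {
  if (c1 == c2) return true;
  uchar s1[1] = { c1 };
  Canonicalize(c1, s1);   // canonicalize the first char into a buffer
  if (s1[0] == c2) return true;
  uchar s2[1] = { c2 };
  Canonicalize(c2, s2);   // then the second; compare the canonical forms
  return s1[0] == s2[0];
}
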
@ -49,7 +49,7 @@ bool RegisterAllocator::IsReserved(Register reg) {

int RegisterAllocator::ToNumber(Register reg) {
ASSERT(reg.is_valid() && !IsReserved(reg));
static int numbers[] = {
const int kNumbers[] = {
0,  // eax
2,  // ecx
3,  // edx
@ -59,14 +59,14 @@ int RegisterAllocator::ToNumber(Register reg) {
-1,  // esi
4  // edi
};
return numbers[reg.code()];
return kNumbers[reg.code()];
}

Register RegisterAllocator::ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
static Register registers[] = { eax, ebx, ecx, edx, edi };
return registers[num];
const Register kRegisters[] = { eax, ebx, ecx, edx, edi };
return kRegisters[num];
}

500
deps/v8/src/ia32/stub-cache-ia32.cc
vendored
@ -152,6 +152,22 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}

template <typename Pushable>
static void PushInterceptorArguments(MacroAssembler* masm,
Register receiver,
Register holder,
Pushable name,
JSObject* holder_obj) {
__ push(receiver);
__ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
__ mov(receiver, Immediate(Handle<Object>(interceptor)));
__ push(receiver);
__ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
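// At this point five values are on the stack (receiver, holder, name,
// interceptor info, interceptor data), matching the argument count of 5
// used by the interceptor runtime calls below.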
}

void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
@ -266,13 +282,327 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
__ mov(dst, FieldOperand(src, offset));
} else {
// Calculate the offset into the properties array.
int offset = index * kPointerSize + Array::kHeaderSize;
int offset = index * kPointerSize + FixedArray::kHeaderSize;
__ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
__ mov(dst, FieldOperand(dst, offset));
}
}

template <class Pushable>
static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
Register receiver,
Register holder,
Pushable name,
JSObject* holder_obj) {
PushInterceptorArguments(masm, receiver, holder, name, holder_obj);

ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
__ mov(eax, Immediate(5));
__ mov(ebx, Immediate(ref));

CEntryStub stub;
__ CallStub(&stub);
}

template <class Compiler>
static void CompileLoadInterceptor(Compiler* compiler,
StubCompiler* stub_compiler,
MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());

// Check that the receiver isn't a smi.
__ test(receiver, Immediate(kSmiTagMask));
__ j(zero, miss, not_taken);

// Check that the maps haven't changed.
Register reg =
stub_compiler->CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);

if (lookup->IsValid() && lookup->IsCacheable()) {
compiler->CompileCacheable(masm,
stub_compiler,
receiver,
reg,
scratch1,
scratch2,
holder,
lookup,
name,
miss);
} else {
compiler->CompileRegular(masm,
receiver,
reg,
scratch2,
holder,
miss);
}
}

static void LookupPostInterceptor(JSObject* holder,
String* name,
LookupResult* lookup) {
holder->LocalLookupRealNamedProperty(name, lookup);
if (lookup->IsNotFound()) {
Object* proto = holder->GetPrototype();
if (proto != Heap::null_value()) {
proto->Lookup(name, lookup);
}
}
}

class LoadInterceptorCompiler BASE_EMBEDDED {
public:
explicit LoadInterceptorCompiler(Register name) : name_(name) {}

void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
Register receiver,
Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
LookupResult* lookup,
String* name,
Label* miss_label) {
AccessorInfo* callback = 0;
bool optimize = false;
// So far the most popular follow ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
if (lookup->type() == FIELD) {
optimize = true;
} else if (lookup->type() == CALLBACKS) {
Object* callback_object = lookup->GetCallbackObject();
if (callback_object->IsAccessorInfo()) {
callback = AccessorInfo::cast(callback_object);
optimize = callback->getter() != NULL;
}
}

if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
return;
}

// Note: starting a frame here makes GC aware of pointers pushed below.
__ EnterInternalFrame();

if (lookup->type() == CALLBACKS) {
__ push(receiver);
}
__ push(holder);
__ push(name_);

CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);

Label interceptor_failed;
__ cmp(eax, Factory::no_interceptor_result_sentinel());
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);

__ bind(&interceptor_failed);
__ pop(name_);
__ pop(holder);
if (lookup->type() == CALLBACKS) {
__ pop(receiver);
}

__ LeaveInternalFrame();

if (lookup->type() == FIELD) {
holder = stub_compiler->CheckPrototypes(holder_obj, holder,
lookup->holder(), scratch1,
scratch2,
name,
miss_label);
stub_compiler->GenerateFastPropertyLoad(masm, eax,
holder, lookup->holder(),
lookup->GetFieldIndex());
__ ret(0);
} else {
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);

Label cleanup;
__ pop(scratch2);
__ push(receiver);
__ push(scratch2);

holder = stub_compiler->CheckPrototypes(holder_obj, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);

__ pop(scratch2);  // save old return address
__ push(holder);
__ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
__ push(holder);
__ push(FieldOperand(holder, AccessorInfo::kDataOffset));
__ push(name_);
__ push(scratch2);  // restore old return address

ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallRuntime(ref, 5);

__ bind(&cleanup);
__ pop(scratch1);
__ pop(scratch2);
__ push(scratch1);
}
}

void CompileRegular(MacroAssembler* masm,
Register receiver,
Register holder,
Register scratch,
JSObject* holder_obj,
Label* miss_label) {
__ pop(scratch);  // save old return address
PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
__ push(scratch);  // restore old return address

ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallRuntime(ref, 5);
}

private:
Register name_;
};

class CallInterceptorCompiler BASE_EMBEDDED {
public:
explicit CallInterceptorCompiler(const ParameterCount& arguments)
: arguments_(arguments), argc_(arguments.immediate()) {}

void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
Register receiver,
Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
LookupResult* lookup,
String* name,
Label* miss_label) {
JSFunction* function = 0;
bool optimize = false;
// So far the most popular case for failed interceptor is
// CONSTANT_FUNCTION sitting below.
if (lookup->type() == CONSTANT_FUNCTION) {
function = lookup->GetConstantFunction();
// JSArray holder is a special case for call constant function
// (see the corresponding code).
if (function->is_compiled() && !holder_obj->IsJSArray()) {
optimize = true;
}
}

if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
return;
}

__ EnterInternalFrame();
__ push(holder);  // save the holder

CompileCallLoadPropertyWithInterceptor(
masm,
receiver,
holder,
// Under EnterInternalFrame this refers to name.
Operand(ebp, (argc_ + 3) * kPointerSize),
holder_obj);

__ pop(receiver);  // restore holder
__ LeaveInternalFrame();

__ cmp(eax, Factory::no_interceptor_result_sentinel());
Label invoke;
__ j(not_equal, &invoke);

stub_compiler->CheckPrototypes(holder_obj, receiver,
lookup->holder(), scratch1,
scratch2,
name,
miss_label);
if (lookup->holder()->IsGlobalObject()) {
__ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize));
__ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
__ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx);
}

ASSERT(function->is_compiled());
// Get the function and setup the context.
__ mov(edi, Immediate(Handle<JSFunction>(function)));
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

// Jump to the cached code (tail call).
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
__ InvokeCode(code, expected, arguments_,
RelocInfo::CODE_TARGET, JUMP_FUNCTION);

__ bind(&invoke);
}

void CompileRegular(MacroAssembler* masm,
Register receiver,
Register holder,
Register scratch,
JSObject* holder_obj,
Label* miss_label) {
__ EnterInternalFrame();

PushInterceptorArguments(masm,
receiver,
holder,
Operand(ebp, (argc_ + 3) * kPointerSize),
holder_obj);

ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
__ mov(eax, Immediate(5));
|
||||
__ mov(ebx, Immediate(ref));
|
||||
|
||||
CEntryStub stub;
|
||||
__ CallStub(&stub);
|
||||
|
||||
__ LeaveInternalFrame();
|
||||
}
|
||||
|
||||
private:
|
||||
const ParameterCount& arguments_;
|
||||
int argc_;
|
||||
};
|
||||
|
||||
|
||||
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
|
||||
@ -349,7 +679,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
||||
__ RecordWrite(receiver_reg, offset, name_reg, scratch);
|
||||
} else {
|
||||
// Write to the properties array.
|
||||
int offset = index * kPointerSize + Array::kHeaderSize;
|
||||
int offset = index * kPointerSize + FixedArray::kHeaderSize;
|
||||
// Get the properties array (optimistically).
|
||||
__ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
|
||||
__ mov(FieldOperand(scratch, offset), eax);
|
||||
@ -449,15 +779,17 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
|
||||
// Push the arguments on the JS stack of the caller.
|
||||
__ pop(scratch2); // remove return address
|
||||
__ push(receiver); // receiver
|
||||
__ push(Immediate(Handle<AccessorInfo>(callback))); // callback data
|
||||
__ push(name_reg); // name
|
||||
__ push(reg); // holder
|
||||
__ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data
|
||||
__ push(reg);
|
||||
__ push(FieldOperand(reg, AccessorInfo::kDataOffset));
|
||||
__ push(name_reg); // name
|
||||
__ push(scratch2); // restore return address
|
||||
|
||||
// Do tail-call to the runtime system.
|
||||
ExternalReference load_callback_property =
|
||||
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
|
||||
__ TailCallRuntime(load_callback_property, 4);
|
||||
__ TailCallRuntime(load_callback_property, 5);
|
||||
}
|
||||
|
||||
|
||||
@ -486,36 +818,25 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
|
||||
|
||||
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
|
||||
JSObject* holder,
|
||||
Smi* lookup_hint,
|
||||
LookupResult* lookup,
|
||||
Register receiver,
|
||||
Register name_reg,
|
||||
Register scratch1,
|
||||
Register scratch2,
|
||||
String* name,
|
||||
Label* miss) {
|
||||
// Check that the receiver isn't a smi.
|
||||
__ test(receiver, Immediate(kSmiTagMask));
|
||||
__ j(zero, miss, not_taken);
|
||||
|
||||
// Check that the maps haven't changed.
|
||||
Register reg =
|
||||
CheckPrototypes(object, receiver, holder,
|
||||
scratch1, scratch2, name, miss);
|
||||
|
||||
// Push the arguments on the JS stack of the caller.
|
||||
__ pop(scratch2); // remove return address
|
||||
__ push(receiver); // receiver
|
||||
__ push(reg); // holder
|
||||
__ push(name_reg); // name
|
||||
// TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
|
||||
// LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
|
||||
__ push(Immediate(lookup_hint));
|
||||
__ push(scratch2); // restore return address
|
||||
|
||||
// Do tail-call to the runtime system.
|
||||
ExternalReference load_ic_property =
|
||||
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
|
||||
__ TailCallRuntime(load_ic_property, 4);
|
||||
LoadInterceptorCompiler compiler(name_reg);
|
||||
CompileLoadInterceptor(&compiler,
|
||||
this,
|
||||
masm(),
|
||||
object,
|
||||
holder,
|
||||
name,
|
||||
lookup,
|
||||
receiver,
|
||||
scratch1,
|
||||
scratch2,
|
||||
miss);
|
||||
}
|
||||
|
||||
|
||||
@ -680,13 +1001,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
|
||||
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
|
||||
CheckPrototypes(JSObject::cast(object), edx, holder,
|
||||
ebx, ecx, name, &miss);
|
||||
// Make sure object->elements()->map() != Heap::dictionary_array_map()
|
||||
// Make sure object->HasFastElements().
|
||||
// Get the elements array of the object.
|
||||
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
|
||||
// Check that the object is in fast mode (not dictionary).
|
||||
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
|
||||
Immediate(Factory::hash_table_map()));
|
||||
__ j(equal, &miss, not_taken);
|
||||
Immediate(Factory::fixed_array_map()));
|
||||
__ j(not_equal, &miss, not_taken);
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -728,47 +1049,32 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
|
||||
// Get the number of arguments.
|
||||
const int argc = arguments().immediate();
|
||||
|
||||
LookupResult lookup;
|
||||
LookupPostInterceptor(holder, name, &lookup);
|
||||
|
||||
// Get the receiver from the stack.
|
||||
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
|
||||
|
||||
// Check that the receiver isn't a smi.
|
||||
__ test(edx, Immediate(kSmiTagMask));
|
||||
__ j(zero, &miss, not_taken);
|
||||
CallInterceptorCompiler compiler(arguments());
|
||||
CompileLoadInterceptor(&compiler,
|
||||
this,
|
||||
masm(),
|
||||
JSObject::cast(object),
|
||||
holder,
|
||||
name,
|
||||
&lookup,
|
||||
edx,
|
||||
ebx,
|
||||
ecx,
|
||||
&miss);
|
||||
|
||||
// Check that maps have not changed and compute the holder register.
|
||||
Register reg =
|
||||
CheckPrototypes(JSObject::cast(object), edx, holder,
|
||||
ebx, ecx, name, &miss);
|
||||
|
||||
// Enter an internal frame.
|
||||
__ EnterInternalFrame();
|
||||
|
||||
// Push arguments on the expression stack.
|
||||
__ push(edx); // receiver
|
||||
__ push(reg); // holder
|
||||
__ push(Operand(ebp, (argc + 3) * kPointerSize)); // name
|
||||
__ push(Immediate(holder->InterceptorPropertyLookupHint(name)));
|
||||
|
||||
// Perform call.
|
||||
ExternalReference load_interceptor =
|
||||
ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
|
||||
__ mov(eax, Immediate(4));
|
||||
__ mov(ebx, Immediate(load_interceptor));
|
||||
|
||||
CEntryStub stub;
|
||||
__ CallStub(&stub);
|
||||
|
||||
// Move result to edi and restore receiver.
|
||||
__ mov(edi, eax);
|
||||
__ mov(edx, Operand(ebp, (argc + 2) * kPointerSize)); // receiver
|
||||
|
||||
// Exit frame.
|
||||
__ LeaveInternalFrame();
|
||||
// Restore receiver.
|
||||
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
|
||||
|
||||
// Check that the function really is a function.
|
||||
__ test(edi, Immediate(kSmiTagMask));
|
||||
__ test(eax, Immediate(kSmiTagMask));
|
||||
__ j(zero, &miss, not_taken);
|
||||
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
|
||||
__ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
|
||||
__ j(not_equal, &miss, not_taken);
|
||||
|
||||
// Patch the receiver on the stack with the global proxy if
|
||||
@ -779,6 +1085,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
|
||||
}
|
||||
|
||||
// Invoke the function.
|
||||
__ mov(edi, eax);
|
||||
__ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
|
||||
|
||||
// Handle load cache miss.
|
||||
@ -800,8 +1107,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ IncrementCounter(&Counters::call_global_inline, 1);
|
||||
|
||||
// Get the number of arguments.
|
||||
const int argc = arguments().immediate();
|
||||
|
||||
@ -837,6 +1142,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
|
||||
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
|
||||
|
||||
// Jump to the cached code (tail call).
|
||||
__ IncrementCounter(&Counters::call_global_inline, 1);
|
||||
ASSERT(function->is_compiled());
|
||||
Handle<Code> code(function->code());
|
||||
ParameterCount expected(function->shared()->formal_parameter_count());
|
||||
@ -845,7 +1151,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
|
||||
|
||||
// Handle call cache miss.
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(&Counters::call_global_inline, 1);
|
||||
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
|
||||
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
|
||||
__ jmp(ic, RelocInfo::CODE_TARGET);
|
||||
@ -1009,10 +1314,8 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ IncrementCounter(&Counters::named_store_global_inline, 1);
|
||||
|
||||
// Check that the map of the global has not changed.
|
||||
__ mov(ebx, (Operand(esp, kPointerSize)));
|
||||
__ mov(ebx, Operand(esp, kPointerSize));
|
||||
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
|
||||
Immediate(Handle<Map>(object->map())));
|
||||
__ j(not_equal, &miss, not_taken);
|
||||
@ -1022,11 +1325,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
|
||||
__ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
|
||||
|
||||
// Return the value (register eax).
|
||||
__ IncrementCounter(&Counters::named_store_global_inline, 1);
|
||||
__ ret(0);
|
||||
|
||||
// Handle store cache miss.
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(&Counters::named_store_global_inline, 1);
|
||||
__ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
|
||||
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
|
||||
__ jmp(ic, RelocInfo::CODE_TARGET);
|
||||
@ -1091,7 +1394,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
|
||||
__ bind(&miss);
|
||||
GenerateLoadMiss(masm(), Code::LOAD_IC);
|
||||
@ -1112,7 +1415,7 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
|
||||
callback, name, &miss);
|
||||
__ bind(&miss);
|
||||
@ -1134,7 +1437,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
|
||||
__ bind(&miss);
|
||||
GenerateLoadMiss(masm(), Code::LOAD_IC);
|
||||
@ -1154,12 +1457,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
LookupResult lookup;
|
||||
LookupPostInterceptor(holder, name, &lookup);
|
||||
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
// TODO(368): Compile in the whole chain: all the interceptors in
|
||||
// prototypes and ultimate answer.
|
||||
GenerateLoadInterceptor(receiver,
|
||||
holder,
|
||||
holder->InterceptorPropertyLookupHint(name),
|
||||
&lookup,
|
||||
eax,
|
||||
ecx,
|
||||
edx,
|
||||
@ -1187,10 +1493,8 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ IncrementCounter(&Counters::named_load_global_inline, 1);
|
||||
|
||||
// Get the receiver from the stack.
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
|
||||
// If the object is the holder then we know that it's a global
|
||||
// object which can only happen for contextual loads. In this case,
|
||||
@ -1216,10 +1520,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
|
||||
__ Check(not_equal, "DontDelete cells can't contain the hole");
|
||||
}
|
||||
|
||||
__ IncrementCounter(&Counters::named_load_global_inline, 1);
|
||||
__ ret(0);
|
||||
|
||||
__ bind(&miss);
|
||||
__ DecrementCounter(&Counters::named_load_global_inline, 1);
|
||||
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
|
||||
GenerateLoadMiss(masm(), Code::LOAD_IC);
|
||||
|
||||
@ -1239,8 +1543,8 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
__ mov(ecx, Operand(esp, 2 * kPointerSize));
|
||||
__ IncrementCounter(&Counters::keyed_load_field, 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
@ -1269,8 +1573,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
__ mov(ecx, Operand(esp, 2 * kPointerSize));
|
||||
__ IncrementCounter(&Counters::keyed_load_callback, 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
@ -1299,8 +1603,8 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
__ mov(ecx, Operand(esp, 2 * kPointerSize));
|
||||
__ IncrementCounter(&Counters::keyed_load_constant_function, 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
@ -1328,17 +1632,19 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
__ mov(ecx, Operand(esp, 2 * kPointerSize));
|
||||
__ IncrementCounter(&Counters::keyed_load_interceptor, 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
|
||||
__ j(not_equal, &miss, not_taken);
|
||||
|
||||
LookupResult lookup;
|
||||
LookupPostInterceptor(holder, name, &lookup);
|
||||
GenerateLoadInterceptor(receiver,
|
||||
holder,
|
||||
Smi::FromInt(JSObject::kLookupInHolder),
|
||||
&lookup,
|
||||
ecx,
|
||||
eax,
|
||||
edx,
|
||||
@ -1364,8 +1670,8 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
__ mov(ecx, Operand(esp, 2 * kPointerSize));
|
||||
__ IncrementCounter(&Counters::keyed_load_array_length, 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
@ -1390,8 +1696,8 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
__ mov(ecx, Operand(esp, 2 * kPointerSize));
|
||||
__ IncrementCounter(&Counters::keyed_load_string_length, 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
@ -1416,8 +1722,8 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ mov(eax, (Operand(esp, kPointerSize)));
|
||||
__ mov(ecx, (Operand(esp, 2 * kPointerSize)));
|
||||
__ mov(eax, Operand(esp, kPointerSize));
|
||||
__ mov(ecx, Operand(esp, 2 * kPointerSize));
|
||||
__ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
|
||||
|
||||
// Check that the name has not changed.
|
||||
|
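Note on the fixed_array_map comparison in CompileCallConstant above: the stub now bails out to the miss path unless the array still uses a flat FixedArray backing store. In script terms, the distinction it tests looks roughly like this (assumed engine behavior, not an API):

// Dense arrays keep a contiguous FixedArray backing store (fast elements),
// so calls on them can take the optimized stub above.
var fast = [1, 2, 3];
// A very sparse array is typically switched to dictionary elements, so the
// map check in the stub fails and execution falls back to the generic path.
var slow = [];
slow[100000] = 1;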
59
deps/v8/src/ic.cc
vendored
@ -273,28 +273,39 @@ static bool HasInterceptorGetter(JSObject* object) {
static void LookupForRead(Object* object,
                          String* name,
                          LookupResult* lookup) {
  object->Lookup(name, lookup);
  if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR) {
    return;
  }
  AssertNoAllocation no_gc;  // pointers must stay valid

  JSObject* holder = lookup->holder();
  if (HasInterceptorGetter(holder)) {
    return;
  }
  // Skip all the objects with named interceptors, but
  // without actual getter.
  while (true) {
    object->Lookup(name, lookup);
    // Besides normal conditions (property not found or it's not
    // an interceptor), bail out if lookup is not cacheable: we won't
    // be able to IC it anyway and regular lookup should work fine.
    if (lookup->IsNotFound() || lookup->type() != INTERCEPTOR ||
        !lookup->IsCacheable()) {
      return;
    }

    // There is no getter, just skip it and continue the lookup down the
    // proto chain.
    holder->LocalLookupRealNamedProperty(name, lookup);
    if (lookup->IsValid()) {
      return;
    }
    JSObject* holder = lookup->holder();
    if (HasInterceptorGetter(holder)) {
      return;
    }

    Object* proto = holder->GetPrototype();
    if (proto == Heap::null_value()) {
      return;
    }
    holder->LocalLookupRealNamedProperty(name, lookup);
    if (lookup->IsValid()) {
      ASSERT(lookup->type() != INTERCEPTOR);
      return;
    }

    LookupForRead(proto, name, lookup);
    Object* proto = holder->GetPrototype();
    if (proto->IsNull()) {
      lookup->NotFound();
      return;
    }

    object = proto;
  }
}


@ -736,7 +747,7 @@ Object* KeyedLoadIC::Load(State state,
      set_target(Code::cast(code));
#ifdef DEBUG
      TraceIC("KeyedLoadIC", name, state, target());
#endif
#endif  // DEBUG
      return Smi::FromInt(string->length());
    }

@ -748,7 +759,7 @@ Object* KeyedLoadIC::Load(State state,
      set_target(Code::cast(code));
#ifdef DEBUG
      TraceIC("KeyedLoadIC", name, state, target());
#endif
#endif  // DEBUG
      return JSArray::cast(*object)->length();
    }

@ -761,7 +772,7 @@ Object* KeyedLoadIC::Load(State state,
      set_target(Code::cast(code));
#ifdef DEBUG
      TraceIC("KeyedLoadIC", name, state, target());
#endif
#endif  // DEBUG
      return Accessors::FunctionGetPrototype(*object, 0);
    }
  }
@ -787,7 +798,6 @@ Object* KeyedLoadIC::Load(State state,
    }
  }

  // Update the inline cache.
  if (FLAG_use_ic && lookup.IsLoaded()) {
    UpdateCaches(&lookup, state, object, name);
  }
@ -1221,11 +1231,6 @@ void CallIC::GenerateInitialize(MacroAssembler* masm, int argc) {
}


void CallIC::GeneratePreMonomorphic(MacroAssembler* masm, int argc) {
  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}


void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
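The rewritten LookupForRead above replaces recursion with a loop that skips holders whose named interceptor has no real getter. A rough JavaScript analogue of the walk it performs (hasRealNamedProperty is a hypothetical stand-in for LocalLookupRealNamedProperty, not a V8 API):

// Walk the prototype chain looking for a "real" property, as the C++ loop does.
function lookupForRead(object, name) {
  while (object !== null) {
    if (hasRealNamedProperty(object, name)) return object;
    object = object.__proto__;  // proto walk, as in the C++ code
  }
  return null;  // reached the end of the chain: not found
}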
36
deps/v8/src/ic.h
vendored
@ -35,21 +35,24 @@ namespace internal {

// IC_UTIL_LIST defines all utility functions called from generated
// inline caching code. The argument for the macro, ICU, is the function name.
#define IC_UTIL_LIST(ICU) \
  ICU(LoadIC_Miss) \
  ICU(KeyedLoadIC_Miss) \
  ICU(CallIC_Miss) \
  ICU(StoreIC_Miss) \
  ICU(SharedStoreIC_ExtendStorage) \
  ICU(KeyedStoreIC_Miss) \
  /* Utilities for IC stubs. */ \
  ICU(LoadCallbackProperty) \
  ICU(StoreCallbackProperty) \
  ICU(LoadInterceptorProperty) \
#define IC_UTIL_LIST(ICU) \
  ICU(LoadIC_Miss) \
  ICU(KeyedLoadIC_Miss) \
  ICU(CallIC_Miss) \
  ICU(StoreIC_Miss) \
  ICU(SharedStoreIC_ExtendStorage) \
  ICU(KeyedStoreIC_Miss) \
  /* Utilities for IC stubs. */ \
  ICU(LoadCallbackProperty) \
  ICU(StoreCallbackProperty) \
  ICU(LoadPropertyWithInterceptorOnly) \
  ICU(LoadPropertyWithInterceptorForLoad) \
  ICU(LoadPropertyWithInterceptorForCall) \
  ICU(StoreInterceptorProperty)

//
// IC is the base class for LoadIC, StoreIC and CallIC.
// IC is the base class for LoadIC, StoreIC, CallIC, KeyedLoadIC,
// and KeyedStoreIC.
//
class IC {
 public:
@ -173,7 +176,6 @@ class CallIC: public IC {

  // Code generator routines.
  static void GenerateInitialize(MacroAssembler* masm, int argc);
  static void GeneratePreMonomorphic(MacroAssembler* masm, int argc);
  static void GenerateMiss(MacroAssembler* masm, int argc);
  static void GenerateMegamorphic(MacroAssembler* masm, int argc);
  static void GenerateNormal(MacroAssembler* masm, int argc);
@ -219,8 +221,8 @@ class LoadIC: public IC {
  static void GenerateFunctionPrototype(MacroAssembler* masm);

  // The offset from the inlined patch site to the start of the
  // inlined load instruction. It is 7 bytes (test eax, imm) plus
  // 6 bytes (jne slow_label).
  // inlined load instruction. It is architecture-dependent, and not
  // used on ARM.
  static const int kOffsetToLoadInstruction;

 private:
@ -387,6 +389,10 @@ class KeyedStoreIC: public IC {

  // Support for patching the map that is checked in an inlined
  // version of keyed store.
  // The address is the patch point for the IC call
  // (Assembler::kTargetAddrToReturnAddrDist before the end of
  // the call/return address).
  // The map is the new map that the inlined code should check against.
  static bool PatchInlinedStore(Address address, Object* map);

  friend class IC;
8
deps/v8/src/interpreter-irregexp.cc
vendored
@ -51,9 +51,11 @@ static bool BackRefMatchesNoCase(int from,
      unibrow::uchar old_char = subject[from++];
      unibrow::uchar new_char = subject[current++];
      if (old_char == new_char) continue;
      interp_canonicalize.get(old_char, '\0', &old_char);
      interp_canonicalize.get(new_char, '\0', &new_char);
      if (old_char != new_char) {
      unibrow::uchar old_string[1] = { old_char };
      unibrow::uchar new_string[1] = { new_char };
      interp_canonicalize.get(old_char, '\0', old_string);
      interp_canonicalize.get(new_char, '\0', new_string);
      if (old_string[0] != new_string[0]) {
        return false;
      }
    }
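The interpreter fix above only matters for case-insensitive backreferences, where both the captured text and the candidate text are canonicalized before being compared. A quick check of the standard regexp semantics it preserves (print is d8's shell function):

// 'A' must match the backreference to the captured 'a' under the /i flag.
print(/^(a)\1$/i.test("aA"));  // true
print(/^(a)\1$/i.test("ab"));  // false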
260
deps/v8/src/jsregexp-inl.h
vendored
@ -1,260 +0,0 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_JSREGEXP_INL_H_
#define V8_JSREGEXP_INL_H_


#include "jsregexp.h"
#include "regexp-macro-assembler.h"


namespace v8 {
namespace internal {


template <typename C>
bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) {
  if (is_empty()) {
    // If the tree is empty, insert the new node.
    root_ = new Node(key, C::kNoValue);
  } else {
    // Splay on the key to move the last node on the search path
    // for the key to the root of the tree.
    Splay(key);
    // Ignore repeated insertions with the same key.
    int cmp = C::Compare(key, root_->key_);
    if (cmp == 0) {
      locator->bind(root_);
      return false;
    }
    // Insert the new node.
    Node* node = new Node(key, C::kNoValue);
    if (cmp > 0) {
      node->left_ = root_;
      node->right_ = root_->right_;
      root_->right_ = NULL;
    } else {
      node->right_ = root_;
      node->left_ = root_->left_;
      root_->left_ = NULL;
    }
    root_ = node;
  }
  locator->bind(root_);
  return true;
}


template <typename C>
bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) {
  if (is_empty())
    return false;
  Splay(key);
  if (C::Compare(key, root_->key_) == 0) {
    locator->bind(root_);
    return true;
  } else {
    return false;
  }
}


template <typename C>
bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key,
                                            Locator* locator) {
  if (is_empty())
    return false;
  // Splay on the key to move the node with the given key or the last
  // node on the search path to the top of the tree.
  Splay(key);
  // Now the result is either the root node or the greatest node in
  // the left subtree.
  int cmp = C::Compare(root_->key_, key);
  if (cmp <= 0) {
    locator->bind(root_);
    return true;
  } else {
    Node* temp = root_;
    root_ = root_->left_;
    bool result = FindGreatest(locator);
    root_ = temp;
    return result;
  }
}


template <typename C>
bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key,
                                            Locator* locator) {
  if (is_empty())
    return false;
  // Splay on the key to move the node with the given key or the last
  // node on the search path to the top of the tree.
  Splay(key);
  // Now the result is either the root node or the least node in
  // the right subtree.
  int cmp = C::Compare(root_->key_, key);
  if (cmp >= 0) {
    locator->bind(root_);
    return true;
  } else {
    Node* temp = root_;
    root_ = root_->right_;
    bool result = FindLeast(locator);
    root_ = temp;
    return result;
  }
}


template <typename C>
bool ZoneSplayTree<C>::FindGreatest(Locator* locator) {
  if (is_empty())
    return false;
  Node* current = root_;
  while (current->right_ != NULL)
    current = current->right_;
  locator->bind(current);
  return true;
}


template <typename C>
bool ZoneSplayTree<C>::FindLeast(Locator* locator) {
  if (is_empty())
    return false;
  Node* current = root_;
  while (current->left_ != NULL)
    current = current->left_;
  locator->bind(current);
  return true;
}


template <typename C>
bool ZoneSplayTree<C>::Remove(const Key& key) {
  // Bail if the tree is empty
  if (is_empty())
    return false;
  // Splay on the key to move the node with the given key to the top.
  Splay(key);
  // Bail if the key is not in the tree
  if (C::Compare(key, root_->key_) != 0)
    return false;
  if (root_->left_ == NULL) {
    // No left child, so the new tree is just the right child.
    root_ = root_->right_;
  } else {
    // Left child exists.
    Node* right = root_->right_;
    // Make the original left child the new root.
    root_ = root_->left_;
    // Splay to make sure that the new root has an empty right child.
    Splay(key);
    // Insert the original right child as the right child of the new
    // root.
    root_->right_ = right;
  }
  return true;
}


template <typename C>
void ZoneSplayTree<C>::Splay(const Key& key) {
  if (is_empty())
    return;
  Node dummy_node(C::kNoKey, C::kNoValue);
  // Create a dummy node. The use of the dummy node is a bit
  // counter-intuitive: The right child of the dummy node will hold
  // the L tree of the algorithm. The left child of the dummy node
  // will hold the R tree of the algorithm. Using a dummy node, left
  // and right will always be nodes and we avoid special cases.
  Node* dummy = &dummy_node;
  Node* left = dummy;
  Node* right = dummy;
  Node* current = root_;
  while (true) {
    int cmp = C::Compare(key, current->key_);
    if (cmp < 0) {
      if (current->left_ == NULL)
        break;
      if (C::Compare(key, current->left_->key_) < 0) {
        // Rotate right.
        Node* temp = current->left_;
        current->left_ = temp->right_;
        temp->right_ = current;
        current = temp;
        if (current->left_ == NULL)
          break;
      }
      // Link right.
      right->left_ = current;
      right = current;
      current = current->left_;
    } else if (cmp > 0) {
      if (current->right_ == NULL)
        break;
      if (C::Compare(key, current->right_->key_) > 0) {
        // Rotate left.
        Node* temp = current->right_;
        current->right_ = temp->left_;
        temp->left_ = current;
        current = temp;
        if (current->right_ == NULL)
          break;
      }
      // Link left.
      left->right_ = current;
      left = current;
      current = current->right_;
    } else {
      break;
    }
  }
  // Assemble.
  left->right_ = current->left_;
  right->left_ = current->right_;
  current->left_ = dummy->right_;
  current->right_ = dummy->left_;
  root_ = current;
}


template <typename Node, class Callback>
static void DoForEach(Node* node, Callback* callback) {
  if (node == NULL) return;
  DoForEach<Node, Callback>(node->left(), callback);
  callback->Call(node->key(), node->value());
  DoForEach<Node, Callback>(node->right(), callback);
}


}}  // namespace v8::internal


#endif  // V8_JSREGEXP_INL_H_
8
deps/v8/src/jsregexp.cc
vendored
@ -31,7 +31,7 @@
#include "compiler.h"
#include "execution.h"
#include "factory.h"
#include "jsregexp-inl.h"
#include "jsregexp.h"
#include "platform.h"
#include "runtime.h"
#include "top.h"
@ -254,7 +254,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,

  {
    NoHandleAllocation no_handles;
    FixedArray* array = last_match_info->elements();
    FixedArray* array = FixedArray::cast(last_match_info->elements());
    SetAtomLastCapture(array, *subject, value, value + needle->length());
  }
  return last_match_info;
@ -442,7 +442,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,

  if (res != RegExpMacroAssemblerIA32::SUCCESS) return Factory::null_value();

  array = Handle<FixedArray>(last_match_info->elements());
  array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
  ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
  // The captures come in (start, end+1) pairs.
  for (int i = 0; i < number_of_capture_registers; i += 2) {
@ -475,7 +475,7 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
    return Factory::null_value();
  }

  array = Handle<FixedArray>(last_match_info->elements());
  array = Handle<FixedArray>(FixedArray::cast(last_match_info->elements()));
  ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
  // The captures come in (start, end+1) pairs.
  for (int i = 0; i < number_of_capture_registers; i += 2) {
102
deps/v8/src/jsregexp.h
vendored
@ -214,108 +214,6 @@ class CharacterRange {
};


template <typename Node, class Callback>
static void DoForEach(Node* node, Callback* callback);


// A zone splay tree. The config type parameter encapsulates the
// different configurations of a concrete splay tree:
//
//   typedef Key: the key type
//   typedef Value: the value type
//   static const kNoKey: the dummy key used when no key is set
//   static const kNoValue: the dummy value used to initialize nodes
//   int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
//
template <typename Config>
class ZoneSplayTree : public ZoneObject {
 public:
  typedef typename Config::Key Key;
  typedef typename Config::Value Value;

  class Locator;

  ZoneSplayTree() : root_(NULL) { }

  // Inserts the given key in this tree with the given value. Returns
  // true if a node was inserted, otherwise false. If found the locator
  // is enabled and provides access to the mapping for the key.
  bool Insert(const Key& key, Locator* locator);

  // Looks up the key in this tree and returns true if it was found,
  // otherwise false. If the node is found the locator is enabled and
  // provides access to the mapping for the key.
  bool Find(const Key& key, Locator* locator);

  // Finds the mapping with the greatest key less than or equal to the
  // given key.
  bool FindGreatestLessThan(const Key& key, Locator* locator);

  // Find the mapping with the greatest key in this tree.
  bool FindGreatest(Locator* locator);

  // Finds the mapping with the least key greater than or equal to the
  // given key.
  bool FindLeastGreaterThan(const Key& key, Locator* locator);

  // Find the mapping with the least key in this tree.
  bool FindLeast(Locator* locator);

  // Remove the node with the given key from the tree.
  bool Remove(const Key& key);

  bool is_empty() { return root_ == NULL; }

  // Perform the splay operation for the given key. Moves the node with
  // the given key to the top of the tree. If no node has the given
  // key, the last node on the search path is moved to the top of the
  // tree.
  void Splay(const Key& key);

  class Node : public ZoneObject {
   public:
    Node(const Key& key, const Value& value)
        : key_(key),
          value_(value),
          left_(NULL),
          right_(NULL) { }
    Key key() { return key_; }
    Value value() { return value_; }
    Node* left() { return left_; }
    Node* right() { return right_; }
   private:
    friend class ZoneSplayTree;
    friend class Locator;
    Key key_;
    Value value_;
    Node* left_;
    Node* right_;
  };

  // A locator provides access to a node in the tree without actually
  // exposing the node.
  class Locator {
   public:
    explicit Locator(Node* node) : node_(node) { }
    Locator() : node_(NULL) { }
    const Key& key() { return node_->key_; }
    Value& value() { return node_->value_; }
    void set_value(const Value& value) { node_->value_ = value; }
    inline void bind(Node* node) { node_ = node; }
   private:
    Node* node_;
  };

  template <class Callback>
  void ForEach(Callback* c) {
    DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c);
  }

 private:
  Node* root_;
};


// A set of unsigned integers that behaves especially well on small
// integers (< 32). May do zone-allocation.
class OutSet: public ZoneObject {
32
deps/v8/src/log.cc
vendored
@ -843,7 +843,22 @@ void Logger::HeapSampleBeginEvent(const char* space, const char* kind) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!Log::IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg;
  msg.Append("heap-sample-begin,\"%s\",\"%s\"\n", space, kind);
  // Using non-relative system time in order to be able to synchronize with
  // external memory profiling events (e.g. DOM memory size).
  msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
             space, kind, OS::TimeCurrentMillis());
  msg.WriteToLogFile();
#endif
}


void Logger::HeapSampleStats(const char* space, const char* kind,
                             int capacity, int used) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!Log::IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg;
  msg.Append("heap-sample-stats,\"%s\",\"%s\",%d,%d\n",
             space, kind, capacity, used);
  msg.WriteToLogFile();
#endif
}
@ -869,6 +884,21 @@ void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
}


void Logger::HeapSampleJSConstructorEvent(const char* constructor,
                                          int number, int bytes) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!Log::IsEnabled() || !FLAG_log_gc) return;
  LogMessageBuilder msg;
  msg.Append("heap-js-cons-item,%s,%d,%d\n",
             constructor != NULL ?
                 (constructor[0] != '\0' ? constructor : "(anonymous)") :
                 "(no_constructor)",
             number, bytes);
  msg.WriteToLogFile();
#endif
}


void Logger::DebugTag(const char* call_site_tag) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (!Log::IsEnabled() || !FLAG_log) return;
4
deps/v8/src/log.h
vendored
@ -219,6 +219,10 @@ class Logger {
  static void HeapSampleBeginEvent(const char* space, const char* kind);
  static void HeapSampleEndEvent(const char* space, const char* kind);
  static void HeapSampleItemEvent(const char* type, int number, int bytes);
  static void HeapSampleJSConstructorEvent(const char* constructor,
                                           int number, int bytes);
  static void HeapSampleStats(const char* space, const char* kind,
                              int capacity, int used);

  static void SharedLibraryEvent(const char* library_path,
                                 uintptr_t start,
41
deps/v8/src/messages.js
vendored
@ -561,20 +561,24 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
var kAddMessageAccessorsMarker = { };

// Defines accessors for a property that is calculated the first time
// the property is read and then replaces the accessor with the value.
// Also, setting the property causes the accessors to be deleted.
// the property is read.
function DefineOneShotAccessor(obj, name, fun) {
  // Note that the accessors consistently operate on 'obj', not 'this'.
  // Since the object may occur in someone else's prototype chain we
  // can't rely on 'this' being the same as 'obj'.
  var hasBeenSet = false;
  var value;
  obj.__defineGetter__(name, function () {
    var value = fun(obj);
    obj[name] = value;
    if (hasBeenSet) {
      return value;
    }
    hasBeenSet = true;
    value = fun(obj);
    return value;
  });
  obj.__defineSetter__(name, function (v) {
    delete obj[name];
    obj[name] = v;
    hasBeenSet = true;
    value = v;
  });
}

@ -833,22 +837,25 @@ function DefineError(f) {
    } else if (!IS_UNDEFINED(m)) {
      this.message = ToString(m);
    }
    var stackTraceLimit = $Error.stackTraceLimit;
    if (stackTraceLimit) {
      // Cap the limit to avoid extremely big traces
      if (stackTraceLimit < 0 || stackTraceLimit > 10000)
        stackTraceLimit = 10000;
      var raw_stack = %CollectStackTrace(f, stackTraceLimit);
      DefineOneShotAccessor(this, 'stack', function (obj) {
        return FormatRawStackTrace(obj, raw_stack);
      });
    }
    captureStackTrace(this, f);
    } else {
      return new f(m);
    }
  });
}

function captureStackTrace(obj, cons_opt) {
  var stackTraceLimit = $Error.stackTraceLimit;
  if (!stackTraceLimit) return;
  if (stackTraceLimit < 0 || stackTraceLimit > 10000)
    stackTraceLimit = 10000;
  var raw_stack = %CollectStackTrace(cons_opt ? cons_opt : captureStackTrace,
                                     stackTraceLimit);
  DefineOneShotAccessor(obj, 'stack', function (obj) {
    return FormatRawStackTrace(obj, raw_stack);
  });
};

$Math.__proto__ = global.Object.prototype;

DefineError(function Error() { });
@ -859,6 +866,8 @@ DefineError(function ReferenceError() { });
DefineError(function EvalError() { });
DefineError(function URIError() { });

$Error.captureStackTrace = captureStackTrace;

// Setup extra properties of the Error.prototype object.
$Error.prototype.message = '';
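With the change above, the stack-capture logic is exposed to script as Error.captureStackTrace, matching the ChangeLog entry on capturing stack information for custom errors. A minimal sketch of how a custom error type can use it (MyError is just an illustrative name; print is d8's shell function):

function MyError(message) {
  this.message = message;
  // Attach a lazily formatted 'stack' property to this object; frames above
  // and including MyError itself are omitted because it is passed as the
  // second argument.
  Error.captureStackTrace(this, MyError);
}
MyError.prototype = new Error();

try {
  throw new MyError("boom");
} catch (e) {
  print(e.stack);  // formatted on first read via DefineOneShotAccessor
}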
94
deps/v8/src/objects-debug.cc
vendored
@ -115,6 +115,9 @@ void HeapObject::HeapObjectPrint() {
    case BYTE_ARRAY_TYPE:
      ByteArray::cast(this)->ByteArrayPrint();
      break;
    case PIXEL_ARRAY_TYPE:
      PixelArray::cast(this)->PixelArrayPrint();
      break;
    case FILLER_TYPE:
      PrintF("filler");
      break;
@ -191,6 +194,9 @@ void HeapObject::HeapObjectVerify() {
    case BYTE_ARRAY_TYPE:
      ByteArray::cast(this)->ByteArrayVerify();
      break;
    case PIXEL_ARRAY_TYPE:
      PixelArray::cast(this)->PixelArrayVerify();
      break;
    case CODE_TYPE:
      Code::cast(this)->CodeVerify();
      break;
@ -264,11 +270,21 @@ void ByteArray::ByteArrayPrint() {
}


void PixelArray::PixelArrayPrint() {
  PrintF("pixel array");
}


void ByteArray::ByteArrayVerify() {
  ASSERT(IsByteArray());
}


void PixelArray::PixelArrayVerify() {
  ASSERT(IsPixelArray());
}


void JSObject::PrintProperties() {
  if (HasFastProperties()) {
    DescriptorArray* descs = map()->instance_descriptors();
@ -312,15 +328,30 @@ void JSObject::PrintProperties() {


void JSObject::PrintElements() {
  if (HasFastElements()) {
    FixedArray* p = FixedArray::cast(elements());
    for (int i = 0; i < p->length(); i++) {
      PrintF(" %d: ", i);
      p->get(i)->ShortPrint();
      PrintF("\n");
  switch (GetElementsKind()) {
    case FAST_ELEMENTS: {
      // Print in array notation for non-sparse arrays.
      FixedArray* p = FixedArray::cast(elements());
      for (int i = 0; i < p->length(); i++) {
        PrintF(" %d: ", i);
        p->get(i)->ShortPrint();
        PrintF("\n");
      }
      break;
    }
  } else {
    elements()->Print();
    case PIXEL_ELEMENTS: {
      PixelArray* p = PixelArray::cast(elements());
      for (int i = 0; i < p->length(); i++) {
        PrintF(" %d: %d\n", i, p->get(i));
      }
      break;
    }
    case DICTIONARY_ELEMENTS:
      elements()->Print();
      break;
    default:
      UNREACHABLE();
      break;
  }
}

@ -402,6 +433,7 @@ static const char* TypeToString(InstanceType type) {
    case LONG_EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
    case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
    case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
    case PIXEL_ARRAY_TYPE: return "PIXEL_ARRAY";
    case FILLER_TYPE: return "FILLER";
    case JS_OBJECT_TYPE: return "JS_OBJECT";
    case JS_CONTEXT_EXTENSION_OBJECT_TYPE: return "JS_CONTEXT_EXTENSION_OBJECT";
@ -666,7 +698,7 @@ void Oddball::OddballVerify() {
  } else {
    ASSERT(number->IsSmi());
    int value = Smi::cast(number)->value();
    ASSERT(value == 0 || value == 1 || value == -1);
    ASSERT(value == 0 || value == 1 || value == -1 || value == -2);
  }
}

@ -958,6 +990,7 @@ void Script::ScriptPrint() {
}


#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugInfo::DebugInfoVerify() {
  CHECK(IsDebugInfo());
  VerifyPointer(shared());
@ -997,6 +1030,7 @@ void BreakPointInfo::BreakPointInfoPrint() {
  PrintF("\n - break_point_objects: ");
  break_point_objects()->ShortPrint();
}
#endif


void JSObject::IncrementSpillStatistics(SpillInformation* info) {
@ -1013,21 +1047,35 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) {
        dict->Capacity() - dict->NumberOfElements();
  }
  // Indexed properties
  if (HasFastElements()) {
    info->number_of_objects_with_fast_elements_++;
    int holes = 0;
    FixedArray* e = FixedArray::cast(elements());
    int len = e->length();
    for (int i = 0; i < len; i++) {
      if (e->get(i) == Heap::the_hole_value()) holes++;
  switch (GetElementsKind()) {
    case FAST_ELEMENTS: {
      info->number_of_objects_with_fast_elements_++;
      int holes = 0;
      FixedArray* e = FixedArray::cast(elements());
      int len = e->length();
      for (int i = 0; i < len; i++) {
        if (e->get(i) == Heap::the_hole_value()) holes++;
      }
      info->number_of_fast_used_elements_ += len - holes;
      info->number_of_fast_unused_elements_ += holes;
      break;
    }
    info->number_of_fast_used_elements_ += len - holes;
    info->number_of_fast_unused_elements_ += holes;
  } else {
    NumberDictionary* dict = element_dictionary();
    info->number_of_slow_used_elements_ += dict->NumberOfElements();
    info->number_of_slow_unused_elements_ +=
        dict->Capacity() - dict->NumberOfElements();
    case PIXEL_ELEMENTS: {
      info->number_of_objects_with_fast_elements_++;
      PixelArray* e = PixelArray::cast(elements());
      info->number_of_fast_used_elements_ += e->length();
      break;
    }
    case DICTIONARY_ELEMENTS: {
      NumberDictionary* dict = element_dictionary();
      info->number_of_slow_used_elements_ += dict->NumberOfElements();
      info->number_of_slow_unused_elements_ +=
          dict->Capacity() - dict->NumberOfElements();
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
110
deps/v8/src/objects-inl.h
vendored
110
deps/v8/src/objects-inl.h
vendored
@ -321,6 +321,12 @@ bool Object::IsByteArray() {
|
||||
}
|
||||
|
||||
|
||||
bool Object::IsPixelArray() {
|
||||
return Object::IsHeapObject() &&
|
||||
HeapObject::cast(this)->map()->instance_type() == PIXEL_ARRAY_TYPE;
|
||||
}
|
||||
|
||||
|
||||
bool Object::IsFailure() {
|
||||
return HAS_FAILURE_TAG(this);
|
||||
}
|
||||
@ -1043,7 +1049,22 @@ void HeapNumber::set_value(double value) {
|
||||
|
||||
|
||||
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
|
||||
ACCESSORS(JSObject, elements, FixedArray, kElementsOffset)
|
||||
|
||||
|
||||
Array* JSObject::elements() {
|
||||
Object* array = READ_FIELD(this, kElementsOffset);
|
||||
// In the assert below Dictionary is covered under FixedArray.
|
||||
ASSERT(array->IsFixedArray() || array->IsPixelArray());
|
||||
return reinterpret_cast<Array*>(array);
|
||||
}
|
||||
|
||||
|
||||
void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
|
||||
// In the assert below Dictionary is covered under FixedArray.
|
||||
ASSERT(value->IsFixedArray() || value->IsPixelArray());
|
||||
WRITE_FIELD(this, kElementsOffset, value);
|
||||
CONDITIONAL_WRITE_BARRIER(this, kElementsOffset, mode);
|
||||
}
|
||||
|
||||
|
||||
void JSObject::initialize_properties() {
|
||||
@ -1075,7 +1096,12 @@ void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
|
||||
|
||||
|
||||
int JSObject::GetHeaderSize() {
|
||||
switch (map()->instance_type()) {
|
||||
InstanceType type = map()->instance_type();
|
||||
// Check for the most common kind of JavaScript object before
|
||||
// falling into the generic switch. This speeds up the internal
|
||||
// field operations considerably on average.
|
||||
if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
|
||||
switch (type) {
|
||||
case JS_GLOBAL_PROXY_TYPE:
|
||||
return JSGlobalProxy::kSize;
|
||||
case JS_GLOBAL_OBJECT_TYPE:
|
||||
@ -1090,7 +1116,6 @@ int JSObject::GetHeaderSize() {
return JSValue::kSize;
case JS_REGEXP_TYPE:
return JSValue::kSize;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
return JSObject::kHeaderSize;
default:
@ -1498,6 +1523,7 @@ CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(Proxy)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(PixelArray)
CAST_ACCESSOR(Struct)


@ -1856,6 +1882,32 @@ Address ByteArray::GetDataStartAddress() {
}


uint8_t* PixelArray::external_pointer() {
intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
return reinterpret_cast<uint8_t*>(ptr);
}


void PixelArray::set_external_pointer(uint8_t* value, WriteBarrierMode mode) {
intptr_t ptr = reinterpret_cast<intptr_t>(value);
WRITE_INTPTR_FIELD(this, kExternalPointerOffset, ptr);
}


uint8_t PixelArray::get(int index) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = external_pointer();
return ptr[index];
}


void PixelArray::set(int index, uint8_t value) {
ASSERT((index >= 0) && (index < this->length()));
uint8_t* ptr = external_pointer();
ptr[index] = value;
}


int Map::instance_size() {
return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
}
@ -2289,6 +2341,11 @@ bool JSFunction::IsBoilerplate() {
}


bool JSFunction::IsBuiltin() {
return context()->global()->IsJSBuiltinsObject();
}


bool JSObject::IsLoaded() {
return !map()->needs_loading();
}
@ -2519,8 +2576,33 @@ void JSRegExp::SetDataAt(int index, Object* value) {
}


JSObject::ElementsKind JSObject::GetElementsKind() {
Array* array = elements();
if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) {
return FAST_ELEMENTS;
}
ASSERT(array->IsDictionary());
return DICTIONARY_ELEMENTS;
}
ASSERT(array->IsPixelArray());
return PIXEL_ELEMENTS;
}


bool JSObject::HasFastElements() {
return !elements()->IsDictionary();
return GetElementsKind() == FAST_ELEMENTS;
}


bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}


bool JSObject::HasPixelElements() {
return GetElementsKind() == PIXEL_ELEMENTS;
}


@ -2541,7 +2623,7 @@ StringDictionary* JSObject::property_dictionary() {


NumberDictionary* JSObject::element_dictionary() {
ASSERT(!HasFastElements());
ASSERT(HasDictionaryElements());
return NumberDictionary::cast(elements());
}

@ -2647,24 +2729,6 @@ bool JSObject::HasElement(uint32_t index) {
}


Smi* JSObject::InterceptorPropertyLookupHint(String* name) {
// TODO(antonm): Do we want to do any shortcuts for global object?
if (HasFastProperties()) {
LookupResult lookup;
LocalLookupRealNamedProperty(name, &lookup);
if (lookup.IsValid()) {
if (lookup.type() == FIELD && lookup.IsCacheable()) {
return Smi::FromInt(lookup.GetFieldIndex());
}
} else {
return Smi::FromInt(kLookupInPrototype);
}
}

return Smi::FromInt(kLookupInHolder);
}


bool AccessorInfo::all_can_read() {
return BooleanBit::get(flag(), kAllCanReadBit);
}
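The PixelArray accessors above move raw bytes; the 0..255 clamping described in the ChangeLog lives one level up, in PixelArray::SetValue in objects.cc (whose diff is suppressed just below). A rough stand-alone sketch of that clamping rule, assuming the value has already been converted to a double (the real code also accepts Smi, HeapNumber and undefined):

    #include <stdint.h>

    // Hypothetical helper, not the V8 function: clamp a numeric value
    // into the 0..255 range the way pixel array writes are specified to.
    static inline uint8_t ClampToUint8(double value) {
      if (!(value > 0.0)) return 0;    // negatives and NaN both land here
      if (value > 255.0) return 255;
      return static_cast<uint8_t>(value + 0.5);  // round to nearest
    }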
1023
deps/v8/src/objects.cc
vendored
File diff suppressed because it is too large
88
deps/v8/src/objects.h
vendored
@ -52,6 +52,7 @@
// - JSValue
// - Array
// - ByteArray
// - PixelArray
// - FixedArray
// - DescriptorArray
// - HashTable
@ -95,7 +96,6 @@
// HeapObject: [32 bit direct pointer] (4 byte aligned) | 01
// Failure: [30 bit signed int] 11


// Ecma-262 3rd 8.6.1
enum PropertyAttributes {
NONE = v8::None,
@ -270,6 +270,7 @@ enum PropertyNormalizationMode {
V(ODDBALL_TYPE) \
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
V(PIXEL_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
V(ACCESSOR_INFO_TYPE) \
@ -659,6 +660,7 @@ enum InstanceType {
JS_GLOBAL_PROPERTY_CELL_TYPE,
PROXY_TYPE,
BYTE_ARRAY_TYPE,
PIXEL_ARRAY_TYPE,
FILLER_TYPE,
SMI_TYPE,

@ -760,6 +762,7 @@ class Object BASE_EMBEDDED {

inline bool IsNumber();
inline bool IsByteArray();
inline bool IsPixelArray();
inline bool IsFailure();
inline bool IsRetryAfterGC();
inline bool IsOutOfMemoryFailure();
@ -1302,6 +1305,11 @@ class HeapNumber: public HeapObject {
class JSObject: public HeapObject {
public:
enum DeleteMode { NORMAL_DELETION, FORCE_DELETION };
enum ElementsKind {
FAST_ELEMENTS,
DICTIONARY_ELEMENTS,
PIXEL_ELEMENTS
};

// [properties]: Backing storage for properties.
// properties is a FixedArray in the fast case, and a Dictionary in the
@ -1313,10 +1321,13 @@ class JSObject: public HeapObject {

// [elements]: The elements (properties with names that are integers).
// elements is a FixedArray in the fast case, and a Dictionary in the slow
// case.
DECL_ACCESSORS(elements, FixedArray) // Get and set fast elements.
// case or a PixelArray in a special case.
DECL_ACCESSORS(elements, Array) // Get and set fast elements.
inline void initialize_elements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
inline bool HasDictionaryElements();
inline bool HasPixelElements();
inline NumberDictionary* element_dictionary(); // Gets slow elements.

// Collects elements starting at index 0.
@ -1496,14 +1507,6 @@ class JSObject: public HeapObject {
Object* LookupCallbackSetterInPrototypes(uint32_t index);
void LookupCallback(String* name, LookupResult* result);

inline Smi* InterceptorPropertyLookupHint(String* name);
Object* GetInterceptorPropertyWithLookupHint(JSObject* receiver,
Smi* lookup_hint,
String* name,
PropertyAttributes* attributes);
static const int kLookupInHolder = -1;
static const int kLookupInPrototype = -2;

// Returns the number of properties on this object filtering out properties
// with the specified attributes (ignoring interceptors).
int NumberOfLocalProperties(PropertyAttributes filter);
@ -1581,8 +1584,11 @@ class JSObject: public HeapObject {
PropertyAttributes attributes);

// Convert the object to use the canonical dictionary
// representation.
Object* NormalizeProperties(PropertyNormalizationMode mode);
// representation. If the object is expected to have additional properties
// added this number can be indicated to have the backing store allocated to
// an initial capacity for holding these properties.
Object* NormalizeProperties(PropertyNormalizationMode mode,
int expected_additional_properties);
Object* NormalizeElements();

// Transform slow named properties to fast variants.
@ -1695,12 +1701,6 @@ class JSObject: public HeapObject {

void LookupInDescriptor(String* name, LookupResult* result);

// Attempts to get property with a named interceptor getter.
// Sets |attributes| to ABSENT if interceptor didn't return anything
Object* GetPropertyWithInterceptorProper(JSObject* receiver,
String* name,
PropertyAttributes* attributes);

DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
};

@ -1718,6 +1718,10 @@ class Array: public HeapObject {

// Layout descriptor.
static const int kLengthOffset = HeapObject::kHeaderSize;

protected:
// No code should use the Array class directly, only its subclasses.
// Use the kHeaderSize of the appropriate subclass, which may be aligned.
static const int kHeaderSize = kLengthOffset + kIntSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);

@ -2427,11 +2431,54 @@ class ByteArray: public Array {
void ByteArrayVerify();
#endif

// ByteArray headers are not quadword aligned.
static const int kHeaderSize = Array::kHeaderSize;
static const int kAlignedSize = Array::kAlignedSize;

private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
};


// A PixelArray represents a fixed-size byte array with special semantics
// used for implementing the CanvasPixelArray object. Please see the
// specification at:
// http://www.whatwg.org/specs/web-apps/current-work/
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
class PixelArray: public Array {
public:
// [external_pointer]: The pointer to the external memory area backing this
// pixel array.
DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.

// Setter and getter.
inline uint8_t get(int index);
inline void set(int index, uint8_t value);

// This accessor applies the correct conversion from Smi, HeapNumber and
// undefined and clamps the converted value between 0 and 255.
Object* SetValue(uint32_t index, Object* value);

// Casting.
static inline PixelArray* cast(Object* obj);

#ifdef DEBUG
void PixelArrayPrint();
void PixelArrayVerify();
#endif // DEBUG

// PixelArray headers are not quadword aligned.
static const int kExternalPointerOffset = Array::kAlignedSize;
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);

private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
};


// Code describes objects with on-the-fly generated machine code.
class Code: public HeapObject {
public:
@ -3129,6 +3176,9 @@ class JSFunction: public JSObject {
// function.
inline bool IsBoilerplate();

// Tells whether this function is builtin.
inline bool IsBuiltin();

// [literals]: Fixed array holding the materialized literals.
//
// If the function contains object, regexp or array literals, the
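A quick way to read the PixelArray layout constants above: kExternalPointerOffset starts where the pointer-aligned Array header ends, and the header then grows by one pointer for the external data pointer. Working the arithmetic through by hand, under assumed 32-bit values (these are illustrative stand-ins, not the real V8 constants):

    const int kHeapObjectHeaderSize = 4;              // assumed: one map pointer
    const int kLengthOffset = kHeapObjectHeaderSize;  // Array::kLengthOffset == 4
    const int kArrayHeaderSize = kLengthOffset + 4;   // plus the length field -> 8
    const int kExternalPointerOffset = 8;             // already pointer aligned
    const int kPixelArrayHeaderSize = kExternalPointerOffset + 4;  // 12 bytes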
29
deps/v8/src/parser.cc
vendored
@ -834,12 +834,7 @@ class AstBuildingParserFactory : public ParserFactory {
return new CallEval(expression, arguments, pos);
}

virtual Statement* EmptyStatement() {
// Use a statically allocated empty statement singleton to avoid
// allocating lots and lots of empty statements.
static v8::internal::EmptyStatement empty;
return &empty;
}
virtual Statement* EmptyStatement();
};


@ -1032,6 +1027,14 @@ Scope* AstBuildingParserFactory::NewScope(Scope* parent, Scope::Type type,
}


Statement* AstBuildingParserFactory::EmptyStatement() {
// Use a statically allocated empty statement singleton to avoid
// allocating lots and lots of empty statements.
static v8::internal::EmptyStatement empty;
return &empty;
}


Scope* ParserFactory::NewScope(Scope* parent, Scope::Type type,
bool inside_with) {
ASSERT(parent != NULL);
@ -1056,7 +1059,7 @@ VariableProxy* PreParser::Declare(Handle<String> name, Variable::Mode mode,

class Target BASE_EMBEDDED {
public:
Target(Parser* parser, Node* node)
Target(Parser* parser, AstNode* node)
: parser_(parser), node_(node), previous_(parser_->target_stack_) {
parser_->target_stack_ = this;
}
@ -1066,11 +1069,11 @@ class Target BASE_EMBEDDED {
}

Target* previous() { return previous_; }
Node* node() { return node_; }
AstNode* node() { return node_; }

private:
Parser* parser_;
Node* node_;
AstNode* node_;
Target* previous_;
};

@ -2367,7 +2370,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
result = NEW(TryFinally(try_block, finally_block));
// Add the jump targets of the try block and the catch block.
for (int i = 0; i < collector.targets()->length(); i++) {
catch_collector.targets()->Add(collector.targets()->at(i));
catch_collector.AddTarget(collector.targets()->at(i));
}
result->set_escaping_targets(catch_collector.targets());
}
@ -3928,7 +3931,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '*':
case '+':
case '?':
ReportError(CStrVector("Nothing to repeat") CHECK_FAILED);
return ReportError(CStrVector("Nothing to repeat"));
case '^': {
Advance();
if (multiline_) {
@ -4003,7 +4006,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
case '\\':
switch (Next()) {
case kEndMarker:
ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED);
return ReportError(CStrVector("\\ at end of pattern"));
case 'b':
Advance(2);
builder->AddAssertion(
@ -4490,7 +4493,7 @@ CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
return CharacterRange::Singleton(0); // Return dummy value.
}
case kEndMarker:
ReportError(CStrVector("\\ at end of pattern") CHECK_FAILED);
return ReportError(CStrVector("\\ at end of pattern"));
default:
uc32 c = ParseClassCharacterEscape(CHECK_FAILED);
return CharacterRange::Singleton(c);
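The EmptyStatement() change above is a pure code move: the function-local static singleton now lives in a single translation unit instead of being defined inline in the class body. The pattern in isolation, with a hypothetical type standing in for v8::internal::EmptyStatement:

    // parser_factory.h (sketch): callers only see a declaration.
    struct EmptyStatement {};
    EmptyStatement* SharedEmptyStatement();

    // parser_factory.cc (sketch): the one shared instance lives here, so
    // every caller gets the same object instead of allocating a new one.
    EmptyStatement* SharedEmptyStatement() {
      static EmptyStatement empty;
      return &empty;
    }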
76
deps/v8/src/platform-linux.cc
vendored
@ -223,62 +223,36 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
}


#ifdef ENABLE_LOGGING_AND_PROFILING
static uintptr_t StringToULong(char* buffer) {
return strtoul(buffer, NULL, 16); // NOLINT
}
#endif


void OS::LogSharedLibraryAddresses() {
#ifdef ENABLE_LOGGING_AND_PROFILING
static const int MAP_LENGTH = 1024;
int fd = open("/proc/self/maps", O_RDONLY);
if (fd < 0) return;
FILE *fp;
fp = fopen("/proc/self/maps", "r");
if (fp == NULL) return;
while (true) {
char addr_buffer[11];
addr_buffer[0] = '0';
addr_buffer[1] = 'x';
addr_buffer[10] = 0;
int result = read(fd, addr_buffer + 2, 8);
if (result < 8) break;
uintptr_t start = StringToULong(addr_buffer);
result = read(fd, addr_buffer + 2, 1);
if (result < 1) break;
if (addr_buffer[2] != '-') break;
result = read(fd, addr_buffer + 2, 8);
if (result < 8) break;
uintptr_t end = StringToULong(addr_buffer);
char buffer[MAP_LENGTH];
int bytes_read = -1;
do {
bytes_read++;
if (bytes_read >= MAP_LENGTH - 1)
break;
result = read(fd, buffer + bytes_read, 1);
if (result < 1) break;
} while (buffer[bytes_read] != '\n');
buffer[bytes_read] = 0;
// Ignore mappings that are not executable.
if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/');
// If there is no filename for this line then log it as an anonymous
// mapping and use the address as its name.
if (start_of_path == NULL) {
// 40 is enough to print a 64 bit address range.
ASSERT(sizeof(buffer) > 40);
snprintf(buffer,
sizeof(buffer),
"%08" V8PRIxPTR "-%08" V8PRIxPTR,
start,
end);
LOG(SharedLibraryEvent(buffer, start, end));
} else {
buffer[bytes_read] = 0;
LOG(SharedLibraryEvent(start_of_path, start, end));
uintptr_t start, end;
char attr_r, attr_w, attr_x, attr_p;
if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
int c;
if (attr_r == 'r' && attr_x == 'x') {
while (c = getc(fp), (c != EOF) && (c != '\n') && (c != '/'));
char lib_name[1024];
bool lib_has_name = false;
if (c == '/') {
ungetc(c, fp);
lib_has_name = fgets(lib_name, sizeof(lib_name), fp) != NULL;
}
if (lib_has_name && strlen(lib_name) > 0) {
lib_name[strlen(lib_name) - 1] = '\0';
} else {
snprintf(lib_name, sizeof(lib_name),
"%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
}
LOG(SharedLibraryEvent(lib_name, start, end));
}
while (c = getc(fp), (c != EOF) && (c != '\n'));
}
close(fd);
fclose(fp);
#endif
}
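The rewrite above swaps the hand-rolled read() loop for fscanf-based parsing of /proc/self/maps. A compilable stand-alone sketch of the same idea, using plain unsigned long instead of V8's uintptr_t/V8PRIxPTR plumbing and omitting the library-path recovery:

    #include <stdio.h>

    int main() {
      FILE* fp = fopen("/proc/self/maps", "r");
      if (fp == NULL) return 1;
      unsigned long start, end;
      char r, w, x, p;
      // Each line starts "start-end rwxp"; skip the rest of the line.
      while (fscanf(fp, "%lx-%lx %c%c%c%c%*[^\n]",
                    &start, &end, &r, &w, &x, &p) == 6) {
        if (r == 'r' && x == 'x') {
          printf("executable mapping: %08lx-%08lx\n", start, end);
        }
      }
      fclose(fp);
      return 0;
    }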
23
deps/v8/src/platform-macos.cc
vendored
@ -28,10 +28,11 @@
// Platform specific code for MacOS goes here. For the POSIX compatible parts
// the implementation is in platform-posix.cc.

#include <ucontext.h>
#include <unistd.h>
#include <sys/mman.h>
#include <mach/mach_init.h>
#include <mach-o/dyld.h>
#include <mach-o/getsect.h>

#include <AvailabilityMacros.h>

@ -205,7 +206,19 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {


void OS::LogSharedLibraryAddresses() {
// TODO(1233579): Implement.
#ifdef ENABLE_LOGGING_AND_PROFILING
unsigned int images_count = _dyld_image_count();
for (unsigned int i = 0; i < images_count; ++i) {
const mach_header* header = _dyld_get_image_header(i);
if (header == NULL) continue;
unsigned int size;
char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
if (code_ptr == NULL) continue;
const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
LOG(SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
}
#endif // ENABLE_LOGGING_AND_PROFILING
}


@ -411,14 +424,10 @@ class MacOSMutex : public Mutex {
public:

MacOSMutex() {
// For some reason the compiler doesn't allow you to write
// "this->mutex_ = PTHREAD_..." directly on mac.
pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
pthread_mutexattr_t attr;
pthread_mutexattr_init(&attr);
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&m, &attr);
mutex_ = m;
pthread_mutex_init(&mutex_, &attr);
}

~MacOSMutex() { pthread_mutex_destroy(&mutex_); }
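The MacOSMutex fix above initializes the pthread mutex member in place rather than initializing a local and copying it (pthread_mutex_t is not safely copyable). The corrected pattern in a self-contained form, with an attribute destroy added for completeness:

    #include <pthread.h>

    class RecursiveMutex {
     public:
      RecursiveMutex() {
        pthread_mutexattr_t attr;
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&mutex_, &attr);  // initialize the member directly
        pthread_mutexattr_destroy(&attr);
      }
      ~RecursiveMutex() { pthread_mutex_destroy(&mutex_); }
      void Lock() { pthread_mutex_lock(&mutex_); }
      void Unlock() { pthread_mutex_unlock(&mutex_); }
     private:
      pthread_mutex_t mutex_;
    };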
14
deps/v8/src/platform-posix.cc
vendored
@ -42,11 +42,15 @@
#include <netinet/in.h>
#include <netdb.h>

#if defined(ANDROID)
#define LOG_TAG "v8"
#include <utils/Log.h> // LOG_PRI_VA
#endif

#include "v8.h"

#include "platform.h"


namespace v8 {
namespace internal {

@ -126,7 +130,11 @@ void OS::Print(const char* format, ...) {


void OS::VPrint(const char* format, va_list args) {
#if defined(ANDROID)
LOG_PRI_VA(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
vprintf(format, args);
#endif
}


@ -139,7 +147,11 @@ void OS::PrintError(const char* format, ...) {


void OS::VPrintError(const char* format, va_list args) {
#if defined(ANDROID)
LOG_PRI_VA(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
vfprintf(stderr, format, args);
#endif
}

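The VPrint/VPrintError hooks above let Android route V8's output into the system log while other platforms keep stdio. The underlying variadic-forwarding shape, reduced to a sketch:

    #include <stdarg.h>
    #include <stdio.h>

    // The va_list overload is the single real implementation...
    static void VPrint(const char* format, va_list args) {
      vprintf(format, args);  // or LOG_PRI_VA(...) on Android
    }

    // ...and the variadic entry point just packages its arguments once.
    static void Print(const char* format, ...) {
      va_list args;
      va_start(args, format);
      VPrint(format, args);
      va_end(args);
    }

    int main() {
      Print("%d %s\n", 42, "ok");
      return 0;
    }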
10
deps/v8/src/prettyprinter.cc
vendored
@ -417,7 +417,7 @@ void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
}


const char* PrettyPrinter::Print(Node* node) {
const char* PrettyPrinter::Print(AstNode* node) {
Init();
Visit(node);
return output_;
@ -441,7 +441,7 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
}


void PrettyPrinter::PrintOut(Node* node) {
void PrettyPrinter::PrintOut(AstNode* node) {
PrettyPrinter printer;
PrintF("%s", printer.Print(node));
}
@ -700,7 +700,7 @@ void AstPrinter::PrintLabelsIndented(const char* info, ZoneStringList* labels) {
}


void AstPrinter::PrintIndentedVisit(const char* s, Node* node) {
void AstPrinter::PrintIndentedVisit(const char* s, AstNode* node) {
IndentedScope indent(s);
Visit(node);
}
@ -934,6 +934,9 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
case ObjectLiteral::Property::COMPUTED:
prop_kind = "PROPERTY - COMPUTED";
break;
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
prop_kind = "PROPERTY - MATERIALIZED_LITERAL";
break;
case ObjectLiteral::Property::PROTOTYPE:
prop_kind = "PROPERTY - PROTOTYPE";
break;
@ -945,7 +948,6 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
break;
default:
UNREACHABLE();
break;
}
IndentedScope prop(prop_kind);
PrintIndentedVisit("KEY", node->properties()->at(i)->key());

10
deps/v8/src/prettyprinter.h
vendored
@ -42,17 +42,17 @@ class PrettyPrinter: public AstVisitor {

// The following routines print a node into a string.
// The result string is alive as long as the PrettyPrinter is alive.
const char* Print(Node* node);
const char* Print(AstNode* node);
const char* PrintExpression(FunctionLiteral* program);
const char* PrintProgram(FunctionLiteral* program);

// Print a node to stdout.
static void PrintOut(Node* node);
static void PrintOut(AstNode* node);

// Individual nodes
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

private:
@ -87,12 +87,12 @@ class AstPrinter: public PrettyPrinter {
// Individual nodes
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
private:
friend class IndentedScope;
void PrintIndented(const char* txt);
void PrintIndentedVisit(const char* s, Node* node);
void PrintIndentedVisit(const char* s, AstNode* node);

void PrintStatements(ZoneList<Statement*>* statements);
void PrintDeclarations(ZoneList<Declaration*>* declarations);
6
deps/v8/src/register-allocator.cc
vendored
@ -44,6 +44,12 @@ Result::Result(Register reg) {
}


Result::ZoneObjectList* Result::ConstantList() {
static ZoneObjectList list(10);
return &list;
}


// -------------------------------------------------------------------------
// RegisterAllocator implementation.


6
deps/v8/src/register-allocator.h
vendored
@ -92,10 +92,7 @@ class Result BASE_EMBEDDED {
// of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;

static ZoneObjectList* ConstantList() {
static ZoneObjectList list(10);
return &list;
}
static ZoneObjectList* ConstantList();

// Clear the constants indirection table.
static void ClearConstantList() {
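Same shape as the parser change earlier in this commit: Result::ConstantList() moves its function-local static out of the header so the list is defined in exactly one object file. A minimal header/impl split showing the intent (the types here are stand-ins, not the real V8 declarations):

    // result.h (sketch)
    #include <vector>
    class Result {
     public:
      typedef std::vector<int> ZoneObjectList;   // stand-in for the real type
      static ZoneObjectList* ConstantList();     // defined in one .cc only
    };

    // result.cc (sketch)
    Result::ZoneObjectList* Result::ConstantList() {
      static ZoneObjectList list;                // the single shared instance
      return &list;
    }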
9
deps/v8/src/rewriter.cc
vendored
@ -38,8 +38,9 @@ namespace internal {

class AstOptimizer: public AstVisitor {
public:
explicit AstOptimizer() {}
explicit AstOptimizer(Handle<String> enclosing_name) {
explicit AstOptimizer() : has_function_literal_(false) {}
explicit AstOptimizer(Handle<String> enclosing_name)
: has_function_literal_(false) {
func_name_inferrer_.PushEnclosingName(enclosing_name);
}

@ -58,7 +59,7 @@ class AstOptimizer: public AstVisitor {
// Node visitors.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

DISALLOW_COPY_AND_ASSIGN(AstOptimizer);
@ -556,7 +557,7 @@ class Processor: public AstVisitor {
// Node visitors.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
};

160
deps/v8/src/runtime.cc
vendored
@ -155,33 +155,43 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
}

// Deep copy local elements.
if (copy->HasFastElements()) {
FixedArray* elements = copy->elements();
WriteBarrierMode mode = elements->GetWriteBarrierMode();
for (int i = 0; i < elements->length(); i++) {
Object* value = elements->get(i);
if (value->IsJSObject()) {
JSObject* jsObject = JSObject::cast(value);
result = DeepCopyBoilerplate(jsObject);
if (result->IsFailure()) return result;
elements->set(i, result, mode);
}
}
} else {
NumberDictionary* element_dictionary = copy->element_dictionary();
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
if (element_dictionary->IsKey(k)) {
Object* value = element_dictionary->ValueAt(i);
// Pixel elements cannot be created using an object literal.
ASSERT(!copy->HasPixelElements());
switch (copy->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
FixedArray* elements = FixedArray::cast(copy->elements());
WriteBarrierMode mode = elements->GetWriteBarrierMode();
for (int i = 0; i < elements->length(); i++) {
Object* value = elements->get(i);
if (value->IsJSObject()) {
JSObject* jsObject = JSObject::cast(value);
result = DeepCopyBoilerplate(jsObject);
if (result->IsFailure()) return result;
element_dictionary->ValueAtPut(i, result);
elements->set(i, result, mode);
}
}
break;
}
case JSObject::DICTIONARY_ELEMENTS: {
NumberDictionary* element_dictionary = copy->element_dictionary();
int capacity = element_dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = element_dictionary->KeyAt(i);
if (element_dictionary->IsKey(k)) {
Object* value = element_dictionary->ValueAt(i);
if (value->IsJSObject()) {
JSObject* jsObject = JSObject::cast(value);
result = DeepCopyBoilerplate(jsObject);
if (result->IsFailure()) return result;
element_dictionary->ValueAtPut(i, result);
}
}
}
break;
}
default:
UNREACHABLE();
break;
}
return copy;
}
@ -258,6 +268,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
{ // Add the constant properties to the boilerplate.
int length = constant_properties->length();
OptimizedObjectForAddingMultipleProperties opt(boilerplate,
length / 2,
!is_result_from_cache);
for (int index = 0; index < length; index +=2) {
Handle<Object> key(constant_properties->get(index+0));
@ -1637,7 +1648,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
}
case SUBJECT_CAPTURE: {
int capture = part.data;
FixedArray* match_info = last_match_info->elements();
FixedArray* match_info = FixedArray::cast(last_match_info->elements());
int from = RegExpImpl::GetCapture(match_info, capture * 2);
int to = RegExpImpl::GetCapture(match_info, capture * 2 + 1);
if (from >= 0 && to > from) {
@ -1717,7 +1728,8 @@ static Object* StringReplaceRegExpWithString(String* subject,
int start, end;
{
AssertNoAllocation match_info_array_is_not_in_a_handle;
FixedArray* match_info_array = last_match_info_handle->elements();
FixedArray* match_info_array =
FixedArray::cast(last_match_info_handle->elements());

ASSERT_EQ(capture_count * 2 + 2,
RegExpImpl::GetLastCaptureCount(match_info_array));
@ -2345,7 +2357,7 @@ static Object* Runtime_StringMatch(Arguments args) {
int end;
{
AssertNoAllocation no_alloc;
FixedArray* elements = regexp_info->elements();
FixedArray* elements = FixedArray::cast(regexp_info->elements());
start = Smi::cast(elements->get(RegExpImpl::kFirstCapture))->value();
end = Smi::cast(elements->get(RegExpImpl::kFirstCapture + 1))->value();
}
@ -3022,7 +3034,7 @@ static Object* Runtime_ToSlowProperties(Arguments args) {
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES);
js_object->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
}
return *object;
}
@ -4885,7 +4897,7 @@ static Object* Runtime_DateParseString(Arguments args) {

AssertNoAllocation no_allocation;

FixedArray* output_array = output->elements();
FixedArray* output_array = FixedArray::cast(output->elements());
RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
bool result;
if (str->IsAsciiRepresentation()) {
@ -5173,37 +5185,62 @@ static uint32_t IterateElements(Handle<JSObject> receiver,
ArrayConcatVisitor* visitor) {
uint32_t num_of_elements = 0;

if (receiver->HasFastElements()) {
Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
uint32_t len = elements->length();
if (range < len) len = range;

for (uint32_t j = 0; j < len; j++) {
Handle<Object> e(elements->get(j));
if (!e->IsTheHole()) {
num_of_elements++;
if (visitor)
visitor->visit(j, e);
switch (receiver->GetElementsKind()) {
case JSObject::FAST_ELEMENTS: {
Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
uint32_t len = elements->length();
if (range < len) {
len = range;
}
}

} else {
Handle<NumberDictionary> dict(receiver->element_dictionary());
uint32_t capacity = dict->Capacity();
for (uint32_t j = 0; j < capacity; j++) {
Handle<Object> k(dict->KeyAt(j));
if (dict->IsKey(*k)) {
ASSERT(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
if (index < range) {
for (uint32_t j = 0; j < len; j++) {
Handle<Object> e(elements->get(j));
if (!e->IsTheHole()) {
num_of_elements++;
if (visitor) {
visitor->visit(index,
Handle<Object>(dict->ValueAt(j)));
visitor->visit(j, e);
}
}
}
break;
}
case JSObject::PIXEL_ELEMENTS: {
Handle<PixelArray> pixels(PixelArray::cast(receiver->elements()));
uint32_t len = pixels->length();
if (range < len) {
len = range;
}

for (uint32_t j = 0; j < len; j++) {
num_of_elements++;
if (visitor != NULL) {
Handle<Smi> e(Smi::FromInt(pixels->get(j)));
visitor->visit(j, e);
}
}
break;
}
case JSObject::DICTIONARY_ELEMENTS: {
Handle<NumberDictionary> dict(receiver->element_dictionary());
uint32_t capacity = dict->Capacity();
for (uint32_t j = 0; j < capacity; j++) {
Handle<Object> k(dict->KeyAt(j));
if (dict->IsKey(*k)) {
ASSERT(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
if (index < range) {
num_of_elements++;
if (visitor) {
visitor->visit(index, Handle<Object>(dict->ValueAt(j)));
}
}
}
}
break;
}
default:
UNREACHABLE();
break;
}

return num_of_elements;
@ -7408,14 +7445,15 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
// Not sure when this can happen but skip it just in case.
if (!raw_fun->IsJSFunction())
return false;
if ((raw_fun == caller) && !(*seen_caller)) {
*seen_caller = true;
return false;
}
// Skip the most obvious builtin calls. Some builtin calls (such as
// Number.ADD which is invoked using 'call') are very difficult to
// recognize so we're leaving them in for now.
return !frame->receiver()->IsJSBuiltinsObject();
// Skip all frames until we've seen the caller. Also, skip the most
// obvious builtin calls. Some builtin calls (such as Number.ADD
// which is invoked using 'call') are very difficult to recognize
// so we're leaving them in for now.
return *seen_caller && !frame->receiver()->IsJSBuiltinsObject();
}


@ -7424,7 +7462,7 @@ static bool ShowFrameInStackTrace(StackFrame* raw_frame, Object* caller,
// code offset.
static Object* Runtime_CollectStackTrace(Arguments args) {
ASSERT_EQ(args.length(), 2);
Object* caller = args[0];
Handle<Object> caller = args.at<Object>(0);
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[1]);

HandleScope scope;
@ -7433,12 +7471,14 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
Handle<JSArray> result = Factory::NewJSArray(initial_size * 3);

StackFrameIterator iter;
bool seen_caller = false;
// If the caller parameter is a function we skip frames until we're
// under it before starting to collect.
bool seen_caller = !caller->IsJSFunction();
int cursor = 0;
int frames_seen = 0;
while (!iter.done() && frames_seen < limit) {
StackFrame* raw_frame = iter.frame();
if (ShowFrameInStackTrace(raw_frame, caller, &seen_caller)) {
if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
frames_seen++;
JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
Object* recv = frame->receiver();
@ -7446,15 +7486,17 @@ static Object* Runtime_CollectStackTrace(Arguments args) {
Address pc = frame->pc();
Address start = frame->code()->address();
Smi* offset = Smi::FromInt(pc - start);
FixedArray* elements = result->elements();
FixedArray* elements = FixedArray::cast(result->elements());
if (cursor + 2 < elements->length()) {
elements->set(cursor++, recv);
elements->set(cursor++, fun);
elements->set(cursor++, offset, SKIP_WRITE_BARRIER);
} else {
HandleScope scope;
SetElement(result, cursor++, Handle<Object>(recv));
SetElement(result, cursor++, Handle<Object>(fun));
Handle<Object> recv_handle(recv);
Handle<Object> fun_handle(fun);
SetElement(result, cursor++, recv_handle);
SetElement(result, cursor++, fun_handle);
SetElement(result, cursor++, Handle<Smi>(offset));
}
}
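The ShowFrameInStackTrace/Runtime_CollectStackTrace changes implement the ChangeLog's "capturing stack information on custom errors": when a caller function is supplied, every frame up to and including it is dropped before collection starts. The skipping rule boiled down to a sketch with stand-in types:

    // Returns true when a frame should appear in the collected trace.
    static bool ShowFrame(const void* fun, const void* caller,
                          bool receiver_is_builtins, bool* seen_caller) {
      if (fun == caller && !*seen_caller) {
        *seen_caller = true;   // the caller frame itself is also hidden
        return false;
      }
      // Nothing is reported until the caller has been passed, and
      // obvious builtin frames stay hidden afterwards too.
      return *seen_caller && !receiver_is_builtins;
    }

Note that seen_caller starts out true when no caller function is given, which preserves the old collect-everything behavior.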
6
deps/v8/src/serialize.cc
vendored
@ -1454,9 +1454,9 @@ void Deserializer::GetLog() {
static void InitPagedSpace(PagedSpace* space,
int capacity,
List<Page*>* page_list) {
space->EnsureCapacity(capacity);
// TODO(1240712): PagedSpace::EnsureCapacity can return false due to
// a failure to allocate from the OS to expand the space.
if (!space->EnsureCapacity(capacity)) {
V8::FatalProcessOutOfMemory("InitPagedSpace");
}
PageIterator it(space, PageIterator::ALL_PAGES);
while (it.has_next()) page_list->Add(it.next());
}
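The serialize.cc change turns a silently ignored EnsureCapacity() result into an explicit out-of-memory failure. The pattern in isolation, with abort() standing in for V8::FatalProcessOutOfMemory and a toy capacity check:

    #include <cstdlib>

    static bool EnsureCapacity(int capacity) {
      return capacity <= 1024;   // pretend the OS refuses larger requests
    }

    static void InitWithCapacity(int capacity) {
      if (!EnsureCapacity(capacity)) {
        abort();                 // fail loudly instead of carrying on
      }
    }

    int main() {
      InitWithCapacity(512);     // fine; 2048 would abort
      return 0;
    }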
6
deps/v8/src/spaces.cc
vendored
@ -133,8 +133,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
#endif
stop_page_ = space->last_page_;
break;
default:
UNREACHABLE();
}
}

@ -725,11 +723,15 @@ void PagedSpace::Shrink() {
Page* current_page = top_page->next_page();
// Loop over the pages to the end of the space.
while (current_page->is_valid()) {
#if defined(ANDROID)
// Free all chunks if possible
#else
// Advance last_page_to_keep every other step to end up at the midpoint.
if ((free_pages & 0x1) == 1) {
pages_to_keep++;
last_page_to_keep = last_page_to_keep->next_page();
}
#endif
free_pages++;
current_page = current_page->next_page();
}
40
deps/v8/src/spaces.h
vendored
@ -393,6 +393,9 @@ class MemoryAllocator : public AllStatic {
// Returns the maximum available bytes of heaps.
static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

// Returns allocated spaces in bytes.
static int Size() { return size_; }

// Returns maximum available bytes that the old space can have.
static int MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
@ -434,7 +437,11 @@ class MemoryAllocator : public AllStatic {
static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
// If a chunk has at least 32 pages, the maximum heap size is about
// 8 * 1024 * 32 * 8K = 2G bytes.
#if defined(ANDROID)
static const int kPagesPerChunk = 16;
#else
static const int kPagesPerChunk = 64;
#endif
static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

private:
@ -924,34 +931,41 @@ class PagedSpace : public Space {


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to stdout (when compiled
// with DEBUG) or to the log file (when compiled with
// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo BASE_EMBEDDED {
class NumberAndSizeInfo BASE_EMBEDDED {
public:
HistogramInfo() : number_(0), bytes_(0) {}
NumberAndSizeInfo() : number_(0), bytes_(0) {}

const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }

int number() { return number_; }
int number() const { return number_; }
void increment_number(int num) { number_ += num; }

int bytes() { return bytes_; }
int bytes() const { return bytes_; }
void increment_bytes(int size) { bytes_ += size; }

// Clear the number of objects and size fields, but not the name.
void clear() {
number_ = 0;
bytes_ = 0;
}

private:
const char* name_;
int number_;
int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to stdout (when compiled
// with DEBUG) or to the log file (when compiled with
// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo: public NumberAndSizeInfo {
public:
HistogramInfo() : NumberAndSizeInfo() {}

const char* name() { return name_; }
void set_name(const char* name) { name_ = name; }

private:
const char* name_;
};
#endif

6
deps/v8/src/string-stream.cc
vendored
@ -44,12 +44,6 @@ char* HeapStringAllocator::allocate(unsigned bytes) {
}


NoAllocationStringAllocator::NoAllocationStringAllocator(unsigned bytes) {
size_ = bytes;
space_ = NewArray<char>(bytes);
}


NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory,
unsigned size) {
size_ = size;
5
deps/v8/src/string-stream.h
vendored
@ -57,11 +57,10 @@ class HeapStringAllocator: public StringAllocator {


// Allocator for use when no new c++ heap allocation is allowed.
// Allocates all space up front and does no allocation while building
// message.
// Given a preallocated buffer up front and does no allocation while
// building message.
class NoAllocationStringAllocator: public StringAllocator {
public:
explicit NoAllocationStringAllocator(unsigned bytes);
NoAllocationStringAllocator(char* memory, unsigned size);
char* allocate(unsigned bytes) { return space_; }
char* grow(unsigned* bytes);
150
deps/v8/src/stub-cache.cc
vendored
@ -736,22 +736,22 @@ Handle<Code> ComputeCallMiss(int argc) {

Object* LoadCallbackProperty(Arguments args) {
Handle<JSObject> recv = args.at<JSObject>(0);
AccessorInfo* callback = AccessorInfo::cast(args[1]);
Handle<JSObject> holder = args.at<JSObject>(1);
AccessorInfo* callback = AccessorInfo::cast(args[2]);
Handle<Object> data = args.at<Object>(3);
Address getter_address = v8::ToCData<Address>(callback->getter());
v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
ASSERT(fun != NULL);
Handle<String> name = args.at<String>(2);
Handle<JSObject> holder = args.at<JSObject>(3);
HandleScope scope;
Handle<Object> data(callback->data());
LOG(ApiNamedPropertyAccess("load", *recv, *name));
Handle<String> name = args.at<String>(4);
// NOTE: If we can align the structure of an AccessorInfo with the
// locations of the arguments to this function maybe we don't have
// to explicitly create the structure but can just pass a pointer
// into the stack.
LOG(ApiNamedPropertyAccess("load", *recv, *name));
v8::AccessorInfo info(v8::Utils::ToLocal(recv),
v8::Utils::ToLocal(data),
v8::Utils::ToLocal(holder));
HandleScope scope;
v8::Handle<v8::Value> result;
{
// Leaving JavaScript.
@ -787,49 +787,129 @@ Object* StoreCallbackProperty(Arguments args) {
return *value;
}

/**
* Attempts to load a property with an interceptor (which must be present),
* but doesn't search the prototype chain.
*
* Returns |Heap::no_interceptor_result_sentinel()| if interceptor doesn't
* provide any value for the given name.
*/
Object* LoadPropertyWithInterceptorOnly(Arguments args) {
Handle<JSObject> receiver_handle = args.at<JSObject>(0);
Handle<JSObject> holder_handle = args.at<JSObject>(1);
Handle<String> name_handle = args.at<String>(2);
Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
Handle<Object> data_handle = args.at<Object>(4);

Object* LoadInterceptorProperty(Arguments args) {
JSObject* recv = JSObject::cast(args[0]);
JSObject* holder = JSObject::cast(args[1]);
String* name = String::cast(args[2]);
Smi* lookup_hint = Smi::cast(args[3]);
ASSERT(holder->HasNamedInterceptor());
PropertyAttributes attr = NONE;
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
ASSERT(getter != NULL);

Object* result = holder->GetInterceptorPropertyWithLookupHint(
recv, lookup_hint, name, &attr);
if (result->IsFailure()) return result;
{
// Use the interceptor getter.
v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
v8::Utils::ToLocal(data_handle),
v8::Utils::ToLocal(holder_handle));
HandleScope scope;
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
r = getter(v8::Utils::ToLocal(name_handle), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
if (!r.IsEmpty()) {
return *v8::Utils::OpenHandle(*r);
}
}

// If the property is present, return it.
if (attr != ABSENT) return result;
return Heap::no_interceptor_result_sentinel();
}

// If the top frame is an internal frame, this is really a call
// IC. In this case, we simply return the undefined result which
// will lead to an exception when trying to invoke the result as a
// function.
StackFrameIterator it;
it.Advance(); // skip exit frame
if (it.frame()->is_internal()) return result;

static Object* ThrowReferenceError(String* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
IC ic(IC::NO_EXTRA_FRAME);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
if (!ic.is_contextual()) return result;
if (!ic.is_contextual()) return Heap::undefined_value();

// Throw a reference error.
HandleScope scope;
Handle<String> name_handle(name);
Handle<Object> error =
Factory::NewReferenceError("not_defined",
HandleVector(&name_handle, 1));
return Top::Throw(*error);
}


static Object* LoadWithInterceptor(Arguments* args,
PropertyAttributes* attrs) {
Handle<JSObject> receiver_handle = args->at<JSObject>(0);
Handle<JSObject> holder_handle = args->at<JSObject>(1);
Handle<String> name_handle = args->at<String>(2);
Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(3);
Handle<Object> data_handle = args->at<Object>(4);

Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address);
ASSERT(getter != NULL);

{
// Use the interceptor getter.
v8::AccessorInfo info(v8::Utils::ToLocal(receiver_handle),
v8::Utils::ToLocal(data_handle),
v8::Utils::ToLocal(holder_handle));
HandleScope scope;
// We cannot use the raw name pointer here since getting the
// property might cause a GC. However, we can get the name from
// the stack using the arguments object.
Handle<String> name_handle = args.at<String>(2);
Handle<Object> error =
Factory::NewReferenceError("not_defined",
HandleVector(&name_handle, 1));
return Top::Throw(*error);
v8::Handle<v8::Value> r;
{
// Leaving JavaScript.
VMState state(EXTERNAL);
r = getter(v8::Utils::ToLocal(name_handle), info);
}
RETURN_IF_SCHEDULED_EXCEPTION();
if (!r.IsEmpty()) {
*attrs = NONE;
return *v8::Utils::OpenHandle(*r);
}
}

Object* result = holder_handle->GetPropertyPostInterceptor(
*receiver_handle,
*name_handle,
attrs);
RETURN_IF_SCHEDULED_EXCEPTION();
return result;
}


/**
* Loads a property with an interceptor performing post interceptor
* lookup if interceptor failed.
*/
Object* LoadPropertyWithInterceptorForLoad(Arguments args) {
PropertyAttributes attr = NONE;
Object* result = LoadWithInterceptor(&args, &attr);
if (result->IsFailure()) return result;

// If the property is present, return it.
if (attr != ABSENT) return result;
return ThrowReferenceError(String::cast(args[2]));
}


Object* LoadPropertyWithInterceptorForCall(Arguments args) {
PropertyAttributes attr;
Object* result = LoadWithInterceptor(&args, &attr);
RETURN_IF_SCHEDULED_EXCEPTION();
// This is call IC. In this case, we simply return the undefined result which
// will lead to an exception when trying to invoke the result as a
// function.
return result;
}


@ -863,6 +943,8 @@ Object* StubCompiler::CompileCallInitialize(Code::Flags flags) {
Object* StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
HandleScope scope;
int argc = Code::ExtractArgumentsCountFromFlags(flags);
// The code of the PreMonomorphic stub is the same as the code
// of the Initialized stub. They just differ on the code object flags.
CallIC::GenerateInitialize(masm(), argc);
Object* result = GetCodeWithFlags(flags, "CompileCallPreMonomorphic");
if (!result->IsFailure()) {
29
deps/v8/src/stub-cache.h
vendored
@ -256,11 +256,14 @@ class StubCache : public AllStatic {
}

// Compute the entry for a given offset in exactly the same way as
// we done in generated code. This makes it a lot easier to avoid
// making mistakes in the hashed offset computations.
// we do in generated code. We generate a hash code that already
// ends in String::kHashShift 0s. Then we shift it so it is a multiple
// of sizeof(Entry). This makes it easier to avoid making mistakes
// in the hashed offset computations.
static Entry* entry(Entry* table, int offset) {
const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift;
return reinterpret_cast<Entry*>(
reinterpret_cast<Address>(table) + (offset << 1));
reinterpret_cast<Address>(table) + (offset << shift_amount));
}
};

@ -304,7 +307,9 @@ Object* StoreCallbackProperty(Arguments args);


// Support functions for IC stubs for interceptors.
Object* LoadInterceptorProperty(Arguments args);
Object* LoadPropertyWithInterceptorOnly(Arguments args);
Object* LoadPropertyWithInterceptorForLoad(Arguments args);
Object* LoadPropertyWithInterceptorForCall(Arguments args);
Object* StoreInterceptorProperty(Arguments args);
Object* CallInterceptorProperty(Arguments args);

@ -374,13 +379,6 @@ class StubCompiler BASE_EMBEDDED {
Label* miss_label);
static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);

protected:
Object* GetCodeWithFlags(Code::Flags flags, const char* name);
Object* GetCodeWithFlags(Code::Flags flags, String* name);

MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }

// Check the integrity of the prototype chain to make sure that the
// current IC is still valid.
Register CheckPrototypes(JSObject* object,
@ -391,6 +389,13 @@ class StubCompiler BASE_EMBEDDED {
String* name,
Label* miss);

protected:
Object* GetCodeWithFlags(Code::Flags flags, const char* name);
Object* GetCodeWithFlags(Code::Flags flags, String* name);

MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }

void GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
@ -421,7 +426,7 @@ class StubCompiler BASE_EMBEDDED {

void GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
Smi* lookup_hint,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
deps/v8/src/usage-analyzer.cc
vendored
4
deps/v8/src/usage-analyzer.cc
vendored
@ -42,7 +42,7 @@ static const int InitialWeight = 100;
|
||||
|
||||
class UsageComputer: public AstVisitor {
|
||||
public:
|
||||
static bool Traverse(Node* node);
|
||||
static bool Traverse(AstNode* node);
|
||||
|
||||
void VisitBlock(Block* node);
|
||||
void VisitDeclaration(Declaration* node);
|
||||
@ -116,7 +116,7 @@ class WeightScaler BASE_EMBEDDED {
|
||||
// ----------------------------------------------------------------------------
|
||||
// Implementation of UsageComputer
|
||||
|
||||
bool UsageComputer::Traverse(Node* node) {
|
||||
bool UsageComputer::Traverse(AstNode* node) {
|
||||
UsageComputer uc(InitialWeight, false);
|
||||
uc.Visit(node);
|
||||
return !uc.HasStackOverflow();
|
||||
|
6
deps/v8/src/version.cc
vendored
6
deps/v8/src/version.cc
vendored
@ -33,9 +33,9 @@
|
||||
// NOTE these macros are used by the SCons build script so their names
|
||||
// cannot be changed without changing the SCons build script.
|
||||
#define MAJOR_VERSION 1
|
||||
#define MINOR_VERSION 2
|
||||
#define BUILD_NUMBER 14
|
||||
#define PATCH_LEVEL 0
|
||||
#define MINOR_VERSION 3
|
||||
#define BUILD_NUMBER 1
|
||||
#define PATCH_LEVEL 1
|
||||
#define CANDIDATE_VERSION false
|
||||
|
||||
// Define SONAME to have the SCons build the put a specific SONAME into the
|
||||
|
87
deps/v8/src/x64/assembler-x64.cc
vendored
87
deps/v8/src/x64/assembler-x64.cc
vendored
@ -456,13 +456,13 @@ void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
|
||||
|
||||
|
||||
void Assembler::arithmetic_op_32(byte opcode,
|
||||
const Operand& dst,
|
||||
Register src) {
|
||||
Register reg,
|
||||
const Operand& rm_reg) {
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
emit_optional_rex_32(src, dst);
|
||||
emit_optional_rex_32(reg, rm_reg);
|
||||
emit(opcode);
|
||||
emit_operand(src, dst);
|
||||
emit_operand(reg, rm_reg);
|
||||
}
|
||||
|
||||
|
||||
@ -687,6 +687,13 @@ void Assembler::call(const Operand& op) {
|
||||
}
|
||||
|
||||
|
||||
void Assembler::cdq() {
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
emit(0x99);
|
||||
}
|
||||
|
||||
|
||||
void Assembler::cmovq(Condition cc, Register dst, Register src) {
|
||||
// No need to check CpuInfo for CMOV support, it's a required part of the
|
||||
// 64-bit architecture.
|
||||
@ -773,6 +780,15 @@ void Assembler::decq(const Operand& dst) {
|
||||
}
|
||||
|
||||
|
||||
void Assembler::decl(Register dst) {
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
emit_optional_rex_32(dst);
|
||||
emit(0xFF);
|
||||
emit_modrm(0x1, dst);
|
||||
}
|
||||
|
||||
|
||||
void Assembler::decl(const Operand& dst) {
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
@ -798,7 +814,7 @@ void Assembler::hlt() {
|
||||
}
|
||||
|
||||
|
||||
void Assembler::idiv(Register src) {
|
||||
void Assembler::idivq(Register src) {
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
emit_rex_64(src);
|
||||
@ -807,6 +823,15 @@ void Assembler::idiv(Register src) {
|
||||
}
|
||||
|
||||
|
||||
void Assembler::idivl(Register src) {
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
emit_optional_rex_32(src);
|
||||
emit(0xF7);
|
||||
emit_modrm(0x7, src);
|
||||
}
|
||||
|
||||
|
||||
void Assembler::imul(Register src) {
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
@ -1115,6 +1140,9 @@ void Assembler::movq(const Operand& dst, Register src) {
|
||||
|
||||
|
||||
void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
|
||||
// This method must not be used with heap object references. The stored
|
||||
// address is not GC safe. Use the handle version instead.
|
||||
ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
emit_rex_64(dst);
|
||||
@ -1521,7 +1549,7 @@ void Assembler::store_rax(ExternalReference ref) {
|
||||
|
||||
|
||||
void Assembler::testb(Register reg, Immediate mask) {
|
||||
ASSERT(is_int8(mask.value_));
|
||||
ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
if (reg.is(rax)) {
|
||||
@ -1540,7 +1568,7 @@ void Assembler::testb(Register reg, Immediate mask) {
|
||||
|
||||
|
||||
void Assembler::testb(const Operand& op, Immediate mask) {
|
||||
ASSERT(is_int8(mask.value_));
|
||||
ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
|
||||
EnsureSpace ensure_space(this);
|
||||
last_pc_ = pc_;
|
||||
emit_optional_rex_32(rax, op);
|
||||
@ -2183,48 +2211,3 @@ const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
|
||||
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
||||
|
||||
// TODO(x64): Implement and move these to their correct cc-files:
|
||||
#include "ast.h"
|
||||
#include "bootstrapper.h"
|
||||
#include "codegen-inl.h"
|
||||
#include "cpu.h"
|
||||
#include "debug.h"
|
||||
#include "disasm.h"
|
||||
#include "disassembler.h"
|
||||
#include "frames-inl.h"
|
||||
#include "x64/macro-assembler-x64.h"
|
||||
#include "x64/regexp-macro-assembler-x64.h"
|
||||
#include "ic-inl.h"
|
||||
#include "log.h"
|
||||
#include "macro-assembler.h"
|
||||
#include "parser.h"
|
||||
#include "regexp-macro-assembler.h"
|
||||
#include "regexp-stack.h"
|
||||
#include "register-allocator-inl.h"
|
||||
#include "register-allocator.h"
|
||||
#include "runtime.h"
|
||||
#include "scopes.h"
|
||||
#include "serialize.h"
|
||||
#include "stub-cache.h"
|
||||
#include "unicode.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
|
||||
void BreakLocationIterator::ClearDebugBreakAtReturn() {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
bool BreakLocationIterator::IsDebugBreakAtReturn() {
|
||||
UNIMPLEMENTED();
|
||||
return false;
|
||||
}
|
||||
|
||||
void BreakLocationIterator::SetDebugBreakAtReturn() {
|
||||
UNIMPLEMENTED();
|
||||
}
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
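The arithmetic_op_32 parameter rename above (dst/src to reg/rm_reg) tracks the x86 encoding: for a given opcode, one operand always travels in the ModR/M reg field and the other in the r/m field, so naming them by role avoids the swapped-operand bug fixed in the cmpl overloads below. A toy encoder for the register-direct case:

    #include <cstdio>

    // mod=11 selects register-direct addressing; reg and rm are the
    // standard x86 register numbers (eax=0, ecx=1, ...).
    static unsigned char ModRM(int reg, int rm) {
      return static_cast<unsigned char>(0xC0 | (reg << 3) | rm);
    }

    int main() {
      // cmp eax, ecx encoded both ways round:
      printf("3B %02X\n", ModRM(0, 1));  // opcode 0x3B: CMP r32, r/m32
      printf("39 %02X\n", ModRM(1, 0));  // opcode 0x39: CMP r/m32, r32
      return 0;
    }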
37
deps/v8/src/x64/assembler-x64.h
vendored
@ -292,6 +292,7 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
times_half_pointer_size = times_4,
times_pointer_size = times_8
};

@ -521,10 +522,6 @@ class Assembler : public Malloced {
void xchg(Register dst, Register src);

// Arithmetics
void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src);
}

void addl(Register dst, Register src) {
arithmetic_op_32(0x03, dst, src);
}
@ -533,15 +530,22 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x0, dst, src);
}

void addl(Register dst, const Operand& src) {
arithmetic_op_32(0x03, dst, src);
}

void addl(const Operand& dst, Immediate src) {
immediate_arithmetic_op_32(0x0, dst, src);
}

void addq(Register dst, Register src) {
arithmetic_op(0x03, dst, src);
}

void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}


void addq(const Operand& dst, Register src) {
arithmetic_op(0x01, src, dst);
}
@ -567,11 +571,11 @@ class Assembler : public Malloced {
}

void cmpl(Register dst, const Operand& src) {
arithmetic_op_32(0x3B, src, dst);
arithmetic_op_32(0x3B, dst, src);
}

void cmpl(const Operand& dst, Register src) {
arithmetic_op_32(0x39, dst, src);
arithmetic_op_32(0x39, src, dst);
}

void cmpl(Register dst, Immediate src) {
@ -624,13 +628,18 @@ class Assembler : public Malloced {

void decq(Register dst);
void decq(const Operand& dst);
void decl(Register dst);
void decl(const Operand& dst);

// Sign-extends rax into rdx:rax.
void cqo();
// Sign-extends eax into edx:eax.
void cdq();

// Divide rdx:rax by src. Quotient in rax, remainder in rdx.
void idiv(Register src);
void idivq(Register src);
// Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
void idivl(Register src);

// Signed multiply instructions.
void imul(Register src); // rdx:rax = rax * src.
@ -718,6 +727,10 @@ class Assembler : public Malloced {
shift_32(dst, 0x4);
}

void shll(Register dst, Immediate shift_amount) {
shift_32(dst, shift_amount, 0x4);
}

void shr(Register dst, Immediate shift_amount) {
shift(dst, shift_amount, 0x5);
}
@ -730,6 +743,10 @@ class Assembler : public Malloced {
shift_32(dst, 0x5);
}

void shrl(Register dst, Immediate shift_amount) {
shift_32(dst, shift_amount, 0x5);
}

void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);

@ -1114,8 +1131,8 @@ class Assembler : public Malloced {
// ModR/M byte.
void arithmetic_op(byte opcode, Register dst, Register src);
void arithmetic_op_32(byte opcode, Register dst, Register src);
void arithmetic_op_32(byte opcode, const Operand& dst, Register src);
void arithmetic_op(byte opcode, Register reg, const Operand& op);
void arithmetic_op_32(byte opcode, Register reg, const Operand& rm_reg);
void arithmetic_op(byte opcode, Register reg, const Operand& rm_reg);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
// Operate on a 32-bit word in memory or register.
6
deps/v8/src/x64/builtins-x64.cc
vendored
@ -394,9 +394,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
  // If given receiver is already a JavaScript object then there's no
  // reason for converting it.
  __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
  __ j(less, &call_to_object);
  __ j(below, &call_to_object);
  __ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
  __ j(less_equal, &push_receiver);
  __ j(below_equal, &push_receiver);

  // Convert the receiver to an object.
  __ bind(&call_to_object);
@ -562,7 +562,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // If the type of the result (stored in its map) is less than
  // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
  __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
  __ j(greater_equal, &exit);
  __ j(above_equal, &exit);

  // Throw away the result of the constructor invocation and use the
  // on-stack receiver as the result.
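The condition-code changes in these hunks (less to below, less_equal to below_equal, greater_equal to above_equal) swap signed branches for their unsigned counterparts: instance-type values compare as unsigned bytes, and a signed branch misorders any encoding with the top bit set. A minimal standalone sketch of the difference (the concrete type values below are made up for illustration, not V8's real encodings):

// Illustrative only: why "below" (unsigned) is the right condition for
// instance-type range checks. The concrete values are hypothetical.
#include <cstdint>
#include <cstdio>

int main() {
  uint8_t first_js_object_type = 0x9F;  // hypothetical type encoding
  uint8_t candidate_type = 0x03;        // hypothetical non-object type

  // Unsigned compare ("below"): 0x03 < 0x9F, as intended.
  bool is_below = candidate_type < first_js_object_type;

  // Signed compare ("less"): int8_t(0x9F) is -97, so the test inverts.
  bool is_less = static_cast<int8_t>(candidate_type) <
                 static_cast<int8_t>(first_js_object_type);

  std::printf("below=%d less=%d\n", is_below, is_less);  // below=1 less=0
  return 0;
}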
460
deps/v8/src/x64/codegen-x64.cc
vendored
@ -389,6 +389,112 @@ bool CodeGenerator::HasValidEntryRegisters() {
#endif


class DeferredReferenceGetKeyedValue: public DeferredCode {
 public:
  explicit DeferredReferenceGetKeyedValue(Register dst,
                                          Register receiver,
                                          Register key,
                                          bool is_global)
      : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
    set_comment("[ DeferredReferenceGetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Label patch_site_;
  Register dst_;
  Register receiver_;
  Register key_;
  bool is_global_;
};


void DeferredReferenceGetKeyedValue::Generate() {
  __ push(receiver_);  // First IC argument.
  __ push(key_);       // Second IC argument.

  // Calculate the delta from the IC call instruction to the map check
  // movq instruction in the inlined version. This delta is stored in
  // a test(rax, delta) instruction after the call so that we can find
  // it in the IC initialization code and patch the movq instruction.
  // This means that we cannot allow test instructions after calls to
  // KeyedLoadIC stubs in other places.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  RelocInfo::Mode mode = is_global_
      ? RelocInfo::CODE_TARGET_CONTEXT
      : RelocInfo::CODE_TARGET;
  __ Call(ic, mode);
  // The delta from the start of the map-compare instruction to the
  // test instruction. We use masm_-> directly here instead of the __
  // macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value. This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  // TODO(X64): Consider whether it's worth switching the test to a
  // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
  // be generated normally.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);

  if (!dst_.is(rax)) __ movq(dst_, rax);
  __ pop(key_);
  __ pop(receiver_);
}
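For readers following the patch-site trick described in the comments above: the IC initialization code inspects the instruction right after the call's return address, and if it is a test of (r)ax it treats the 32-bit immediate as the negated distance back to the map-check movq. A minimal sketch of that recovery, assuming the one-byte 0xA9 encoding of test eax, imm32 with a little-endian immediate (the helper name is invented, not V8 API):

// Sketch (assumptions, not V8's actual IC code): recover the patch-site
// delta that the generated code hides in "test eax, imm32" after a call.
#include <cstdint>
#include <cstring>

// Hypothetical helper: returns true and sets *delta only if an inlined
// fast path was generated, i.e. a test instruction follows the call site.
static bool ReadPatchDelta(const uint8_t* return_address, int32_t* delta) {
  if (return_address[0] != 0xA9) return false;  // not "test eax, imm32"
  int32_t negated_delta;
  std::memcpy(&negated_delta, return_address + 1, sizeof(negated_delta));
  *delta = -negated_delta;  // Generate() stored -delta_to_patch_site
  return true;
}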


class DeferredReferenceSetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceSetKeyedValue(Register value,
                                 Register key,
                                 Register receiver)
      : value_(value), key_(key), receiver_(receiver) {
    set_comment("[ DeferredReferenceSetKeyedValue");
  }

  virtual void Generate();

  Label* patch_site() { return &patch_site_; }

 private:
  Register value_;
  Register key_;
  Register receiver_;
  Label patch_site_;
};


void DeferredReferenceSetKeyedValue::Generate() {
  __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
  // Push receiver and key arguments on the stack.
  __ push(receiver_);
  __ push(key_);
  // Move value argument to eax as expected by the IC stub.
  if (!value_.is(rax)) __ movq(rax, value_);
  // Call the IC stub.
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
  __ Call(ic, RelocInfo::CODE_TARGET);
  // The delta from the start of the map-compare instructions (initial movq)
  // to the test instruction. We use masm_-> directly here instead of the
  // __ macro because the macro sometimes uses macro expansion to turn
  // into something that can't return a value. This is encountered
  // when doing generated code coverage tests.
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  // Restore value (returned from store IC), key and receiver
  // registers.
  if (!value_.is(rax)) __ movq(value_, rax);
  __ pop(key_);
  __ pop(receiver_);
}


class DeferredStackCheck: public DeferredCode {
 public:
  DeferredStackCheck() {
@ -2193,9 +2299,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
      // The receiver is the argument to the runtime call.  It is the
      // first value pushed when the reference was loaded to the
      // frame.
      // TODO(X64): Enable this and the switch back to fast, once they work.
      // frame_->PushElementAt(target.size() - 1);
      // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
      frame_->PushElementAt(target.size() - 1);
      Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
    }
    if (node->op() == Token::ASSIGN ||
        node->op() == Token::INIT_VAR ||
@ -2203,20 +2308,18 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
      Load(node->value());

    } else {
      // Literal* literal = node->value()->AsLiteral();
      Literal* literal = node->value()->AsLiteral();
      bool overwrite_value =
          (node->value()->AsBinaryOperation() != NULL &&
           node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
      // Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
      Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
      // There are two cases where the target is not read in the right hand
      // side, that are easy to test for: the right hand side is a literal,
      // or the right hand side is a different variable.  TakeValue invalidates
      // the target, with an implicit promise that it will be written to again
      // before it is read.
      // TODO(X64): Implement TakeValue optimization. Check issue 150016.
      if (false) {
        // if (literal != NULL || (right_var != NULL && right_var != var)) {
        // target.TakeValue(NOT_INSIDE_TYPEOF);
      if (literal != NULL || (right_var != NULL && right_var != var)) {
        target.TakeValue(NOT_INSIDE_TYPEOF);
      } else {
        target.GetValue(NOT_INSIDE_TYPEOF);
      }
@ -2247,9 +2350,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
      // argument to the runtime call is the receiver, which is the
      // first value pushed as part of the reference, which is below
      // the lhs value.
      // TODO(X64): Enable this once ToFastProperties works.
      // frame_->PushElementAt(target.size());
      // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
      frame_->PushElementAt(target.size());
      Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
    }
  }
}
@ -3379,7 +3481,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
  // functions to make sure they have 'Function' as their class.

  __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
  null.Branch(less);
  null.Branch(below);

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@ -3645,7 +3747,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {

  // Smi => false iff zero.
  ASSERT(kSmiTag == 0);
  __ testq(value.reg(), value.reg());
  __ testl(value.reg(), value.reg());
  dest->false_target()->Branch(zero);
  __ testl(value.reg(), Immediate(kSmiTagMask));
  dest->true_target()->Branch(zero);
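The testq-to-testl switches above are safe because these smi checks only depend on the low bits of the tagged word. As a reminder of the encoding the checks rely on (kSmiTag == 0 and kSmiTagSize == 1 in this V8 generation), a small self-contained sketch:

// Sketch of the 31-bit smi encoding these tag checks assume.
#include <cassert>
#include <cstdint>

const int kSmiTag = 0;
const int kSmiTagSize = 1;
const int kSmiTagMask = (1 << kSmiTagSize) - 1;  // == 1

inline bool IsSmi(int32_t tagged) {
  // Matches: testl(reg, Immediate(kSmiTagMask)); j(zero, ...)
  return (tagged & kSmiTagMask) == kSmiTag;
}

int main() {
  int32_t tagged_zero = 0 << kSmiTagSize;
  assert(IsSmi(tagged_zero));
  assert(tagged_zero == 0);  // "Smi => false iff zero": tagged 0 is 0
  return 0;
}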
@ -3728,7 +3830,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
  } else {
    // Anything else is a runtime error.
    Load(e);
    // frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
    frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
  }

  in_spilled_code_ = was_in_spilled_code;
@ -4130,7 +4232,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
    // A test rax instruction following the call signals that the inobject
    // property case was inlined.  Ensure that there is not a test eax
    // instruction here.
    __ nop();
    masm_->nop();
    // Discard the global object. The result is in answer.
    frame_->Drop();
    return answer;
@ -4700,7 +4802,7 @@ void DeferredReferenceGetNamedValue::Generate() {
  int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
  // Here we use masm_-> instead of the __ macro because this is the
  // instruction that gets patched and coverage code gets in the way.
  masm_->testq(rax, Immediate(-delta_to_patch_site));
  masm_->testl(rax, Immediate(-delta_to_patch_site));
  __ IncrementCounter(&Counters::named_load_inline_miss, 1);

  if (!dst_.is(rax)) __ movq(dst_, rax);
@ -4851,10 +4953,8 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
      Label add_success;
      __ j(no_overflow, &add_success);
      __ subl(operand->reg(), Immediate(smi_value));
      __ movsxlq(operand->reg(), operand->reg());
      deferred->Jump();
      __ bind(&add_success);
      __ movsxlq(operand->reg(), operand->reg());
      deferred->BindExit();
      frame_->Push(operand);
      break;
@ -4965,35 +5065,36 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
      }
      deferred->Branch(not_zero);

      if (!left_is_in_rax) __ movq(rax, left->reg());
      // Sign extend rax into rdx:rax.
      __ cqo();
      // All operations on the smi values are on 32-bit registers, which are
      // zero-extended into 64-bits by all 32-bit operations.
      if (!left_is_in_rax) __ movl(rax, left->reg());
      // Sign extend eax into edx:eax.
      __ cdq();
      // Check for 0 divisor.
      __ testq(right->reg(), right->reg());
      __ testl(right->reg(), right->reg());
      deferred->Branch(zero);
      // Divide rdx:rax by the right operand.
      __ idiv(right->reg());
      __ idivl(right->reg());

      // Complete the operation.
      if (op == Token::DIV) {
        // Check for negative zero result.  If result is zero, and divisor
        // is negative, return a floating point negative zero.  The
        // virtual frame is unchanged in this block, so local control flow
        // can use a Label rather than a JumpTarget.
        // Check for negative zero result.  If the result is zero, and the
        // divisor is negative, return a floating point negative zero.
        Label non_zero_result;
        __ testq(left->reg(), left->reg());
        __ testl(left->reg(), left->reg());
        __ j(not_zero, &non_zero_result);
        __ testq(right->reg(), right->reg());
        __ testl(right->reg(), right->reg());
        deferred->Branch(negative);
        // The frame is identical on all paths reaching this label.
        __ bind(&non_zero_result);
        // Check for the corner case of dividing the most negative smi by
        // -1. We cannot use the overflow flag, since it is not set by
        // idiv instruction.
        ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
        __ cmpq(rax, Immediate(0x40000000));
        __ cmpl(rax, Immediate(0x40000000));
        deferred->Branch(equal);
        // Check that the remainder is zero.
        __ testq(rdx, rdx);
        __ testl(rdx, rdx);
        deferred->Branch(not_zero);
        // Tag the result and store it in the quotient register.
        ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
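The cmpl against 0x40000000 guards the one overflowing case of smi division. Because both operands carry the times-two tag, the quotient idivl produces is already untagged, and with a 31-bit payload the only in-range inputs whose quotient escapes the smi range are min-smi divided by -1. A worked example in plain C++ (illustrative, not V8 code):

// Worked example of the min-smi / -1 corner case checked above.
#include <cassert>
#include <cstdint>

int main() {
  const int32_t kMinSmiValue = -(1 << 30);     // smallest 31-bit smi
  const int32_t kMaxSmiValue = (1 << 30) - 1;  // largest 31-bit smi

  int32_t tagged_left = kMinSmiValue * 2;      // tagged form (== INT32_MIN)
  int32_t tagged_right = -1 * 2;               // tagged -1

  // (2a) / (2b) == a / b: the quotient comes out untagged.
  int32_t quotient = tagged_left / tagged_right;
  assert(quotient == (1 << 30));    // 0x40000000, one past kMaxSmiValue
  assert(quotient > kMaxSmiValue);  // cannot be retagged; take slow path
  return 0;
}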
@ -5004,15 +5105,14 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
        frame_->Push(&quotient);
      } else {
        ASSERT(op == Token::MOD);
        // Check for a negative zero result.  If the result is zero, and
        // the dividend is negative, return a floating point negative
        // zero.  The frame is unchanged in this block, so local control
        // flow can use a Label rather than a JumpTarget.
        // Check for a negative zero result.  If the result is zero, and the
        // dividend is negative, return a floating point negative zero.
        Label non_zero_result;
        __ testq(rdx, rdx);
        __ testl(rdx, rdx);
        __ j(not_zero, &non_zero_result);
        __ testq(left->reg(), left->reg());
        __ testl(left->reg(), left->reg());
        deferred->Branch(negative);
        // The frame is identical on all paths reaching this label.
        __ bind(&non_zero_result);
        deferred->BindExit();
        left->Unuse();
@ -5056,9 +5156,9 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
      deferred->Branch(not_zero);

      // Untag both operands.
      __ movq(answer.reg(), left->reg());
      __ sar(answer.reg(), Immediate(kSmiTagSize));
      __ sar(rcx, Immediate(kSmiTagSize));
      __ movl(answer.reg(), left->reg());
      __ sarl(answer.reg(), Immediate(kSmiTagSize));
      __ sarl(rcx, Immediate(kSmiTagSize));
      // Perform the operation.
      switch (op) {
        case Token::SAR:
@ -5154,7 +5254,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
        ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
        // Remove smi tag from the left operand (but keep sign).
        // Left-hand operand has been copied into answer.
        __ sar(answer.reg(), Immediate(kSmiTagSize));
        __ sarl(answer.reg(), Immediate(kSmiTagSize));
        // Do multiplication of smis, leaving result in answer.
        __ imull(answer.reg(), right->reg());
        // Go slow on overflows.
@ -5164,7 +5264,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
        // in this block, so local control flow can use a Label rather
        // than a JumpTarget.
        Label non_zero_result;
        __ testq(answer.reg(), answer.reg());
        __ testl(answer.reg(), answer.reg());
        __ j(not_zero, &non_zero_result);
        __ movq(answer.reg(), left->reg());
        __ or_(answer.reg(), right->reg());
@ -5183,6 +5283,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
        break;

      case Token::BIT_XOR:
        ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
        __ xor_(answer.reg(), right->reg());
        break;

@ -5288,7 +5389,8 @@ void Reference::GetValue(TypeofState typeof_state) {
             kScratchRegister);
      // This branch is always a forwards branch so it's always a fixed
      // size which allows the assert below to succeed and patching to work.
      deferred->Branch(not_equal);
      // Don't use deferred->Branch(...), since that might add coverage code.
      masm->j(not_equal, deferred->entry_label());

      // The delta from the patch label to the load offset must be
      // statically known.
@ -5315,25 +5417,118 @@ void Reference::GetValue(TypeofState typeof_state) {
      Variable* var = expression_->AsVariableProxy()->AsVariable();
      bool is_global = var != NULL;
      ASSERT(!is_global || var->is_global());

      // Inline array load code if inside of a loop.  We do not know
      // the receiver map yet, so we initially generate the code with
      // a check against an invalid map.  In the inline cache code, we
      // patch the map check if appropriate.
      if (cgen_->loop_nesting() > 0) {
        Comment cmnt(masm, "[ Inlined load from keyed Property");

        // TODO(x64): Implement inlined loads for keyed properties.
        // Comment cmnt(masm, "[ Load from keyed Property");
        Result key = cgen_->frame()->Pop();
        Result receiver = cgen_->frame()->Pop();
        key.ToRegister();
        receiver.ToRegister();

        RelocInfo::Mode mode = is_global
            ? RelocInfo::CODE_TARGET_CONTEXT
            : RelocInfo::CODE_TARGET;
        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
        // Make sure that we do not have a test instruction after the
        // call.  A test instruction after the call is used to
        // indicate that we have generated an inline version of the
        // keyed load.  The explicit nop instruction is here because
        // the push that follows might be peep-hole optimized away.
        __ nop();
        cgen_->frame()->Push(&answer);
        // Use a fresh temporary to load the elements without destroying
        // the receiver which is needed for the deferred slow case.
        Result elements = cgen_->allocator()->Allocate();
        ASSERT(elements.is_valid());

        // Use a fresh temporary for the index and later the loaded
        // value.
        Result index = cgen_->allocator()->Allocate();
        ASSERT(index.is_valid());

        DeferredReferenceGetKeyedValue* deferred =
            new DeferredReferenceGetKeyedValue(index.reg(),
                                               receiver.reg(),
                                               key.reg(),
                                               is_global);

        // Check that the receiver is not a smi (only needed if this
        // is not a load from the global context) and that it has the
        // expected map.
        if (!is_global) {
          __ testl(receiver.reg(), Immediate(kSmiTagMask));
          deferred->Branch(zero);
        }

        // Initially, use an invalid map. The map is patched in the IC
        // initialization code.
        __ bind(deferred->patch_site());
        // Use masm-> here instead of the double underscore macro since extra
        // coverage code can interfere with the patching.
        masm->movq(kScratchRegister, Factory::null_value(),
                   RelocInfo::EMBEDDED_OBJECT);
        masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
                   kScratchRegister);
        deferred->Branch(not_equal);

        // Check that the key is a non-negative smi.
        __ testl(key.reg(),
                 Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
        deferred->Branch(not_zero);

        // Get the elements array from the receiver and check that it
        // is not a dictionary.
        __ movq(elements.reg(),
                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
        __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
               Factory::fixed_array_map());
        deferred->Branch(not_equal);

        // Shift the key to get the actual index value and check that
        // it is within bounds.
        __ movl(index.reg(), key.reg());
        __ shrl(index.reg(), Immediate(kSmiTagSize));
        __ cmpl(index.reg(),
                FieldOperand(elements.reg(), FixedArray::kLengthOffset));
        deferred->Branch(above_equal);

        // The index register holds the un-smi-tagged key. It has been
        // zero-extended to 64-bits, so it can be used directly as index in the
        // operand below.
        // Load and check that the result is not the hole.  We could
        // reuse the index or elements register for the value.
        //
        // TODO(206): Consider whether it makes sense to try some
        // heuristic about which register to reuse.  For example, if
        // one is rax, then we can reuse that one because the value
        // coming from the deferred code will be in rax.
        Result value = index;
        __ movq(value.reg(),
                Operand(elements.reg(),
                        index.reg(),
                        times_pointer_size,
                        FixedArray::kHeaderSize - kHeapObjectTag));
        elements.Unuse();
        index.Unuse();
        __ Cmp(value.reg(), Factory::the_hole_value());
        deferred->Branch(equal);
        __ IncrementCounter(&Counters::keyed_load_inline, 1);

        deferred->BindExit();
        // Restore the receiver and key to the frame and push the
        // result on top of it.
        cgen_->frame()->Push(&receiver);
        cgen_->frame()->Push(&key);
        cgen_->frame()->Push(&value);

      } else {
        Comment cmnt(masm, "[ Load from keyed Property");
        RelocInfo::Mode mode = is_global
            ? RelocInfo::CODE_TARGET_CONTEXT
            : RelocInfo::CODE_TARGET;
        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
        // Make sure that we do not have a test instruction after the
        // call.  A test instruction after the call is used to
        // indicate that we have generated an inline version of the
        // keyed load.  The explicit nop instruction is here because
        // the push that follows might be peep-hole optimized away.
        __ nop();
        cgen_->frame()->Push(&answer);
      }
      break;
    }
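Condensed, the inlined keyed load emits a fixed chain of guards before the raw element read. The sketch below restates those guards over plain C++ data (descriptive names, not V8 API), with each failing guard standing in for a jump to the deferred IC call:

// Illustrative restatement of the inlined keyed-load guards.
#include <cstdint>

bool TryInlineKeyedLoad(const void* receiver_map, const void* expected_map,
                        uint32_t tagged_key, const int32_t* elements,
                        uint32_t length, int32_t hole_sentinel,
                        int32_t* out) {
  if (receiver_map != expected_map) return false;  // patched map check
  // Key must be a non-negative smi: tag bit and sign bit both clear.
  if (tagged_key & 0x80000001u) return false;
  uint32_t index = tagged_key >> 1;                // untag the key
  if (index >= length) return false;               // bounds check
  int32_t value = elements[index];
  if (value == hole_sentinel) return false;        // the-hole check
  *out = value;
  return true;
}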

@ -5400,15 +5595,105 @@ void Reference::SetValue(InitState init_state) {
    case KEYED: {
      Comment cmnt(masm, "[ Store to keyed Property");

      // TODO(x64): Implement inlined version of keyed stores.
      // Generate inlined version of the keyed store if the code is in
      // a loop and the key is likely to be a smi.
      Property* property = expression()->AsProperty();
      ASSERT(property != NULL);
      SmiAnalysis* key_smi_analysis = property->key()->type();

      Result answer = cgen_->frame()->CallKeyedStoreIC();
      // Make sure that we do not have a test instruction after the
      // call.  A test instruction after the call is used to
      // indicate that we have generated an inline version of the
      // keyed store.
      __ nop();
      cgen_->frame()->Push(&answer);
      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
        Comment cmnt(masm, "[ Inlined store to keyed Property");

        // Get the receiver, key and value into registers.
        Result value = cgen_->frame()->Pop();
        Result key = cgen_->frame()->Pop();
        Result receiver = cgen_->frame()->Pop();

        Result tmp = cgen_->allocator_->Allocate();
        ASSERT(tmp.is_valid());

        // Determine whether the value is a constant before putting it
        // in a register.
        bool value_is_constant = value.is_constant();

        // Make sure that value, key and receiver are in registers.
        value.ToRegister();
        key.ToRegister();
        receiver.ToRegister();

        DeferredReferenceSetKeyedValue* deferred =
            new DeferredReferenceSetKeyedValue(value.reg(),
                                               key.reg(),
                                               receiver.reg());

        // Check that the value is a smi if it is not a constant.
        // We can skip the write barrier for smis and constants.
        if (!value_is_constant) {
          __ testl(value.reg(), Immediate(kSmiTagMask));
          deferred->Branch(not_zero);
        }

        // Check that the key is a non-negative smi.
        __ testl(key.reg(),
                 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
        deferred->Branch(not_zero);

        // Check that the receiver is not a smi.
        __ testl(receiver.reg(), Immediate(kSmiTagMask));
        deferred->Branch(zero);

        // Check that the receiver is a JSArray.
        __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
        deferred->Branch(not_equal);

        // Check that the key is within bounds.  Both the key and the
        // length of the JSArray are smis, so compare only low 32 bits.
        __ cmpl(key.reg(),
                FieldOperand(receiver.reg(), JSArray::kLengthOffset));
        deferred->Branch(greater_equal);

        // Get the elements array from the receiver and check that it
        // is a flat array (not a dictionary).
        __ movq(tmp.reg(),
                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
        // Bind the deferred code patch site to be able to locate the
        // fixed array map comparison.  When debugging, we patch this
        // comparison to always fail so that we will hit the IC call
        // in the deferred code which will allow the debugger to
        // break for fast case stores.
        __ bind(deferred->patch_site());
        // Avoid using __ to ensure the distance from patch_site
        // to the map address is always the same.
        masm->movq(kScratchRegister, Factory::fixed_array_map(),
                   RelocInfo::EMBEDDED_OBJECT);
        __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
                kScratchRegister);
        deferred->Branch(not_equal);

        // Store the value.
        ASSERT_EQ(1, kSmiTagSize);
        ASSERT_EQ(0, kSmiTag);
        __ movq(Operand(tmp.reg(),
                        key.reg(),
                        times_half_pointer_size,
                        FixedArray::kHeaderSize - kHeapObjectTag),
                value.reg());
        __ IncrementCounter(&Counters::keyed_store_inline, 1);

        deferred->BindExit();

        cgen_->frame()->Push(&receiver);
        cgen_->frame()->Push(&key);
        cgen_->frame()->Push(&value);
      } else {
        Result answer = cgen_->frame()->CallKeyedStoreIC();
        // Make sure that we do not have a test instruction after the
        // call.  A test instruction after the call is used to
        // indicate that we have generated an inline version of the
        // keyed store.
        masm->nop();
        cgen_->frame()->Push(&answer);
      }
      break;
    }
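The store-side fast path mirrors the load but adds one more constraint: the value itself must be a smi (or a compile-time constant) so the write barrier can be skipped, since only stores of heap-object pointers can create references the garbage collector needs to track. A minimal sketch of that guard under this tagging scheme (illustrative, not V8 API):

// Sketch: when the inlined keyed store may skip the write barrier.
inline bool CanSkipWriteBarrier(int32_t tagged_value,
                                bool value_is_constant) {
  const int32_t kSmiTagMask = 1;  // heap pointers have the tag bit set
  return value_is_constant || (tagged_value & kSmiTagMask) == 0;
}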

@ -6429,7 +6714,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
  __ jmp(&done);

  __ bind(&load_smi);
  __ sar(src, Immediate(kSmiTagSize));
  __ sarl(src, Immediate(kSmiTagSize));
  __ cvtlsi2sd(dst, src);

  __ bind(&done);
@ -6562,7 +6847,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {

  // Smi check both operands.
  __ movq(rcx, rbx);
  __ or_(rcx, rax);
  __ or_(rcx, rax);  // The value in ecx is used for negative zero test later.
  __ testl(rcx, Immediate(kSmiTagMask));
  __ j(not_zero, slow);

@ -6570,14 +6855,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
    case Token::ADD: {
      __ addl(rax, rbx);
      __ j(overflow, slow);  // The slow case rereads operands from the stack.
      __ movsxlq(rax, rax);  // Sign extend eax into rax.
      break;
    }

    case Token::SUB: {
      __ subl(rax, rbx);
      __ j(overflow, slow);  // The slow case rereads operands from the stack.
      __ movsxlq(rax, rax);  // Sign extend eax into rax.
      break;
    }

@ -6585,27 +6868,25 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
      // If the smi tag is 0 we can just leave the tag on one operand.
      ASSERT(kSmiTag == 0);  // adjust code below if not the case
      // Remove tag from one of the operands (but keep sign).
      __ sar(rax, Immediate(kSmiTagSize));
      __ sarl(rax, Immediate(kSmiTagSize));
      // Do multiplication.
      __ imull(rax, rbx);  // multiplication of smis; result in eax
      // Go slow on overflows.
      __ j(overflow, slow);
      // Check for negative zero result.
      __ movsxlq(rax, rax);  // Sign extend eax into rax.
      __ NegativeZeroTest(rax, rcx, slow);  // use rcx = x | y
      __ NegativeZeroTest(rax, rcx, slow);  // ecx (not rcx) holds x | y.
      break;

    case Token::DIV:
      // Sign extend rax into rdx:rax
      // (also sign extends eax into edx if eax is Smi).
      __ cqo();
      // Sign extend eax into edx:eax.
      __ cdq();
      // Check for 0 divisor.
      __ testq(rbx, rbx);
      __ testl(rbx, rbx);
      __ j(zero, slow);
      // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
      __ idiv(rbx);
      // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
      __ idivl(rbx);
      // Check that the remainder is zero.
      __ testq(rdx, rdx);
      __ testl(rdx, rdx);
      __ j(not_zero, slow);
      // Check for the corner case of dividing the most negative smi
      // by -1. We cannot use the overflow flag, since it is not set
@ -6613,28 +6894,27 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      // TODO(X64): TODO(Smi): Smi implementation dependent constant.
      // Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
      __ cmpq(rax, Immediate(0x40000000));
      __ cmpl(rax, Immediate(0x40000000));
      __ j(equal, slow);
      // Check for negative zero result.
      __ NegativeZeroTest(rax, rcx, slow);  // use ecx = x | y
      __ NegativeZeroTest(rax, rcx, slow);  // ecx (not rcx) holds x | y.
      // Tag the result and store it in register rax.
      ASSERT(kSmiTagSize == times_2);  // adjust code if not the case
      __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
      break;

    case Token::MOD:
      // Sign extend rax into rdx:rax
      // (also sign extends eax into edx if eax is Smi).
      __ cqo();
      // Sign extend eax into edx:eax
      __ cdq();
      // Check for 0 divisor.
      __ testq(rbx, rbx);
      __ testl(rbx, rbx);
      __ j(zero, slow);
      // Divide rdx:rax by rbx.
      __ idiv(rbx);
      // Divide edx:eax by ebx.
      __ idivl(rbx);
      // Check for negative zero result.
      __ NegativeZeroTest(rdx, rcx, slow);  // use ecx = x | y
      __ NegativeZeroTest(rdx, rcx, slow);  // ecx (not rcx) holds x | y.
      // Move remainder to register rax.
      __ movq(rax, rdx);
      __ movl(rax, rdx);
      break;

    case Token::BIT_OR:
@ -6654,7 +6934,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
    case Token::SHR:
    case Token::SAR:
      // Move the second operand into register ecx.
      __ movq(rcx, rbx);
      __ movl(rcx, rbx);
      // Remove tags from operands (but keep sign).
      __ sarl(rax, Immediate(kSmiTagSize));
      __ sarl(rcx, Immediate(kSmiTagSize));
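All of GenerateSmiCode leans on the same tag arithmetic: untag with an arithmetic right shift, operate on the 32-bit register halves, and retag a quotient with lea rax, [rax+rax] (i.e. rax += rax). A compact sanity check of those identities in plain C++, assuming kSmiTag == 0 and kSmiTagSize == 1:

// Sanity check of the smi tag arithmetic used by GenerateSmiCode.
#include <cassert>

int main() {
  int x = 21, y = 2;
  int tagged_x = x * 2;            // tagging: value << kSmiTagSize
  int tagged_y = y * 2;

  assert((tagged_x >> 1) == x);    // untag: sarl reg, kSmiTagSize

  // Division: the quotient of two tagged smis is already untagged...
  int quotient = tagged_x / tagged_y;
  assert(quotient == x / y);
  // ...and lea rax,[rax+rax] retags it in one instruction.
  assert(quotient + quotient == (x / y) * 2);

  // Multiplication: untag one operand first, so (x)*(2y) == 2*(x*y).
  assert((tagged_x >> 1) * tagged_y == (x * y) * 2);
  return 0;
}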
4
deps/v8/src/x64/codegen-x64.h
vendored
@ -361,7 +361,7 @@ class CodeGenerator: public AstVisitor {

#define DEF_VISIT(type) \
  void Visit##type(type* node);
  NODE_LIST(DEF_VISIT)
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

  // Visit a statement and then spill the virtual frame if control flow can
@ -548,7 +548,7 @@ class CodeGenerator: public AstVisitor {
  // information.
  void CodeForFunctionPosition(FunctionLiteral* fun);
  void CodeForReturnPosition(FunctionLiteral* fun);
  void CodeForStatementPosition(Node* node);
  void CodeForStatementPosition(AstNode* node);
  void CodeForSourcePosition(int pos);

#ifdef DEBUG
15
deps/v8/src/x64/debug-x64.cc
vendored
@ -80,6 +80,21 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED
}

void BreakLocationIterator::ClearDebugBreakAtReturn() {
  // TODO(X64): Implement this when we start setting Debug breaks.
  UNIMPLEMENTED();
}

bool BreakLocationIterator::IsDebugBreakAtReturn() {
  // TODO(X64): Implement this when we start setting Debug breaks.
  UNIMPLEMENTED();
  return false;
}

void BreakLocationIterator::SetDebugBreakAtReturn() {
  UNIMPLEMENTED();
}

#endif  // ENABLE_DEBUGGER_SUPPORT

} } // namespace v8::internal
698
deps/v8/src/x64/disasm-x64.cc
vendored
@ -34,8 +34,15 @@

namespace disasm {

enum OperandOrder {
  UNSET_OP_ORDER = 0, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER
enum OperandType {
  UNSET_OP_ORDER = 0,
  // Operand size decides between 16, 32 and 64 bit operands.
  REG_OPER_OP_ORDER = 1,  // Register destination, operand source.
  OPER_REG_OP_ORDER = 2,  // Operand destination, register source.
  // Fixed 8-bit operands.
  BYTE_SIZE_OPERAND_FLAG = 4,
  BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
  BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
};

//------------------------------------------------------------------
@ -43,28 +50,53 @@ enum OperandOrder {
//------------------------------------------------------------------
struct ByteMnemonic {
  int b;  // -1 terminates, otherwise must be in range (0..255)
  OperandOrder op_order_;
  OperandType op_order_;
  const char* mnem;
};


static ByteMnemonic two_operands_instr[] = {
  { 0x03, REG_OPER_OP_ORDER, "add" },
  { 0x21, OPER_REG_OP_ORDER, "and" },
  { 0x23, REG_OPER_OP_ORDER, "and" },
  { 0x3B, REG_OPER_OP_ORDER, "cmp" },
  { 0x8D, REG_OPER_OP_ORDER, "lea" },
  { 0x09, OPER_REG_OP_ORDER, "or" },
  { 0x0B, REG_OPER_OP_ORDER, "or" },
  { 0x1B, REG_OPER_OP_ORDER, "sbb" },
  { 0x29, OPER_REG_OP_ORDER, "sub" },
  { 0x2B, REG_OPER_OP_ORDER, "sub" },
  { 0x85, REG_OPER_OP_ORDER, "test" },
  { 0x31, OPER_REG_OP_ORDER, "xor" },
  { 0x33, REG_OPER_OP_ORDER, "xor" },
  { 0x87, REG_OPER_OP_ORDER, "xchg" },
  { 0x8A, REG_OPER_OP_ORDER, "movb" },
  { 0x8B, REG_OPER_OP_ORDER, "mov" },
  { 0x00, BYTE_OPER_REG_OP_ORDER, "add" },
  { 0x01, OPER_REG_OP_ORDER, "add" },
  { 0x02, BYTE_REG_OPER_OP_ORDER, "add" },
  { 0x03, REG_OPER_OP_ORDER, "add" },
  { 0x08, BYTE_OPER_REG_OP_ORDER, "or" },
  { 0x09, OPER_REG_OP_ORDER, "or" },
  { 0x0A, BYTE_REG_OPER_OP_ORDER, "or" },
  { 0x0B, REG_OPER_OP_ORDER, "or" },
  { 0x10, BYTE_OPER_REG_OP_ORDER, "adc" },
  { 0x11, OPER_REG_OP_ORDER, "adc" },
  { 0x12, BYTE_REG_OPER_OP_ORDER, "adc" },
  { 0x13, REG_OPER_OP_ORDER, "adc" },
  { 0x18, BYTE_OPER_REG_OP_ORDER, "sbb" },
  { 0x19, OPER_REG_OP_ORDER, "sbb" },
  { 0x1A, BYTE_REG_OPER_OP_ORDER, "sbb" },
  { 0x1B, REG_OPER_OP_ORDER, "sbb" },
  { 0x20, BYTE_OPER_REG_OP_ORDER, "and" },
  { 0x21, OPER_REG_OP_ORDER, "and" },
  { 0x22, BYTE_REG_OPER_OP_ORDER, "and" },
  { 0x23, REG_OPER_OP_ORDER, "and" },
  { 0x28, BYTE_OPER_REG_OP_ORDER, "sub" },
  { 0x29, OPER_REG_OP_ORDER, "sub" },
  { 0x2A, BYTE_REG_OPER_OP_ORDER, "sub" },
  { 0x2B, REG_OPER_OP_ORDER, "sub" },
  { 0x30, BYTE_OPER_REG_OP_ORDER, "xor" },
  { 0x31, OPER_REG_OP_ORDER, "xor" },
  { 0x32, BYTE_REG_OPER_OP_ORDER, "xor" },
  { 0x33, REG_OPER_OP_ORDER, "xor" },
  { 0x38, BYTE_OPER_REG_OP_ORDER, "cmp" },
  { 0x39, OPER_REG_OP_ORDER, "cmp" },
  { 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
  { 0x3B, REG_OPER_OP_ORDER, "cmp" },
  { 0x8D, REG_OPER_OP_ORDER, "lea" },
  { 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
  { 0x85, REG_OPER_OP_ORDER, "test" },
  { 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
  { 0x87, REG_OPER_OP_ORDER, "xchg" },
  { 0x88, BYTE_OPER_REG_OP_ORDER, "mov" },
  { 0x89, OPER_REG_OP_ORDER, "mov" },
  { 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
  { 0x8B, REG_OPER_OP_ORDER, "mov" },
  { -1, UNSET_OP_ORDER, "" }
};
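The widened enum packs two properties into one table value: the operand order in the low bits and a fixed-8-bit flag in bit 2, which CopyTable() later splits back apart. A sketch of that decomposition, mirroring the enum in the hunk above (the helper names are illustrative):

// Sketch of the OperandType flag decomposition done by CopyTable().
enum OperandType {
  UNSET_OP_ORDER = 0,
  REG_OPER_OP_ORDER = 1,
  OPER_REG_OP_ORDER = 2,
  BYTE_SIZE_OPERAND_FLAG = 4,
  BYTE_REG_OPER_OP_ORDER = REG_OPER_OP_ORDER | BYTE_SIZE_OPERAND_FLAG,
  BYTE_OPER_REG_OP_ORDER = OPER_REG_OP_ORDER | BYTE_SIZE_OPERAND_FLAG
};

inline bool IsByteSized(OperandType type) {
  return (type & BYTE_SIZE_OPERAND_FLAG) != 0;
}

inline OperandType OperandOrderOf(OperandType type) {
  return static_cast<OperandType>(type & ~BYTE_SIZE_OPERAND_FLAG);
}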

@ -97,6 +129,7 @@ static ByteMnemonic short_immediate_instr[] = {
  { 0x05, UNSET_OP_ORDER, "add" },
  { 0x0D, UNSET_OP_ORDER, "or" },
  { 0x15, UNSET_OP_ORDER, "adc" },
  { 0x1D, UNSET_OP_ORDER, "sbb" },
  { 0x25, UNSET_OP_ORDER, "and" },
  { 0x2D, UNSET_OP_ORDER, "sub" },
  { 0x35, UNSET_OP_ORDER, "xor" },
@ -127,7 +160,8 @@ enum InstructionType {
struct InstructionDesc {
  const char* mnem;
  InstructionType type;
  OperandOrder op_order_;
  OperandType op_order_;
  bool byte_size_operation;  // Fixed 8-bit operation.
};


@ -143,7 +177,7 @@ class InstructionTable {
  void Clear();
  void Init();
  void CopyTable(ByteMnemonic bm[], InstructionType type);
  void SetTableRange(InstructionType type, byte start, byte end,
  void SetTableRange(InstructionType type, byte start, byte end, bool byte_size,
                     const char* mnem);
  void AddJumpConditionalShort();
};
@ -157,9 +191,10 @@ InstructionTable::InstructionTable() {

void InstructionTable::Clear() {
  for (int i = 0; i < 256; i++) {
    instructions_[i].mnem = "";
    instructions_[i].mnem = "(bad)";
    instructions_[i].type = NO_INSTR;
    instructions_[i].op_order_ = UNSET_OP_ORDER;
    instructions_[i].byte_size_operation = false;
  }
}

@ -170,9 +205,9 @@ void InstructionTable::Init() {
  CopyTable(call_jump_instr, CALL_JUMP_INSTR);
  CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
  AddJumpConditionalShort();
  SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, "push");
  SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, "pop");
  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
  SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, false, "push");
  SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, false, "pop");
  SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, false, "mov");
}


@ -180,20 +215,27 @@ void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
  for (int i = 0; bm[i].b >= 0; i++) {
    InstructionDesc* id = &instructions_[bm[i].b];
    id->mnem = bm[i].mnem;
    id->op_order_ = bm[i].op_order_;
    assert(id->type == NO_INSTR);  // Information already entered
    OperandType op_order = bm[i].op_order_;
    id->op_order_ =
        static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
    assert(id->type == NO_INSTR);  // Information not already entered
    id->type = type;
    id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
  }
}


void InstructionTable::SetTableRange(InstructionType type, byte start,
                                     byte end, const char* mnem) {
void InstructionTable::SetTableRange(InstructionType type,
                                     byte start,
                                     byte end,
                                     bool byte_size,
                                     const char* mnem) {
  for (byte b = start; b <= end; b++) {
    InstructionDesc* id = &instructions_[b];
    assert(id->type == NO_INSTR);  // Information already entered
    id->mnem = mnem;
    id->type = type;
    id->byte_size_operation = byte_size;
  }
}

@ -211,13 +253,16 @@ void InstructionTable::AddJumpConditionalShort() {
static InstructionTable instruction_table;


// The X64 disassembler implementation.
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.

enum UnimplementedOpcodeAction {
  CONTINUE_ON_UNIMPLEMENTED_OPCODE,
  ABORT_ON_UNIMPLEMENTED_OPCODE
};


// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
 public:
  DisassemblerX64(const NameConverter& converter,
@ -228,7 +273,9 @@ class DisassemblerX64 {
        abort_on_unimplemented_(
            unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
        rex_(0),
        operand_size_(0) {
        operand_size_(0),
        group_1_prefix_(0),
        byte_size_operand_(false) {
    tmp_buffer_[0] = '\0';
  }

@ -240,6 +287,12 @@ class DisassemblerX64 {
  int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);

 private:
  enum OperandSize {
    BYTE_SIZE = 0,
    WORD_SIZE = 1,
    DOUBLEWORD_SIZE = 2,
    QUADWORD_SIZE = 3
  };

  const NameConverter& converter_;
  v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
@ -247,12 +300,10 @@ class DisassemblerX64 {
  bool abort_on_unimplemented_;
  // Prefixes parsed
  byte rex_;
  byte operand_size_;

  void setOperandSizePrefix(byte prefix) {
    ASSERT_EQ(0x66, prefix);
    operand_size_ = prefix;
  }
  byte operand_size_;  // 0x66 or (if no group 3 prefix is present) 0x0.
  byte group_1_prefix_;  // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
  // Byte size operand override.
  bool byte_size_operand_;

  void setRex(byte rex) {
    ASSERT_EQ(0x40, rex & 0xF0);
@ -272,12 +323,15 @@ class DisassemblerX64 {

  bool rex_w() { return (rex_ & 0x08) != 0; }

  int operand_size() {
    return rex_w() ? 64 : (operand_size_ != 0) ? 16 : 32;
  OperandSize operand_size() {
    if (byte_size_operand_) return BYTE_SIZE;
    if (rex_w()) return QUADWORD_SIZE;
    if (operand_size_ != 0) return WORD_SIZE;
    return DOUBLEWORD_SIZE;
  }

  char operand_size_code() {
    return rex_w() ? 'q' : (operand_size_ != 0) ? 'w' : 'l';
    return "bwlq"[operand_size()];
  }

  const char* NameOfCPURegister(int reg) const {
@ -312,7 +366,7 @@ class DisassemblerX64 {
                      int* base) {
    *scale = (data >> 6) & 3;
    *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
    *base = data & 7 | (rex_b() ? 8 : 0);
    *base = (data & 7) | (rex_b() ? 8 : 0);
  }
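A note on the parenthesization fix just above: since & binds tighter than | in C++, the old expression already parsed as (data & 7) | rex_bit, so the parentheses only make the intent explicit and quiet compiler warnings. The precedence trap that does change meaning pairs & with a comparison, as this small check illustrates:

// Precedence check: '&' vs '|' (harmless) and '&' vs '==' (a real trap).
#include <cassert>

int main() {
  int data = 0x2D;
  int rex_bit = 8;
  assert((data & 7 | rex_bit) == ((data & 7) | rex_bit));  // same parse
  // '==' binds tighter than '&': this is data & (7 == 7), i.e. data & 1.
  assert((data & 7 == 7) == (data & 1));
  return 0;
}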

  typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
@ -322,11 +376,14 @@ class DisassemblerX64 {
  int PrintRightOperand(byte* modrmp);
  int PrintRightByteOperand(byte* modrmp);
  int PrintOperands(const char* mnem,
                    OperandOrder op_order,
                    OperandType op_order,
                    byte* data);
  int PrintImmediate(byte* data, OperandSize size);
  int PrintImmediateOp(byte* data);
  const char* TwoByteMnemonic(byte opcode);
  int TwoByteOpcodeInstruction(byte* data);
  int F7Instruction(byte* data);
  int D1D3C1Instruction(byte* data);
  int ShiftInstruction(byte* data);
  int JumpShort(byte* data);
  int JumpConditional(byte* data);
  int JumpConditionalShort(byte* data);
@ -336,7 +393,7 @@ class DisassemblerX64 {

  void UnimplementedInstruction() {
    if (abort_on_unimplemented_) {
      UNIMPLEMENTED();
      CHECK(false);
    } else {
      AppendToBuffer("'Unimplemented Instruction'");
    }
@ -451,6 +508,36 @@ int DisassemblerX64::PrintRightOperandHelper(
}


int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
  int64_t value;
  int count;
  switch (size) {
    case BYTE_SIZE:
      value = *data;
      count = 1;
      break;
    case WORD_SIZE:
      value = *reinterpret_cast<int16_t*>(data);
      count = 2;
      break;
    case DOUBLEWORD_SIZE:
      value = *reinterpret_cast<uint32_t*>(data);
      count = 4;
      break;
    case QUADWORD_SIZE:
      value = *reinterpret_cast<int32_t*>(data);
      count = 4;
      break;
    default:
      UNREACHABLE();
      value = 0;  // Initialize variables on all paths to satisfy the compiler.
      count = 0;
  }
  AppendToBuffer(V8_PTR_PREFIX"x", value);
  return count;
}


int DisassemblerX64::PrintRightOperand(byte* modrmp) {
  return PrintRightOperandHelper(modrmp,
                                 &DisassemblerX64::NameOfCPURegister);
@ -466,25 +553,30 @@ int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerX64::PrintOperands(const char* mnem,
                                   OperandOrder op_order,
                                   OperandType op_order,
                                   byte* data) {
  byte modrm = *data;
  int mod, regop, rm;
  get_modrm(modrm, &mod, &regop, &rm);
  int advance = 0;
  const char* register_name =
      byte_size_operand_ ? NameOfByteCPURegister(regop)
                         : NameOfCPURegister(regop);
  switch (op_order) {
    case REG_OPER_OP_ORDER: {
      AppendToBuffer("%s%c %s,",
                     mnem,
                     operand_size_code(),
                     NameOfCPURegister(regop));
      advance = PrintRightOperand(data);
                     register_name);
      advance = byte_size_operand_ ? PrintRightByteOperand(data)
                                   : PrintRightOperand(data);
      break;
    }
    case OPER_REG_OP_ORDER: {
      AppendToBuffer("%s%c ", mnem, operand_size_code());
      advance = PrintRightOperand(data);
      AppendToBuffer(",%s", NameOfCPURegister(regop));
      advance = byte_size_operand_ ? PrintRightByteOperand(data)
                                   : PrintRightOperand(data);
      AppendToBuffer(",%s", register_name);
      break;
    }
    default:
@ -498,7 +590,7 @@ int DisassemblerX64::PrintOperands(const char* mnem,
// Returns number of bytes used by machine instruction, including *data byte.
// Writes immediate instructions to 'tmp_buffer_'.
int DisassemblerX64::PrintImmediateOp(byte* data) {
  bool sign_extension_bit = (*data & 0x02) != 0;
  bool byte_size_immediate = (*data & 0x02) != 0;
  byte modrm = *(data + 1);
  int mod, regop, rm;
  get_modrm(modrm, &mod, &regop, &rm);
@ -528,15 +620,12 @@ int DisassemblerX64::PrintImmediateOp(byte* data) {
    default:
      UnimplementedInstruction();
  }
  AppendToBuffer("%s ", mnem);
  AppendToBuffer("%s%c ", mnem, operand_size_code());
  int count = PrintRightOperand(data + 1);
  if (sign_extension_bit) {
    AppendToBuffer(",0x%x", *(data + 1 + count));
    return 1 + count + 1 /*int8*/;
  } else {
    AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
    return 1 + count + 4 /*int32_t*/;
  }
  AppendToBuffer(",0x");
  OperandSize immediate_size = byte_size_immediate ? BYTE_SIZE : operand_size();
  count += PrintImmediate(data + 1 + count, immediate_size);
  return 1 + count;
}


@ -589,78 +678,65 @@ int DisassemblerX64::F7Instruction(byte* data) {
}


int DisassemblerX64::D1D3C1Instruction(byte* data) {
  byte op = *data;
  assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
int DisassemblerX64::ShiftInstruction(byte* data) {
  byte op = *data & (~1);
  if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
    UnimplementedInstruction();
    return 1;
  }
  byte modrm = *(data + 1);
  int mod, regop, rm;
  get_modrm(modrm, &mod, &regop, &rm);
  ASSERT(regop < 8);
  regop &= 0x7;  // The REX.R bit does not affect the operation.
  int imm8 = -1;
  int num_bytes = 2;
  if (mod == 3) {
    const char* mnem = NULL;
    if (op == 0xD1) {
      imm8 = 1;
      switch (regop) {
        case 2:
          mnem = "rcl";
          break;
        case 7:
          mnem = "sar";
          break;
        case 4:
          mnem = "shl";
          break;
        default:
          UnimplementedInstruction();
      }
    } else if (op == 0xC1) {
      imm8 = *(data + 2);
      num_bytes = 3;
      switch (regop) {
        case 2:
          mnem = "rcl";
          break;
        case 4:
          mnem = "shl";
          break;
        case 5:
          mnem = "shr";
          break;
        case 7:
          mnem = "sar";
          break;
        default:
          UnimplementedInstruction();
      }
    } else if (op == 0xD3) {
      switch (regop) {
        case 4:
          mnem = "shl";
          break;
        case 5:
          mnem = "shr";
          break;
        case 7:
          mnem = "sar";
          break;
        default:
          UnimplementedInstruction();
      }
    }
    assert(mnem != NULL);
    AppendToBuffer("%s%c %s,",
                   mnem,
                   operand_size_code(),
                   NameOfCPURegister(rm));
    if (imm8 > 0) {
      AppendToBuffer("%d", imm8);
    } else {
      AppendToBuffer("cl");
    }
  } else {
  if (mod != 3) {
    UnimplementedInstruction();
    return num_bytes;
  }
  const char* mnem = NULL;
  switch (regop) {
    case 0:
      mnem = "rol";
      break;
    case 1:
      mnem = "ror";
      break;
    case 2:
      mnem = "rcl";
      break;
    case 3:
      mnem = "rcr";
      break;
    case 4:
      mnem = "shl";
      break;
    case 5:
      mnem = "shr";
      break;
    case 7:
      mnem = "sar";
      break;
    default:
      UnimplementedInstruction();
      return num_bytes;
  }
  assert(mnem != NULL);
  if (op == 0xD0) {
    imm8 = 1;
  } else if (op == 0xC0) {
    imm8 = *(data + 2);
    num_bytes = 3;
  }
  AppendToBuffer("%s%c %s,",
                 mnem,
                 operand_size_code(),
                 byte_size_operand_ ? NameOfByteCPURegister(rm)
                                    : NameOfCPURegister(rm));
  if (op == 0xD2) {
    AppendToBuffer("cl");
  } else {
    AppendToBuffer("%d", imm8);
  }
  return num_bytes;
}
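ShiftInstruction() works because, in the shift/rotate opcode group (0xC0/0xC1, 0xD0-0xD3), the ModR/M reg field is an opcode extension selecting the operation rather than naming a register. A table-driven sketch of that decoding (slot 6 is left null here since this disassembler treats it as unimplemented):

// Sketch of the reg-field decoding behind ShiftInstruction().
#include <cstdint>

// Returns the mnemonic selected by the ModR/M reg field, or nullptr for
// the one encoding this disassembler does not handle.
static const char* ShiftMnemonic(uint8_t modrm) {
  static const char* const kMnemonics[8] = {
    "rol", "ror", "rcl", "rcr", "shl", "shr", nullptr, "sar"
  };
  int regop = (modrm >> 3) & 7;  // same extraction as get_modrm()
  return kMnemonics[regop];
}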
|
||||
@ -716,20 +792,14 @@ int DisassemblerX64::FPUInstruction(byte* data) {
|
||||
if (b1 == 0xD9) {
|
||||
const char* mnem = NULL;
|
||||
switch (b2) {
|
||||
case 0xE8:
|
||||
mnem = "fld1";
|
||||
break;
|
||||
case 0xEE:
|
||||
mnem = "fldz";
|
||||
case 0xE0:
|
||||
mnem = "fchs";
|
||||
break;
|
||||
case 0xE1:
|
||||
mnem = "fabs";
|
||||
break;
|
||||
case 0xE0:
|
||||
mnem = "fchs";
|
||||
break;
|
||||
case 0xF8:
|
||||
mnem = "fprem";
|
||||
case 0xE4:
|
||||
mnem = "ftst";
|
||||
break;
|
||||
case 0xF5:
|
||||
mnem = "fprem1";
|
||||
@ -737,8 +807,14 @@ int DisassemblerX64::FPUInstruction(byte* data) {
|
||||
case 0xF7:
|
||||
mnem = "fincstp";
|
||||
break;
|
||||
case 0xE4:
|
||||
mnem = "ftst";
|
||||
case 0xE8:
|
||||
mnem = "fld1";
|
||||
break;
|
||||
case 0xEE:
|
||||
mnem = "fldz";
|
||||
break;
|
||||
case 0xF8:
|
||||
mnem = "fprem";
|
||||
break;
|
||||
}
|
||||
if (mnem != NULL) {
|
||||
@ -862,38 +938,146 @@ int DisassemblerX64::FPUInstruction(byte* data) {
|
||||
return 2;
|
||||
}
|
||||
|
||||
// Mnemonics for instructions 0xF0 byte.
|
||||
|
||||
// Handle all two-byte opcodes, which start with 0x0F.
|
||||
// These instructions may be affected by an 0x66, 0xF2, or 0xF3 prefix.
|
||||
// We do not use any three-byte opcodes, which start with 0x0F38 or 0x0F3A.
|
||||
int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
|
||||
byte opcode = *(data + 1);
|
||||
byte* current = data + 2;
|
||||
// At return, "current" points to the start of the next instruction.
|
||||
const char* mnemonic = TwoByteMnemonic(opcode);
|
||||
if (opcode == 0x1F) {
|
||||
// NOP
|
||||
int mod, regop, rm;
|
||||
get_modrm(*current, &mod, ®op, &rm);
|
||||
current++;
|
||||
if (regop == 4) { // SIB byte present.
|
||||
current++;
|
||||
}
|
||||
if (mod == 1) { // Byte displacement.
|
||||
current += 1;
|
||||
} else if (mod == 2) { // 32-bit displacement.
|
||||
current += 4;
|
||||
} // else no immediate displacement.
|
||||
AppendToBuffer("nop");
|
||||
|
||||
} else if (opcode == 0xA2 || opcode == 0x31) {
|
||||
// RDTSC or CPUID
|
||||
AppendToBuffer("%s", mnemonic);
|
||||
|
||||
} else if ((opcode & 0xF0) == 0x80) {
|
||||
// Jcc: Conditional jump (branch).
|
||||
current = data + JumpConditional(data);
|
||||
|
||||
} else if (opcode == 0xBE || opcode == 0xBF || opcode == 0xB6 ||
|
||||
opcode == 0xB7 || opcode == 0xAF) {
|
||||
// Size-extending moves, IMUL.
|
||||
current += PrintOperands(mnemonic, REG_OPER_OP_ORDER, current);
|
||||
|
||||
} else if ((opcode & 0xF0) == 0x90) {
|
||||
// SETcc: Set byte on condition. Needs pointer to beginning of instruction.
|
||||
current = data + SetCC(data);
|
||||
|
||||
} else if (opcode == 0xAB || opcode == 0xA5 || opcode == 0xAD) {
|
||||
// SHLD, SHRD (double-precision shift), BTS (bit set).
|
||||
AppendToBuffer("%s ", mnemonic);
|
||||
int mod, regop, rm;
|
||||
get_modrm(*current, &mod, ®op, &rm);
|
||||
current += PrintRightOperand(current);
|
||||
if (opcode == 0xAB) {
|
||||
AppendToBuffer(",%s", NameOfCPURegister(regop));
|
||||
} else {
|
||||
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
|
||||
}
|
||||
} else if (group_1_prefix_ == 0xF2) {
|
||||
// Beginning of instructions with prefix 0xF2.
|
||||
|
||||
if (opcode == 0x11 || opcode == 0x10) {
|
||||
// MOVSD: Move scalar double-precision fp to/from/between XMM registers.
|
||||
AppendToBuffer("movsd ");
|
||||
int mod, regop, rm;
|
||||
get_modrm(*current, &mod, ®op, &rm);
|
||||
if (opcode == 0x11) {
|
||||
current += PrintRightOperand(current);
|
||||
AppendToBuffer(",%s", NameOfXMMRegister(regop));
|
||||
} else {
|
||||
AppendToBuffer("%s,", NameOfXMMRegister(regop));
|
||||
current += PrintRightOperand(current);
|
||||
}
|
||||
} else if (opcode == 0x2A) {
|
||||
// CVTSI2SD: integer to XMM double conversion.
|
||||
int mod, regop, rm;
|
||||
get_modrm(*current, &mod, ®op, &rm);
|
||||
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
|
||||
data += PrintRightOperand(data);
|
||||
} else if ((opcode & 0xF8) == 0x58) {
|
||||
// XMM arithmetic. Mnemonic was retrieved at the start of this function.
|
||||
int mod, regop, rm;
|
||||
get_modrm(*current, &mod, ®op, &rm);
|
||||
AppendToBuffer("%s %s,%s", mnemonic, NameOfXMMRegister(regop),
|
||||
NameOfXMMRegister(rm));
|
||||
} else {
|
||||
UnimplementedInstruction();
|
||||
}
|
||||
} else if (opcode == 0x2C && group_1_prefix_ == 0xF3) {
|
||||
// Instruction with prefix 0xF3.
|
||||
|
||||
// CVTTSS2SI: Convert scalar single-precision FP to dword integer.
|
||||
// Assert that mod is not 3, so source is memory, not an XMM register.
|
||||
ASSERT((*current & 0xC0) != 0xC0);
|
||||
current += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, current);
|
||||
} else {
|
||||
UnimplementedInstruction();
|
||||
}
|
||||
return current - data;
|
||||
}
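To make the dispatch above concrete: a group-1 prefix changes which instruction a two-byte opcode names. The following standalone sketch (a hypothetical helper, not part of the diff) shows how 0x0F 0x58 resolves; the packed and scalar-single forms are general x86 knowledge, while the diff above only handles the 0xF2 scalar-double case.

// Sketch only: how a group-1 prefix selects among the 0x0F 0x58 family.
static const char* MnemonicFor0F58(unsigned char group_1_prefix) {
  if (group_1_prefix == 0xF2) return "addsd";  // F2 0F 58: scalar double
  if (group_1_prefix == 0xF3) return "addss";  // F3 0F 58: scalar single
  return "addps";                              // 0F 58: packed single
}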
// Mnemonics for two-byte opcode instructions starting with 0x0F.
// The argument is the second byte of the two-byte opcode.
// Returns NULL if the instruction is not handled here.
static const char* F0Mnem(byte f0byte) {
  switch (f0byte) {
const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
  switch (opcode) {
    case 0x1F:
      return "nop";
    case 0x2A:  // F2 prefix.
      return "cvtsi2sd";
    case 0x31:
      return "rdtsc";
    case 0x58:  // F2 prefix.
      return "addsd";
    case 0x59:  // F2 prefix.
      return "mulsd";
    case 0x5C:  // F2 prefix.
      return "subsd";
    case 0x5E:  // F2 prefix.
      return "divsd";
    case 0xA2:
      return "cpuid";
    case 0xBE:
      return "movsxb";
    case 0xBF:
      return "movsxw";
    case 0xA5:
      return "shld";
    case 0xAB:
      return "bts";
    case 0xAD:
      return "shrd";
    case 0xAF:
      return "imul";
    case 0xB6:
      return "movzxb";
    case 0xB7:
      return "movzxw";
    case 0xAF:
      return "imul";
    case 0xA5:
      return "shld";
    case 0xAD:
      return "shrd";
    case 0xAB:
      return "bts";
    case 0xBE:
      return "movsxb";
    case 0xBF:
      return "movsxw";
    default:
      return NULL;
  }
}

// Disassembled instruction '*instr' and writes it into 'out_buffer'.

// Disassembles the instruction at instr, and writes it into out_buffer.
int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
                                       byte* instr) {
  tmp_buffer_pos_ = 0;  // starting to write as position 0
@@ -905,19 +1089,21 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
  // Scan for prefixes.
  while (true) {
    current = *data;
    if (current == 0x66) {
      setOperandSizePrefix(current);
      data++;
    } else if ((current & 0xF0) == 0x40) {
    if (current == 0x66) {  // Group 3 prefix.
      operand_size_ = current;
    } else if ((current & 0xF0) == 0x40) {  // REX prefix.
      setRex(current);
      if (rex_w()) AppendToBuffer("REX.W ");
      data++;
    } else {
    } else if ((current & 0xFE) == 0xF2) {  // Group 1 prefix.
      group_1_prefix_ = current;
    } else {  // Not a prefix - an opcode.
      break;
    }
    data++;
  }

  const InstructionDesc& idesc = instruction_table.Get(current);
  byte_size_operand_ = idesc.byte_size_operation;
  switch (idesc.type) {
    case ZERO_OPERANDS_INSTR:
      AppendToBuffer(idesc.mnem);
@@ -949,15 +1135,15 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
    case MOVE_REG_INSTR: {
      byte* addr = NULL;
      switch (operand_size()) {
        case 16:
        case WORD_SIZE:
          addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
          data += 3;
          break;
        case 32:
        case DOUBLEWORD_SIZE:
          addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
          data += 5;
          break;
        case 64:
        case QUADWORD_SIZE:
          addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
          data += 9;
          break;
@@ -1012,8 +1198,8 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
        AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop),
                       NameOfCPURegister(rm), imm);
        data += 2 + (*data == 0x6B ? 1 : 4);
      }
      break;
    }

    case 0xF6: {
      int mod, regop, rm;
@@ -1024,63 +1210,16 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
          UnimplementedInstruction();
        }
        data += 3;
      }
      break;
    }

    case 0x81:  // fall through
    case 0x83:  // 0x81 with sign extension bit set
      data += PrintImmediateOp(data);
      break;

    case 0x0F: {
      byte f0byte = *(data + 1);
      const char* f0mnem = F0Mnem(f0byte);
      if (f0byte == 0x1F) {
        data += 1;
        byte modrm = *data;
        data += 1;
        if (((modrm >> 3) & 7) == 4) {
          // SIB byte present.
          data += 1;
        }
        int mod = modrm >> 6;
        if (mod == 1) {
          // Byte displacement.
          data += 1;
        } else if (mod == 2) {
          // 32-bit displacement.
          data += 4;
        }
        AppendToBuffer("nop");
      } else if (f0byte == 0xA2 || f0byte == 0x31) {
        AppendToBuffer("%s", f0mnem);
        data += 2;
      } else if ((f0byte & 0xF0) == 0x80) {
        data += JumpConditional(data);
      } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || f0byte
          == 0xB7 || f0byte == 0xAF) {
        data += 2;
        data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
      } else if ((f0byte & 0xF0) == 0x90) {
        data += SetCC(data);
      } else {
        data += 2;
        if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
          // shrd, shld, bts
          AppendToBuffer("%s ", f0mnem);
          int mod, regop, rm;
          get_modrm(*data, &mod, &regop, &rm);
          data += PrintRightOperand(data);
          if (f0byte == 0xAB) {
            AppendToBuffer(",%s", NameOfCPURegister(regop));
          } else {
            AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
          }
        } else {
          UnimplementedInstruction();
        }
      }
    }
    case 0x0F:
      data += TwoByteOpcodeInstruction(data);
      break;

    case 0x8F: {
@@ -1170,13 +1309,13 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
    case 0x95:
    case 0x96:
    case 0x97: {
      int reg = current & 0x7 | (rex_b() ? 8 : 0);
      int reg = (current & 0x7) | (rex_b() ? 8 : 0);
      if (reg == 0) {
        AppendToBuffer("nop");  // Common name for xchg rax,rax.
      } else {
        AppendToBuffer("xchg%c rax, %s",
                       operand_size_code(),
                       NameOfByteCPURegister(reg));
                       NameOfCPURegister(reg));
      }
    }

@@ -1204,22 +1343,77 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
      data += 2;
      break;

    case 0xA1:  // Fall through.
    case 0xA3:
      switch (operand_size()) {
        case DOUBLEWORD_SIZE: {
          const char* memory_location = NameOfAddress(
              reinterpret_cast<byte*>(
                  *reinterpret_cast<int32_t*>(data + 1)));
          if (*data == 0xA3) {  // Opcode 0xA3
            AppendToBuffer("movzxlq rax,(%s)", memory_location);
          } else {  // Opcode 0xA1
            AppendToBuffer("movzxlq (%s),rax", memory_location);
          }
          data += 5;
          break;
        }
        case QUADWORD_SIZE: {
          // New x64 instruction mov rax,(imm_64).
          const char* memory_location = NameOfAddress(
              *reinterpret_cast<byte**>(data + 1));
          if (*data == 0xA3) {  // Opcode 0xA3
            AppendToBuffer("movq rax,(%s)", memory_location);
          } else {  // Opcode 0xA1
            AppendToBuffer("movq (%s),rax", memory_location);
          }
          data += 9;
          break;
        }
        default:
          UnimplementedInstruction();
          data += 2;
      }
      break;

    case 0xA8:
      AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
      data += 2;
      break;

    case 0xA9:
      AppendToBuffer("test%c rax,0x%x",  // CHECKME!
    case 0xA9: {
      int64_t value = 0;
      switch (operand_size()) {
        case WORD_SIZE:
          value = *reinterpret_cast<uint16_t*>(data + 1);
          data += 3;
          break;
        case DOUBLEWORD_SIZE:
          value = *reinterpret_cast<uint32_t*>(data + 1);
          data += 5;
          break;
        case QUADWORD_SIZE:
          value = *reinterpret_cast<int32_t*>(data + 1);
          data += 5;
          break;
        default:
          UNREACHABLE();
      }
      AppendToBuffer("test%c rax,0x%"V8_PTR_PREFIX"ux",
                     operand_size_code(),
                     *reinterpret_cast<int32_t*>(data + 1));
      data += 5;
                     value);
      break;

    }
    case 0xD1:  // fall through
    case 0xD3:  // fall through
    case 0xC1:
      data += D1D3C1Instruction(data);
      data += ShiftInstruction(data);
      break;
    case 0xD0:  // fall through
    case 0xD2:  // fall through
    case 0xC0:
      byte_size_operand_ = true;
      data += ShiftInstruction(data);
      break;

    case 0xD9:  // fall through
@@ -1236,73 +1430,13 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
      data += JumpShort(data);
      break;

    case 0xF2:
      if (*(data + 1) == 0x0F) {
        byte b2 = *(data + 2);
        if (b2 == 0x11) {
          AppendToBuffer("movsd ");
          data += 3;
          int mod, regop, rm;
          get_modrm(*data, &mod, &regop, &rm);
          data += PrintRightOperand(data);
          AppendToBuffer(",%s", NameOfXMMRegister(regop));
        } else if (b2 == 0x10) {
          data += 3;
          int mod, regop, rm;
          get_modrm(*data, &mod, &regop, &rm);
          AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
          data += PrintRightOperand(data);
        } else {
          const char* mnem = "?";
          switch (b2) {
            case 0x2A:
              mnem = "cvtsi2sd";
              break;
            case 0x58:
              mnem = "addsd";
              break;
            case 0x59:
              mnem = "mulsd";
              break;
            case 0x5C:
              mnem = "subsd";
              break;
            case 0x5E:
              mnem = "divsd";
              break;
          }
          data += 3;
          int mod, regop, rm;
          get_modrm(*data, &mod, &regop, &rm);
          if (b2 == 0x2A) {
            AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
            data += PrintRightOperand(data);
          } else {
            AppendToBuffer("%s %s,%s", mnem, NameOfXMMRegister(regop),
                           NameOfXMMRegister(rm));
            data++;
          }
        }
      } else {
        UnimplementedInstruction();
      }
      break;

    case 0xF3:
      if (*(data + 1) == 0x0F && *(data + 2) == 0x2C) {
        data += 3;
        data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
      } else {
        UnimplementedInstruction();
      }
      break;

    case 0xF7:
      data += F7Instruction(data);
      break;

    default:
      UnimplementedInstruction();
      data += 1;
  }
}  // !processed
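The reworked prefix scan in InstructionDecode above can be read in isolation. A compilable sketch of the same loop, using hypothetical names that are not part of the diff:

#include <cstdint>

struct Prefixes {
  uint8_t operand_size;  // 0x66 if present (group 3)
  uint8_t group_1;       // 0xF2 or 0xF3 if present
  uint8_t rex;           // 0x40..0x4F if present
};

// Consumes prefix bytes and returns a pointer to the opcode byte,
// mirroring the loop in InstructionDecode.
static const uint8_t* ScanPrefixes(const uint8_t* p, Prefixes* out) {
  for (;;) {
    uint8_t b = *p;
    if (b == 0x66) {
      out->operand_size = b;   // Group 3 prefix.
    } else if ((b & 0xF0) == 0x40) {
      out->rex = b;            // REX prefix.
    } else if ((b & 0xFE) == 0xF2) {
      out->group_1 = b;        // Group 1 prefix (0xF2 or 0xF3).
    } else {
      break;                   // Not a prefix - an opcode.
    }
    p++;
  }
  return p;
}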
625
deps/v8/src/x64/ic-x64.cc
vendored
@@ -42,16 +42,181 @@ namespace internal {
#define __ ACCESS_MASM(masm)


void KeyedLoadIC::ClearInlinedVersion(Address address) {
  UNIMPLEMENTED();
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
// This function is safe to call if the receiver has fast properties,
// or if name is not a symbol, and will jump to the miss_label in that case.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
                                   Register r0, Register r1, Register r2,
                                   Register name) {
  // Register use:
  //
  // r0   - used to hold the property dictionary.
  //
  // r1   - initially the receiver
  //      - used for the index into the property dictionary
  //      - holds the result on exit.
  //
  // r2   - used to hold the capacity of the property dictionary.
  //
  // name - holds the name of the property and is unchanged.

  Label done;

  // Check for the absence of an interceptor.
  // Load the map into r0.
  __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
  // Test the has_named_interceptor bit in the map.
  __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
           Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));

  // Jump to miss if the interceptor bit is set.
  __ j(not_zero, miss_label);

  // Bail out if we have a JS global proxy object.
  __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
  __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
  __ j(equal, miss_label);

  // Possible work-around for http://crbug.com/16276.
  __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
  __ j(equal, miss_label);
  __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
  __ j(equal, miss_label);

  // Check that the properties array is a dictionary.
  __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
  __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
  __ j(not_equal, miss_label);

  // Compute the capacity mask.
  const int kCapacityOffset =
      StringDictionary::kHeaderSize +
      StringDictionary::kCapacityIndex * kPointerSize;
  __ movq(r2, FieldOperand(r0, kCapacityOffset));
  __ shrl(r2, Immediate(kSmiTagSize));  // convert smi to int
  __ decl(r2);

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  static const int kProbes = 4;
  const int kElementsStartOffset =
      StringDictionary::kHeaderSize +
      StringDictionary::kElementsStartIndex * kPointerSize;
  for (int i = 0; i < kProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ movl(r1, FieldOperand(name, String::kLengthOffset));
    __ shrl(r1, Immediate(String::kHashShift));
    if (i > 0) {
      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(r1, r2);

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3

    // Check if the key is identical to the name.
    __ cmpq(name, Operand(r0, r1, times_pointer_size,
                          kElementsStartOffset - kHeapObjectTag));
    if (i != kProbes - 1) {
      __ j(equal, &done);
    } else {
      __ j(not_equal, miss_label);
    }
  }

  // Check that the value is a normal property.
  __ bind(&done);
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
           Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
  __ j(not_zero, miss_label);

  // Get the value at the masked, scaled index.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ movq(r1,
          Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
}
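The unrolled loop above implements quadratic probing over the dictionary's entry table. In scalar form, the index computed on each iteration is roughly the following; the exact shape of GetProbeOffset is an assumption here, taken from the "(hash + i + i * i) & mask" comment in the generated code.

#include <stdint.h>

// Scalar sketch of the masked probe index used by the unrolled loop:
// capacity is a power of two, so capacity - 1 is the mask.
static uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
  uint32_t mask = capacity - 1;
  return (hash + i + i * i) & mask;  // then scaled by the entry size (3)
}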
// Helper function used to check that a value is either not an object
// or is loaded if it is an object.
static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
                                           Register value) {
  Label done;
  // Check if the value is a Smi.
  __ testl(value, Immediate(kSmiTagMask));
  __ j(zero, &done);
  // Check if the object has been loaded.
  __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
  __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
           Immediate(1 << Map::kNeedsLoading));
  __ j(not_zero, miss);
  __ bind(&done);
}


// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;


static bool PatchInlinedMapCheck(Address address, Object* map) {
  // Arguments are address of start of call sequence that called
  // the IC,
  Address test_instruction_address =
      address + Assembler::kTargetAddrToReturnAddrDist;
  // The keyed load has a fast inlined case if the IC call instruction
  // is immediately followed by a test instruction.
  if (*test_instruction_address != kTestEaxByte) return false;

  // Fetch the offset from the test instruction to the map compare
  // instructions (starting with the 64-bit immediate mov of the map
  // address). This offset is stored in the last 4 bytes of the 5
  // byte test instruction.
  Address delta_address = test_instruction_address + 1;
  int delta = *reinterpret_cast<int*>(delta_address);
  // Compute the map address. The map address is in the last 8 bytes
  // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
  // to the offset to get the map address.
  Address map_address = test_instruction_address + delta + 2;
  // Patch the map check.
  *(reinterpret_cast<Object**>(map_address)) = map;
  return true;
}
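PatchInlinedMapCheck depends on a fixed code shape at the call site: the IC call is followed by a "test eax, imm32" whose immediate is the distance to a 10-byte REX.W mov holding the map pointer. The same logic, written as a plain-C sketch (hypothetical names, memcpy used to sidestep alignment concerns):

#include <string.h>

typedef unsigned char byte_t;

// Conceptual mirror of PatchInlinedMapCheck. Layout assumed:
//   0xA9 imm32          test eax, <delta>   ; delta locates the map mov
//   ...
//   REX.W B8+r imm64    10 bytes; the last 8 are the map pointer
static int PatchMapWord(byte_t* return_address, void* new_map) {
  if (*return_address != 0xA9) return 0;   // no inlined fast case
  int delta;
  memcpy(&delta, return_address + 1, 4);   // imm32 of the test instruction
  // Skip the REX prefix and opcode (2 bytes) to reach the 8-byte immediate.
  memcpy(return_address + delta + 2, &new_map, 8);
  return 1;
}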
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
  return PatchInlinedMapCheck(address, map);
}


bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
  return PatchInlinedMapCheck(address, map);
}


void KeyedLoadIC::ClearInlinedVersion(Address address) {
  // Insert null as the map to check for to make sure the map check fails
  // sending control flow to the IC instead of the inlined version.
  PatchInlinedLoad(address, Heap::null_value());
}


void KeyedStoreIC::ClearInlinedVersion(Address address) {
  UNIMPLEMENTED();
  // Insert null as the elements map to check for. This will make
  // sure that the elements fast-case map check fails so that control
  // flows to the IC instead of the inlined version.
  PatchInlinedStore(address, Heap::null_value());
}


void KeyedStoreIC::RestoreInlinedVersion(Address address) {
  UNIMPLEMENTED();
  // Restore the fast-case elements map check so that the inlined
  // version can be used again.
  PatchInlinedStore(address, Heap::fixed_array_map());
}


@@ -65,83 +230,133 @@ void KeyedLoadIC::Generate(MacroAssembler* masm,

  __ movq(rax, Operand(rsp, kPointerSize));
  __ movq(rcx, Operand(rsp, 2 * kPointerSize));

  // Move the return address below the arguments.
  __ pop(rbx);
  __ push(rcx);
  __ push(rax);
  __ push(rbx);
  __ push(rcx);  // receiver
  __ push(rax);  // name
  __ push(rbx);  // return address

  // Perform tail call to the entry.
  __ TailCallRuntime(f, 2);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC0AB));  // Debugging aid.
#ifdef DEBUG
// For use in assert below.
static int TenToThe(int exponent) {
  ASSERT(exponent <= 9);
  ASSERT(exponent >= 1);
  int answer = 10;
  for (int i = 1; i < exponent; i++) answer *= 10;
  return answer;
}
#endif


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rsp[0] : return address
  //  -- rsp[8] : name
  //  -- rsp[16] : receiver
  // -----------------------------------
  Label slow, fast, check_string, index_int, index_string;

  // Load name and receiver.
  __ movq(rax, Operand(rsp, kPointerSize));
  __ movq(rcx, Operand(rsp, 2 * kPointerSize));

  // Check that the object isn't a smi.
  __ testl(rcx, Immediate(kSmiTagMask));
  __ j(zero, &slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing
  // into string objects work as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
  __ j(below, &slow);
  // Check that the receiver does not require access checks.  We need
  // to check this explicitly since this generic stub does not perform
  // map checks.  The map is already in rdx.
  __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsAccessCheckNeeded));
  __ j(not_zero, &slow);

  // Check that the key is a smi.
  __ testl(rax, Immediate(kSmiTagMask));
  __ j(not_zero, &check_string);
  __ sarl(rax, Immediate(kSmiTagSize));
  // Get the elements array of the object.
  __ bind(&index_int);
  __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
  // Check that the object is in fast mode (not dictionary).
  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
  __ j(not_equal, &slow);
  // Check that the key (index) is within bounds.
  __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
  __ j(below, &fast);  // Unsigned comparison rejects negative indices.
  // Slow case: Load name and receiver from stack and jump to runtime.
  __ bind(&slow);
  __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
  KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
  __ bind(&check_string);
  // The key is not a smi.
  // Is it a string?
  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
  __ j(above_equal, &slow);
  // Is the string an array index, with cached numeric value?
  __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
  __ testl(rbx, Immediate(String::kIsArrayIndexMask));

  // If the string is a symbol, do a quick inline probe of the receiver's
  // dictionary, if it exists.
  __ j(not_zero, &index_string);  // The value in rbx is used at jump target.
  __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
           Immediate(kIsSymbolMask));
  __ j(zero, &slow);
  // Probe the dictionary leaving result in ecx.
  GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
  GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
  __ movq(rax, rcx);
  __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
  __ ret(0);
  // Array index string: If short enough use cache in length/hash field (ebx).
  // We assert that there are enough bits in an int32_t after the hash shift
  // bits have been subtracted to allow space for the length and the cached
  // array index.
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
         (1 << (String::kShortLengthShift - String::kHashShift)));
  __ bind(&index_string);
  const int kLengthFieldLimit =
      (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
  __ cmpl(rbx, Immediate(kLengthFieldLimit));
  __ j(above_equal, &slow);
  __ movl(rax, rbx);
  __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
  __ shrl(rax, Immediate(String::kLongLengthShift));
  __ jmp(&index_int);
  // Fast case: Do the load.
  __ bind(&fast);
  __ movq(rax, Operand(rcx, rax, times_pointer_size,
                       FixedArray::kHeaderSize - kHeapObjectTag));
  __ Cmp(rax, Factory::the_hole_value());
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ j(equal, &slow);
  __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
  __ ret(0);
}
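The smi checks above (testl against kSmiTagMask, then sarl by kSmiTagSize) rely on the tagging scheme this V8 used on x64 as well as ia32: tag value 0 in the low bit, tag size 1. A sketch of that arithmetic, under those assumptions:

#include <stdint.h>

// Assumes kSmiTag == 0 and kSmiTagSize == 1, matching the testl/sarl
// pairs in the stub above.
static int IsSmi(int32_t tagged) { return (tagged & 1) == 0; }
static int32_t SmiToInt(int32_t tagged) { return tagged >> 1; }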
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC1AB));  // Debugging aid.
  // ----------- S t a t e -------------
  //  -- rsp[0] : return address
  //  -- rsp[8] : name
  //  -- rsp[16] : receiver
  // -----------------------------------
  Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}

bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
  UNIMPLEMENTED();
  return false;
}

bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
  UNIMPLEMENTED();
  return false;
}

Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
  UNIMPLEMENTED();
  return NULL;
}

Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
                                                   JSObject* object,
                                                   JSObject* holder,
                                                   AccessorInfo* callback) {
  UNIMPLEMENTED();
  return NULL;
}

Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
                                                   JSObject* object,
                                                   JSObject* holder,
                                                   Object* callback) {
  UNIMPLEMENTED();
  return NULL;
}

Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
                                                JSObject* object,
                                                JSObject* holder,
                                                int index) {
  UNIMPLEMENTED();
  return NULL;
}

Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
  UNIMPLEMENTED();
  return NULL;
}

Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
                                                      JSObject* holder,
                                                      String* name) {
  UNIMPLEMENTED();
  return NULL;
}

Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
  UNIMPLEMENTED();
  return NULL;
}

void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
  // ----------- S t a t e -------------
@@ -151,33 +366,148 @@ void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
  //  -- rsp[16] : receiver
  // -----------------------------------

  // Move the return address below the arguments.
  __ pop(rcx);
  __ push(Operand(rsp, 1 * kPointerSize));
  __ push(Operand(rsp, 1 * kPointerSize));
  __ push(rax);
  __ push(rcx);
  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
  __ push(Operand(rsp, 1 * kPointerSize));  // key
  __ push(rax);  // value
  __ push(rcx);  // return address

  // Do tail-call to runtime routine.
  __ TailCallRuntime(f, 3);
}


void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC2AB));  // Debugging aid.
  // ----------- S t a t e -------------
  //  -- rax     : value
  //  -- rcx     : transition map
  //  -- rsp[0]  : return address
  //  -- rsp[8]  : key
  //  -- rsp[16] : receiver
  // -----------------------------------

  __ pop(rbx);
  __ push(Operand(rsp, 1 * kPointerSize));  // receiver
  __ push(rcx);  // transition map
  __ push(rax);  // value
  __ push(rbx);  // return address

  // Do tail-call to runtime routine.
  __ TailCallRuntime(
      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC3AB));  // Debugging aid.
}
  // ----------- S t a t e -------------
  //  -- rax     : value
  //  -- rsp[0]  : return address
  //  -- rsp[8]  : key
  //  -- rsp[16] : receiver
  // -----------------------------------
  Label slow, fast, array, extra;

Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
                                                  int index,
                                                  Map* transition,
                                                  String* name) {
  UNIMPLEMENTED();
  return NULL;
  // Get the receiver from the stack.
  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // 2 ~ return address, key
  // Check that the object isn't a smi.
  __ testl(rdx, Immediate(kSmiTagMask));
  __ j(zero, &slow);
  // Get the map from the receiver.
  __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.  We need
  // to do this because this generic stub does not perform map checks.
  __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsAccessCheckNeeded));
  __ j(not_zero, &slow);
  // Get the key from the stack.
  __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // 1 ~ return address
  // Check that the key is a smi.
  __ testl(rbx, Immediate(kSmiTagMask));
  __ j(not_zero, &slow);

  __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
  __ j(equal, &array);
  // Check that the object is some kind of JS object.
  __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE);
  __ j(below, &slow);

  // Object case: Check key against length in the elements array.
  // rax: value
  // rdx: JSObject
  // rbx: index (as a smi)
  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
  // Check that the object is in fast mode (not dictionary).
  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
  __ j(not_equal, &slow);
  // Untag the key (for checking against untagged length in the fixed array).
  __ movl(rdx, rbx);
  __ sarl(rdx, Immediate(kSmiTagSize));
  __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
  // rax: value
  // rcx: FixedArray
  // rbx: index (as a smi)
  __ j(below, &fast);


  // Slow case: Push extra copies of the arguments (3).
  __ bind(&slow);
  __ pop(rcx);
  __ push(Operand(rsp, 1 * kPointerSize));
  __ push(Operand(rsp, 1 * kPointerSize));
  __ push(rax);
  __ push(rcx);
  // Do tail-call to runtime routine.
  __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);


  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // rax: value
  // rdx: JSArray
  // rcx: FixedArray
  // rbx: index (as a smi)
  // flags: compare (rbx, rdx.length())
  __ j(not_equal, &slow);  // do not leave holes in the array
  __ sarl(rbx, Immediate(kSmiTagSize));  // untag
  __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
  __ j(above_equal, &slow);
  // Restore tag and increment.
  __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
  __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
  __ subl(rbx, Immediate(1 << kSmiTagSize));  // decrement rbx again
  __ jmp(&fast);


  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode; if it is the
  // length is always a smi.
  __ bind(&array);
  // rax: value
  // rdx: JSArray
  // rbx: index (as a smi)
  __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
  __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
  __ j(not_equal, &slow);

  // Check the key against the length in the array, compute the
  // address to store into and fall through to fast case.
  __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
  __ j(above_equal, &extra);


  // Fast case: Do the store.
  __ bind(&fast);
  // rax: value
  // rcx: FixedArray
  // rbx: index (as a smi)
  __ movq(Operand(rcx, rbx, times_4, FixedArray::kHeaderSize - kHeapObjectTag),
          rax);
  // Update write barrier for the elements array address.
  __ movq(rdx, rax);
  __ RecordWrite(rcx, 0, rdx, rbx);
  __ ret(0);
}
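The generic keyed store stub splits into four cases (fast, extra, array, slow). The same control flow, written against imaginary in-memory types that stand in for the real heap objects, looks like this:

#include <stddef.h>

// Conceptual mirror of KeyedStoreIC::GenerateGeneric's case analysis.
struct Elements { size_t length; void** slots; size_t capacity; };
struct ArrayObj { Elements* elements; size_t length; bool is_array; };

enum StoreResult { STORED, GO_SLOW };

static StoreResult KeyedStore(ArrayObj* obj, size_t index, void* value) {
  Elements* e = obj->elements;
  if (index < e->length) {            // fast: in-bounds store
    e->slots[index] = value;          // (plus a write barrier in V8)
    return STORED;
  }
  if (obj->is_array && index == obj->length && index < e->capacity) {
    obj->length = index + 1;          // extra: grow the length by one
    e->slots[index] = value;
    return STORED;
  }
  return GO_SLOW;                     // dictionary mode, OOB, etc.
}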
@@ -228,20 +558,27 @@ void CallIC::Generate(MacroAssembler* masm,
  __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
}

void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
  UNIMPLEMENTED();
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  // Cache miss: Jump to runtime.
  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}

void CallIC::GenerateNormal(MacroAssembler* a, int b) {
  UNIMPLEMENTED();
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  // Cache miss: Jump to runtime.
  Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}


// The offset from the inlined patch site to the start of the
// inlined load instruction.
const int LoadIC::kOffsetToLoadInstruction = 20;


void LoadIC::ClearInlinedVersion(Address address) {
  UNIMPLEMENTED();
  // Reset the map check of the inlined inobject property load (if
  // present) to guarantee failure by holding an invalid map (the null
  // value).  The offset can be patched to anything.
  PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
}


@@ -254,11 +591,10 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {

  __ movq(rax, Operand(rsp, kPointerSize));

  // Move the return address below the arguments.
  __ pop(rbx);
  __ push(rax);
  __ push(rcx);
  __ push(rbx);
  __ push(rax);  // receiver
  __ push(rcx);  // name
  __ push(rbx);  // return address

  // Perform tail call to the entry.
  __ TailCallRuntime(f, 2);
@@ -266,38 +602,79 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {


void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC4AB));  // Debugging aid.
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}

void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC5AB));  // Debugging aid.
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC6AB));  // Debugging aid.
  // ----------- S t a t e -------------
  //  -- rcx    : name
  //  -- rsp[0] : return address
  //  -- rsp[8] : receiver
  // -----------------------------------

  __ movq(rax, Operand(rsp, kPointerSize));

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC);
  StubCache::GenerateProbe(masm, flags, rax, rcx, rbx, rdx);

  // Cache miss: Jump to runtime.
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC7AB));  // Debugging aid.
  // ----------- S t a t e -------------
  //  -- rcx    : name
  //  -- rsp[0] : return address
  //  -- rsp[8] : receiver
  // -----------------------------------

  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC8AB));  // Debugging aid.
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}


void LoadIC::GenerateStringLength(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xC9AB));  // Debugging aid.
  Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}

bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
  UNIMPLEMENTED();
  return false;

bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
  // The address of the instruction following the call.
  Address test_instruction_address =
      address + Assembler::kTargetAddrToReturnAddrDist;
  // If the instruction following the call is not a test eax, nothing
  // was inlined.
  if (*test_instruction_address != kTestEaxByte) return false;

  Address delta_address = test_instruction_address + 1;
  // The delta to the start of the map check instruction.
  int delta = *reinterpret_cast<int*>(delta_address);

  // The map address is the last 8 bytes of the 10-byte
  // immediate move instruction, so we add 2 to get the
  // offset to the last 8 bytes.
  Address map_address = test_instruction_address + delta + 2;
  *(reinterpret_cast<Object**>(map_address)) = map;

  // The offset is in the 32-bit displacement of a seven byte
  // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
  // so we add 3 to get the offset of the displacement.
  Address offset_address =
      test_instruction_address + delta + kOffsetToLoadInstruction + 3;
  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
  return true;
}

void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
@@ -307,25 +684,37 @@ void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
  //  -- rsp[0] : return address
  //  -- rsp[8] : receiver
  // -----------------------------------
  // Move the return address below the arguments.
  __ pop(rbx);
  __ push(Operand(rsp, 0));
  __ push(rcx);
  __ push(rax);
  __ push(rbx);
  __ push(Operand(rsp, 0));  // receiver
  __ push(rcx);  // name
  __ push(rax);  // value
  __ push(rbx);  // return address

  // Perform tail call to the entry.
  __ TailCallRuntime(f, 3);
}

void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xCAAB));  // Debugging aid.
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rcx    : Map (target of map transition)
  //  -- rsp[0] : return address
  //  -- rsp[8] : receiver
  // -----------------------------------

  __ pop(rbx);
  __ push(Operand(rsp, 0));  // receiver
  __ push(rcx);  // transition map
  __ push(rax);  // value
  __ push(rbx);  // return address

  // Perform tail call to the entry.
  __ TailCallRuntime(
      ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
}

void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  masm->int3();  // UNIMPLEMENTED.
  masm->movq(kScratchRegister, Immediate(0xCBAB));  // Debugging aid.
  Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
}


173
deps/v8/src/x64/macro-assembler-x64.cc
vendored
@@ -71,9 +71,9 @@ void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testq(result, result);
  testl(result, result);
  j(not_zero, &ok);
  testq(op, op);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}
@@ -151,6 +151,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {

void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
                                     int num_arguments) {
  // ----------- S t a t e -------------
  //  -- rsp[0] : return address
  //  -- rsp[8] : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
@@ -311,6 +318,17 @@ void MacroAssembler::Push(Handle<Object> source) {
}


void MacroAssembler::Push(Smi* source) {
  if (IsUnsafeSmi(source)) {
    LoadUnsafeSmi(kScratchRegister, source);
    push(kScratchRegister);
  } else {
    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
    push(Immediate(smi));
  }
}
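Push(Smi*) distinguishes smis whose tagged value fits a 32-bit immediate from "unsafe" ones that must be materialized in a register first. A sketch of the safe path's cast chain, with Smi as a hypothetical stand-in for the real heap type:

#include <stdint.h>

// The safe path of Push(Smi*): a tagged smi pointer whose value fits in
// 32 bits can be pushed as an immediate.
struct Smi;
static int32_t SmiAsImmediate(Smi* source) {
  intptr_t tagged = reinterpret_cast<intptr_t>(source);
  return static_cast<int32_t>(tagged);  // assumed to fit; else use a register
}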
void MacroAssembler::Jump(ExternalReference ext) {
  movq(kScratchRegister, ext);
  jmp(kScratchRegister);
@@ -356,6 +374,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  movq(kScratchRegister, code_object, rmode);
#ifdef DEBUG
  // Patch target is kPointer size bytes *before* target label.
  Label target;
  bind(&target);
#endif
@@ -882,4 +901,154 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
}


Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                   JSObject* holder, Register holder_reg,
                                   Register scratch,
                                   Label* miss) {
  // Make sure there's no overlap between scratch and the other
  // registers.
  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));

  // Keep track of the current object in register reg.  On the first
  // iteration, reg is an alias for object_reg, on later iterations,
  // it is an alias for holder_reg.
  Register reg = object_reg;
  int depth = 1;

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  while (object != holder) {
    depth++;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

    JSObject* prototype = JSObject::cast(object->GetPrototype());
    if (Heap::InNewSpace(prototype)) {
      // Get the map of the current object.
      movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
      Cmp(scratch, Handle<Map>(object->map()));
      // Branch on the result of the map check.
      j(not_equal, miss);
      // Check access rights to the global object.  This has to happen
      // after the map check so that we know that the object is
      // actually a global object.
      if (object->IsJSGlobalProxy()) {
        CheckAccessGlobalProxy(reg, scratch, miss);

        // Restore scratch register to be the map of the object.
        // We load the prototype from the map in the scratch register.
        movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
      }
      // The prototype is in new space; we cannot store a reference
      // to it in the code.  Load it from the map.
      reg = holder_reg;  // from now the object is in holder_reg
      movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));

    } else {
      // Check the map of the current object.
      Cmp(FieldOperand(reg, HeapObject::kMapOffset),
          Handle<Map>(object->map()));
      // Branch on the result of the map check.
      j(not_equal, miss);
      // Check access rights to the global object.  This has to happen
      // after the map check so that we know that the object is
      // actually a global object.
      if (object->IsJSGlobalProxy()) {
        CheckAccessGlobalProxy(reg, scratch, miss);
      }
      // The prototype is in old space; load it directly.
      reg = holder_reg;  // from now the object is in holder_reg
      Move(reg, Handle<JSObject>(prototype));
    }

    // Go to the next object in the prototype chain.
    object = prototype;
  }

  // Check the holder map.
  Cmp(FieldOperand(reg, HeapObject::kMapOffset),
      Handle<Map>(holder->map()));
  j(not_equal, miss);

  // Log the check depth.
  LOG(IntEvent("check-maps-depth", depth));

  // Perform security check for access to the global object and return
  // the holder register.
  ASSERT(object == holder);
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
  if (object->IsJSGlobalProxy()) {
    CheckAccessGlobalProxy(reg, scratch, miss);
  }
  return reg;
}
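CheckMaps emits one compare per prototype link at compile time. The runtime-level loop it unrolls is essentially the following (hypothetical types; the holder is assumed to be on the chain, which the asserts above guarantee):

// Conceptual form of the check sequence CheckMaps generates: compare the
// map of each object on the chain against the map recorded at compile time.
struct Obj { const void* map; Obj* prototype; };

static bool ChainMatches(Obj* object, Obj* holder,
                         const void* const* expected_maps) {
  int depth = 0;
  for (Obj* o = object; ; o = o->prototype, depth++) {
    if (o->map != expected_maps[depth]) return false;  // -> miss label
    if (o == holder) return true;
  }
}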
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!scratch.is(kScratchRegister));
  // Load current lexical context from the stack frame.
  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (FLAG_debug_code) {
    cmpq(scratch, Immediate(0));
    Check(not_equal, "we should not have an empty lexical context");
  }
  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, offset));
  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check the context is a global context.
  if (FLAG_debug_code) {
    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
        Factory::global_context_map());
    Check(equal, "JSGlobalObject::global_context should be a global context.");
  }

  // Check if both contexts are the same.
  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens.
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.

  // Check the context is a global context.
  if (FLAG_debug_code) {
    // Preserve original value of holder_reg.
    push(holder_reg);
    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
    Cmp(holder_reg, Factory::null_value());
    Check(not_equal, "JSGlobalProxy::context() should not be null.");

    // Read the first word and compare to global_context_map(),
    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
    Cmp(holder_reg, Factory::global_context_map());
    Check(equal, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);
  }

  movq(kScratchRegister,
       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, token_offset));
  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}


} }  // namespace v8::internal

6
deps/v8/src/x64/macro-assembler-x64.h
vendored
@@ -164,6 +164,7 @@ class MacroAssembler: public Assembler {
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Push(Handle<Object> source);
  void Push(Smi* smi);

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
@@ -175,11 +176,13 @@ class MacroAssembler: public Assembler {
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // FCmp is similar to integer cmp, but requires unsigned
@@ -212,7 +215,8 @@ class MacroAssembler: public Assembler {

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register is clobbered.
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

8
deps/v8/src/x64/register-allocator-x64-inl.h
vendored
@@ -46,7 +46,7 @@ bool RegisterAllocator::IsReserved(Register reg) {
// non-reserved assembler registers.
int RegisterAllocator::ToNumber(Register reg) {
  ASSERT(reg.is_valid() && !IsReserved(reg));
  static const int numbers[] = {
  const int kNumbers[] = {
    0,   // rax
    2,   // rcx
    3,   // rdx
@@ -64,15 +64,15 @@ int RegisterAllocator::ToNumber(Register reg) {
    8,   // r14
    9    // r15
  };
  return numbers[reg.code()];
  return kNumbers[reg.code()];
}


Register RegisterAllocator::ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  static Register registers[] =
  const Register kRegisters[] =
      { rax, rbx, rcx, rdx, rdi, r8, r9, r11, r14, r15, r13, r12 };
  return registers[num];
  return kRegisters[num];
}


1690
deps/v8/src/x64/stub-cache-x64.cc
vendored
File diff suppressed because it is too large

217
deps/v8/src/zone-inl.h
vendored
@@ -68,6 +68,223 @@ void Zone::adjust_segment_bytes_allocated(int delta) {
}


template <typename C>
bool ZoneSplayTree<C>::Insert(const Key& key, Locator* locator) {
  if (is_empty()) {
    // If the tree is empty, insert the new node.
    root_ = new Node(key, C::kNoValue);
  } else {
    // Splay on the key to move the last node on the search path
    // for the key to the root of the tree.
    Splay(key);
    // Ignore repeated insertions with the same key.
    int cmp = C::Compare(key, root_->key_);
    if (cmp == 0) {
      locator->bind(root_);
      return false;
    }
    // Insert the new node.
    Node* node = new Node(key, C::kNoValue);
    if (cmp > 0) {
      node->left_ = root_;
      node->right_ = root_->right_;
      root_->right_ = NULL;
    } else {
      node->right_ = root_;
      node->left_ = root_->left_;
      root_->left_ = NULL;
    }
    root_ = node;
  }
  locator->bind(root_);
  return true;
}
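A hypothetical Config and call sequence for this class (the Locator accessors and a default-constructible Locator are assumed from the surrounding header; the names here are illustrative only):

// Illustrative only: a minimal integer-keyed configuration.
struct IntTreeConfig {
  typedef int Key;
  typedef int Value;
  static const int kNoKey = -1;
  static const int kNoValue = 0;
  static int Compare(int a, int b) {
    return a < b ? -1 : (a > b ? 1 : 0);
  }
};

static void SplayTreeExample() {
  ZoneSplayTree<IntTreeConfig> tree;
  ZoneSplayTree<IntTreeConfig>::Locator loc;
  if (tree.Insert(42, &loc)) {
    // A fresh node was created; the locator now points at it.
  }
  if (tree.Find(42, &loc)) {
    // loc is bound to the node for key 42; the key is now at the root.
  }
}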
template <typename C>
bool ZoneSplayTree<C>::Find(const Key& key, Locator* locator) {
  if (is_empty())
    return false;
  Splay(key);
  if (C::Compare(key, root_->key_) == 0) {
    locator->bind(root_);
    return true;
  } else {
    return false;
  }
}


template <typename C>
bool ZoneSplayTree<C>::FindGreatestLessThan(const Key& key,
                                            Locator* locator) {
  if (is_empty())
    return false;
  // Splay on the key to move the node with the given key or the last
  // node on the search path to the top of the tree.
  Splay(key);
  // Now the result is either the root node or the greatest node in
  // the left subtree.
  int cmp = C::Compare(root_->key_, key);
  if (cmp <= 0) {
    locator->bind(root_);
    return true;
  } else {
    Node* temp = root_;
    root_ = root_->left_;
    bool result = FindGreatest(locator);
    root_ = temp;
    return result;
  }
}


template <typename C>
bool ZoneSplayTree<C>::FindLeastGreaterThan(const Key& key,
                                            Locator* locator) {
  if (is_empty())
    return false;
  // Splay on the key to move the node with the given key or the last
  // node on the search path to the top of the tree.
  Splay(key);
  // Now the result is either the root node or the least node in
  // the right subtree.
  int cmp = C::Compare(root_->key_, key);
  if (cmp >= 0) {
    locator->bind(root_);
    return true;
  } else {
    Node* temp = root_;
    root_ = root_->right_;
    bool result = FindLeast(locator);
    root_ = temp;
    return result;
  }
}


template <typename C>
bool ZoneSplayTree<C>::FindGreatest(Locator* locator) {
  if (is_empty())
    return false;
  Node* current = root_;
  while (current->right_ != NULL)
    current = current->right_;
  locator->bind(current);
  return true;
}


template <typename C>
bool ZoneSplayTree<C>::FindLeast(Locator* locator) {
  if (is_empty())
    return false;
  Node* current = root_;
  while (current->left_ != NULL)
    current = current->left_;
  locator->bind(current);
  return true;
}


template <typename C>
bool ZoneSplayTree<C>::Remove(const Key& key) {
  // Bail if the tree is empty
  if (is_empty())
    return false;
  // Splay on the key to move the node with the given key to the top.
  Splay(key);
  // Bail if the key is not in the tree
  if (C::Compare(key, root_->key_) != 0)
    return false;
  if (root_->left_ == NULL) {
    // No left child, so the new tree is just the right child.
    root_ = root_->right_;
  } else {
    // Left child exists.
    Node* right = root_->right_;
    // Make the original left child the new root.
    root_ = root_->left_;
    // Splay to make sure that the new root has an empty right child.
    Splay(key);
    // Insert the original right child as the right child of the new
    // root.
    root_->right_ = right;
  }
  return true;
}


template <typename C>
void ZoneSplayTree<C>::Splay(const Key& key) {
  if (is_empty())
    return;
  Node dummy_node(C::kNoKey, C::kNoValue);
  // Create a dummy node.  The use of the dummy node is a bit
  // counter-intuitive: The right child of the dummy node will hold
  // the L tree of the algorithm.  The left child of the dummy node
  // will hold the R tree of the algorithm.  Using a dummy node, left
  // and right will always be nodes and we avoid special cases.
  Node* dummy = &dummy_node;
  Node* left = dummy;
  Node* right = dummy;
  Node* current = root_;
  while (true) {
    int cmp = C::Compare(key, current->key_);
    if (cmp < 0) {
      if (current->left_ == NULL)
        break;
      if (C::Compare(key, current->left_->key_) < 0) {
        // Rotate right.
        Node* temp = current->left_;
        current->left_ = temp->right_;
        temp->right_ = current;
        current = temp;
        if (current->left_ == NULL)
          break;
      }
      // Link right.
      right->left_ = current;
      right = current;
      current = current->left_;
    } else if (cmp > 0) {
      if (current->right_ == NULL)
        break;
      if (C::Compare(key, current->right_->key_) > 0) {
        // Rotate left.
        Node* temp = current->right_;
        current->right_ = temp->left_;
        temp->left_ = current;
        current = temp;
        if (current->right_ == NULL)
          break;
      }
      // Link left.
      left->right_ = current;
      left = current;
      current = current->right_;
    } else {
      break;
    }
  }
  // Assemble.
  left->right_ = current->left_;
  right->left_ = current->right_;
  current->left_ = dummy->right_;
  current->right_ = dummy->left_;
  root_ = current;
}


template <typename Node, class Callback>
static void DoForEach(Node* node, Callback* callback) {
  if (node == NULL) return;
  DoForEach<Node, Callback>(node->left(), callback);
  callback->Call(node->key(), node->value());
  DoForEach<Node, Callback>(node->right(), callback);
}
|
||||
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
||||
#endif // V8_ZONE_INL_H_
|
||||
|
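Editor's note: DoForEach above is an in-order traversal, so ForEach visits mappings in ascending key order and only requires the callback type to expose a Call(key, value) method. A minimal sketch of such a callback, assuming int keys and values (CountingCallback is an illustrative name, not part of this diff):

struct CountingCallback {
  CountingCallback() : count(0) { }
  // Invoked once per node; DoForEach guarantees ascending key order.
  void Call(int key, int value) { count++; }
  int count;
};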
5
deps/v8/src/zone.cc
vendored
@ -176,7 +176,10 @@ Address Zone::NewExpand(int size) {
    new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize);
  }
  Segment* segment = Segment::New(new_size);
  if (segment == NULL) V8::FatalProcessOutOfMemory("Zone");
  if (segment == NULL) {
    V8::FatalProcessOutOfMemory("Zone");
    return NULL;
  }

  // Recompute 'top' and 'limit' based on the new segment.
  Address result = RoundUp(segment->start(), kAlignment);
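Editor's note: the added return NULL is unreachable at runtime, because V8::FatalProcessOutOfMemory terminates the process; it exists so that compilers which cannot see that fact do not warn about a missing return value. A self-contained sketch of the same pattern, with illustrative names (FatalError, Allocate) that are not V8 code:

#include <cstdio>
#include <cstdlib>

static void FatalError(const char* message) {
  std::fprintf(stderr, "fatal: %s\n", message);
  std::abort();  // never returns
}

static char* Allocate(std::size_t size) {
  char* block = static_cast<char*>(std::malloc(size));
  if (block == NULL) {
    FatalError("out of memory");
    return NULL;  // unreachable; placates compilers that cannot see abort()
  }
  return block;
}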
102
deps/v8/src/zone.h
vendored
@ -204,6 +204,108 @@ class ZoneScope BASE_EMBEDDED {
};


template <typename Node, class Callback>
static void DoForEach(Node* node, Callback* callback);


// A zone splay tree.  The config type parameter encapsulates the
// different configurations of a concrete splay tree:
//
//   typedef Key: the key type
//   typedef Value: the value type
//   static const kNoKey: the dummy key used when no key is set
//   static const kNoValue: the dummy value used to initialize nodes
//   int (Compare)(Key& a, Key& b) -> {-1, 0, 1}: comparison function
//
template <typename Config>
class ZoneSplayTree : public ZoneObject {
 public:
  typedef typename Config::Key Key;
  typedef typename Config::Value Value;

  class Locator;

  ZoneSplayTree() : root_(NULL) { }

  // Inserts the given key in this tree with the given value.  Returns
  // true if a node was inserted, otherwise false.  If the key is found
  // the locator is enabled and provides access to the mapping for the key.
  bool Insert(const Key& key, Locator* locator);

  // Looks up the key in this tree and returns true if it was found,
  // otherwise false.  If the node is found the locator is enabled and
  // provides access to the mapping for the key.
  bool Find(const Key& key, Locator* locator);

  // Finds the mapping with the greatest key less than or equal to the
  // given key.
  bool FindGreatestLessThan(const Key& key, Locator* locator);

  // Finds the mapping with the greatest key in this tree.
  bool FindGreatest(Locator* locator);

  // Finds the mapping with the least key greater than or equal to the
  // given key.
  bool FindLeastGreaterThan(const Key& key, Locator* locator);

  // Finds the mapping with the least key in this tree.
  bool FindLeast(Locator* locator);

  // Removes the node with the given key from the tree.
  bool Remove(const Key& key);

  bool is_empty() { return root_ == NULL; }

  // Performs the splay operation for the given key.  Moves the node with
  // the given key to the top of the tree.  If no node has the given
  // key, the last node on the search path is moved to the top of the
  // tree.
  void Splay(const Key& key);

  class Node : public ZoneObject {
   public:
    Node(const Key& key, const Value& value)
        : key_(key),
          value_(value),
          left_(NULL),
          right_(NULL) { }
    Key key() { return key_; }
    Value value() { return value_; }
    Node* left() { return left_; }
    Node* right() { return right_; }
   private:
    friend class ZoneSplayTree;
    friend class Locator;
    Key key_;
    Value value_;
    Node* left_;
    Node* right_;
  };

  // A locator provides access to a node in the tree without actually
  // exposing the node.
  class Locator {
   public:
    explicit Locator(Node* node) : node_(node) { }
    Locator() : node_(NULL) { }
    const Key& key() { return node_->key_; }
    Value& value() { return node_->value_; }
    void set_value(const Value& value) { node_->value_ = value; }
    inline void bind(Node* node) { node_ = node; }
   private:
    Node* node_;
  };

  template <class Callback>
  void ForEach(Callback* c) {
    DoForEach<typename ZoneSplayTree<Config>::Node, Callback>(root_, c);
  }

 private:
  Node* root_;
};


} }  // namespace v8::internal

#endif  // V8_ZONE_H_
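Editor's note: a minimal sketch of a Config type satisfying the contract documented above, assuming int keys and values; IntTreeConfig is an illustrative name, not one of V8's real configs:

struct IntTreeConfig {
  typedef int Key;
  typedef int Value;
  static const int kNoKey = 0;    // dummy key for Splay's dummy node
  static const int kNoValue = 0;  // dummy value used to initialize nodes
  static int Compare(int a, int b) {
    if (a < b) return -1;
    if (a > b) return 1;
    return 0;
  }
};

// Usage sketch:
//   ZoneSplayTree<IntTreeConfig> tree;
//   ZoneSplayTree<IntTreeConfig>::Locator loc;
//   if (tree.Insert(42, &loc)) loc.set_value(7);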
12
deps/v8/test/cctest/cctest.status
vendored
@ -63,7 +63,7 @@ test-api/TryCatchInTryFinally: FAIL

[ $arch == x64 ]
test-regexp/Graph: CRASH || FAIL
test-regexp/Graph: PASS || CRASH || FAIL
test-decls/Present: CRASH || FAIL
test-decls/Unknown: CRASH || FAIL
test-decls/Appearing: CRASH || FAIL
@ -108,17 +108,17 @@ test-debug/StepWithException: CRASH || FAIL
test-debug/DebugBreak: CRASH || FAIL
test-debug/DisableBreak: CRASH || FAIL
test-debug/MessageQueues: CRASH || FAIL
test-debug/CallFunctionInDebugger: CRASH || FAIL
test-debug/CallFunctionInDebugger: SKIP
test-debug/RecursiveBreakpoints: CRASH || FAIL
test-debug/DebuggerUnload: CRASH || FAIL
test-debug/DebuggerClearMessageHandler: CRASH || FAIL
test-debug/DebuggerClearMessageHandlerWhileActive: CRASH || FAIL
test-debug/DebuggerHostDispatch: CRASH || FAIL
test-debug/DebugBreakInMessageHandler: CRASH || FAIL
test-debug/NoDebugBreakInAfterCompileMessageHandler: CRASH || FAIL
test-api/HugeConsStringOutOfMemory: CRASH || FAIL
test-api/OutOfMemory: CRASH || FAIL
test-api/OutOfMemoryNested: CRASH || FAIL
test-api/Threading: CRASH || FAIL
test-api/Threading2: PASS || TIMEOUT
test-api/TryCatchSourceInfo: CRASH || FAIL
test-api/RegExpInterruption: CRASH || FAIL
test-api/RegExpStringModification: CRASH || FAIL
test-api/RegExpInterruption: PASS || TIMEOUT
test-api/RegExpStringModification: PASS || TIMEOUT
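Editor's note: in these status files each line maps a test to the set of outcomes the harness accepts, with || separating alternatives, so the change above widens test-regexp/Graph to also accept a pass. A hypothetical entry for illustration (not part of this diff):

# Accept either a pass or a timeout for a flaky test on this architecture.
some-suite/SomeFlakyTest: PASS || TIMEOUT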
610
deps/v8/test/cctest/test-api.cc
vendored
@ -1266,6 +1266,38 @@ THREADED_TEST(InternalFields) {
}


THREADED_TEST(InternalFieldsNativePointers) {
  v8::HandleScope scope;
  LocalContext env;

  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
  instance_templ->SetInternalFieldCount(1);
  Local<v8::Object> obj = templ->GetFunction()->NewInstance();
  CHECK_EQ(1, obj->InternalFieldCount());
  CHECK(obj->GetPointerFromInternalField(0) == NULL);

  char* data = new char[100];

  void* aligned = data;
  CHECK_EQ(0, reinterpret_cast<uintptr_t>(aligned) & 0x1);
  void* unaligned = data + 1;
  CHECK_EQ(1, reinterpret_cast<uintptr_t>(unaligned) & 0x1);

  // Check reading and writing aligned pointers.
  obj->SetPointerInInternalField(0, aligned);
  i::Heap::CollectAllGarbage();
  CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));

  // Check reading and writing unaligned pointers.
  obj->SetPointerInInternalField(0, unaligned);
  i::Heap::CollectAllGarbage();
  CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));

  delete[] data;
}


THREADED_TEST(IdentityHash) {
  v8::HandleScope scope;
  LocalContext env;
@ -5024,6 +5056,236 @@ THREADED_TEST(InterceptorLoadICWithOverride) {
}


// Test the case when we stored a field into
// a stub, but the interceptor produced a value on its own.
THREADED_TEST(InterceptorLoadICFieldNotNeeded) {
  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
    "proto = new Object();"
    "o.__proto__ = proto;"
    "proto.x = 239;"
    "for (var i = 0; i < 1000; i++) {"
    "  o.x;"
    // Now it should be ICed and keep a reference to x defined on proto.
    "}"
    "var result = 0;"
    "for (var i = 0; i < 1000; i++) {"
    "  result += o.x;"
    "}"
    "result;",
    42 * 1000);
}


// Test the case when we stored a field into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorLoadICInvalidatedField) {
  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
    "proto1 = new Object();"
    "proto2 = new Object();"
    "o.__proto__ = proto1;"
    "proto1.__proto__ = proto2;"
    "proto2.y = 239;"
    "for (var i = 0; i < 1000; i++) {"
    "  o.y;"
    // Now it should be ICed and keep a reference to y defined on proto2.
    "}"
    "proto1.y = 42;"
    "var result = 0;"
    "for (var i = 0; i < 1000; i++) {"
    "  result += o.y;"
    "}"
    "result;",
    42 * 1000);
}


// Test the case when we stored a field into
// a stub, but it got invalidated later on due to an override on the
// global object, which sits between the interceptor and the field's holder.
THREADED_TEST(InterceptorLoadICInvalidatedFieldViaGlobal) {
  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
    "o.__proto__ = this;"  // Set a global to be a proto of o.
    "this.__proto__.y = 239;"
    "for (var i = 0; i < 10; i++) {"
    "  if (o.y != 239) throw 'oops: ' + o.y;"
    // Now it should be ICed and keep a reference to y defined on field_holder.
    "}"
    "this.y = 42;"  // Assign on a global.
    "var result = 0;"
    "for (var i = 0; i < 10; i++) {"
    "  result += o.y;"
    "}"
    "result;",
    42 * 10);
}


static v8::Handle<Value> Return239(Local<String> name, const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  return v8_num(239);
}


static void SetOnThis(Local<String> name,
                      Local<Value> value,
                      const AccessorInfo& info) {
  info.This()->ForceSet(name, value);
}


THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  templ->SetAccessor(v8_str("y"), Return239);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result = o.y;"
      "}");
  CHECK_EQ(239, value->Int32Value());
}


THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
  templ_p->SetAccessor(v8_str("y"), Return239);

  LocalContext context;
  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
  context->Global()->Set(v8_str("p"), templ_p->NewInstance());

  v8::Handle<Value> value = CompileRun(
      "o.__proto__ = p;"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result = o.x + o.y;"
      "}");
  CHECK_EQ(239 + 42, value->Int32Value());
}


THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  templ->SetAccessor(v8_str("y"), Return239);

  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());

  v8::Handle<Value> value = CompileRun(
      "fst = new Object(); fst.__proto__ = o;"
      "snd = new Object(); snd.__proto__ = fst;"
      "var result1 = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result1 = snd.x;"
      "}"
      "fst.x = 239;"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result = snd.x;"
      "}"
      "result + result1");
  CHECK_EQ(239 + 42, value->Int32Value());
}


// Test the case when we stored a callback into
// a stub, but the interceptor produced a value on its own.
THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
  templ_p->SetAccessor(v8_str("y"), Return239);

  LocalContext context;
  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
  context->Global()->Set(v8_str("p"), templ_p->NewInstance());

  v8::Handle<Value> value = CompileRun(
      "o.__proto__ = p;"
      "for (var i = 0; i < 7; i++) {"
      "  o.x;"
      // Now it should be ICed and keep a reference to x defined on p.
      "}"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result += o.x;"
      "}"
      "result");
  CHECK_EQ(42 * 7, value->Int32Value());
}


// Test the case when we stored a callback into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
  templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis);

  LocalContext context;
  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
  context->Global()->Set(v8_str("p"), templ_p->NewInstance());

  v8::Handle<Value> value = CompileRun(
      "inbetween = new Object();"
      "o.__proto__ = inbetween;"
      "inbetween.__proto__ = p;"
      "for (var i = 0; i < 10; i++) {"
      "  o.y;"
      // Now it should be ICed and keep a reference to y defined on p.
      "}"
      "inbetween.y = 42;"
      "var result = 0;"
      "for (var i = 0; i < 10; i++) {"
      "  result += o.y;"
      "}"
      "result");
  CHECK_EQ(42 * 10, value->Int32Value());
}


// Test the case when we stored a callback into
// a stub, but it got invalidated later on due to an override on the
// global object, which sits between the interceptor and the callback's holder.
THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
  templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis);

  LocalContext context;
  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
  context->Global()->Set(v8_str("p"), templ_p->NewInstance());

  v8::Handle<Value> value = CompileRun(
      "o.__proto__ = this;"
      "this.__proto__ = p;"
      "for (var i = 0; i < 10; i++) {"
      "  if (o.y != 239) throw 'oops: ' + o.y;"
      // Now it should be ICed and keep a reference to y defined on p.
      "}"
      "this.y = 42;"
      "var result = 0;"
      "for (var i = 0; i < 10; i++) {"
      "  result += o.y;"
      "}"
      "result");
  CHECK_EQ(42 * 10, value->Int32Value());
}


static v8::Handle<Value> InterceptorLoadICGetter0(Local<String> name,
                                                  const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
@ -5108,6 +5370,192 @@ THREADED_TEST(InterceptorCallIC) {
  CHECK_EQ(42, value->Int32Value());
}


// This test checks that if the interceptor doesn't provide
// a value, we can fetch the regular value.
THREADED_TEST(InterceptorCallICSeesOthers) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "o.x = function f(x) { return x + 1; };"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result = o.x(41);"
      "}");
  CHECK_EQ(42, value->Int32Value());
}


static v8::Handle<Value> call_ic_function4;
static v8::Handle<Value> InterceptorCallICGetter4(Local<String> name,
                                                  const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(v8_str("x")->Equals(name));
  return call_ic_function4;
}


// This test checks that if the interceptor provides a function,
// the interceptor's function is invoked even if we cached a
// shadowed variant.
THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorCallICGetter4);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  call_ic_function4 =
      v8_compile("function f(x) { return x - 1; }; f")->Run();
  v8::Handle<Value> value = CompileRun(
      "o.__proto__.x = function(x) { return x + 1; };"
      "var result = 0;"
      "for (var i = 0; i < 1000; i++) {"
      "  result = o.x(42);"
      "}");
  CHECK_EQ(41, value->Int32Value());
}


// Test the case when we stored a cacheable lookup into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "proto1 = new Object();"
      "proto2 = new Object();"
      "o.__proto__ = proto1;"
      "proto1.__proto__ = proto2;"
      "proto2.y = function(x) { return x + 1; };"
      // Invoke it many times to compile a stub.
      "for (var i = 0; i < 7; i++) {"
      "  o.y(42);"
      "}"
      "proto1.y = function(x) { return x - 1; };"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result += o.y(42);"
      "}");
  CHECK_EQ(41 * 7, value->Int32Value());
}


static v8::Handle<Value> call_ic_function5;
static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name,
                                                  const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  if (v8_str("x")->Equals(name))
    return call_ic_function5;
  else
    return Local<Value>();
}


// This test checks that if the interceptor doesn't provide a function,
// the cached constant function is used.
THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "o.x = inc;"
      "var result = 0;"
      "for (var i = 0; i < 1000; i++) {"
      "  result = o.x(42);"
      "}");
  CHECK_EQ(43, value->Int32Value());
}


// This test checks that if the interceptor provides a function,
// the interceptor's function is invoked even if we cached a
// constant function.
THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorCallICGetter5);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  call_ic_function5 =
      v8_compile("function f(x) { return x - 1; }; f")->Run();
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "o.x = inc;"
      "var result = 0;"
      "for (var i = 0; i < 1000; i++) {"
      "  result = o.x(42);"
      "}");
  CHECK_EQ(41, value->Int32Value());
}


// Test the case when we stored a constant function into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "proto1 = new Object();"
      "proto2 = new Object();"
      "o.__proto__ = proto1;"
      "proto1.__proto__ = proto2;"
      "proto2.y = inc;"
      // Invoke it many times to compile a stub.
      "for (var i = 0; i < 7; i++) {"
      "  o.y(42);"
      "}"
      "proto1.y = function(x) { return x - 1; };"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result += o.y(42);"
      "}");
  CHECK_EQ(41 * 7, value->Int32Value());
}


// Test the case when we stored a constant function into
// a stub, but it got invalidated later on due to an override on the
// global object, which sits between the interceptor and the constant
// function's holder.
THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "o.__proto__ = this;"
      "this.__proto__.y = inc;"
      // Invoke it many times to compile a stub.
      "for (var i = 0; i < 7; i++) {"
      "  if (o.y(42) != 43) throw 'oops: ' + o.y(42);"
      "}"
      "this.y = function(x) { return x - 1; };"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      "  result += o.y(42);"
      "}");
  CHECK_EQ(41 * 7, value->Int32Value());
}


static int interceptor_call_count = 0;

static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name,
@ -5768,6 +6216,7 @@ THREADED_TEST(NestedHandleScopeAndContexts) {

THREADED_TEST(ExternalAllocatedMemory) {
  v8::HandleScope outer;
  v8::Persistent<Context> env = Context::New();
  const int kSize = 1024*1024;
  CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize);
  CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0);
@ -7081,3 +7530,164 @@ THREADED_TEST(ReplaceConstantFunction) {
  obj_clone->Set(foo_string, v8::String::New("Hello"));
  CHECK(!obj->Get(foo_string)->IsUndefined());
}


// Regression test for http://crbug.com/16276.
THREADED_TEST(Regress16276) {
  v8::HandleScope scope;
  LocalContext context;
  // Force the IC in f to be a dictionary load IC.
  CompileRun("function f(obj) { return obj.x; }\n"
             "var obj = { x: { foo: 42 }, y: 87 };\n"
             "var x = obj.x;\n"
             "delete obj.y;\n"
             "for (var i = 0; i < 5; i++) f(obj);");
  // Detach the global object to make 'this' refer directly to the
  // global object (not the proxy), and make sure that the dictionary
  // load IC doesn't mess up loading directly from the global object.
  context->DetachGlobal();
  CHECK_EQ(42, CompileRun("f(this).foo")->Int32Value());
}


THREADED_TEST(PixelArray) {
  v8::HandleScope scope;
  LocalContext context;
  const int kElementCount = 40;
  uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
  i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
                                                              pixel_data);
  i::Heap::CollectAllGarbage();  // Force GC to trigger verification.
  for (int i = 0; i < kElementCount; i++) {
    pixels->set(i, i);
  }
  i::Heap::CollectAllGarbage();  // Force GC to trigger verification.
  for (int i = 0; i < kElementCount; i++) {
    CHECK_EQ(i, pixels->get(i));
    CHECK_EQ(i, pixel_data[i]);
  }

  v8::Handle<v8::Object> obj = v8::Object::New();
  i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
  // Set the elements to be the pixels.
  // jsobj->set_elements(*pixels);
  obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
  CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value());
  obj->Set(v8_str("field"), v8::Int32::New(1503));
  context->Global()->Set(v8_str("pixels"), obj);
  v8::Handle<v8::Value> result = CompileRun("pixels.field");
  CHECK_EQ(1503, result->Int32Value());
  result = CompileRun("pixels[1]");
  CHECK_EQ(1, result->Int32Value());
  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i];"
                      "}"
                      "sum;");
  CHECK_EQ(28, result->Int32Value());

  i::Handle<i::Smi> value(i::Smi::FromInt(2));
  i::SetElement(jsobj, 1, value);
  CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1))->value());
  *value.location() = i::Smi::FromInt(256);
  i::SetElement(jsobj, 1, value);
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(1))->value());
  *value.location() = i::Smi::FromInt(-1);
  i::SetElement(jsobj, 1, value);
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[i] = (i * 65) - 109;"
                      "}"
                      "pixels[1] + pixels[6];");
  CHECK_EQ(255, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value());
  CHECK_EQ(21, i::Smi::cast(jsobj->GetElement(2))->value());
  CHECK_EQ(86, i::Smi::cast(jsobj->GetElement(3))->value());
  CHECK_EQ(151, i::Smi::cast(jsobj->GetElement(4))->value());
  CHECK_EQ(216, i::Smi::cast(jsobj->GetElement(5))->value());
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(6))->value());
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(7))->value());
  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i];"
                      "}"
                      "sum;");
  CHECK_EQ(984, result->Int32Value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[i] = (i * 1.1);"
                      "}"
                      "pixels[1] + pixels[6];");
  CHECK_EQ(8, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value());
  CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value());
  CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(2))->value());
  CHECK_EQ(3, i::Smi::cast(jsobj->GetElement(3))->value());
  CHECK_EQ(4, i::Smi::cast(jsobj->GetElement(4))->value());
  CHECK_EQ(6, i::Smi::cast(jsobj->GetElement(5))->value());
  CHECK_EQ(7, i::Smi::cast(jsobj->GetElement(6))->value());
  CHECK_EQ(8, i::Smi::cast(jsobj->GetElement(7))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[7] = undefined;"
                      "}"
                      "pixels[7];");
  CHECK_EQ(0, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(7))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[6] = '2.3';"
                      "}"
                      "pixels[6];");
  CHECK_EQ(2, result->Int32Value());
  CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(6))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[5] = NaN;"
                      "}"
                      "pixels[5];");
  CHECK_EQ(0, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[8] = Infinity;"
                      "}"
                      "pixels[8];");
  CHECK_EQ(255, result->Int32Value());
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(8))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[9] = -Infinity;"
                      "}"
                      "pixels[9];");
  CHECK_EQ(0, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(9))->value());

  result = CompileRun("pixels[3] = 33;"
                      "delete pixels[3];"
                      "pixels[3];");
  CHECK_EQ(33, result->Int32Value());

  result = CompileRun("pixels[0] = 10; pixels[1] = 11;"
                      "pixels[2] = 12; pixels[3] = 13;"
                      "pixels.__defineGetter__('2',"
                      "function() { return 120; });"
                      "pixels[2];");
  CHECK_EQ(12, result->Int32Value());

  result = CompileRun("var js_array = new Array(40);"
                      "js_array[0] = 77;"
                      "js_array;");
  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());

  result = CompileRun("pixels[1] = 23;"
                      "pixels.__proto__ = [];"
                      "js_array.__proto__ = pixels;"
                      "js_array.concat(pixels);");
  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
  CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());

  free(pixel_data);
}
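Editor's note: the checks above pin down the pixel-array write semantics: values are clamped to the 0..255 interval, NaN and -Infinity store 0, +Infinity stores 255, and fractional values round to the nearest integer with ties to even (which is why 5.5 stores as 6 while 4.4 stores as 4). A standalone sketch of that rule, assuming IEEE round-to-nearest-even; ClampPixelValue is an illustrative helper, not V8's implementation:

#include <cmath>
#include <cstdint>

static std::uint8_t ClampPixelValue(double value) {
  if (std::isnan(value) || value <= 0.0) return 0;  // NaN, negatives, -Infinity
  if (value >= 255.0) return 255;                   // +Infinity and large values
  return static_cast<std::uint8_t>(std::lrint(value));  // rounds half to even
}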
4
deps/v8/test/cctest/test-ast.cc
vendored
@ -35,11 +35,11 @@
using namespace v8::internal;

TEST(List) {
  List<Node*>* list = new List<Node*>(0);
  List<AstNode*>* list = new List<AstNode*>(0);
  CHECK_EQ(0, list->length());

  ZoneScope zone_scope(DELETE_ON_EXIT);
  Node* node = new EmptyStatement();
  AstNode* node = new EmptyStatement();
  list->Add(node);
  CHECK_EQ(1, list->length());
  CHECK_EQ(node, list->at(0));
68
deps/v8/test/cctest/test-debug.cc
vendored
@ -4875,7 +4875,7 @@ TEST(DebugBreakInMessageHandler) {
  v8::Debug::SetMessageHandler2(DebugBreakMessageHandler);

  // Test functions.
  const char* script = "function f() { debugger; } function g() { }";
  const char* script = "function f() { debugger; g(); } function g() { }";
  CompileRun(script);
  v8::Local<v8::Function> f =
      v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
@ -4954,8 +4954,10 @@ TEST(RegExpDebugBreak) {
  v8::Debug::DebugBreak();
  result = f->Call(env->Global(), argc, argv);

  CHECK_EQ(20, break_point_hit_count);
  CHECK_EQ("exec", last_function_hit);
  // Check that there was only one break event.  Matching RegExp should not
  // cause Break events.
  CHECK_EQ(1, break_point_hit_count);
  CHECK_EQ("f", last_function_hit);
}
#endif  // V8_NATIVE_REGEXP

@ -5295,3 +5297,63 @@ TEST(ProvisionalBreakpointOnLineOutOfRange) {
  ClearBreakPointFromJS(sbp2);
  v8::Debug::SetMessageHandler2(NULL);
}


static void BreakMessageHandler(const v8::Debug::Message& message) {
  if (message.IsEvent() && message.GetEvent() == v8::Break) {
    // Count the number of breaks.
    break_point_hit_count++;

    v8::HandleScope scope;
    v8::Handle<v8::String> json = message.GetJSON();

    SendContinueCommand();
  } else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
    v8::HandleScope scope;

    bool is_debug_break = i::StackGuard::IsDebugBreak();
    // Force the DebugBreak flag while the serializer is working.
    i::StackGuard::DebugBreak();

    // Force serialization to trigger some internal JS execution.
    v8::Handle<v8::String> json = message.GetJSON();

    // Restore the previous state.
    if (is_debug_break) {
      i::StackGuard::DebugBreak();
    } else {
      i::StackGuard::Continue(i::DEBUGBREAK);
    }
  }
}


// Test that if DebugBreak is forced it is ignored when code from
// debug-delay.js is executed.
TEST(NoDebugBreakInAfterCompileMessageHandler) {
  v8::HandleScope scope;
  DebugLocalContext env;

  // Register a debug event listener which sets the break flag and counts.
  v8::Debug::SetMessageHandler2(BreakMessageHandler);

  // Set the debug break flag.
  v8::Debug::DebugBreak();

  // Create a function for testing stepping.
  const char* src = "function f() { eval('var x = 10;'); } ";
  v8::Local<v8::Function> f = CompileFunction(&env, src, "f");

  // There should be only one break event.
  CHECK_EQ(1, break_point_hit_count);

  // Set the debug break flag again.
  v8::Debug::DebugBreak();
  f->Call(env->Global(), 0, NULL);
  // There should be one more break event when the script is evaluated in 'f'.
  CHECK_EQ(2, break_point_hit_count);

  // Get rid of the debug message handler.
  v8::Debug::SetMessageHandler2(NULL);
  CheckDebuggerUnloaded();
}
4
deps/v8/test/cctest/test-heap.cc
vendored
@ -36,7 +36,7 @@ TEST(HeapMaps) {
  InitializeVM();
  CheckMap(Heap::meta_map(), MAP_TYPE, Map::kSize);
  CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
  CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, Array::kAlignedSize);
  CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, FixedArray::kHeaderSize);
  CheckMap(Heap::long_string_map(), LONG_STRING_TYPE,
           SeqTwoByteString::kAlignedSize);
}
@ -653,7 +653,7 @@ TEST(JSArray) {
  uint32_t int_length = 0;
  CHECK(Array::IndexFromObject(length, &int_length));
  CHECK_EQ(length, array->length());
  CHECK(!array->HasFastElements());  // Must be in slow mode.
  CHECK(array->HasDictionaryElements());  // Must be in slow mode.

  // array[length] = name.
  array->SetElement(int_length, name);
40
deps/v8/test/cctest/test-log.cc
vendored
@ -4,8 +4,12 @@

#ifdef ENABLE_LOGGING_AND_PROFILING

#include "v8.h"
#ifdef __linux__
#include <signal.h>
#include <unistd.h>
#endif

#include "v8.h"
#include "log.h"
#include "cctest.h"

@ -144,6 +148,23 @@ class LoggerTestHelper : public AllStatic {
using v8::internal::LoggerTestHelper;


// Under Linux, we need to check whether signals were actually delivered to
// avoid false positives.  On other platforms profiling is done via a
// high-priority thread, so this case never happens.
static bool was_sigprof_received = true;
#ifdef __linux__

struct sigaction old_sigprof_handler;

static void SigProfSignalHandler(int signal, siginfo_t* info, void* context) {
  if (signal != SIGPROF) return;
  was_sigprof_received = true;
  old_sigprof_handler.sa_sigaction(signal, info, context);
}

#endif  // __linux__


static int CheckThatProfilerWorks(int log_pos) {
  Logger::ResumeProfiler();
  CHECK(LoggerTestHelper::IsSamplerActive());
@ -160,6 +181,18 @@ static int CheckThatProfilerWorks(int log_pos) {
  const char* code_creation = "\ncode-creation,";  // eq. to /^code-creation,/
  CHECK_NE(NULL, strstr(buffer.start(), code_creation));

#ifdef __linux__
  // Intercept the SIGPROF handler to make sure that the test process
  // actually received it.  Under load, the system can defer the signal,
  // causing test failures.  It is important to execute this after
  // 'ResumeProfiler'.
  was_sigprof_received = false;
  struct sigaction sa;
  sa.sa_sigaction = SigProfSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  CHECK_EQ(0, sigaction(SIGPROF, &sa, &old_sigprof_handler));
#endif  // __linux__

  // Force the compiler to generate new code by parameterizing the source.
  EmbeddedVector<char, 100> script_src;
  i::OS::SNPrintF(script_src,
@ -170,6 +203,8 @@ static int CheckThatProfilerWorks(int log_pos) {
  const double end_time = i::OS::TimeCurrentMillis() + 200;
  while (i::OS::TimeCurrentMillis() < end_time) {
    CompileAndRunScript(script_src.start());
    // Yield the CPU to give the profiler thread a chance to process ticks.
    i::OS::Sleep(1);
  }

  Logger::PauseProfiler();
@ -189,7 +224,8 @@ static int CheckThatProfilerWorks(int log_pos) {
  buffer[log_size] = '\0';
  const char* tick = "\ntick,";
  CHECK_NE(NULL, strstr(buffer.start(), code_creation));
  CHECK_NE(NULL, strstr(buffer.start(), tick));
  const bool ticks_found = strstr(buffer.start(), tick) != NULL;
  CHECK_EQ(was_sigprof_received, ticks_found);

  return log_pos;
}
5
deps/v8/test/cctest/test-mark-compact.cc
vendored
@ -86,7 +86,8 @@ TEST(Promotion) {
  v8::HandleScope sc;

  // Allocate a fixed array in the new space.
  int array_size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
  int array_size =
      (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
      (kPointerSize * 4);
  Object* obj = Heap::AllocateFixedArray(array_size);
  CHECK(!obj->IsFailure());
@ -118,7 +119,7 @@ TEST(NoPromotion) {
  CHECK(Heap::CollectGarbage(0, OLD_POINTER_SPACE));

  // Allocate a big fixed array in the new space.
  int size = (Heap::MaxObjectSizeInPagedSpace() - Array::kHeaderSize) /
  int size = (Heap::MaxObjectSizeInPagedSpace() - FixedArray::kHeaderSize) /
      kPointerSize;
  Object* obj = Heap::AllocateFixedArray(size);
2
deps/v8/test/cctest/test-regexp.cc
vendored
@ -35,7 +35,7 @@
#include "zone-inl.h"
#include "parser.h"
#include "ast.h"
#include "jsregexp-inl.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "regexp-macro-assembler-irregexp.h"
#ifdef V8_TARGET_ARCH_ARM
20
deps/v8/test/message/message.status
vendored
@ -32,13 +32,13 @@ bugs: FAIL

[ $arch == x64 ]

simple-throw.js: FAIL
try-catch-finally-throw-in-catch-and-finally.js: FAIL
try-catch-finally-throw-in-catch.js: FAIL
try-catch-finally-throw-in-finally.js: FAIL
try-finally-throw-in-finally.js: FAIL
try-finally-throw-in-try-and-finally.js: FAIL
try-finally-throw-in-try.js: FAIL
overwritten-builtins.js: FAIL
regress-73.js: FAIL
regress-75.js: FAIL
simple-throw: FAIL
try-catch-finally-throw-in-catch-and-finally: FAIL
try-catch-finally-throw-in-catch: FAIL
try-catch-finally-throw-in-finally: FAIL
try-finally-throw-in-finally: FAIL
try-finally-throw-in-try-and-finally: FAIL
try-finally-throw-in-try: FAIL
overwritten-builtins: FAIL
regress/regress-73: FAIL
regress/regress-75: FAIL
248
deps/v8/test/mjsunit/debug-stepin-accessor.js
vendored
Normal file
@ -0,0 +1,248 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --expose-debug-as debug

// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug

var exception = null;
var state = 1;
var expected_source_line_text = null;
var expected_function_name = null;

// Simple debug event handler which on the first break requests a 'step in'
// action and then checks that execution is paused inside the expected
// getter or setter.
function listener(event, exec_state, event_data, data) {
  try {
    if (event == Debug.DebugEvent.Break) {
      if (state == 1) {
        exec_state.prepareStep(Debug.StepAction.StepIn, 2);
        state = 2;
      } else if (state == 2) {
        assertEquals(expected_source_line_text,
                     event_data.sourceLineText());
        assertEquals(expected_function_name, event_data.func().name());
        state = 3;
      }
    }
  } catch(e) {
    exception = e;
  }
};

// Add the debug event listener.
Debug.setListener(listener);


var c = {
  name: 'name ',
  get getter1() {
    return this.name;  // getter 1
  },
  get getter2() {
    return {  // getter 2
      'a': c.name
    };
  },
  set setter1(n) {
    this.name = n;  // setter 1
  }
};

c.__defineGetter__('y', function getterY() {
  return this.name;  // getter y
});

c.__defineGetter__(3, function getter3() {
  return this.name;  // getter 3
});

c.__defineSetter__('y', function setterY(n) {
  this.name = n;  // setter y
});

c.__defineSetter__(3, function setter3(n) {
  this.name = n;  // setter 3
});

var d = {
  'c': c,
};

function testGetter1_1() {
  expected_function_name = 'getter1';
  expected_source_line_text = '    return this.name;  // getter 1';
  debugger;
  var x = c.getter1;
}

function testGetter1_2() {
  expected_function_name = 'getter1';
  expected_source_line_text = '    return this.name;  // getter 1';
  debugger;
  var x = c['getter1'];
}

function testGetter1_3() {
  expected_function_name = 'getter1';
  expected_source_line_text = '    return this.name;  // getter 1';
  debugger;
  for (var i = 1; i < 2; i++) {
    var x = c['getter' + i];
  }
}

function testGetter1_4() {
  expected_function_name = 'getter1';
  expected_source_line_text = '    return this.name;  // getter 1';
  debugger;
  var x = d.c.getter1;
}

function testGetter1_5() {
  expected_function_name = 'getter1';
  expected_source_line_text = '    return this.name;  // getter 1';
  for (var i = 2; i != 1; i--);
  debugger;
  var x = d.c['getter' + i];
}

function testGetter2_1() {
  expected_function_name = 'getter2';
  expected_source_line_text = '    return {  // getter 2';
  for (var i = 2; i != 1; i--);
  debugger;
  var t = d.c.getter2.name;
}


function testGetterY_1() {
  expected_function_name = 'getterY';
  expected_source_line_text = '  return this.name;  // getter y';
  debugger;
  var t = d.c.y;
}

function testIndexedGetter3_1() {
  expected_function_name = 'getter3';
  expected_source_line_text = '  return this.name;  // getter 3';
  debugger;
  var r = d.c[3];
}

function testSetterY_1() {
  expected_function_name = 'setterY';
  expected_source_line_text = '  this.name = n;  // setter y';
  debugger;
  d.c.y = 'www';
}

function testIndexedSetter3_1() {
  expected_function_name = 'setter3';
  expected_source_line_text = '  this.name = n;  // setter 3';
  var i = 3
  debugger;
  d.c[3] = 'www';
}

function testSetter1_1() {
  expected_function_name = 'setter1';
  expected_source_line_text = '    this.name = n;  // setter 1';
  debugger;
  d.c.setter1 = 'aa';
}

function testSetter1_2() {
  expected_function_name = 'setter1';
  expected_source_line_text = '    this.name = n;  // setter 1';
  debugger;
  d.c['setter1'] = 'bb';
}

function testSetter1_3() {
  expected_function_name = 'setter1';
  expected_source_line_text = '    this.name = n;  // setter 1';
  for (var i = 2; i != 1; i--);
  debugger;
  d.c['setter' + i] = i;
}

var e = {
  name: 'e'
};
e.__proto__ = c;

function testProtoGetter1_1() {
  expected_function_name = 'getter1';
  expected_source_line_text = '    return this.name;  // getter 1';
  debugger;
  var x = e.getter1;
}

function testProtoSetter1_1() {
  expected_function_name = 'setter1';
  expected_source_line_text = '    this.name = n;  // setter 1';
  debugger;
  e.setter1 = 'aa';
}

function testProtoIndexedGetter3_1() {
  expected_function_name = 'getter3';
  expected_source_line_text = '  return this.name;  // getter 3';
  debugger;
  var x = e[3];
}

function testProtoIndexedSetter3_1() {
  expected_function_name = 'setter3';
  expected_source_line_text = '  this.name = n;  // setter 3';
  debugger;
  e[3] = 'new val';
}

function testProtoSetter1_2() {
  expected_function_name = 'setter1';
  expected_source_line_text = '    this.name = n;  // setter 1';
  for (var i = 2; i != 1; i--);
  debugger;
  e['setter' + i] = 'aa';
}

for (var n in this) {
  if (n.substr(0, 4) != 'test') {
    continue;
  }
  state = 1;
  this[n]();
  assertNull(exception);
  assertEquals(3, state);
}

// Get rid of the debug event listener.
Debug.setListener(null);
78
deps/v8/test/mjsunit/debug-stepin-builtin.js
vendored
Normal file
@ -0,0 +1,78 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// Flags: --expose-debug-as debug

// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug

var exception = null;
var state = 1;
var expected_source_line_text = null;
var expected_function_name = null;

// Simple debug event handler which on the first break requests a 'step in'
// action and then checks that execution is paused inside the function
// named by expected_function_name.
function listener(event, exec_state, event_data, data) {
  try {
    if (event == Debug.DebugEvent.Break) {
      if (state == 1) {
        exec_state.prepareStep(Debug.StepAction.StepIn, 2);
        state = 2;
      } else if (state == 2) {
        assertEquals(expected_function_name, event_data.func().name());
        assertEquals(expected_source_line_text,
                     event_data.sourceLineText());
        state = 3;
      }
    }
  } catch(e) {
    exception = e;
  }
};

// Add the debug event listener.
Debug.setListener(listener);

var a = [1,2,3,4,5];

// Test stepping into a function call from a function without local variables.
function testStepInArraySlice() {
  expected_function_name = 'testStepInArraySlice';
  expected_source_line_text = '}  // expected line';
  debugger;
  var s = Array.prototype.slice.call(a, 2,3);
}  // expected line

state = 1;
testStepInArraySlice();
assertNull(exception);
assertEquals(3, state);

// Get rid of the debug event listener.
Debug.setListener(null);
67
deps/v8/test/mjsunit/mjsunit.status
vendored
@ -58,6 +58,8 @@ debug-ignore-breakpoints: CRASH || FAIL
debug-multiple-breakpoints: CRASH || FAIL
debug-setbreakpoint: CRASH || FAIL || PASS
debug-step-stub-callfunction: SKIP
debug-stepin-accessor: CRASH || FAIL
debug-stepin-builtin: CRASH || FAIL
debug-stepin-constructor: CRASH, FAIL
debug-stepin-function-call: CRASH || FAIL
debug-step: SKIP
@ -69,40 +71,37 @@ regress/regress-269: SKIP
# Fails on real ARM hardware but not on the simulator.
string-compare-alignment: PASS || FAIL

# Times out often in release mode on ARM.
array-splice: PASS || TIMEOUT

[ $arch == x64 ]

debug-backtrace.js: CRASH || FAIL
date-parse.js: CRASH || FAIL
debug-backtrace-text.js: CRASH || FAIL
debug-multiple-breakpoints.js: CRASH || FAIL
debug-breakpoints.js: CRASH || FAIL
debug-changebreakpoint.js: CRASH || FAIL
debug-clearbreakpoint.js: CRASH || FAIL
debug-conditional-breakpoints.js: CRASH || FAIL
debug-constructor.js: CRASH || FAIL
debug-continue.js: CRASH || FAIL
debug-enable-disable-breakpoints.js: CRASH || FAIL
debug-evaluate-recursive.js: CRASH || FAIL
debug-event-listener.js: CRASH || FAIL
debug-evaluate.js: CRASH || FAIL
debug-ignore-breakpoints.js: CRASH || FAIL
debug-setbreakpoint.js: CRASH || FAIL
debug-step-stub-callfunction.js: CRASH || FAIL
debug-step.js: CRASH || FAIL
mirror-date.js: CRASH || FAIL
invalid-lhs.js: CRASH || FAIL
debug-stepin-constructor.js: CRASH || FAIL
new.js: CRASH || FAIL
fuzz-natives.js: CRASH || FAIL
greedy.js: CRASH || FAIL
debug-handle.js: CRASH || FAIL
string-indexof.js: CRASH || FAIL
debug-clearbreakpointgroup.js: CRASH || FAIL
regress/regress-269.js: CRASH || FAIL
div-mod.js: CRASH || FAIL
unicode-test.js: CRASH || FAIL
regress/regress-392.js: CRASH || FAIL
regress/regress-1200351.js: CRASH || FAIL
regress/regress-998565.js: CRASH || FAIL
tools/tickprocessor.js: CRASH || FAIL
debug-backtrace: CRASH || FAIL
debug-backtrace-text: CRASH || FAIL
debug-multiple-breakpoints: CRASH || FAIL
debug-breakpoints: CRASH || FAIL
debug-changebreakpoint: CRASH || FAIL
debug-clearbreakpoint: CRASH || FAIL
debug-conditional-breakpoints: CRASH || FAIL
debug-constructor: CRASH || FAIL
debug-continue: CRASH || FAIL
debug-enable-disable-breakpoints: CRASH || FAIL
debug-evaluate-recursive: CRASH || FAIL
debug-event-listener: CRASH || FAIL
debug-evaluate: CRASH || FAIL
debug-ignore-breakpoints: CRASH || FAIL
debug-setbreakpoint: CRASH || FAIL
debug-step-stub-callfunction: CRASH || FAIL
debug-step: CRASH || FAIL
debug-stepin-builtin: CRASH || FAIL
debug-stepin-constructor: CRASH || FAIL
debug-stepin-function-call: CRASH || FAIL
debug-stepin-accessor: CRASH || FAIL
fuzz-natives: PASS || TIMEOUT
debug-handle: CRASH || FAIL
debug-clearbreakpointgroup: CRASH || FAIL
regress/regress-269: CRASH || FAIL
regress/regress-392: CRASH || FAIL
regress/regress-1200351: CRASH || FAIL
regress/regress-998565: CRASH || FAIL
tools/tickprocessor: PASS || CRASH || FAIL