Upgrade V8 to 2.3.7

commit 083ee0f8b7 (parent 9acd76ed6e)
deps/v8/AUTHORS | 1 (vendored)

@@ -29,4 +29,5 @@ Rodolph Perfetta <rodolph.perfetta@arm.com>
 Ryan Dahl <coldredlemur@gmail.com>
 Subrato K De <subratokde@codeaurora.org>
 Burcu Dogan <burcujdogan@gmail.com>
+Vlad Burlik <vladbph@gmail.com>
deps/v8/ChangeLog | 22 (vendored)

@@ -1,3 +1,25 @@
+2010-08-11: Version 2.3.7
+
+        Reduced size of heap snapshots produced by heap profiler (issue 783).
+
+        Introduced v8::Value::IsRegExp method.
+
+        Fixed CPU profiler crash in start / stop sequence when non-existent
+        name is passed (issue http://crbug.com/51594).
+
+        Introduced new indexed property query callbacks API (issue 816). This
+        API is guarded by USE_NEW_QUERY_CALLBACK define and is disabled
+        by default.
+
+        Removed support for object literal get/set with number/string
+        property name.
+
+        Fixed handling of JSObject::elements in CalculateNetworkSize
+        (issue 822).
+
+        Allow compiling with strict aliasing enabled on GCC 4.4 (issue 463).
+
+
 2010-08-09: Version 2.3.6

         RegExp literals create a new object every time they are evaluated
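The IsRegExp entry is a small embedder-facing addition; its declaration and implementation appear in the v8.h and api.cc hunks below. A minimal usage sketch (assuming a live HandleScope and an entered Context, which the snippet does not set up):

    #include <v8.h>

    // Returns true when the given value is a JS RegExp object.
    // v8::Value::IsRegExp() is new in 2.3.7, alongside IsDate().
    bool IsRegExpValue(v8::Handle<v8::Value> value) {
      return value->IsRegExp();
    }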
deps/v8/SConstruct | 3 (vendored)

@@ -58,7 +58,7 @@ else:
   # on linux we need these compiler flags to avoid crashes in the v8 test suite
   # and avoid dtoa.c strict aliasing issues
   if os.environ.get('GCC_VERSION') == '44':
-    GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp', '-fno-strict-aliasing']
+    GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
     GCC_DTOA_EXTRA_CCFLAGS = []
   else:
     GCC_EXTRA_CCFLAGS = []
@@ -80,7 +80,6 @@ ANDROID_FLAGS = ['-march=armv7-a',
                  '-frerun-cse-after-loop',
                  '-frename-registers',
                  '-fomit-frame-pointer',
-                 '-fno-strict-aliasing',
                  '-finline-limit=64',
                  '-DCAN_USE_VFP_INSTRUCTIONS=1',
                  '-DCAN_USE_ARMV7_INSTRUCTIONS=1',
deps/v8/include/v8-profiler.h | 30 (vendored)

@@ -194,10 +194,10 @@ class HeapGraphNode;
 class V8EXPORT HeapGraphEdge {
  public:
   enum Type {
-    CONTEXT_VARIABLE = 0,  // A variable from a function context.
-    ELEMENT = 1,           // An element of an array.
-    PROPERTY = 2,          // A named object property.
-    INTERNAL = 3           // A link that can't be accessed from JS,
+    kContextVariable = 0,  // A variable from a function context.
+    kElement = 1,          // An element of an array.
+    kProperty = 2,         // A named object property.
+    kInternal = 3          // A link that can't be accessed from JS,
                            // thus, its name isn't a real property name.
   };

@@ -240,12 +240,12 @@ class V8EXPORT HeapGraphPath {
 class V8EXPORT HeapGraphNode {
  public:
   enum Type {
-    INTERNAL = 0,  // Internal node, a virtual one, for housekeeping.
-    ARRAY = 1,     // An array of elements.
-    STRING = 2,    // A string.
-    OBJECT = 3,    // A JS object (except for arrays and strings).
-    CODE = 4,      // Compiled code.
-    CLOSURE = 5    // Function closure.
+    kInternal = 0,  // Internal node, a virtual one, for housekeeping.
+    kArray = 1,     // An array of elements.
+    kString = 2,    // A string.
+    kObject = 3,    // A JS object (except for arrays and strings).
+    kCode = 4,      // Compiled code.
+    kClosure = 5    // Function closure.
   };

   /** Returns node type (see HeapGraphNode::Type). */
@@ -268,13 +268,15 @@ class V8EXPORT HeapGraphNode {
   int GetSelfSize() const;

   /** Returns node's network (self + reachable nodes) size, in bytes. */
-  int GetTotalSize() const;
+  int GetReachableSize() const;

   /**
-   * Returns node's private size, in bytes. That is, the size of memory
-   * that will be reclaimed having this node collected.
+   * Returns node's retained size, in bytes. That is, self + sizes of
+   * the objects that are reachable only from this object. In other
+   * words, the size of memory that will be reclaimed having this node
+   * collected.
    */
-  int GetPrivateSize() const;
+  int GetRetainedSize() const;

   /** Returns child nodes count of the node. */
   int GetChildrenCount() const;
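Taken together, these renames change how an embedder walks a snapshot. A minimal sketch against the renamed API, using only methods visible in this header (obtaining the snapshot itself is left out):

    #include <v8.h>
    #include <v8-profiler.h>

    // Sums the retained sizes of all property targets of a node.
    int RetainedByProperties(const v8::HeapGraphNode* node) {
      int total = 0;
      for (int i = 0; i < node->GetChildrenCount(); i++) {
        const v8::HeapGraphEdge* edge = node->GetChild(i);
        if (edge->GetType() == v8::HeapGraphEdge::kProperty) {  // was PROPERTY
          total += edge->GetToNode()->GetRetainedSize();  // was GetPrivateSize()
        }
      }
      return total;
    }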
deps/v8/include/v8.h | 52 (vendored)

@@ -919,6 +919,11 @@ class Value : public Data {
    */
   V8EXPORT bool IsDate() const;

+  /**
+   * Returns true if this value is a RegExp.
+   */
+  V8EXPORT bool IsRegExp() const;
+
   V8EXPORT Local<Boolean> ToBoolean() const;
   V8EXPORT Local<Number> ToNumber() const;
   V8EXPORT Local<String> ToString() const;
@@ -1819,10 +1824,19 @@ typedef Handle<Value> (*IndexedPropertySetter)(uint32_t index,

 /**
  * Returns a non-empty handle if the interceptor intercepts the request.
- * The result is true if the property exists and false otherwise.
+ * The result is true if either a boolean (true if property exists and false
+ * otherwise) or an integer encoding property attributes.
  */
+#ifdef USE_NEW_QUERY_CALLBACKS
+typedef Handle<Integer> (*IndexedPropertyQuery)(uint32_t index,
+                                                const AccessorInfo& info);
+#else
 typedef Handle<Boolean> (*IndexedPropertyQuery)(uint32_t index,
                                                 const AccessorInfo& info);
+#endif
+
+typedef Handle<Value> (*IndexedPropertyQueryImpl)(uint32_t index,
+                                                  const AccessorInfo& info);

 /**
  * Returns a non-empty handle if the deleter intercepts the request.
@@ -2040,6 +2054,22 @@ class V8EXPORT FunctionTemplate : public Template {
       IndexedPropertyQuery query,
       IndexedPropertyDeleter remover,
       IndexedPropertyEnumerator enumerator,
+      Handle<Value> data) {
+    IndexedPropertyQueryImpl casted =
+        reinterpret_cast<IndexedPropertyQueryImpl>(query);
+    SetIndexedInstancePropertyHandlerImpl(getter,
+                                          setter,
+                                          casted,
+                                          remover,
+                                          enumerator,
+                                          data);
+  }
+  void SetIndexedInstancePropertyHandlerImpl(
+      IndexedPropertyGetter getter,
+      IndexedPropertySetter setter,
+      IndexedPropertyQueryImpl query,
+      IndexedPropertyDeleter remover,
+      IndexedPropertyEnumerator enumerator,
       Handle<Value> data);
   void SetInstanceCallAsFunctionHandler(InvocationCallback callback,
                                         Handle<Value> data);
@@ -2139,7 +2169,25 @@ class V8EXPORT ObjectTemplate : public Template {
                                  IndexedPropertyQuery query = 0,
                                  IndexedPropertyDeleter deleter = 0,
                                  IndexedPropertyEnumerator enumerator = 0,
-                                 Handle<Value> data = Handle<Value>());
+                                 Handle<Value> data = Handle<Value>()) {
+    IndexedPropertyQueryImpl casted =
+        reinterpret_cast<IndexedPropertyQueryImpl>(query);
+    SetIndexedPropertyHandlerImpl(getter,
+                                  setter,
+                                  casted,
+                                  deleter,
+                                  enumerator,
+                                  data);
+  }
+ private:
+  void SetIndexedPropertyHandlerImpl(IndexedPropertyGetter getter,
+                                     IndexedPropertySetter setter,
+                                     IndexedPropertyQueryImpl query,
+                                     IndexedPropertyDeleter deleter,
+                                     IndexedPropertyEnumerator enumerator,
+                                     Handle<Value> data);
+ public:

   /**
    * Sets the callback to be used when calling instances created from
    * this template as a function. If no callback is set, instances
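Under the new define, a query interceptor reports property attributes instead of a bare boolean. A minimal sketch of an embedder opting in (the attribute value and the index cutoff are illustrative assumptions, not part of this diff):

    #define USE_NEW_QUERY_CALLBACKS
    #include <v8.h>

    // Intercepts queries for indices below 10 and reports them as plain
    // properties; an empty handle means "not intercepted".
    static v8::Handle<v8::Integer> QueryIndexed(uint32_t index,
                                                const v8::AccessorInfo& info) {
      if (index < 10) return v8::Integer::New(v8::None);
      return v8::Handle<v8::Integer>();
    }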
deps/v8/src/accessors.cc | 2 (vendored)

@@ -488,7 +488,7 @@ Object* Accessors::FunctionGetLength(Object* object, void*) {
   JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
   if (!found_it) return Smi::FromInt(0);
   // Check if already compiled.
-  if (!function->is_compiled()) {
+  if (!function->shared()->is_compiled()) {
     // If the function isn't compiled yet, the length is not computed
     // correctly yet. Compile it now and return the right length.
     HandleScope scope;
|
129
deps/v8/src/api.cc
vendored
129
deps/v8/src/api.cc
vendored
@ -886,10 +886,10 @@ void FunctionTemplate::SetNamedInstancePropertyHandler(
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void FunctionTemplate::SetIndexedInstancePropertyHandler(
|
void FunctionTemplate::SetIndexedInstancePropertyHandlerImpl(
|
||||||
IndexedPropertyGetter getter,
|
IndexedPropertyGetter getter,
|
||||||
IndexedPropertySetter setter,
|
IndexedPropertySetter setter,
|
||||||
IndexedPropertyQuery query,
|
IndexedPropertyQueryImpl query,
|
||||||
IndexedPropertyDeleter remover,
|
IndexedPropertyDeleter remover,
|
||||||
IndexedPropertyEnumerator enumerator,
|
IndexedPropertyEnumerator enumerator,
|
||||||
Handle<Value> data) {
|
Handle<Value> data) {
|
||||||
@ -1054,10 +1054,10 @@ void ObjectTemplate::SetAccessCheckCallbacks(
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void ObjectTemplate::SetIndexedPropertyHandler(
|
void ObjectTemplate::SetIndexedPropertyHandlerImpl(
|
||||||
IndexedPropertyGetter getter,
|
IndexedPropertyGetter getter,
|
||||||
IndexedPropertySetter setter,
|
IndexedPropertySetter setter,
|
||||||
IndexedPropertyQuery query,
|
IndexedPropertyQueryImpl query,
|
||||||
IndexedPropertyDeleter remover,
|
IndexedPropertyDeleter remover,
|
||||||
IndexedPropertyEnumerator enumerator,
|
IndexedPropertyEnumerator enumerator,
|
||||||
Handle<Value> data) {
|
Handle<Value> data) {
|
||||||
@ -1068,7 +1068,7 @@ void ObjectTemplate::SetIndexedPropertyHandler(
|
|||||||
i::FunctionTemplateInfo* constructor =
|
i::FunctionTemplateInfo* constructor =
|
||||||
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
|
i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor());
|
||||||
i::Handle<i::FunctionTemplateInfo> cons(constructor);
|
i::Handle<i::FunctionTemplateInfo> cons(constructor);
|
||||||
Utils::ToLocal(cons)->SetIndexedInstancePropertyHandler(getter,
|
Utils::ToLocal(cons)->SetIndexedInstancePropertyHandlerImpl(getter,
|
||||||
setter,
|
setter,
|
||||||
query,
|
query,
|
||||||
remover,
|
remover,
|
||||||
@ -1792,6 +1792,13 @@ bool Value::IsDate() const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool Value::IsRegExp() const {
|
||||||
|
if (IsDeadCheck("v8::Value::IsRegExp()")) return false;
|
||||||
|
i::Handle<i::Object> obj = Utils::OpenHandle(this);
|
||||||
|
return obj->IsJSRegExp();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
Local<String> Value::ToString() const {
|
Local<String> Value::ToString() const {
|
||||||
if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
|
if (IsDeadCheck("v8::Value::ToString()")) return Local<String>();
|
||||||
LOG_API("ToString");
|
LOG_API("ToString");
|
||||||
@ -4491,24 +4498,27 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static i::HeapGraphEdge* ToInternal(const HeapGraphEdge* edge) {
|
||||||
|
return const_cast<i::HeapGraphEdge*>(
|
||||||
|
reinterpret_cast<const i::HeapGraphEdge*>(edge));
|
||||||
|
}
|
||||||
|
|
||||||
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
|
HeapGraphEdge::Type HeapGraphEdge::GetType() const {
|
||||||
IsDeadCheck("v8::HeapGraphEdge::GetType");
|
IsDeadCheck("v8::HeapGraphEdge::GetType");
|
||||||
return static_cast<HeapGraphEdge::Type>(
|
return static_cast<HeapGraphEdge::Type>(ToInternal(this)->type());
|
||||||
reinterpret_cast<const i::HeapGraphEdge*>(this)->type());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Handle<Value> HeapGraphEdge::GetName() const {
|
Handle<Value> HeapGraphEdge::GetName() const {
|
||||||
IsDeadCheck("v8::HeapGraphEdge::GetName");
|
IsDeadCheck("v8::HeapGraphEdge::GetName");
|
||||||
const i::HeapGraphEdge* edge =
|
i::HeapGraphEdge* edge = ToInternal(this);
|
||||||
reinterpret_cast<const i::HeapGraphEdge*>(this);
|
|
||||||
switch (edge->type()) {
|
switch (edge->type()) {
|
||||||
case i::HeapGraphEdge::CONTEXT_VARIABLE:
|
case i::HeapGraphEdge::kContextVariable:
|
||||||
case i::HeapGraphEdge::INTERNAL:
|
case i::HeapGraphEdge::kInternal:
|
||||||
case i::HeapGraphEdge::PROPERTY:
|
case i::HeapGraphEdge::kProperty:
|
||||||
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
|
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
|
||||||
edge->name())));
|
edge->name())));
|
||||||
case i::HeapGraphEdge::ELEMENT:
|
case i::HeapGraphEdge::kElement:
|
||||||
return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
|
return Handle<Number>(ToApi<Number>(i::Factory::NewNumberFromInt(
|
||||||
edge->index())));
|
edge->index())));
|
||||||
default: UNREACHABLE();
|
default: UNREACHABLE();
|
||||||
@ -4519,28 +4529,32 @@ Handle<Value> HeapGraphEdge::GetName() const {
|
|||||||
|
|
||||||
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
|
const HeapGraphNode* HeapGraphEdge::GetFromNode() const {
|
||||||
IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
|
IsDeadCheck("v8::HeapGraphEdge::GetFromNode");
|
||||||
const i::HeapEntry* from =
|
const i::HeapEntry* from = ToInternal(this)->From();
|
||||||
reinterpret_cast<const i::HeapGraphEdge*>(this)->from();
|
|
||||||
return reinterpret_cast<const HeapGraphNode*>(from);
|
return reinterpret_cast<const HeapGraphNode*>(from);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
|
const HeapGraphNode* HeapGraphEdge::GetToNode() const {
|
||||||
IsDeadCheck("v8::HeapGraphEdge::GetToNode");
|
IsDeadCheck("v8::HeapGraphEdge::GetToNode");
|
||||||
const i::HeapEntry* to =
|
const i::HeapEntry* to = ToInternal(this)->to();
|
||||||
reinterpret_cast<const i::HeapGraphEdge*>(this)->to();
|
|
||||||
return reinterpret_cast<const HeapGraphNode*>(to);
|
return reinterpret_cast<const HeapGraphNode*>(to);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static i::HeapGraphPath* ToInternal(const HeapGraphPath* path) {
|
||||||
|
return const_cast<i::HeapGraphPath*>(
|
||||||
|
reinterpret_cast<const i::HeapGraphPath*>(path));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
int HeapGraphPath::GetEdgesCount() const {
|
int HeapGraphPath::GetEdgesCount() const {
|
||||||
return reinterpret_cast<const i::HeapGraphPath*>(this)->path()->length();
|
return ToInternal(this)->path()->length();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
|
const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
|
||||||
return reinterpret_cast<const HeapGraphEdge*>(
|
return reinterpret_cast<const HeapGraphEdge*>(
|
||||||
reinterpret_cast<const i::HeapGraphPath*>(this)->path()->at(index));
|
ToInternal(this)->path()->at(index));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -4555,137 +4569,136 @@ const HeapGraphNode* HeapGraphPath::GetToNode() const {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
|
||||||
|
return const_cast<i::HeapEntry*>(
|
||||||
|
reinterpret_cast<const i::HeapEntry*>(entry));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
HeapGraphNode::Type HeapGraphNode::GetType() const {
|
HeapGraphNode::Type HeapGraphNode::GetType() const {
|
||||||
IsDeadCheck("v8::HeapGraphNode::GetType");
|
IsDeadCheck("v8::HeapGraphNode::GetType");
|
||||||
return static_cast<HeapGraphNode::Type>(
|
return static_cast<HeapGraphNode::Type>(ToInternal(this)->type());
|
||||||
reinterpret_cast<const i::HeapEntry*>(this)->type());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Handle<String> HeapGraphNode::GetName() const {
|
Handle<String> HeapGraphNode::GetName() const {
|
||||||
IsDeadCheck("v8::HeapGraphNode::GetName");
|
IsDeadCheck("v8::HeapGraphNode::GetName");
|
||||||
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
|
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
|
||||||
reinterpret_cast<const i::HeapEntry*>(this)->name())));
|
ToInternal(this)->name())));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
uint64_t HeapGraphNode::GetId() const {
|
uint64_t HeapGraphNode::GetId() const {
|
||||||
IsDeadCheck("v8::HeapGraphNode::GetId");
|
IsDeadCheck("v8::HeapGraphNode::GetId");
|
||||||
return reinterpret_cast<const i::HeapEntry*>(this)->id();
|
return ToInternal(this)->id();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int HeapGraphNode::GetSelfSize() const {
|
int HeapGraphNode::GetSelfSize() const {
|
||||||
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
|
IsDeadCheck("v8::HeapGraphNode::GetSelfSize");
|
||||||
return reinterpret_cast<const i::HeapEntry*>(this)->self_size();
|
return ToInternal(this)->self_size();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int HeapGraphNode::GetTotalSize() const {
|
int HeapGraphNode::GetReachableSize() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetHead");
|
IsDeadCheck("v8::HeapSnapshot::GetReachableSize");
|
||||||
return const_cast<i::HeapEntry*>(
|
return ToInternal(this)->ReachableSize();
|
||||||
reinterpret_cast<const i::HeapEntry*>(this))->TotalSize();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int HeapGraphNode::GetPrivateSize() const {
|
int HeapGraphNode::GetRetainedSize() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetPrivateSize");
|
IsDeadCheck("v8::HeapSnapshot::GetRetainedSize");
|
||||||
return const_cast<i::HeapEntry*>(
|
return ToInternal(this)->RetainedSize();
|
||||||
reinterpret_cast<const i::HeapEntry*>(this))->NonSharedTotalSize();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int HeapGraphNode::GetChildrenCount() const {
|
int HeapGraphNode::GetChildrenCount() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
|
IsDeadCheck("v8::HeapSnapshot::GetChildrenCount");
|
||||||
return reinterpret_cast<const i::HeapEntry*>(this)->children()->length();
|
return ToInternal(this)->children().length();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
|
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetChild");
|
IsDeadCheck("v8::HeapSnapshot::GetChild");
|
||||||
return reinterpret_cast<const HeapGraphEdge*>(
|
return reinterpret_cast<const HeapGraphEdge*>(
|
||||||
reinterpret_cast<const i::HeapEntry*>(this)->children()->at(index));
|
&ToInternal(this)->children()[index]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int HeapGraphNode::GetRetainersCount() const {
|
int HeapGraphNode::GetRetainersCount() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
|
IsDeadCheck("v8::HeapSnapshot::GetRetainersCount");
|
||||||
return reinterpret_cast<const i::HeapEntry*>(this)->retainers()->length();
|
return ToInternal(this)->retainers().length();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
|
const HeapGraphEdge* HeapGraphNode::GetRetainer(int index) const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetRetainer");
|
IsDeadCheck("v8::HeapSnapshot::GetRetainer");
|
||||||
return reinterpret_cast<const HeapGraphEdge*>(
|
return reinterpret_cast<const HeapGraphEdge*>(
|
||||||
reinterpret_cast<const i::HeapEntry*>(this)->retainers()->at(index));
|
ToInternal(this)->retainers()[index]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
int HeapGraphNode::GetRetainingPathsCount() const {
|
int HeapGraphNode::GetRetainingPathsCount() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
|
IsDeadCheck("v8::HeapSnapshot::GetRetainingPathsCount");
|
||||||
return const_cast<i::HeapEntry*>(
|
return ToInternal(this)->GetRetainingPaths()->length();
|
||||||
reinterpret_cast<const i::HeapEntry*>(
|
|
||||||
this))->GetRetainingPaths()->length();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
|
const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
|
IsDeadCheck("v8::HeapSnapshot::GetRetainingPath");
|
||||||
return reinterpret_cast<const HeapGraphPath*>(
|
return reinterpret_cast<const HeapGraphPath*>(
|
||||||
const_cast<i::HeapEntry*>(
|
ToInternal(this)->GetRetainingPaths()->at(index));
|
||||||
reinterpret_cast<const i::HeapEntry*>(
|
|
||||||
this))->GetRetainingPaths()->at(index));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
|
const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
|
||||||
IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
|
IsDeadCheck("v8::HeapSnapshotsDiff::GetAdditionsRoot");
|
||||||
const i::HeapSnapshotsDiff* diff =
|
i::HeapSnapshotsDiff* diff =
|
||||||
reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
|
const_cast<i::HeapSnapshotsDiff*>(
|
||||||
|
reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
|
||||||
return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
|
return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
|
const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
|
||||||
IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
|
IsDeadCheck("v8::HeapSnapshotsDiff::GetDeletionsRoot");
|
||||||
const i::HeapSnapshotsDiff* diff =
|
i::HeapSnapshotsDiff* diff =
|
||||||
reinterpret_cast<const i::HeapSnapshotsDiff*>(this);
|
const_cast<i::HeapSnapshotsDiff*>(
|
||||||
|
reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
|
||||||
return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
|
return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
|
||||||
|
return const_cast<i::HeapSnapshot*>(
|
||||||
|
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
unsigned HeapSnapshot::GetUid() const {
|
unsigned HeapSnapshot::GetUid() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetUid");
|
IsDeadCheck("v8::HeapSnapshot::GetUid");
|
||||||
return reinterpret_cast<const i::HeapSnapshot*>(this)->uid();
|
return ToInternal(this)->uid();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
Handle<String> HeapSnapshot::GetTitle() const {
|
Handle<String> HeapSnapshot::GetTitle() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetTitle");
|
IsDeadCheck("v8::HeapSnapshot::GetTitle");
|
||||||
const i::HeapSnapshot* snapshot =
|
|
||||||
reinterpret_cast<const i::HeapSnapshot*>(this);
|
|
||||||
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
|
return Handle<String>(ToApi<String>(i::Factory::LookupAsciiSymbol(
|
||||||
snapshot->title())));
|
ToInternal(this)->title())));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapGraphNode* HeapSnapshot::GetRoot() const {
|
const HeapGraphNode* HeapSnapshot::GetRoot() const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::GetHead");
|
IsDeadCheck("v8::HeapSnapshot::GetHead");
|
||||||
const i::HeapSnapshot* snapshot =
|
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
|
||||||
reinterpret_cast<const i::HeapSnapshot*>(this);
|
|
||||||
return reinterpret_cast<const HeapGraphNode*>(snapshot->const_root());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
|
const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
|
||||||
const HeapSnapshot* snapshot) const {
|
const HeapSnapshot* snapshot) const {
|
||||||
IsDeadCheck("v8::HeapSnapshot::CompareWith");
|
IsDeadCheck("v8::HeapSnapshot::CompareWith");
|
||||||
i::HeapSnapshot* snapshot1 = const_cast<i::HeapSnapshot*>(
|
|
||||||
reinterpret_cast<const i::HeapSnapshot*>(this));
|
|
||||||
i::HeapSnapshot* snapshot2 = const_cast<i::HeapSnapshot*>(
|
|
||||||
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
|
|
||||||
return reinterpret_cast<const HeapSnapshotsDiff*>(
|
return reinterpret_cast<const HeapSnapshotsDiff*>(
|
||||||
snapshot1->CompareWith(snapshot2));
|
ToInternal(this)->CompareWith(ToInternal(snapshot)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
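The recurring change in this file is the family of overloaded ToInternal() helpers, which replace the repeated const_cast/reinterpret_cast chains at each call site. A minimal standalone analogue of the pattern (PublicNode and its field are made up for illustration, not code from the tree):

    namespace internal { struct Node { int id; }; }

    // Opaque public type; clients only ever hold pointers to it.
    struct PublicNode;

    // One helper per public type; overload resolution picks the right one.
    static internal::Node* ToInternal(const PublicNode* n) {
      return const_cast<internal::Node*>(
          reinterpret_cast<const internal::Node*>(n));
    }

    int GetId(const PublicNode* n) { return ToInternal(n)->id; }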
deps/v8/src/arm/builtins-arm.cc | 2 (vendored)

@@ -1050,7 +1050,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ ldr(r2,
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
   __ mov(r2, Operand(r2, ASR, kSmiTagSize));
-  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeOffset));
   __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ cmp(r2, r0);  // Check formal and actual parameter counts.
   __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
deps/v8/src/arm/codegen-arm.cc | 182 (vendored)

@@ -1532,9 +1532,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
   __ BranchOnSmi(r0, &build_args);
   __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
   __ b(ne, &build_args);
-  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
   Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-  __ ldr(r1, FieldMemOperand(r0, SharedFunctionInfo::kCodeOffset));
+  __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeOffset));
   __ cmp(r1, Operand(apply_code));
   __ b(ne, &build_args);

@@ -4176,21 +4175,21 @@ void CodeGenerator::VisitCallNew(CallNew* node) {


 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 1);
   JumpTarget leave, null, function, non_function_constructor;
+  Register scratch = VirtualFrame::scratch0();

-  // Load the object into r0.
+  // Load the object into register.
+  ASSERT(args->length() == 1);
   Load(args->at(0));
-  frame_->EmitPop(r0);
+  Register tos = frame_->PopToRegister();

   // If the object is a smi, we return null.
-  __ tst(r0, Operand(kSmiTagMask));
+  __ tst(tos, Operand(kSmiTagMask));
   null.Branch(eq);

   // Check that the object is a JS object but take special care of JS
   // functions to make sure they have 'Function' as their class.
-  __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
+  __ CompareObjectType(tos, tos, scratch, FIRST_JS_OBJECT_TYPE);
   null.Branch(lt);

   // As long as JS_FUNCTION_TYPE is the last instance type and it is
@@ -4198,37 +4197,38 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
   // LAST_JS_OBJECT_TYPE.
   STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
   STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-  __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+  __ cmp(scratch, Operand(JS_FUNCTION_TYPE));
   function.Branch(eq);

   // Check if the constructor in the map is a function.
-  __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
-  __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+  __ ldr(tos, FieldMemOperand(tos, Map::kConstructorOffset));
+  __ CompareObjectType(tos, scratch, scratch, JS_FUNCTION_TYPE);
   non_function_constructor.Branch(ne);

-  // The r0 register now contains the constructor function. Grab the
+  // The tos register now contains the constructor function. Grab the
   // instance class name from there.
-  __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
-  frame_->EmitPush(r0);
+  __ ldr(tos, FieldMemOperand(tos, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(tos,
+         FieldMemOperand(tos, SharedFunctionInfo::kInstanceClassNameOffset));
+  frame_->EmitPush(tos);
   leave.Jump();

   // Functions have class 'Function'.
   function.Bind();
-  __ mov(r0, Operand(Factory::function_class_symbol()));
-  frame_->EmitPush(r0);
+  __ mov(tos, Operand(Factory::function_class_symbol()));
+  frame_->EmitPush(tos);
   leave.Jump();

   // Objects with a non-function constructor have class 'Object'.
   non_function_constructor.Bind();
-  __ mov(r0, Operand(Factory::Object_symbol()));
-  frame_->EmitPush(r0);
+  __ mov(tos, Operand(Factory::Object_symbol()));
+  frame_->EmitPush(tos);
   leave.Jump();

   // Non-JS objects have class null.
   null.Bind();
-  __ LoadRoot(r0, Heap::kNullValueRootIndex);
-  frame_->EmitPush(r0);
+  __ LoadRoot(tos, Heap::kNullValueRootIndex);
+  frame_->EmitPush(tos);

   // All done.
   leave.Bind();
@@ -4236,45 +4236,51 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {


 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 1);
+  Register scratch = VirtualFrame::scratch0();
   JumpTarget leave;

+  ASSERT(args->length() == 1);
   Load(args->at(0));
-  frame_->EmitPop(r0);  // r0 contains object.
+  Register tos = frame_->PopToRegister();  // tos contains object.
   // if (object->IsSmi()) return the object.
-  __ tst(r0, Operand(kSmiTagMask));
+  __ tst(tos, Operand(kSmiTagMask));
   leave.Branch(eq);
   // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
+  __ CompareObjectType(tos, scratch, scratch, JS_VALUE_TYPE);
   leave.Branch(ne);
   // Load the value.
-  __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+  __ ldr(tos, FieldMemOperand(tos, JSValue::kValueOffset));
   leave.Bind();
-  frame_->EmitPush(r0);
+  frame_->EmitPush(tos);
 }


 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  ASSERT(args->length() == 2);
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
   JumpTarget leave;

+  ASSERT(args->length() == 2);
   Load(args->at(0));  // Load the object.
   Load(args->at(1));  // Load the value.
-  frame_->EmitPop(r0);  // r0 contains value
-  frame_->EmitPop(r1);  // r1 contains object
+  Register value = frame_->PopToRegister();
+  Register object = frame_->PopToRegister(value);
   // if (object->IsSmi()) return object.
-  __ tst(r1, Operand(kSmiTagMask));
+  __ tst(object, Operand(kSmiTagMask));
   leave.Branch(eq);
   // It is a heap object - get map. If (!object->IsJSValue()) return the object.
-  __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
+  __ CompareObjectType(object, scratch1, scratch1, JS_VALUE_TYPE);
   leave.Branch(ne);
   // Store the value.
-  __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+  __ str(value, FieldMemOperand(object, JSValue::kValueOffset));
   // Update the write barrier.
-  __ RecordWrite(r1, Operand(JSValue::kValueOffset - kHeapObjectTag), r2, r3);
+  __ RecordWrite(object,
+                 Operand(JSValue::kValueOffset - kHeapObjectTag),
+                 scratch1,
+                 scratch2);
   // Leave.
   leave.Bind();
-  frame_->EmitPush(r0);
+  frame_->EmitPush(value);
 }


@@ -4558,22 +4564,18 @@ class DeferredStringCharCodeAt : public DeferredCode {
 // This generates code that performs a String.prototype.charCodeAt() call
 // or returns a smi in order to trigger conversion.
 void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment(masm_, "[ GenerateStringCharCodeAt");
   ASSERT(args->length() == 2);

   Load(args->at(0));
   Load(args->at(1));

-  Register index = r1;
-  Register object = r2;
-
-  frame_->EmitPop(r1);
-  frame_->EmitPop(r2);
+  Register index = frame_->PopToRegister();
+  Register object = frame_->PopToRegister(index);

   // We need two extra registers.
-  Register scratch = r3;
-  Register result = r0;
+  Register scratch = VirtualFrame::scratch0();
+  Register result = VirtualFrame::scratch1();

   DeferredStringCharCodeAt* deferred =
       new DeferredStringCharCodeAt(object,
@@ -4608,16 +4610,13 @@ class DeferredStringCharFromCode : public DeferredCode {

 // Generates code for creating a one-char string from a char code.
 void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment(masm_, "[ GenerateStringCharFromCode");
   ASSERT(args->length() == 1);

   Load(args->at(0));

-  Register code = r1;
-  Register result = r0;
-
-  frame_->EmitPop(code);
+  Register result = frame_->GetTOSRegister();
+  Register code = frame_->PopToRegister(result);

   DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
       code, result);
@@ -4679,23 +4678,20 @@ class DeferredStringCharAt : public DeferredCode {
 // This generates code that performs a String.prototype.charAt() call
 // or returns a smi in order to trigger conversion.
 void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment(masm_, "[ GenerateStringCharAt");
   ASSERT(args->length() == 2);

   Load(args->at(0));
   Load(args->at(1));

-  Register index = r1;
-  Register object = r2;
-
-  frame_->EmitPop(r1);
-  frame_->EmitPop(r2);
+  Register index = frame_->PopToRegister();
+  Register object = frame_->PopToRegister(index);

   // We need three extra registers.
-  Register scratch1 = r3;
-  Register scratch2 = r4;
-  Register result = r0;
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  // Use r6 without notifying the virtual frame.
+  Register result = r6;

   DeferredStringCharAt* deferred =
       new DeferredStringCharAt(object,
@@ -4874,13 +4870,13 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {


 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   ASSERT(args->length() == 1);

   // Satisfy contract with ArgumentsAccessStub:
   // Load the key into r1 and the formal parameters count into r0.
   Load(args->at(0));
-  frame_->EmitPop(r1);
+  frame_->PopToR1();
+  frame_->SpillAll();
   __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));

   // Call the shared stub to get to arguments[key].
@@ -5108,9 +5104,7 @@ class DeferredSearchCache: public DeferredCode {
 void DeferredSearchCache::Generate() {
   __ Push(cache_, key_);
   __ CallRuntime(Runtime::kGetFromCache, 2);
-  if (!dst_.is(r0)) {
-    __ mov(dst_, r0);
-  }
+  __ Move(dst_, r0);
 }


@@ -5130,33 +5124,42 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {

   Load(args->at(1));

-  VirtualFrame::SpilledScope spilled_scope(frame_);
+  frame_->PopToR1();
+  frame_->SpillAll();
+  Register key = r1;  // Just poped to r1
+  Register result = r0;  // Free, as frame has just been spilled.
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();

-  frame_->EmitPop(r2);
+  __ ldr(scratch1, ContextOperand(cp, Context::GLOBAL_INDEX));
+  __ ldr(scratch1,
+         FieldMemOperand(scratch1, GlobalObject::kGlobalContextOffset));
+  __ ldr(scratch1,
+         ContextOperand(scratch1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ ldr(scratch1,
+         FieldMemOperand(scratch1, FixedArray::OffsetOfElementAt(cache_id)));

-  __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
-  __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalContextOffset));
-  __ ldr(r1, ContextOperand(r1, Context::JSFUNCTION_RESULT_CACHES_INDEX));
-  __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(cache_id)));
-
-  DeferredSearchCache* deferred = new DeferredSearchCache(r0, r1, r2);
+  DeferredSearchCache* deferred =
+      new DeferredSearchCache(result, scratch1, key);

   const int kFingerOffset =
       FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
   STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
-  // r0 now holds finger offset as a smi.
-  __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // r3 now points to the start of fixed array elements.
-  __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
-  // Note side effect of PreIndex: r3 now points to the key of the pair.
-  __ cmp(r2, r0);
+  __ ldr(result, FieldMemOperand(scratch1, kFingerOffset));
+  // result now holds finger offset as a smi.
+  __ add(scratch2, scratch1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // scratch2 now points to the start of fixed array elements.
+  __ ldr(result,
+         MemOperand(
+             scratch2, result, LSL, kPointerSizeLog2 - kSmiTagSize, PreIndex));
+  // Note side effect of PreIndex: scratch2 now points to the key of the pair.
+  __ cmp(key, result);
   deferred->Branch(ne);

-  __ ldr(r0, MemOperand(r3, kPointerSize));
+  __ ldr(result, MemOperand(scratch2, kPointerSize));

   deferred->BindExit();
-  frame_->EmitPush(r0);
+  frame_->EmitPush(result);
 }


@@ -6851,6 +6854,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
   __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));

+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
+  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeOffset));
+
   // Return result. The argument function info has been popped already.
   __ Ret();

@@ -10444,11 +10452,9 @@ void StringCharCodeAtGenerator::GenerateSlow(
     // NumberToSmi discards numbers that are not exact integers.
     __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
-  if (!scratch_.is(r0)) {
-    // Save the conversion result before the pop instructions below
-    // have a chance to overwrite it.
-    __ mov(scratch_, r0);
-  }
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+  __ Move(scratch_, r0);
   __ pop(index_);
   __ pop(object_);
   // Reload the instance type.
@@ -10467,9 +10473,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
   call_helper.BeforeCall(masm);
   __ Push(object_, index_);
   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
-  if (!result_.is(r0)) {
-    __ mov(result_, r0);
-  }
+  __ Move(result_, r0);
   call_helper.AfterCall(masm);
   __ jmp(&exit_);

@@ -10510,9 +10514,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
   call_helper.BeforeCall(masm);
   __ push(code_);
   __ CallRuntime(Runtime::kCharFromCode, 1);
-  if (!result_.is(r0)) {
-    __ mov(result_, r0);
-  }
+  __ Move(result_, r0);
   call_helper.AfterCall(masm);
   __ jmp(&exit_);

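A pattern note for this file: throughout these hunks, fixed-register sequences like

    frame_->EmitPop(r0);
    __ tst(r0, Operand(kSmiTagMask));

become virtual-frame-allocated sequences like

    Register tos = frame_->PopToRegister();
    __ tst(tos, Operand(kSmiTagMask));

so the virtual frame picks the register (and a VirtualFrame::scratch0()/scratch1() pair replaces hard-coded r3/r4), avoiding the forced spills that VirtualFrame::SpilledScope used to impose. The snippets above are taken from the diff itself; the surrounding setup is omitted.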
deps/v8/src/arm/macro-assembler-arm.cc | 5 (vendored)

@@ -757,7 +757,7 @@ void MacroAssembler::InvokeFunction(Register fun,
                           SharedFunctionInfo::kFormalParameterCountOffset));
   mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
   ldr(code_reg,
-      MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+      MemOperand(r1, JSFunction::kCodeOffset - kHeapObjectTag));
   add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

   ParameterCount expected(expected_reg);
@@ -1508,8 +1508,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   // Make sure the code objects in the builtins object and in the
   // builtin function are the same.
   push(r1);
-  ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCodeOffset));
+  ldr(r1, FieldMemOperand(r1, JSFunction::kCodeOffset));
   cmp(r1, target);
   Assert(eq, "Builtin code object changed");
   pop(r1);
deps/v8/src/bootstrapper.cc | 5 (vendored)

@@ -56,7 +56,7 @@ class SourceCodeCache BASE_EMBEDDED {
   }

   void Iterate(ObjectVisitor* v) {
-    v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+    v->VisitPointer(BitCast<Object**>(&cache_));
   }


@@ -470,6 +470,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction() {
   Handle<Code> code =
       Handle<Code>(Builtins::builtin(Builtins::EmptyFunction));
   empty_function->set_code(*code);
+  empty_function->shared()->set_code(*code);
   Handle<String> source = Factory::NewStringFromAscii(CStrVector("() {}"));
   Handle<Script> script = Factory::NewScript(source);
   script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@@ -1545,6 +1546,8 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
     Handle<SharedFunctionInfo> shared
         = Handle<SharedFunctionInfo>(function->shared());
     if (!EnsureCompiled(shared, CLEAR_EXCEPTION)) return false;
+    // Set the code object on the function object.
+    function->set_code(function->shared()->code());
     builtins->set_javascript_builtin_code(id, shared->code());
   }
   return true;
deps/v8/src/checks.h | 10 (vendored)

@@ -280,14 +280,13 @@ template <int> class StaticAssertionHelper { };


 // The ASSERT macro is equivalent to CHECK except that it only
-// generates code in debug builds. Ditto STATIC_ASSERT.
+// generates code in debug builds.
 #ifdef DEBUG
 #define ASSERT_RESULT(expr)  CHECK(expr)
 #define ASSERT(condition)    CHECK(condition)
 #define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
 #define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
 #define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
-#define STATIC_ASSERT(test)  STATIC_CHECK(test)
 #define SLOW_ASSERT(condition) if (FLAG_enable_slow_asserts) CHECK(condition)
 #else
 #define ASSERT_RESULT(expr)     (expr)
@@ -295,9 +294,14 @@ template <int> class StaticAssertionHelper { };
 #define ASSERT_EQ(v1, v2)      ((void) 0)
 #define ASSERT_NE(v1, v2)      ((void) 0)
 #define ASSERT_GE(v1, v2)      ((void) 0)
-#define STATIC_ASSERT(test)    ((void) 0)
 #define SLOW_ASSERT(condition) ((void) 0)
 #endif
+// Static asserts has no impact on runtime performance, so they can be
+// safely enabled in release mode. Moreover, the ((void) 0) expression
+// obeys different syntax rules than typedef's, e.g. it can't appear
+// inside class declaration, this leads to inconsistency between debug
+// and release compilation modes behaviour.
+#define STATIC_ASSERT(test)  STATIC_CHECK(test)
+

 #define ASSERT_TAG_ALIGNED(address) \
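The effect is that STATIC_ASSERT now expands to STATIC_CHECK(test) in release builds too, instead of degenerating to ((void) 0). A minimal sketch of what that buys, reusing a check that already appears in the codegen-arm.cc hunk above:

    // Fails at compile time in both debug and release builds if the
    // Smi tagging scheme ever changes.
    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);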
deps/v8/src/codegen.cc | 15 (vendored)

@@ -77,11 +77,20 @@ void CodeGenerator::ProcessDeferred() {
     // Generate the code.
     Comment cmnt(masm_, code->comment());
     masm_->bind(code->entry_label());
+    if (code->AutoSaveAndRestore()) {
       code->SaveRegisters();
-      code->Generate();
-      code->RestoreRegisters();
-      masm_->jmp(code->exit_label());
     }
+    code->Generate();
+    if (code->AutoSaveAndRestore()) {
+      code->RestoreRegisters();
+      code->Exit();
+    }
+  }
+}
+
+
+void DeferredCode::Exit() {
+  masm_->jmp(exit_label());
 }

deps/v8/src/codegen.h | 9 (vendored)

@@ -319,6 +319,15 @@ class DeferredCode: public ZoneObject {

   void SaveRegisters();
   void RestoreRegisters();
+  void Exit();
+
+  // If this returns true then all registers will be saved for the duration
+  // of the Generate() call. Otherwise the registers are not saved and the
+  // Generate() call must bracket runtime any runtime calls with calls to
+  // SaveRegisters() and RestoreRegisters. In this case the Generate
+  // method must also call Exit() in order to return to the non-deferred
+  // code.
+  virtual bool AutoSaveAndRestore() { return true; }

  protected:
   MacroAssembler* masm_;
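A deferred-code stub that wants to manage registers itself would override the new hook. A minimal sketch of the intended shape (DeferredExample and its body are illustrative, not code from this commit):

    class DeferredExample : public DeferredCode {
     public:
      virtual void Generate() {
        SaveRegisters();     // no longer done automatically for this stub
        // ... emit the runtime call here ...
        RestoreRegisters();
        Exit();              // jump back to the non-deferred code
      }
      // Opt out of the automatic save/restore/jump bracketing.
      virtual bool AutoSaveAndRestore() { return false; }
    };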
11
deps/v8/src/cpu-profiler.cc
vendored
11
deps/v8/src/cpu-profiler.cc
vendored
@ -476,7 +476,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
|
|||||||
|
|
||||||
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
|
CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
|
||||||
const double actual_sampling_rate = generator_->actual_sampling_rate();
|
const double actual_sampling_rate = generator_->actual_sampling_rate();
|
||||||
StopProcessorIfLastProfile();
|
StopProcessorIfLastProfile(title);
|
||||||
CpuProfile* result =
|
CpuProfile* result =
|
||||||
profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
|
profiles_->StopProfiling(TokenEnumerator::kNoSecurityToken,
|
||||||
title,
|
title,
|
||||||
@ -491,14 +491,15 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) {
|
|||||||
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
|
CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
|
||||||
String* title) {
|
String* title) {
|
||||||
const double actual_sampling_rate = generator_->actual_sampling_rate();
|
const double actual_sampling_rate = generator_->actual_sampling_rate();
|
||||||
StopProcessorIfLastProfile();
|
const char* profile_title = profiles_->GetName(title);
|
||||||
|
StopProcessorIfLastProfile(profile_title);
|
||||||
int token = token_enumerator_->GetTokenId(security_token);
|
int token = token_enumerator_->GetTokenId(security_token);
|
||||||
return profiles_->StopProfiling(token, title, actual_sampling_rate);
|
return profiles_->StopProfiling(token, profile_title, actual_sampling_rate);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void CpuProfiler::StopProcessorIfLastProfile() {
|
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
|
||||||
if (profiles_->is_last_profile()) {
|
if (profiles_->IsLastProfile(title)) {
|
||||||
reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
|
reinterpret_cast<Sampler*>(Logger::ticker_)->Stop();
|
||||||
processor_->Stop();
|
processor_->Stop();
|
||||||
processor_->Join();
|
processor_->Join();
|
||||||
2 deps/v8/src/cpu-profiler.h vendored
@@ -260,7 +260,7 @@ class CpuProfiler {
   void StartProcessorIfNotStarted();
   CpuProfile* StopCollectingProfile(const char* title);
   CpuProfile* StopCollectingProfile(Object* security_token, String* title);
-  void StopProcessorIfLastProfile();
+  void StopProcessorIfLastProfile(const char* title);
 
   CpuProfilesCollection* profiles_;
   unsigned next_profile_uid_;
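Together these two changes implement the ChangeLog fix for http://crbug.com/51594: stopping a profile by a title that was never started must no longer tear down the sampler that a running profile still depends on. A usage sketch against the public API in v8-profiler.h (assumes a HandleScope and an entered context, omitted here):

    using namespace v8;
    CpuProfiler::StartProfiling(String::New("render"));
    // Stopping an unknown title is now harmless to the running profile:
    const CpuProfile* bogus = CpuProfiler::StopProfiling(String::New("nope"));
    const CpuProfile* render = CpuProfiler::StopProfiling(String::New("render"));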
4 deps/v8/src/debug.cc vendored
@@ -852,8 +852,8 @@ void Debug::PreemptionWhileInDebugger() {
 
 
 void Debug::Iterate(ObjectVisitor* v) {
-  v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_return_)));
-  v->VisitPointer(BitCast<Object**, Code**>(&(debug_break_slot_)));
+  v->VisitPointer(BitCast<Object**>(&(debug_break_return_)));
+  v->VisitPointer(BitCast<Object**>(&(debug_break_slot_)));
 }
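This is the first of several call sites (factory.h, handles-inl.h, heap.cc and the MIPS simulator below) where the second template argument of BitCast is dropped so that the source type is deduced; that rework is part of what allows building with strict aliasing on GCC 4.4 (ChangeLog issue 463). A minimal sketch of the idiom, assuming the usual size-checked memcpy implementation:

    #include <cstring>

    // Sketch: callers write BitCast<Dest>(value) and Source is deduced, so a
    // mismatched explicit pair can no longer creep in. Copying through memcpy
    // also avoids the aliasing violations of a reinterpret_cast-based cast.
    template <class Dest, class Source>
    inline Dest BitCast(const Source& source) {
      // A real version statically asserts sizeof(Dest) == sizeof(Source).
      Dest dest;
      memcpy(&dest, &source, sizeof(dest));
      return dest;
    }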
10 deps/v8/src/factory.cc vendored
@@ -486,6 +486,10 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
                                         bool force_initial_map) {
   // Allocate the function
   Handle<JSFunction> function = NewFunction(name, the_hole_value());
+
+  // Setup the code pointer in both the shared function info and in
+  // the function itself.
+  function->shared()->set_code(*code);
   function->set_code(*code);
 
   if (force_initial_map ||
@@ -511,9 +515,12 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
                                                      Handle<JSObject> prototype,
                                                      Handle<Code> code,
                                                      bool force_initial_map) {
-  // Allocate the function
+  // Allocate the function.
   Handle<JSFunction> function = NewFunction(name, prototype);
+
+  // Setup the code pointer in both the shared function info and in
+  // the function itself.
+  function->shared()->set_code(*code);
   function->set_code(*code);
 
   if (force_initial_map ||
@@ -535,6 +542,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
 Handle<JSFunction> Factory::NewFunctionWithoutPrototype(Handle<String> name,
                                                         Handle<Code> code) {
   Handle<JSFunction> function = NewFunctionWithoutPrototype(name);
+  function->shared()->set_code(*code);
   function->set_code(*code);
   ASSERT(!function->has_initial_map());
   ASSERT(!function->has_prototype());
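These factory changes set up the central invariant of this patch: a JSFunction now carries its own code pointer next to the one on its SharedFunctionInfo, so code flushing (heap.cc below) can reset one without the other. An illustrative check of the state right after creation (a hypothetical helper, not part of the patch):

    // Immediately after Factory::NewFunction* the closure and its shared
    // info agree; flushing is what later makes the two pointers diverge.
    static void CheckFreshFunction(JSFunction* function) {
      ASSERT(function->code() == function->shared()->code());
    }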
4 deps/v8/src/factory.h vendored
@@ -329,7 +329,7 @@ class Factory : public AllStatic {
 
 #define ROOT_ACCESSOR(type, name, camel_name)                  \
   static inline Handle<type> name() {                          \
-    return Handle<type>(BitCast<type**, Object**>(             \
+    return Handle<type>(BitCast<type**>(                       \
         &Heap::roots_[Heap::k##camel_name##RootIndex]));       \
   }
   ROOT_LIST(ROOT_ACCESSOR)
@@ -337,7 +337,7 @@ class Factory : public AllStatic {
 
 #define SYMBOL_ACCESSOR(name, str)                             \
   static inline Handle<String> name() {                        \
-    return Handle<String>(BitCast<String**, Object**>(         \
+    return Handle<String>(BitCast<String**>(                   \
        &Heap::roots_[Heap::k##name##RootIndex]));              \
   }
   SYMBOL_LIST(SYMBOL_ACCESSOR)
2 deps/v8/src/handles-inl.h vendored
@@ -47,7 +47,7 @@ template <class T>
 inline T* Handle<T>::operator*() const {
   ASSERT(location_ != NULL);
   ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
-  return *location_;
+  return *BitCast<T**>(location_);
 }
10 deps/v8/src/handles.cc vendored
@@ -771,20 +771,30 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
 bool CompileLazy(Handle<JSFunction> function,
                  Handle<Object> receiver,
                  ClearExceptionFlag flag) {
+  if (function->shared()->is_compiled()) {
+    function->set_code(function->shared()->code());
+    return true;
+  } else {
     CompilationInfo info(function, 0, receiver);
     bool result = CompileLazyHelper(&info, flag);
     PROFILE(FunctionCreateEvent(*function));
     return result;
+  }
 }
 
 
 bool CompileLazyInLoop(Handle<JSFunction> function,
                        Handle<Object> receiver,
                        ClearExceptionFlag flag) {
+  if (function->shared()->is_compiled()) {
+    function->set_code(function->shared()->code());
+    return true;
+  } else {
     CompilationInfo info(function, 1, receiver);
     bool result = CompileLazyHelper(&info, flag);
     PROFILE(FunctionCreateEvent(*function));
     return result;
+  }
 }
6 deps/v8/src/heap-profiler.cc vendored
@@ -111,10 +111,10 @@ int Clusterizer::CalculateNetworkSize(JSObject* obj) {
   int size = obj->Size();
   // If 'properties' and 'elements' are non-empty (thus, non-shared),
   // take their size into account.
-  if (FixedArray::cast(obj->properties())->length() != 0) {
+  if (obj->properties() != Heap::empty_fixed_array()) {
     size += obj->properties()->Size();
   }
-  if (FixedArray::cast(obj->elements())->length() != 0) {
+  if (obj->elements() != Heap::empty_fixed_array()) {
     size += obj->elements()->Size();
   }
   // For functions, also account non-empty context and literals sizes.
@@ -360,7 +360,7 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(String* name) {
 
 
 HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name) {
-  Heap::CollectAllGarbage(false);
+  Heap::CollectAllGarbage(true);
   HeapSnapshot* result = snapshots_->NewSnapshot(name, next_snapshot_uid_++);
   HeapSnapshotGenerator generator(result);
   generator.GenerateSnapshot();
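The CalculateNetworkSize fix (ChangeLog issue 822) replaces a cast-then-length test with a pointer-identity test: every empty JSObject shares one canonical empty fixed array, and the elements store need not be a plain FixedArray in the first place (pixel and external arrays existed by this release), which presumably made the cast the fragile part. A sketch of the test now applied to both stores:

    // Sketch: "owns a backing store" == "is not the shared empty array".
    static bool OwnsBackingStore(HeapObject* store) {
      return store != Heap::empty_fixed_array();
    }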
82 deps/v8/src/heap.cc vendored
@@ -2452,39 +2452,62 @@ class FlushingStackVisitor : public ThreadVisitor {
 };
 
 
-static void FlushCodeForFunction(SharedFunctionInfo* function_info) {
-  // The function must be compiled and have the source code available,
-  // to be able to recompile it in case we need the function again.
-  if (!(function_info->is_compiled() && function_info->HasSourceCode())) return;
-
-  // We never flush code for Api functions.
-  if (function_info->IsApiFunction()) return;
-
-  // Only flush code for functions.
-  if (!function_info->code()->kind() == Code::FUNCTION) return;
-
-  // Function must be lazy compilable.
-  if (!function_info->allows_lazy_compilation()) return;
-
-  // If this is a full script wrapped in a function we do not flush the code.
-  if (function_info->is_toplevel()) return;
-
-  // If this function is in the compilation cache we do not flush the code.
-  if (CompilationCache::HasFunction(function_info)) return;
-
+static bool CodeIsActive(Code* code) {
   // Make sure we are not referencing the code from the stack.
   for (StackFrameIterator it; !it.done(); it.Advance()) {
-    if (function_info->code()->contains(it.frame()->pc())) return;
+    if (code->contains(it.frame()->pc())) return true;
   }
   // Iterate the archived stacks in all threads to check if
   // the code is referenced.
-  FlushingStackVisitor threadvisitor(function_info->code());
+  FlushingStackVisitor threadvisitor(code);
   ThreadManager::IterateArchivedThreads(&threadvisitor);
-  if (threadvisitor.FoundCode()) return;
+  if (threadvisitor.FoundCode()) return true;
+  return false;
+}
+
+
+static void FlushCodeForFunction(JSFunction* function) {
+  SharedFunctionInfo* shared_info = function->shared();
+
+  // Special handling if the function and shared info objects
+  // have different code objects.
+  if (function->code() != shared_info->code()) {
+    // If the shared function has been flushed but the function has not,
+    // we flush the function if possible.
+    if (!shared_info->is_compiled() && function->is_compiled() &&
+        !CodeIsActive(function->code())) {
+      function->set_code(shared_info->code());
+    }
+    return;
+  }
+
+  // The function must be compiled and have the source code available,
+  // to be able to recompile it in case we need the function again.
+  if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return;
+
+  // We never flush code for Api functions.
+  if (shared_info->IsApiFunction()) return;
+
+  // Only flush code for functions.
+  if (!shared_info->code()->kind() == Code::FUNCTION) return;
+
+  // Function must be lazy compilable.
+  if (!shared_info->allows_lazy_compilation()) return;
+
+  // If this is a full script wrapped in a function we do not flush the code.
+  if (shared_info->is_toplevel()) return;
+
+  // If this function is in the compilation cache we do not flush the code.
+  if (CompilationCache::HasFunction(shared_info)) return;
+
+  // Check stack and archived threads for the code.
+  if (CodeIsActive(shared_info->code())) return;
 
   // Compute the lazy compilable version of the code.
   HandleScope scope;
-  function_info->set_code(*ComputeLazyCompile(function_info->length()));
+  Code* code = *ComputeLazyCompile(shared_info->length());
+  shared_info->set_code(code);
+  function->set_code(code);
 }
@@ -2496,12 +2519,12 @@ void Heap::FlushCode() {
   HeapObjectIterator it(old_pointer_space());
   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     if (obj->IsJSFunction()) {
-      JSFunction* jsfunction = JSFunction::cast(obj);
+      JSFunction* function = JSFunction::cast(obj);
+
       // The function must have a valid context and not be a builtin.
-      if (jsfunction->unchecked_context()->IsContext() &&
-          !jsfunction->IsBuiltin()) {
-        FlushCodeForFunction(jsfunction->shared());
+      if (function->unchecked_context()->IsContext() &&
+          !function->IsBuiltin()) {
+        FlushCodeForFunction(function);
       }
     }
   }
@@ -2651,6 +2674,7 @@ Object* Heap::InitializeFunction(JSFunction* function,
   function->initialize_properties();
   function->initialize_elements();
   function->set_shared(shared);
+  function->set_code(shared->code());
   function->set_prototype_or_initial_map(prototype);
   function->set_context(undefined_value());
   function->set_literals(empty_fixed_array());
@@ -4000,7 +4024,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize("strong_root_list");
 
-  v->VisitPointer(BitCast<Object**, String**>(&hidden_symbol_));
+  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
   v->Synchronize("symbol");
 
   Bootstrapper::Iterate(v);
15 deps/v8/src/ia32/assembler-ia32.cc vendored
@@ -1142,6 +1142,21 @@ void Assembler::rcl(Register dst, uint8_t imm8) {
 }
 
 
+void Assembler::rcr(Register dst, uint8_t imm8) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  ASSERT(is_uint5(imm8));  // illegal shift count
+  if (imm8 == 1) {
+    EMIT(0xD1);
+    EMIT(0xD8 | dst.code());
+  } else {
+    EMIT(0xC1);
+    EMIT(0xD8 | dst.code());
+    EMIT(imm8);
+  }
+}
+
+
 void Assembler::sar(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   last_pc_ = pc_;
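rcr (rotate right through carry) is added for GenerateAnswerOutOfRange in codegen-ia32.cc below, which uses it to capture the high bit of an untagged result. The byte pattern follows the standard IA-32 group-2 shift encoding; a small sketch of the ModR/M computation the EMIT calls perform:

    #include <cstdint>

    // Sketch: register-direct ModR/M byte for RCR, i.e. mod=11, /3, rm=reg.
    uint8_t RcrModRM(uint8_t reg_code) {
      return 0xD8 | reg_code;  // bits 11 011 rrr
    }
    // rcr(eax, 1) emits D1 D8 (RCR r/m32, 1); rcr(ebx, 3) emits C1 DB 03.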
1 deps/v8/src/ia32/assembler-ia32.h vendored
@@ -625,6 +625,7 @@ class Assembler : public Malloced {
   void or_(const Operand& dst, const Immediate& x);
 
   void rcl(Register dst, uint8_t imm8);
+  void rcr(Register dst, uint8_t imm8);
 
   void sar(Register dst, uint8_t imm8);
   void sar_cl(Register dst);
2 deps/v8/src/ia32/builtins-ia32.cc vendored
@@ -548,7 +548,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
     __ mov(ebx,
            FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
     __ SmiUntag(ebx);
-    __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+    __ mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
     __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
     __ cmp(eax, Operand(ebx));
     __ j(not_equal, Handle<Code>(builtin(ArgumentsAdaptorTrampoline)));
396 deps/v8/src/ia32/codegen-ia32.cc vendored
@@ -1038,7 +1038,11 @@ const char* GenericBinaryOpStub::GetName() {
 }
 
 
-// Call the specialized stub for a binary operation.
+// Perform or call the specialized stub for a binary operation.  Requires the
+// three registers left, right and dst to be distinct and spilled.  This
+// deferred operation has up to three entry points:  The main one calls the
+// runtime system.  The second is for when the result is a non-Smi.  The
+// third is for when at least one of the inputs is non-Smi and we have SSE2.
 class DeferredInlineBinaryOperation: public DeferredCode {
  public:
   DeferredInlineBinaryOperation(Token::Value op,
@@ -1051,11 +1055,23 @@ class DeferredInlineBinaryOperation: public DeferredCode {
       : op_(op), dst_(dst), left_(left), right_(right),
         left_info_(left_info), right_info_(right_info), mode_(mode) {
     set_comment("[ DeferredInlineBinaryOperation");
+    ASSERT(!left.is(right));
   }
 
   virtual void Generate();
 
+  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+  // Exit().
+  virtual bool AutoSaveAndRestore() { return false; }
+
+  void JumpToAnswerOutOfRange(Condition cond);
+  void JumpToConstantRhs(Condition cond, Smi* smi_value);
+  Label* NonSmiInputLabel();
+
  private:
+  void GenerateAnswerOutOfRange();
+  void GenerateNonSmiInput();
+
   Token::Value op_;
   Register dst_;
   Register left_;
@@ -1063,13 +1079,40 @@ class DeferredInlineBinaryOperation: public DeferredCode {
   TypeInfo left_info_;
   TypeInfo right_info_;
   OverwriteMode mode_;
+  Label answer_out_of_range_;
+  Label non_smi_input_;
+  Label constant_rhs_;
+  Smi* smi_value_;
 };
 
 
+Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
+  if (Token::IsBitOp(op_) && CpuFeatures::IsSupported(SSE2)) {
+    return &non_smi_input_;
+  } else {
+    return entry_label();
+  }
+}
+
+
+void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
+  __ j(cond, &answer_out_of_range_);
+}
+
+
+void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
+                                                      Smi* smi_value) {
+  smi_value_ = smi_value;
+  __ j(cond, &constant_rhs_);
+}
+
+
 void DeferredInlineBinaryOperation::Generate() {
-  Label done;
-  if (CpuFeatures::IsSupported(SSE2) && ((op_ == Token::ADD) ||
-      (op_ ==Token::SUB) ||
+  // Registers are not saved implicitly for this stub, so we should not
+  // tread on the registers that were not passed to us.
+  if (CpuFeatures::IsSupported(SSE2) &&
+      ((op_ == Token::ADD) ||
+       (op_ == Token::SUB) ||
       (op_ == Token::MUL) ||
       (op_ == Token::DIV))) {
     CpuFeatures::Scope use_sse2(SSE2);
@@ -1131,7 +1174,6 @@ void DeferredInlineBinaryOperation::Generate() {
       __ cvtsi2sd(xmm1, Operand(right_));
       __ SmiTag(right_);
       if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
-        Label alloc_failure;
         __ push(left_);
         __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
         __ pop(left_);
@@ -1146,19 +1188,200 @@ void DeferredInlineBinaryOperation::Generate() {
       default: UNREACHABLE();
     }
     __ movdbl(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
-    __ jmp(&done);
+    Exit();
+
+
     __ bind(&after_alloc_failure);
     __ pop(left_);
     __ bind(&call_runtime);
   }
+  // Register spilling is not done implicitly for this stub.
+  // We can't postpone it any more now though.
+  SaveRegisters();
+
   GenericBinaryOpStub stub(op_,
                            mode_,
                            NO_SMI_CODE_IN_STUB,
                            TypeInfo::Combine(left_info_, right_info_));
   stub.GenerateCall(masm_, left_, right_);
   if (!dst_.is(eax)) __ mov(dst_, eax);
-  __ bind(&done);
+  RestoreRegisters();
+  Exit();
+
+  if (non_smi_input_.is_linked() || constant_rhs_.is_linked()) {
+    GenerateNonSmiInput();
+  }
+  if (answer_out_of_range_.is_linked()) {
+    GenerateAnswerOutOfRange();
+  }
+}
+
+
+void DeferredInlineBinaryOperation::GenerateNonSmiInput() {
+  // We know at least one of the inputs was not a Smi.
+  // This is a third entry point into the deferred code.
+  // We may not overwrite left_ because we want to be able
+  // to call the handling code for non-smi answer and it
+  // might want to overwrite the heap number in left_.
+  ASSERT(!right_.is(dst_));
+  ASSERT(!left_.is(dst_));
+  ASSERT(!left_.is(right_));
+  // This entry point is used for bit ops where the right hand side
+  // is a constant Smi and the left hand side is a heap object.  It
+  // is also used for bit ops where both sides are unknown, but where
+  // at least one of them is a heap object.
+  bool rhs_is_constant = constant_rhs_.is_linked();
+  // We can't generate code for both cases.
+  ASSERT(!non_smi_input_.is_linked() || !constant_rhs_.is_linked());
+
+  if (FLAG_debug_code) {
+    __ int3();  // We don't fall through into this code.
+  }
+
+  __ bind(&non_smi_input_);
+
+  if (rhs_is_constant) {
+    __ bind(&constant_rhs_);
+    // In this case the input is a heap object and it is in the dst_ register.
+    // The left_ and right_ registers have not been initialized yet.
+    __ mov(right_, Immediate(smi_value_));
+    __ mov(left_, Operand(dst_));
+    if (!CpuFeatures::IsSupported(SSE2)) {
+      __ jmp(entry_label());
+      return;
+    } else {
+      CpuFeatures::Scope use_sse2(SSE2);
+      __ JumpIfNotNumber(dst_, left_info_, entry_label());
+      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+      __ SmiUntag(right_);
+    }
+  } else {
+    // We know we have SSE2 here because otherwise the label is not linked (see
+    // NonSmiInputLabel).
+    CpuFeatures::Scope use_sse2(SSE2);
+    // Handle the non-constant right hand side situation:
+    if (left_info_.IsSmi()) {
+      // Right is a heap object.
+      __ JumpIfNotNumber(right_, right_info_, entry_label());
+      __ ConvertToInt32(right_, right_, dst_, left_info_, entry_label());
+      __ mov(dst_, Operand(left_));
+      __ SmiUntag(dst_);
+    } else if (right_info_.IsSmi()) {
+      // Left is a heap object.
+      __ JumpIfNotNumber(left_, left_info_, entry_label());
+      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+      __ SmiUntag(right_);
+    } else {
+      // Here we don't know if it's one or both that is a heap object.
+      Label only_right_is_heap_object, got_both;
+      __ mov(dst_, Operand(left_));
+      __ SmiUntag(dst_, &only_right_is_heap_object);
+      // Left was a heap object.
+      __ JumpIfNotNumber(left_, left_info_, entry_label());
+      __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
+      __ SmiUntag(right_, &got_both);
+      // Both were heap objects.
+      __ rcl(right_, 1);  // Put tag back.
+      __ JumpIfNotNumber(right_, right_info_, entry_label());
+      __ ConvertToInt32(right_, right_, no_reg, left_info_, entry_label());
+      __ jmp(&got_both);
+      __ bind(&only_right_is_heap_object);
+      __ JumpIfNotNumber(right_, right_info_, entry_label());
+      __ ConvertToInt32(right_, right_, no_reg, left_info_, entry_label());
+      __ bind(&got_both);
+    }
+  }
+  ASSERT(op_ == Token::BIT_AND ||
+         op_ == Token::BIT_OR ||
+         op_ == Token::BIT_XOR ||
+         right_.is(ecx));
+  switch (op_) {
+    case Token::BIT_AND: __ and_(dst_, Operand(right_));  break;
+    case Token::BIT_OR:  __ or_(dst_, Operand(right_));   break;
+    case Token::BIT_XOR: __ xor_(dst_, Operand(right_));  break;
+    case Token::SHR:     __ shr_cl(dst_);  break;
+    case Token::SAR:     __ sar_cl(dst_);  break;
+    case Token::SHL:     __ shl_cl(dst_);  break;
+    default: UNREACHABLE();
+  }
+  if (op_ == Token::SHR) {
+    // Check that the *unsigned* result fits in a smi.  Neither of
+    // the two high-order bits can be set:
+    //  * 0x80000000: high bit would be lost when smi tagging.
+    //  * 0x40000000: this number would convert to negative when smi
+    //    tagging.
+    __ test(dst_, Immediate(0xc0000000));
+    __ j(not_zero, &answer_out_of_range_);
+  } else {
+    // Check that the *signed* result fits in a smi.
+    __ cmp(dst_, 0xc0000000);
+    __ j(negative, &answer_out_of_range_);
+  }
+  __ SmiTag(dst_);
+  Exit();
+}
+
+
+void DeferredInlineBinaryOperation::GenerateAnswerOutOfRange() {
+  Label after_alloc_failure2;
+  Label allocation_ok;
+  __ bind(&after_alloc_failure2);
+  // We have to allocate a number, causing a GC, while keeping hold of
+  // the answer in dst_.  The answer is not a Smi.  We can't just call the
+  // runtime shift function here because we already threw away the inputs.
+  __ xor_(left_, Operand(left_));
+  __ shl(dst_, 1);   // Put top bit in carry flag and Smi tag the low bits.
+  __ rcr(left_, 1);  // Rotate with carry.
+  __ push(dst_);     // Smi tagged low 31 bits.
+  __ push(left_);    // 0 or 0x80000000, which is Smi tagged in both cases.
+  __ CallRuntime(Runtime::kNumberAlloc, 0);
+  if (!left_.is(eax)) {
+    __ mov(left_, eax);
+  }
+  __ pop(right_);   // High bit.
+  __ pop(dst_);     // Low 31 bits.
+  __ shr(dst_, 1);  // Put 0 in top bit.
+  __ or_(dst_, Operand(right_));
+  __ jmp(&allocation_ok);
+
+  // This is the second entry point to the deferred code.  It is used only by
+  // the bit operations.
+  // The dst_ register has the answer.  It is not Smi tagged.  If mode_ is
+  // OVERWRITE_LEFT then left_ must contain either an overwritable heap number
+  // or a Smi.
+  // Put a heap number pointer in left_.
+  __ bind(&answer_out_of_range_);
+  SaveRegisters();
+  if (mode_ == OVERWRITE_LEFT) {
+    __ test(left_, Immediate(kSmiTagMask));
+    __ j(not_zero, &allocation_ok);
+  }
+  // This trashes right_.
+  __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
+  __ bind(&allocation_ok);
+  if (CpuFeatures::IsSupported(SSE2) && op_ != Token::SHR) {
+    CpuFeatures::Scope use_sse2(SSE2);
+    ASSERT(Token::IsBitOp(op_));
+    // Signed conversion.
+    __ cvtsi2sd(xmm0, Operand(dst_));
+    __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
+  } else {
+    if (op_ == Token::SHR) {
+      __ push(Immediate(0));  // High word of unsigned value.
+      __ push(dst_);
+      __ fild_d(Operand(esp, 0));
+      __ Drop(2);
+    } else {
+      ASSERT(Token::IsBitOp(op_));
+      __ push(dst_);
+      __ fild_s(Operand(esp, 0));  // Signed conversion.
+      __ pop(dst_);
+    }
+    __ fstp_d(FieldOperand(left_, HeapNumber::kValueOffset));
+  }
+  __ mov(dst_, left_);
+  RestoreRegisters();
+  Exit();
 }
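GenerateAnswerOutOfRange must survive a GC-triggering allocation while holding a raw 32-bit integer that is not a valid Smi. The shl/rcr pair above splits it into two words that both look like Smis on the stack, and shr/or puts it back together afterwards. A host-side sketch of the arithmetic, assuming ia32's 31-bit Smis with a zero tag bit:

    #include <cstdint>

    void SplitAndRestore(uint32_t answer) {
      uint32_t low  = answer << 1;           // smi-tags the low 31 bits (shl)
      uint32_t high = answer & 0x80000000u;  // top bit alone, tag bit clear (rcr)
      // ...allocation may run a GC; both halves scan as smis, not pointers...
      uint32_t restored = (low >> 1) | high;
      // restored == answer
    }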
@@ -1499,10 +1722,25 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
                                                   TypeInfo left_info,
                                                   TypeInfo right_info,
                                                   DeferredCode* deferred) {
+  JumpIfNotBothSmiUsingTypeInfo(left,
+                                right,
+                                scratch,
+                                left_info,
+                                right_info,
+                                deferred->entry_label());
+}
+
+
+void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
+                                                  Register right,
+                                                  Register scratch,
+                                                  TypeInfo left_info,
+                                                  TypeInfo right_info,
+                                                  Label* on_not_smi) {
   if (left.is(right)) {
     if (!left_info.IsSmi()) {
       __ test(left, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
     } else {
       if (FLAG_debug_code) __ AbortIfNotSmi(left);
     }
@@ -1511,17 +1749,17 @@ void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
       __ mov(scratch, left);
       __ or_(scratch, Operand(right));
       __ test(scratch, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
     } else {
       __ test(left, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
       if (FLAG_debug_code) __ AbortIfNotSmi(right);
     }
   } else {
     if (FLAG_debug_code) __ AbortIfNotSmi(left);
     if (!right_info.IsSmi()) {
       __ test(right, Immediate(kSmiTagMask));
-      deferred->Branch(not_zero);
+      __ j(not_zero, on_not_smi);
     } else {
       if (FLAG_debug_code) __ AbortIfNotSmi(right);
     }
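The label-taking overload keeps the classic combined check for the both-unknown case: because the smi tag occupies the low bit and is zero, OR-ing the two words lets a single TEST cover both operands. A standalone sketch of the predicate being emitted:

    #include <cstdint>

    // Assumes kSmiTag == 0 and kSmiTagMask == 1, as on ia32.
    bool BothAreSmis(uint32_t left, uint32_t right) {
      return ((left | right) & 1u) == 0;  // one TEST instead of two
    }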
@@ -1606,13 +1844,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
     right->ToRegister();
     frame_->Spill(eax);
     frame_->Spill(edx);
+    // DeferredInlineBinaryOperation requires all the registers that it is
+    // told about to be spilled and distinct.
+    Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
 
     // Check that left and right are smi tagged.
     DeferredInlineBinaryOperation* deferred =
         new DeferredInlineBinaryOperation(op,
                                           (op == Token::DIV) ? eax : edx,
                                           left->reg(),
-                                          right->reg(),
+                                          distinct_right.reg(),
                                           left_type_info,
                                           right_type_info,
                                           overwrite_mode);
@@ -1695,15 +1936,23 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
     left->ToRegister();
     ASSERT(left->is_register() && !left->reg().is(ecx));
     ASSERT(right->is_register() && right->reg().is(ecx));
+    if (left_type_info.IsSmi()) {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+    }
+    if (right_type_info.IsSmi()) {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+    }
 
     // We will modify right, it must be spilled.
     frame_->Spill(ecx);
+    // DeferredInlineBinaryOperation requires all the registers that it is told
+    // about to be spilled and distinct.  We know that right is ecx and left is
+    // not ecx.
+    frame_->Spill(left->reg());
 
     // Use a fresh answer register to avoid spilling the left operand.
     answer = allocator_->Allocate();
     ASSERT(answer.is_valid());
-    // Check that both operands are smis using the answer register as a
-    // temporary.
     DeferredInlineBinaryOperation* deferred =
         new DeferredInlineBinaryOperation(op,
                                           answer.reg(),
|
|||||||
left_type_info,
|
left_type_info,
|
||||||
right_type_info,
|
right_type_info,
|
||||||
overwrite_mode);
|
overwrite_mode);
|
||||||
|
|
||||||
Label do_op, left_nonsmi;
|
|
||||||
// If right is a smi we make a fast case if left is either a smi
|
|
||||||
// or a heapnumber.
|
|
||||||
if (CpuFeatures::IsSupported(SSE2) && right_type_info.IsSmi()) {
|
|
||||||
CpuFeatures::Scope use_sse2(SSE2);
|
|
||||||
__ mov(answer.reg(), left->reg());
|
|
||||||
// Fast case - both are actually smis.
|
|
||||||
if (!left_type_info.IsSmi()) {
|
|
||||||
__ test(answer.reg(), Immediate(kSmiTagMask));
|
|
||||||
__ j(not_zero, &left_nonsmi);
|
|
||||||
} else {
|
|
||||||
if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
|
|
||||||
}
|
|
||||||
if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
|
|
||||||
__ SmiUntag(answer.reg());
|
|
||||||
__ jmp(&do_op);
|
|
||||||
|
|
||||||
__ bind(&left_nonsmi);
|
|
||||||
// Branch if not a heapnumber.
|
|
||||||
__ cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
|
|
||||||
Factory::heap_number_map());
|
|
||||||
deferred->Branch(not_equal);
|
|
||||||
|
|
||||||
// Load integer value into answer register using truncation.
|
|
||||||
__ cvttsd2si(answer.reg(),
|
|
||||||
FieldOperand(answer.reg(), HeapNumber::kValueOffset));
|
|
||||||
// Branch if we do not fit in a smi.
|
|
||||||
__ cmp(answer.reg(), 0xc0000000);
|
|
||||||
deferred->Branch(negative);
|
|
||||||
} else {
|
|
||||||
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
|
JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
|
||||||
left_type_info, right_type_info, deferred);
|
left_type_info, right_type_info,
|
||||||
|
deferred->NonSmiInputLabel());
|
||||||
|
|
||||||
// Untag both operands.
|
// Untag both operands.
|
||||||
__ mov(answer.reg(), left->reg());
|
__ mov(answer.reg(), left->reg());
|
||||||
__ SmiUntag(answer.reg());
|
__ SmiUntag(answer.reg());
|
||||||
}
|
__ SmiUntag(right->reg()); // Right is ecx.
|
||||||
|
|
||||||
__ bind(&do_op);
|
|
||||||
__ SmiUntag(ecx);
|
|
||||||
// Perform the operation.
|
// Perform the operation.
|
||||||
|
ASSERT(right->reg().is(ecx));
|
||||||
switch (op) {
|
switch (op) {
|
||||||
case Token::SAR:
|
case Token::SAR: {
|
||||||
__ sar_cl(answer.reg());
|
__ sar_cl(answer.reg());
|
||||||
// No checks of result necessary
|
if (!left_type_info.IsSmi()) {
|
||||||
|
// Check that the *signed* result fits in a smi.
|
||||||
|
__ cmp(answer.reg(), 0xc0000000);
|
||||||
|
deferred->JumpToAnswerOutOfRange(negative);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
|
}
|
||||||
case Token::SHR: {
|
case Token::SHR: {
|
||||||
Label result_ok;
|
|
||||||
__ shr_cl(answer.reg());
|
__ shr_cl(answer.reg());
|
||||||
// Check that the *unsigned* result fits in a smi. Neither of
|
// Check that the *unsigned* result fits in a smi. Neither of
|
||||||
// the two high-order bits can be set:
|
// the two high-order bits can be set:
|
||||||
@@ -1773,21 +1995,14 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
         // case.  The low bit of the left argument may be lost, but only
         // in a case where it is dropped anyway.
         __ test(answer.reg(), Immediate(0xc0000000));
-        __ j(zero, &result_ok);
-        __ SmiTag(ecx);
-        deferred->Jump();
-        __ bind(&result_ok);
+        deferred->JumpToAnswerOutOfRange(not_zero);
         break;
       }
       case Token::SHL: {
-        Label result_ok;
         __ shl_cl(answer.reg());
         // Check that the *signed* result fits in a smi.
         __ cmp(answer.reg(), 0xc0000000);
-        __ j(positive, &result_ok);
-        __ SmiTag(ecx);
-        deferred->Jump();
-        __ bind(&result_ok);
+        deferred->JumpToAnswerOutOfRange(negative);
         break;
       }
       default:
@@ -1805,6 +2020,9 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
   // Handle the other binary operations.
   left->ToRegister();
   right->ToRegister();
+  // DeferredInlineBinaryOperation requires all the registers that it is told
+  // about to be spilled.
+  Result distinct_right = frame_->MakeDistinctAndSpilled(left, right);
   // A newly allocated register answer is used to hold the answer.  The
   // registers containing left and right are not modified so they don't
   // need to be spilled in the fast case.
@@ -1816,12 +2034,16 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
       new DeferredInlineBinaryOperation(op,
                                         answer.reg(),
                                         left->reg(),
-                                        right->reg(),
+                                        distinct_right.reg(),
                                         left_type_info,
                                         right_type_info,
                                         overwrite_mode);
+  Label non_smi_bit_op;
+  if (op != Token::BIT_OR) {
     JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), answer.reg(),
-                                  left_type_info, right_type_info, deferred);
+                                  left_type_info, right_type_info,
+                                  deferred->NonSmiInputLabel());
+  }
 
   __ mov(answer.reg(), left->reg());
   switch (op) {
@@ -1864,6 +2086,8 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
 
     case Token::BIT_OR:
       __ or_(answer.reg(), Operand(right->reg()));
+      __ test(answer.reg(), Immediate(kSmiTagMask));
+      __ j(not_zero, deferred->NonSmiInputLabel());
       break;
 
     case Token::BIT_AND:
@@ -1878,6 +2102,7 @@ Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
       UNREACHABLE();
       break;
   }
+
   deferred->BindExit();
   left->Unuse();
   right->Unuse();
@@ -2363,27 +2588,25 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
     case Token::BIT_XOR:
     case Token::BIT_AND: {
       operand->ToRegister();
+      // DeferredInlineBinaryOperation requires all the registers that it is
+      // told about to be spilled.
       frame_->Spill(operand->reg());
-      DeferredCode* deferred = NULL;
-      if (reversed) {
-        deferred =
-            new DeferredInlineSmiOperationReversed(op,
-                                                   operand->reg(),
-                                                   smi_value,
-                                                   operand->reg(),
-                                                   operand->type_info(),
-                                                   overwrite_mode);
-      } else {
-        deferred = new DeferredInlineSmiOperation(op,
-                                                  operand->reg(),
-                                                  operand->reg(),
-                                                  operand->type_info(),
-                                                  smi_value,
-                                                  overwrite_mode);
-      }
+      DeferredInlineBinaryOperation* deferred = NULL;
       if (!operand->type_info().IsSmi()) {
+        Result left = allocator()->Allocate();
+        ASSERT(left.is_valid());
+        Result right = allocator()->Allocate();
+        ASSERT(right.is_valid());
+        deferred = new DeferredInlineBinaryOperation(
+            op,
+            operand->reg(),
+            left.reg(),
+            right.reg(),
+            operand->type_info(),
+            TypeInfo::Smi(),
+            overwrite_mode == NO_OVERWRITE ? NO_OVERWRITE : OVERWRITE_LEFT);
         __ test(operand->reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
+        deferred->JumpToConstantRhs(not_zero, smi_value);
       } else if (FLAG_debug_code) {
         __ AbortIfNotSmi(operand->reg());
       }
@@ -2399,7 +2622,7 @@ Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
           __ or_(Operand(operand->reg()), Immediate(value));
         }
       }
-      deferred->BindExit();
+      if (deferred != NULL) deferred->BindExit();
       answer = *operand;
       break;
     }
@@ -3212,10 +3435,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
     __ j(zero, &build_args);
     __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
     __ j(not_equal, &build_args);
-    __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
     Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-    __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
-           Immediate(apply_code));
+    __ cmp(FieldOperand(eax, JSFunction::kCodeOffset), Immediate(apply_code));
     __ j(not_equal, &build_args);
 
     // Check that applicand is a function.
@@ -9467,6 +9688,11 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
   __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
 
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  __ mov(FieldOperand(eax, JSFunction::kCodeOffset), edx);
+
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
11 deps/v8/src/ia32/codegen-ia32.h vendored
@@ -530,7 +530,7 @@ class CodeGenerator: public AstVisitor {
 
   // Emits code sequence that jumps to deferred code if the inputs
   // are not both smis.  Cannot be in MacroAssembler because it takes
-  // advantage of TypeInfo to skip unneeded checks.
+  // a deferred code object.
   void JumpIfNotBothSmiUsingTypeInfo(Register left,
                                      Register right,
                                      Register scratch,
@@ -538,6 +538,15 @@ class CodeGenerator: public AstVisitor {
                                      TypeInfo right_info,
                                      DeferredCode* deferred);
 
+  // Emits code sequence that jumps to the label if the inputs
+  // are not both smis.
+  void JumpIfNotBothSmiUsingTypeInfo(Register left,
+                                     Register right,
+                                     Register scratch,
+                                     TypeInfo left_info,
+                                     TypeInfo right_info,
+                                     Label* on_non_smi);
+
   // If possible, combine two constant smi values using op to produce
   // a smi result, and push it on the virtual frame, all at compile time.
   // Returns true if it succeeds.  Otherwise it has no effect.
66 deps/v8/src/ia32/macro-assembler-ia32.cc vendored
@@ -377,6 +377,12 @@ void MacroAssembler::AbortIfNotSmi(Register object) {
 }
 
 
+void MacroAssembler::AbortIfSmi(Register object) {
+  test(object, Immediate(kSmiTagMask));
+  Assert(not_equal, "Operand a smi");
+}
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   push(ebp);
   mov(ebp, Operand(esp));
@@ -1292,7 +1298,7 @@ void MacroAssembler::InvokeFunction(Register fun,
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
   SmiUntag(ebx);
-  mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
+  mov(edx, FieldOperand(edi, JSFunction::kCodeOffset));
   lea(edx, FieldOperand(edx, Code::kHeaderSize));
 
   ParameterCount expected(ebx);
@@ -1344,8 +1350,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   // Make sure the code objects in the builtins object and in the
   // builtin function are the same.
   push(target);
-  mov(target, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
-  mov(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+  mov(target, FieldOperand(edi, JSFunction::kCodeOffset));
   cmp(target, Operand(esp, 0));
   Assert(equal, "Builtin code object changed");
   pop(target);
@@ -1510,6 +1515,61 @@ void MacroAssembler::Abort(const char* msg) {
 }
 
 
+void MacroAssembler::JumpIfNotNumber(Register reg,
+                                     TypeInfo info,
+                                     Label* on_not_number) {
+  if (FLAG_debug_code) AbortIfSmi(reg);
+  if (!info.IsNumber()) {
+    cmp(FieldOperand(reg, HeapObject::kMapOffset),
+        Factory::heap_number_map());
+    j(not_equal, on_not_number);
+  }
+}
+
+
+void MacroAssembler::ConvertToInt32(Register dst,
+                                    Register source,
+                                    Register scratch,
+                                    TypeInfo info,
+                                    Label* on_not_int32) {
+  if (FLAG_debug_code) {
+    AbortIfSmi(source);
+    AbortIfNotNumber(source);
+  }
+  if (info.IsInteger32()) {
+    cvttsd2si(dst, FieldOperand(source, HeapNumber::kValueOffset));
+  } else {
+    Label done;
+    bool push_pop = (scratch.is(no_reg) && dst.is(source));
+    ASSERT(!scratch.is(source));
+    if (push_pop) {
+      push(dst);
+      scratch = dst;
+    }
+    if (scratch.is(no_reg)) scratch = dst;
+    cvttsd2si(scratch, FieldOperand(source, HeapNumber::kValueOffset));
+    cmp(scratch, 0x80000000u);
+    if (push_pop || dst.is(source)) {
+      j(not_equal, &done);
+      if (push_pop) {
+        pop(dst);
+        jmp(on_not_int32);
+      }
+    } else {
+      j(equal, on_not_int32);
+    }
+
+    bind(&done);
+    if (push_pop) {
+      add(Operand(esp), Immediate(kPointerSize));  // Pop.
+    }
+    if (!scratch.is(dst)) {
+      mov(dst, scratch);
+    }
+  }
+}
+
+
 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
     Register instance_type,
     Register scratch,
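ConvertToInt32 compares the truncation result against 0x80000000 because cvttsd2si produces that single "integer indefinite" value for every out-of-range or NaN input, and also for the one legitimate input INT32_MIN; hence the header comment below that the helper also jumps on the min negative int32. A host-side sketch of the behaviour being detected:

    #include <cstdint>

    // Mirrors cvttsd2si: out-of-range and NaN inputs collapse to INT32_MIN.
    int32_t TruncateLikeCvttsd2si(double d) {
      if (d != d /* NaN */ || d >= 2147483648.0 || d < -2147483648.0) {
        return INT32_MIN;  // 0x80000000, the sentinel the code tests for
      }
      return static_cast<int32_t>(d);
    }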
33
deps/v8/src/ia32/macro-assembler-ia32.h
vendored
33
deps/v8/src/ia32/macro-assembler-ia32.h
vendored
@ -29,6 +29,7 @@
|
|||||||
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
|
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
|
||||||
|
|
||||||
#include "assembler.h"
|
#include "assembler.h"
|
||||||
|
#include "type-info.h"
|
||||||
|
|
||||||
namespace v8 {
|
namespace v8 {
|
||||||
namespace internal {
|
namespace internal {
|
||||||
@ -225,12 +226,44 @@ class MacroAssembler: public Assembler {
|
|||||||
sar(reg, kSmiTagSize);
|
sar(reg, kSmiTagSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Modifies the register even if it does not contain a Smi!
|
||||||
|
void SmiUntag(Register reg, TypeInfo info, Label* non_smi) {
|
||||||
|
ASSERT(kSmiTagSize == 1);
|
||||||
|
sar(reg, kSmiTagSize);
|
||||||
|
if (info.IsSmi()) {
|
||||||
|
ASSERT(kSmiTag == 0);
|
||||||
|
j(carry, non_smi);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Modifies the register even if it does not contain a Smi!
|
||||||
|
void SmiUntag(Register reg, Label* is_smi) {
|
||||||
|
ASSERT(kSmiTagSize == 1);
|
||||||
|
sar(reg, kSmiTagSize);
|
||||||
|
ASSERT(kSmiTag == 0);
|
||||||
|
j(not_carry, is_smi);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assumes input is a heap object.
|
||||||
|
void JumpIfNotNumber(Register reg, TypeInfo info, Label* on_not_number);
|
||||||
|
|
||||||
|
// Assumes input is a heap number. Jumps on things out of range. Also jumps
|
||||||
|
// on the min negative int32. Ignores frational parts.
|
||||||
|
void ConvertToInt32(Register dst,
|
||||||
|
Register src, // Can be the same as dst.
|
||||||
|
Register scratch, // Can be no_reg or dst, but not src.
|
||||||
|
TypeInfo info,
|
||||||
|
Label* on_not_int32);
|
||||||
|
|
||||||
// Abort execution if argument is not a number. Used in debug code.
|
// Abort execution if argument is not a number. Used in debug code.
|
||||||
void AbortIfNotNumber(Register object);
|
void AbortIfNotNumber(Register object);
|
||||||
|
|
||||||
// Abort execution if argument is not a smi. Used in debug code.
|
// Abort execution if argument is not a smi. Used in debug code.
|
||||||
void AbortIfNotSmi(Register object);
|
void AbortIfNotSmi(Register object);
|
||||||
|
|
||||||
|
// Abort execution if argument is a smi. Used in debug code.
|
||||||
|
void AbortIfSmi(Register object);
|
||||||
|
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// Exception handling
|
// Exception handling
|
||||||
|
|
||||||
|
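The SmiUntag overloads above fold the type check into the untagging shift: with kSmiTag == 0 and kSmiTagSize == 1, `sar reg, 1` moves the tag bit into the ia32 carry flag, so a single instruction both untags the value and classifies it. A standalone sketch of the underlying tagging arithmetic (illustrative C++, not V8 code; the carry-flag trick itself has no portable C++ equivalent):

#include <cassert>

const int kSmiTag = 0;      // tag value for small integers
const int kSmiTagSize = 1;  // the tag occupies the low bit

long SmiTag(long n) { return (n << kSmiTagSize) | kSmiTag; }
bool IsSmi(long v) { return (v & ((1 << kSmiTagSize) - 1)) == kSmiTag; }
long SmiUntag(long v) { return v >> kSmiTagSize; }

int main() {
  long tagged = SmiTag(42);
  assert(IsSmi(tagged));           // low bit is 0, so this is a smi
  assert(SmiUntag(tagged) == 42);  // the shift recovers the integer
  return 0;
}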
16 deps/v8/src/ia32/virtual-frame-ia32.h vendored
@@ -139,6 +139,22 @@ class VirtualFrame: public ZoneObject {
     if (is_used(reg)) SpillElementAt(register_location(reg));
   }
 
+  // Make the two registers distinct and spill them.  Returns the second
+  // register.  If the registers were not distinct then it returns the new
+  // second register.
+  Result MakeDistinctAndSpilled(Result* left, Result* right) {
+    Spill(left->reg());
+    Spill(right->reg());
+    if (left->reg().is(right->reg())) {
+      RegisterAllocator* allocator = cgen()->allocator();
+      Result fresh = allocator->Allocate();
+      ASSERT(fresh.is_valid());
+      masm()->mov(fresh.reg(), right->reg());
+      return fresh;
+    }
+    return *right;
+  }
+
   // Spill all occurrences of an arbitrary register if possible. Return the
   // register spilled or no_reg if it was not possible to free any register
   // (ie, they all have frame-external references).
7 deps/v8/src/list-inl.h vendored
@@ -126,6 +126,13 @@ void List<T, P>::Iterate(void (*callback)(T* x)) {
 }
 
 
+template<typename T, class P>
+template<class Visitor>
+void List<T, P>::Iterate(Visitor* visitor) {
+  for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
+}
+
+
 template<typename T, class P>
 bool List<T, P>::Contains(const T& elm) {
   for (int i = 0; i < length_; i++) {
2 deps/v8/src/list.h vendored
@@ -117,6 +117,8 @@ class List {
 
   // Iterate through all list entries, starting at index 0.
   void Iterate(void (*callback)(T* x));
+  template<class Visitor>
+  void Iterate(Visitor* visitor);
 
   // Sort all list entries (using QuickSort)
   void Sort(int (*cmp)(const T* x, const T* y));
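The new template overload generalizes Iterate from a bare function pointer to any object exposing an Apply(T*) method; the heap profiler relies on it below (HeapSnapshot::IterateEntries feeds each entry to a visitor such as NamedEntriesDetector in the tests). A minimal self-contained sketch of the pattern; MiniList is a hypothetical stand-in for v8::internal::List:

#include <cstdio>

template<typename T>
class MiniList {
 public:
  MiniList(T* data, int length) : data_(data), length_(length) { }
  // Mirrors the List::Iterate(Visitor*) added in this commit.
  template<class Visitor>
  void Iterate(Visitor* visitor) {
    for (int i = 0; i < length_; i++) visitor->Apply(&data_[i]);
  }
 private:
  T* data_;
  int length_;
};

struct SumVisitor {
  int sum;
  SumVisitor() : sum(0) { }
  void Apply(int* x) { sum += *x; }  // called once per entry
};

int main() {
  int values[] = { 1, 2, 3 };
  MiniList<int> list(values, 3);
  SumVisitor v;
  list.Iterate(&v);
  std::printf("%d\n", v.sum);  // prints 6
  return 0;
}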
7 deps/v8/src/mips/simulator-mips.cc vendored
@@ -606,7 +606,7 @@ void Simulator::set_fpu_register(int fpureg, int32_t value) {
 
 void Simulator::set_fpu_register_double(int fpureg, double value) {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  *v8i::BitCast<double*, int32_t*>(&FPUregisters_[fpureg]) = value;
+  *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
 }
 
 
@@ -627,8 +627,7 @@ int32_t Simulator::get_fpu_register(int fpureg) const {
 
 double Simulator::get_fpu_register_double(int fpureg) const {
   ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  return *v8i::BitCast<double*, int32_t*>(
-      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+  return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
 }
 
 // Raw access to the PC register.
@@ -903,7 +902,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
       break;
     case MFHC1:
      fp_out = get_fpu_register_double(fs_reg);
-      alu_out = *v8i::BitCast<int32_t*, double*>(&fp_out);
+      alu_out = *v8i::BitCast<int32_t*>(&fp_out);
      break;
    case MTC1:
    case MTHC1:
8 deps/v8/src/objects-inl.h vendored
@@ -2694,12 +2694,14 @@ bool JSFunction::IsBuiltin() {
 
 
 Code* JSFunction::code() {
-  return shared()->code();
+  return Code::cast(READ_FIELD(this, kCodeOffset));
 }
 
 
 void JSFunction::set_code(Code* value) {
-  shared()->set_code(value);
+  // Skip the write barrier because code is never in new space.
+  ASSERT(!Heap::InNewSpace(value));
+  WRITE_FIELD(this, kCodeOffset, value);
 }
 
 
@@ -2771,7 +2773,7 @@ bool JSFunction::should_have_prototype() {
 
 
 bool JSFunction::is_compiled() {
-  return shared()->is_compiled();
+  return code()->kind() != Code::STUB;
 }
 
 
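This accessor pair is the core of the upgrade's calling-convention change: a JSFunction now keeps its own pointer to its Code object instead of reaching through the SharedFunctionInfo, trading one word per function for one fewer dependent load on every call (the assembler changes further down load JSFunction::kCodeOffset directly). A schematic illustration with hypothetical structs, not V8's real object layouts:

struct Code { };
struct SharedFunctionInfo { Code* code; };

// Before: every call chases two pointers.
struct JSFunctionOld {
  SharedFunctionInfo* shared;
  Code* code() { return shared->code; }  // load shared, then load code
};

// After: the code pointer lives in the function object itself.
struct JSFunctionNew {
  Code* code_;                    // kept in sync with shared->code
  SharedFunctionInfo* shared;
  Code* code() { return code_; }  // a single load
};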
16 deps/v8/src/objects.cc vendored
@@ -5823,16 +5823,24 @@ bool JSObject::HasElementWithInterceptor(JSObject* receiver, uint32_t index) {
   CustomArguments args(interceptor->data(), receiver, this);
   v8::AccessorInfo info(args.end());
   if (!interceptor->query()->IsUndefined()) {
-    v8::IndexedPropertyQuery query =
-        v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
+    v8::IndexedPropertyQueryImpl query =
+        v8::ToCData<v8::IndexedPropertyQueryImpl>(interceptor->query());
     LOG(ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
-    v8::Handle<v8::Boolean> result;
+    v8::Handle<v8::Value> result;
     {
       // Leaving JavaScript.
       VMState state(EXTERNAL);
       result = query(index, info);
     }
-    if (!result.IsEmpty()) return result->IsTrue();
+    if (!result.IsEmpty()) {
+      // IsBoolean check would be removed when transition to new API is over.
+      if (result->IsBoolean()) {
+        return result->IsTrue() ? true : false;
+      } else {
+        ASSERT(result->IsInt32());
+        return true;  // absence of property is signaled by empty handle.
+      }
+    }
   } else if (!interceptor->getter()->IsUndefined()) {
     v8::IndexedPropertyGetter getter =
         v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
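During the API transition the interceptor accepts both callback shapes: the legacy query returned a Boolean, while the new style (enabled by defining USE_NEW_QUERY_CALLBACKS, as test-api.cc does below) returns the property's attributes as an Integer and signals absence with an empty handle. A hedged sketch of a new-style callback; the name MyIndexedQuery and the index bound are invented for illustration:

#include <v8.h>

v8::Handle<v8::Integer> MyIndexedQuery(uint32_t index,
                                       const v8::AccessorInfo& info) {
  if (index < 10) {
    // Property exists: report its attributes (v8::None == no restrictions).
    return v8::Integer::New(v8::None);
  }
  return v8::Handle<v8::Integer>();  // empty handle: property is absent
}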
4 deps/v8/src/objects.h vendored
@@ -3608,7 +3608,9 @@ class JSFunction: public JSObject {
   static Context* GlobalContextFromLiterals(FixedArray* literals);
 
   // Layout descriptors.
-  static const int kPrototypeOrInitialMapOffset = JSObject::kHeaderSize;
+  static const int kCodeOffset = JSObject::kHeaderSize;
+  static const int kPrototypeOrInitialMapOffset =
+      kCodeOffset + kPointerSize;
   static const int kSharedFunctionInfoOffset =
       kPrototypeOrInitialMapOffset + kPointerSize;
   static const int kContextOffset = kSharedFunctionInfoOffset + kPointerSize;
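Spelled out, the new layout puts the code pointer in the first word after the JSObject header and shifts every later field back by kPointerSize (4 bytes on ia32, 8 on x64):

kCodeOffset                  = JSObject::kHeaderSize
kPrototypeOrInitialMapOffset = kCodeOffset + 1 * kPointerSize
kSharedFunctionInfoOffset    = kCodeOffset + 2 * kPointerSize
kContextOffset               = kCodeOffset + 3 * kPointerSize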
9 deps/v8/src/parser.cc vendored
@@ -3587,10 +3587,8 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
   // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
   // We have already read the "get" or "set" keyword.
   Token::Value next = Next();
-  if (next == Token::IDENTIFIER ||
-      next == Token::STRING ||
-      next == Token::NUMBER ||
-      Token::IsKeyword(next)) {
+  // TODO(820): Allow NUMBER and STRING as well (and handle array indices).
+  if (next == Token::IDENTIFIER || Token::IsKeyword(next)) {
     Handle<String> name =
         factory()->LookupSymbol(scanner_.literal_string(),
                                 scanner_.literal_length());
@@ -3652,8 +3650,7 @@ Expression* Parser::ParseObjectLiteral(bool* ok) {
         factory()->LookupSymbol(scanner_.literal_string(),
                                 scanner_.literal_length());
     uint32_t index;
-    if (!string.is_null() &&
-        string->AsArrayIndex(&index)) {
+    if (!string.is_null() && string->AsArrayIndex(&index)) {
       key = NewNumberLiteral(index);
       break;
     }
18 deps/v8/src/profile-generator-inl.h vendored
@@ -97,13 +97,6 @@ void CodeMap::DeleteCode(Address addr) {
 }
 
 
-bool CpuProfilesCollection::is_last_profile() {
-  // Called from VM thread, and only it can mutate the list,
-  // so no locking is needed here.
-  return current_profiles_.length() == 1;
-}
-
-
 const char* CpuProfilesCollection::GetFunctionName(String* name) {
   return GetFunctionName(GetName(name));
 }
@@ -130,17 +123,6 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
   }
 }
 
-
-template<class Visitor>
-void HeapEntriesMap::Apply(Visitor* visitor) {
-  for (HashMap::Entry* p = entries_.Start();
-       p != NULL;
-       p = entries_.Next(p)) {
-    if (!IsAlias(p->value))
-      visitor->Apply(reinterpret_cast<HeapEntry*>(p->value));
-  }
-}
-
 } }  // namespace v8::internal
 
 #endif  // ENABLE_LOGGING_AND_PROFILING
1260 deps/v8/src/profile-generator.cc vendored
File diff suppressed because it is too large
491 deps/v8/src/profile-generator.h vendored
@@ -279,15 +279,12 @@ class CpuProfilesCollection {
   CpuProfile* StopProfiling(int security_token_id,
                             const char* title,
                             double actual_sampling_rate);
-  CpuProfile* StopProfiling(int security_token_id,
-                            String* title,
-                            double actual_sampling_rate);
   List<CpuProfile*>* Profiles(int security_token_id);
   const char* GetName(String* name) {
     return function_and_resource_names_.GetName(name);
   }
   CpuProfile* GetProfile(int security_token_id, unsigned uid);
-  inline bool is_last_profile();
+  bool IsLastProfile(const char* title);
 
   CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
                           String* name, String* resource_name, int line_number);
@@ -423,167 +420,194 @@ class ProfileGenerator {
 };
 
 
-class HeapSnapshot;
 class HeapEntry;
 
-class HeapGraphEdge {
+class HeapGraphEdge BASE_EMBEDDED {
  public:
   enum Type {
-    CONTEXT_VARIABLE = v8::HeapGraphEdge::CONTEXT_VARIABLE,
-    ELEMENT = v8::HeapGraphEdge::ELEMENT,
-    PROPERTY = v8::HeapGraphEdge::PROPERTY,
-    INTERNAL = v8::HeapGraphEdge::INTERNAL
+    kContextVariable = v8::HeapGraphEdge::kContextVariable,
+    kElement = v8::HeapGraphEdge::kElement,
+    kProperty = v8::HeapGraphEdge::kProperty,
+    kInternal = v8::HeapGraphEdge::kInternal
   };
 
-  HeapGraphEdge(Type type, const char* name, HeapEntry* from, HeapEntry* to);
-  HeapGraphEdge(int index, HeapEntry* from, HeapEntry* to);
+  HeapGraphEdge() { }
+  void Init(int child_index, Type type, const char* name, HeapEntry* to);
+  void Init(int child_index, int index, HeapEntry* to);
 
-  Type type() const { return type_; }
-  int index() const {
-    ASSERT(type_ == ELEMENT);
+  Type type() { return static_cast<Type>(type_); }
+  int index() {
+    ASSERT(type_ == kElement);
     return index_;
   }
-  const char* name() const {
-    ASSERT(type_ == CONTEXT_VARIABLE || type_ == PROPERTY || type_ == INTERNAL);
+  const char* name() {
+    ASSERT(type_ == kContextVariable
+           || type_ == kProperty
+           || type_ == kInternal);
     return name_;
   }
-  HeapEntry* from() const { return from_; }
-  HeapEntry* to() const { return to_; }
+  HeapEntry* to() { return to_; }
+
+  HeapEntry* From();
 
  private:
-  Type type_;
+  int child_index_ : 30;
+  unsigned type_ : 2;
   union {
     int index_;
     const char* name_;
   };
-  HeapEntry* from_;
   HeapEntry* to_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapGraphEdge);
 };
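The rewritten edge is a main source of the snapshot-size reduction noted in the ChangeLog (issue 783): the type now shares one 32-bit word with a 30-bit child index, and the from_ back pointer is gone entirely (From() recomputes it from the packed layout described below). A rough sizeof comparison; exact numbers depend on pointer width and padding:

#include <cstdio>

struct HeapEntry;  // opaque, for sizing purposes only

struct OldEdge {                      // shape of the old HeapGraphEdge
  int type_;
  union { int index_; const char* name_; };
  HeapEntry* from_;
  HeapEntry* to_;
};

struct NewEdge {                      // shape of the new HeapGraphEdge
  int child_index_ : 30;              // packed together into
  unsigned type_ : 2;                 // a single 32-bit word
  union { int index_; const char* name_; };
  HeapEntry* to_;                     // from_ dropped
};

int main() {
  // With 32-bit pointers: 16 vs. 12 bytes; with 64-bit: 32 vs. 24.
  std::printf("old=%u new=%u\n",
              unsigned(sizeof(OldEdge)), unsigned(sizeof(NewEdge)));
  return 0;
}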
 
 
-class HeapGraphPath;
 class CachedHeapGraphPath;
+class HeapGraphPath;
+class HeapSnapshot;
 
-class HeapEntry {
+// HeapEntry instances represent an entity from the heap (or a special
+// virtual node, e.g. root). To make heap snapshots more compact,
+// HeapEntries has a special memory layout (no Vectors or Lists used):
+//
+//  +-----------------+
+//        HeapEntry
+//  +-----------------+
+//   HeapGraphEdge    |
+//   ...              } children_count
+//   HeapGraphEdge    |
+//  +-----------------+
+//   HeapGraphEdge*   |
+//   ...              } retainers_count
+//   HeapGraphEdge*   |
+//  +-----------------+
+//
+// In a HeapSnapshot, all entries are hand-allocated in a continuous array
+// of raw bytes.
+//
+class HeapEntry BASE_EMBEDDED {
  public:
   enum Type {
-    INTERNAL = v8::HeapGraphNode::INTERNAL,
-    ARRAY = v8::HeapGraphNode::ARRAY,
-    STRING = v8::HeapGraphNode::STRING,
-    OBJECT = v8::HeapGraphNode::OBJECT,
-    CODE = v8::HeapGraphNode::CODE,
-    CLOSURE = v8::HeapGraphNode::CLOSURE
+    kInternal = v8::HeapGraphNode::kInternal,
+    kArray = v8::HeapGraphNode::kArray,
+    kString = v8::HeapGraphNode::kString,
+    kObject = v8::HeapGraphNode::kObject,
+    kCode = v8::HeapGraphNode::kCode,
+    kClosure = v8::HeapGraphNode::kClosure
   };
 
-  explicit HeapEntry(HeapSnapshot* snapshot)
-      : snapshot_(snapshot),
-        visited_(false),
-        type_(INTERNAL),
-        name_(""),
-        id_(0),
-        next_auto_index_(0),
-        self_size_(0),
-        security_token_id_(TokenEnumerator::kNoSecurityToken),
-        children_(1),
-        retainers_(0),
-        retaining_paths_(0),
-        total_size_(kUnknownSize),
-        non_shared_total_size_(kUnknownSize),
-        painted_(kUnpainted) { }
-  HeapEntry(HeapSnapshot* snapshot,
-            Type type,
-            const char* name,
-            uint64_t id,
-            int self_size,
-            int security_token_id)
-      : snapshot_(snapshot),
-        visited_(false),
-        type_(type),
-        name_(name),
-        id_(id),
-        next_auto_index_(1),
-        self_size_(self_size),
-        security_token_id_(security_token_id),
-        children_(4),
-        retainers_(4),
-        retaining_paths_(4),
-        total_size_(kUnknownSize),
-        non_shared_total_size_(kUnknownSize),
-        painted_(kUnpainted) { }
-  ~HeapEntry();
+  HeapEntry() { }
+  void Init(HeapSnapshot* snapshot, int children_count, int retainers_count);
+  void Init(HeapSnapshot* snapshot,
+            Type type,
+            const char* name,
+            uint64_t id,
+            int self_size,
+            int children_count,
+            int retainers_count);
 
-  bool visited() const { return visited_; }
-  Type type() const { return type_; }
-  const char* name() const { return name_; }
-  uint64_t id() const { return id_; }
-  int self_size() const { return self_size_; }
-  int security_token_id() const { return security_token_id_; }
-  bool painted_reachable() { return painted_ == kPaintReachable; }
-  bool not_painted_reachable_from_others() {
-    return painted_ != kPaintReachableFromOthers;
-  }
-  const List<HeapGraphEdge*>* children() const { return &children_; }
-  const List<HeapGraphEdge*>* retainers() const { return &retainers_; }
-  const List<HeapGraphPath*>* GetRetainingPaths();
+  HeapSnapshot* snapshot() { return snapshot_; }
+  Type type() { return static_cast<Type>(type_); }
+  const char* name() { return name_; }
+  uint64_t id() { return id_; }
+  int self_size() { return self_size_; }
+
+  Vector<HeapGraphEdge> children() {
+    return Vector<HeapGraphEdge>(children_arr(), children_count_); }
+  Vector<HeapGraphEdge*> retainers() {
+    return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
+  List<HeapGraphPath*>* GetRetainingPaths();
+
+  void clear_paint() { painted_ = kUnpainted; }
+  bool painted_reachable() { return painted_ == kPainted; }
+  void paint_reachable() {
+    ASSERT(painted_ == kUnpainted);
+    painted_ = kPainted;
+  }
+  bool not_painted_reachable_from_others() {
+    return painted_ != kPaintedReachableFromOthers;
+  }
+  void paint_reachable_from_others() {
+    painted_ = kPaintedReachableFromOthers;
+  }
 
   template<class Visitor>
   void ApplyAndPaintAllReachable(Visitor* visitor);
 
-  void ClearPaint() { painted_ = kUnpainted; }
-  void CutEdges();
-  void MarkAsVisited() { visited_ = true; }
   void PaintAllReachable();
-  void PaintReachable() {
-    ASSERT(painted_ == kUnpainted);
-    painted_ = kPaintReachable;
-  }
-  void PaintReachableFromOthers() { painted_ = kPaintReachableFromOthers; }
-  void SetClosureReference(const char* name, HeapEntry* entry);
-  void SetElementReference(int index, HeapEntry* entry);
-  void SetInternalReference(const char* name, HeapEntry* entry);
-  void SetPropertyReference(const char* name, HeapEntry* entry);
-  void SetAutoIndexReference(HeapEntry* entry);
-  void SetUnidirAutoIndexReference(HeapEntry* entry);
 
-  int TotalSize();
-  int NonSharedTotalSize();
+  void SetElementReference(
+      int child_index, int index, HeapEntry* entry, int retainer_index);
+  void SetNamedReference(HeapGraphEdge::Type type,
+                         int child_index,
+                         const char* name,
+                         HeapEntry* entry,
+                         int retainer_index);
+  void SetUnidirElementReference(int child_index, int index, HeapEntry* entry);
+
+  int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
+  int ReachableSize();
+  int RetainedSize();
 
   void Print(int max_depth, int indent);
 
+  static int EntriesSize(int entries_count,
+                         int children_count,
+                         int retainers_count);
+
  private:
-  void AddEdge(HeapGraphEdge* edge);
-  int CalculateTotalSize();
-  int CalculateNonSharedTotalSize();
-  void FindRetainingPaths(HeapEntry* node, CachedHeapGraphPath* prev_path);
-  void RemoveChild(HeapGraphEdge* edge);
-  void RemoveRetainer(HeapGraphEdge* edge);
-
+  HeapGraphEdge* children_arr() {
+    return reinterpret_cast<HeapGraphEdge*>(this + 1);
+  }
+  HeapGraphEdge** retainers_arr() {
+    return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
+  }
   const char* TypeAsString();
 
+  unsigned painted_: 2;
+  unsigned type_: 3;
+  // The calculated data is stored in HeapSnapshot in HeapEntryCalculatedData
+  // entries. See AddCalculatedData and GetCalculatedData.
+  int calculated_data_index_: 27;
+  int self_size_;
+  int children_count_;
+  int retainers_count_;
   HeapSnapshot* snapshot_;
-  bool visited_;
-  Type type_;
   const char* name_;
   uint64_t id_;
-  int next_auto_index_;
-  int self_size_;
-  int security_token_id_;
-  List<HeapGraphEdge*> children_;
-  List<HeapGraphEdge*> retainers_;
-  List<HeapGraphPath*> retaining_paths_;
-  int total_size_;
-  int non_shared_total_size_;
-  int painted_;
+
+  static const unsigned kUnpainted = 0;
+  static const unsigned kPainted = 1;
+  static const unsigned kPaintedReachableFromOthers = 2;
+  static const int kNoCalculatedData = -1;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapEntry);
+};
+
+
+class HeapEntryCalculatedData {
+ public:
+  HeapEntryCalculatedData()
+      : retaining_paths_(NULL),
+        reachable_size_(kUnknownSize),
+        retained_size_(kUnknownSize) {
+  }
+  void Dispose();
+
+  List<HeapGraphPath*>* GetRetainingPaths(HeapEntry* entry);
+  int ReachableSize(HeapEntry* entry);
+  int RetainedSize(HeapEntry* entry);
+
+ private:
+  void CalculateSizes(HeapEntry* entry);
+  void FindRetainingPaths(HeapEntry* entry, CachedHeapGraphPath* prev_path);
+
+  List<HeapGraphPath*>* retaining_paths_;
+  int reachable_size_;
+  int retained_size_;
 
   static const int kUnknownSize = -1;
-  static const int kUnpainted = 0;
-  static const int kPaintReachable = 1;
-  static const int kPaintReachableFromOthers = 2;
 
-  DISALLOW_IMPLICIT_CONSTRUCTORS(HeapEntry);
+  // Allow generated copy constructor and assignment operator.
 };
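Given that layout, the size of one hand-allocated block is a plain sum, which is presumably what EntriesSize computes in profile-generator.cc (its diff is suppressed above). A sketch under that assumption, with stand-in types sized only for illustration:

#include <cstddef>
#include <cstdio>

struct HeapGraphEdge { unsigned bits; union { int i; const char* n; }; void* to; };
struct HeapEntry { char raw[32]; };  // bit-fields, counts, snapshot_, name_, id_

size_t EntriesSize(int entries_count, int children_count, int retainers_count) {
  return entries_count * sizeof(HeapEntry)          // the entries themselves
       + children_count * sizeof(HeapGraphEdge)     // embedded child edges
       + retainers_count * sizeof(HeapGraphEdge*);  // retainer back pointers
}

int main() {
  std::printf("%u bytes\n", unsigned(EntriesSize(1, 4, 2)));
  return 0;
}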
@@ -595,7 +619,7 @@ class HeapGraphPath {
 
   void Add(HeapGraphEdge* edge) { path_.Add(edge); }
   void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
-  const List<HeapGraphEdge*>* path() const { return &path_; }
+  const List<HeapGraphEdge*>* path() { return &path_; }
 
   void Print();
 
@@ -606,39 +630,6 @@ class HeapGraphPath {
 };
 
 
-class HeapEntriesMap {
- public:
-  HeapEntriesMap();
-  ~HeapEntriesMap();
-
-  void Alias(HeapObject* object, HeapEntry* entry);
-  void Apply(void (HeapEntry::*Func)(void));
-  template<class Visitor>
-  void Apply(Visitor* visitor);
-  HeapEntry* Map(HeapObject* object);
-  void Pair(HeapObject* object, HeapEntry* entry);
-
-  uint32_t capacity() { return entries_.capacity(); }
-
- private:
-  INLINE(uint32_t Hash(HeapObject* object)) {
-    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
-  }
-  INLINE(static bool HeapObjectsMatch(void* key1, void* key2)) {
-    return key1 == key2;
-  }
-  INLINE(bool IsAlias(void* ptr)) {
-    return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
-  }
-
-  static const intptr_t kAliasTag = 1;
-
-  HashMap entries_;
-
-  DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
 class HeapSnapshotsCollection;
 class HeapSnapshotsDiff;
 
@@ -653,53 +644,52 @@ class HeapSnapshot {
               const char* title,
               unsigned uid);
   ~HeapSnapshot();
-  void ClearPaint();
-  void CutObjectsFromForeignSecurityContexts();
-  HeapEntry* GetEntry(Object* object);
-  void SetClosureReference(
-      HeapEntry* parent, String* reference_name, Object* child);
-  void SetElementReference(HeapEntry* parent, int index, Object* child);
-  void SetInternalReference(
-      HeapEntry* parent, const char* reference_name, Object* child);
-  void SetPropertyReference(
-      HeapEntry* parent, String* reference_name, Object* child);
 
-  INLINE(const char* title() const) { return title_; }
-  INLINE(unsigned uid() const) { return uid_; }
-  const HeapEntry* const_root() const { return &root_; }
-  HeapEntry* root() { return &root_; }
-  template<class Visitor>
-  void IterateEntries(Visitor* visitor) { entries_.Apply(visitor); }
-  List<HeapEntry*>* GetSortedEntriesList();
+  HeapSnapshotsCollection* collection() { return collection_; }
+  const char* title() { return title_; }
+  unsigned uid() { return uid_; }
+  HeapEntry* root() { return entries_[root_entry_index_]; }
+
+  void AllocateEntries(
+      int entries_count, int children_count, int retainers_count);
+  HeapEntry* AddEntry(
+      HeapObject* object, int children_count, int retainers_count);
+  bool WillAddEntry(HeapObject* object);
+  int AddCalculatedData();
+  HeapEntryCalculatedData& GetCalculatedData(int index) {
+    return calculated_data_[index];
+  }
+  void ClearPaint();
   HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
+  List<HeapEntry*>* GetSortedEntriesList();
+  template<class Visitor>
+  void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
 
   void Print(int max_depth);
+  void PrintEntriesSize();
+
+  static HeapObject *const kInternalRootObject;
 
  private:
-  HeapEntry* AddEntry(HeapObject* object, HeapEntry::Type type) {
-    return AddEntry(object, type, "");
-  }
-  HeapEntry* AddEntry(
-      HeapObject* object, HeapEntry::Type type, const char* name);
-  void AddEntryAlias(HeapObject* object, HeapEntry* entry) {
-    entries_.Alias(object, entry);
-  }
-  HeapEntry* FindEntry(HeapObject* object) {
-    return entries_.Map(object);
-  }
-  int GetGlobalSecurityToken();
-  int GetObjectSecurityToken(HeapObject* obj);
+  HeapEntry* AddEntry(HeapObject* object,
+                      HeapEntry::Type type,
+                      const char* name,
+                      int children_count,
+                      int retainers_count);
+  HeapEntry* GetNextEntryToInit();
   static int GetObjectSize(HeapObject* obj);
   static int CalculateNetworkSize(JSObject* obj);
 
   HeapSnapshotsCollection* collection_;
   const char* title_;
   unsigned uid_;
-  HeapEntry root_;
-  // Mapping from HeapObject* pointers to HeapEntry* pointers.
-  HeapEntriesMap entries_;
-  // Entries sorted by id.
-  List<HeapEntry*>* sorted_entries_;
+  int root_entry_index_;
+  char* raw_entries_;
+  List<HeapEntry*> entries_;
+  bool entries_sorted_;
+  List<HeapEntryCalculatedData> calculated_data_;
+
+  friend class HeapSnapshotTester;
 
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
 };
@@ -748,30 +738,36 @@ class HeapSnapshotsDiff {
   HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
       : snapshot1_(snapshot1),
         snapshot2_(snapshot2),
-        additions_root_(new HeapEntry(snapshot2)),
-        deletions_root_(new HeapEntry(snapshot1)) { }
+        raw_additions_root_(NULL),
+        raw_deletions_root_(NULL) { }
 
   ~HeapSnapshotsDiff() {
-    delete deletions_root_;
-    delete additions_root_;
+    DeleteArray(raw_deletions_root_);
+    DeleteArray(raw_additions_root_);
   }
 
-  void AddAddedEntry(HeapEntry* entry) {
-    additions_root_->SetUnidirAutoIndexReference(entry);
+  void AddAddedEntry(int child_index, int index, HeapEntry* entry) {
+    additions_root()->SetUnidirElementReference(child_index, index, entry);
   }
 
-  void AddDeletedEntry(HeapEntry* entry) {
-    deletions_root_->SetUnidirAutoIndexReference(entry);
+  void AddDeletedEntry(int child_index, int index, HeapEntry* entry) {
+    deletions_root()->SetUnidirElementReference(child_index, index, entry);
   }
 
-  const HeapEntry* additions_root() const { return additions_root_; }
-  const HeapEntry* deletions_root() const { return deletions_root_; }
+  void CreateRoots(int additions_count, int deletions_count);
+
+  HeapEntry* additions_root() {
+    return reinterpret_cast<HeapEntry*>(raw_additions_root_);
+  }
+  HeapEntry* deletions_root() {
+    return reinterpret_cast<HeapEntry*>(raw_deletions_root_);
+  }
 
  private:
   HeapSnapshot* snapshot1_;
   HeapSnapshot* snapshot2_;
-  HeapEntry* additions_root_;
-  HeapEntry* deletions_root_;
+  char* raw_additions_root_;
+  char* raw_deletions_root_;
 
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
 };
@@ -830,18 +826,123 @@ class HeapSnapshotsCollection {
 };
 
 
+// The HeapEntriesMap instance is used to track a mapping between
+// real heap objects and their representations in heap snapshots.
+class HeapEntriesMap {
+ public:
+  HeapEntriesMap();
+  ~HeapEntriesMap();
+
+  // Aliasing is used for skipping intermediate proxy objects, like
+  // JSGlobalPropertyCell.
+  void Alias(HeapObject* from, HeapObject* to);
+  HeapEntry* Map(HeapObject* object);
+  void Pair(HeapObject* object, HeapEntry* entry);
+  void CountReference(HeapObject* from, HeapObject* to,
+                      int* prev_children_count = NULL,
+                      int* prev_retainers_count = NULL);
+  template<class Visitor>
+  void UpdateEntries(Visitor* visitor);
+
+  int entries_count() { return entries_count_; }
+  int total_children_count() { return total_children_count_; }
+  int total_retainers_count() { return total_retainers_count_; }
+
+ private:
+  struct EntryInfo {
+    explicit EntryInfo(HeapEntry* entry)
+        : entry(entry), children_count(0), retainers_count(0) { }
+    HeapEntry* entry;
+    int children_count;
+    int retainers_count;
+  };
+
+  uint32_t Hash(HeapObject* object) {
+    return static_cast<uint32_t>(reinterpret_cast<intptr_t>(object));
+  }
+  static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }
+
+  bool IsAlias(void* ptr) {
+    return reinterpret_cast<intptr_t>(ptr) & kAliasTag;
+  }
+  void* MakeAlias(void* ptr) {
+    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(ptr) | kAliasTag);
+  }
+  void* Unalias(void* ptr) {
+    return reinterpret_cast<void*>(
+        reinterpret_cast<intptr_t>(ptr) & (~kAliasTag));
+  }
+
+  HashMap entries_;
+  int entries_count_;
+  int total_children_count_;
+  int total_retainers_count_;
+
+  static const intptr_t kAliasTag = 1;
+
+  DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
+};
+
+
 class HeapSnapshotGenerator {
  public:
+  class SnapshotFillerInterface {
+   public:
+    virtual ~SnapshotFillerInterface() { }
+    virtual HeapEntry* AddEntry(HeapObject* obj) = 0;
+    virtual void SetElementReference(HeapObject* parent_obj,
+                                     HeapEntry* parent_entry,
+                                     int index,
+                                     Object* child_obj,
+                                     HeapEntry* child_entry) = 0;
+    virtual void SetNamedReference(HeapGraphEdge::Type type,
+                                   HeapObject* parent_obj,
+                                   HeapEntry* parent_entry,
+                                   const char* reference_name,
+                                   Object* child_obj,
+                                   HeapEntry* child_entry) = 0;
+    virtual void SetRootReference(Object* child_obj,
+                                  HeapEntry* child_entry) = 0;
+
+    static HeapEntry *const kHeapEntryPlaceholder;
+  };
+
   explicit HeapSnapshotGenerator(HeapSnapshot* snapshot);
   void GenerateSnapshot();
 
  private:
+  HeapEntry* GetEntry(Object* obj);
+  int GetGlobalSecurityToken();
+  int GetObjectSecurityToken(HeapObject* obj);
   void ExtractReferences(HeapObject* obj);
   void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
   void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
+  void SetClosureReference(HeapObject* parent_obj,
+                           HeapEntry* parent,
+                           String* reference_name,
+                           Object* child);
+  void SetElementReference(HeapObject* parent_obj,
+                           HeapEntry* parent,
+                           int index,
+                           Object* child);
+  void SetInternalReference(HeapObject* parent_obj,
+                            HeapEntry* parent,
+                            const char* reference_name,
+                            Object* child);
+  void SetPropertyReference(HeapObject* parent_obj,
+                            HeapEntry* parent,
+                            String* reference_name,
+                            Object* child);
+  void SetRootReference(Object* child);
 
   HeapSnapshot* snapshot_;
+  HeapSnapshotsCollection* collection_;
+  // Mapping from HeapObject* pointers to HeapEntry* pointers.
+  HeapEntriesMap entries_;
+  SnapshotFillerInterface* filler_;
+
+  friend class IndexedReferencesExtractor;
 
   DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
 };
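HeapEntriesMap steals the low pointer bit for its alias marks, which is safe because heap objects are word-aligned, so the least significant bit of any stored pointer is always zero. A standalone illustration of the MakeAlias/IsAlias/Unalias round trip (assuming, as V8 does, that the tagged objects are aligned to at least two bytes):

#include <cassert>
#include <stdint.h>

const intptr_t kAliasTag = 1;

void* MakeAlias(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(p) | kAliasTag);
}
bool IsAlias(void* p) { return reinterpret_cast<intptr_t>(p) & kAliasTag; }
void* Unalias(void* p) {
  return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(p) & ~kAliasTag);
}

int main() {
  int dummy;  // any sufficiently aligned object
  void* tagged = MakeAlias(&dummy);
  assert(IsAlias(tagged));            // tag bit is set
  assert(Unalias(tagged) == &dummy);  // clearing it restores the pointer
  return 0;
}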
16 deps/v8/src/runtime.cc vendored
@@ -305,13 +305,14 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
   }
   Handle<Object> result;
   uint32_t element_index = 0;
-  if (key->ToArrayIndex(&element_index)) {
+  if (key->IsSymbol()) {
+    // If key is a symbol it is not an array element.
+    Handle<String> name(String::cast(*key));
+    ASSERT(!name->AsArrayIndex(&element_index));
+    result = SetProperty(boilerplate, name, value, NONE);
+  } else if (key->ToArrayIndex(&element_index)) {
     // Array index (uint32).
     result = SetElement(boilerplate, element_index, value);
-  } else if (key->IsSymbol()) {
-    // The key is not an array index.
-    Handle<String> name(String::cast(*key));
-    result = SetProperty(boilerplate, name, value, NONE);
   } else {
     // Non-uint32 number.
     ASSERT(key->IsNumber());
@@ -1626,7 +1627,8 @@ static Object* Runtime_SetCode(Arguments args) {
   }
   // Set the code, scope info, formal parameter count,
   // and the length of the target function.
-  target->set_code(fun->code());
+  target->shared()->set_code(shared->code());
+  target->set_code(shared->code());
   target->shared()->set_scope_info(shared->scope_info());
   target->shared()->set_length(shared->length());
   target->shared()->set_formal_parameter_count(
@@ -6869,7 +6871,7 @@ static Object* Runtime_LazyCompile(Arguments args) {
 
   Handle<JSFunction> function = args.at<JSFunction>(0);
 #ifdef DEBUG
-  if (FLAG_trace_lazy) {
+  if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
     PrintF("[lazy: ");
     function->shared()->name()->Print();
     PrintF("]\n");
2 deps/v8/src/serialize.h vendored
@@ -248,7 +248,7 @@ class SerializerDeserializer: public ObjectVisitor {
   }
 
   static int partial_snapshot_cache_length_;
-  static const int kPartialSnapshotCacheCapacity = 1300;
+  static const int kPartialSnapshotCacheCapacity = 1400;
   static Object* partial_snapshot_cache_[];
 };
 
9 deps/v8/src/top.cc vendored
@@ -107,16 +107,15 @@ void Top::IterateThread(ThreadVisitor* v, char* t) {
 void Top::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
   v->VisitPointer(&(thread->pending_exception_));
   v->VisitPointer(&(thread->pending_message_obj_));
-  v->VisitPointer(
-      BitCast<Object**, Script**>(&(thread->pending_message_script_)));
-  v->VisitPointer(BitCast<Object**, Context**>(&(thread->context_)));
+  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
+  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
   v->VisitPointer(&(thread->scheduled_exception_));
 
   for (v8::TryCatch* block = thread->TryCatchHandler();
        block != NULL;
        block = TRY_CATCH_FROM_ADDRESS(block->next_)) {
-    v->VisitPointer(BitCast<Object**, void**>(&(block->exception_)));
-    v->VisitPointer(BitCast<Object**, void**>(&(block->message_)));
+    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
+    v->VisitPointer(BitCast<Object**>(&(block->message_)));
   }
 
   // Iterate over pointers on native execution stack.
6 deps/v8/src/utils.h vendored
@@ -739,7 +739,11 @@ inline Dest BitCast(const Source& source) {
   return dest;
 }
 
+template <class Dest, class Source>
+inline Dest BitCast(Source* const & source) {
+  return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_UTILS_H_
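The new overload deduces Source from the pointer argument, so call sites such as top.cc name only the destination type. For context, V8's primary BitCast template is the classic memcpy-based type pun, which is also what makes it safe under strict aliasing and ties in with dropping -fno-strict-aliasing from the SConstruct flags. A simplified sketch; the real template also carries a compile-time size check:

#include <cstring>
#include <stdint.h>

template <class Dest, class Source>
inline Dest BitCast(const Source& source) {
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));  // well-defined type pun
  return dest;
}

// The overload added in this commit: Source is deduced from the pointer
// argument, so callers write BitCast<Object**>(&ptr) without naming Source.
template <class Dest, class Source>
inline Dest BitCast(Source* const& source) {
  return BitCast<Dest>(reinterpret_cast<uintptr_t>(source));
}

int main() {
  double d = 1.0;
  uint64_t bits = BitCast<uint64_t>(d);
  return bits == 0x3FF0000000000000ULL ? 0 : 1;  // IEEE-754 pattern for 1.0
}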
4 deps/v8/src/version.cc vendored
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION 2
 #define MINOR_VERSION 3
-#define BUILD_NUMBER 6
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 7
+#define PATCH_LEVEL 0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
2 deps/v8/src/x64/assembler-x64.cc vendored
@@ -253,7 +253,7 @@ Operand::Operand(const Operand& operand, int32_t offset) {
   int32_t disp_value = 0;
   if (mode == 0x80 || is_baseless) {
     // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
-    disp_value = *reinterpret_cast<const int32_t*>(&operand.buf_[disp_offset]);
+    disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
   } else if (mode == 0x40) {
     // Mode 1: Byte displacement.
     disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
2 deps/v8/src/x64/builtins-x64.cc vendored
@@ -310,7 +310,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ movsxlq(rbx,
              FieldOperand(rdx,
                           SharedFunctionInfo::kFormalParameterCountOffset));
-  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  __ movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
   __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
   __ cmpq(rax, rbx);
   __ j(not_equal,
9 deps/v8/src/x64/codegen-x64.cc vendored
@@ -2630,9 +2630,8 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
     __ j(is_smi, &build_args);
     __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
     __ j(not_equal, &build_args);
-    __ movq(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
     Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-    __ Cmp(FieldOperand(rax, SharedFunctionInfo::kCodeOffset), apply_code);
+    __ Cmp(FieldOperand(rax, JSFunction::kCodeOffset), apply_code);
     __ j(not_equal, &build_args);
 
     // Check that applicand is a function.
@@ -8635,6 +8634,12 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
   __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
   __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
 
+  // Initialize the code pointer in the function to be the one
+  // found in the shared function info object.
+  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  __ movq(FieldOperand(rax, JSFunction::kCodeOffset), rdx);
+
+
   // Return and remove the on-stack parameter.
   __ ret(1 * kPointerSize);
 
5 deps/v8/src/x64/macro-assembler-x64.cc vendored
@@ -582,8 +582,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
   // Make sure the code objects in the builtins object and in the
   // builtin function are the same.
   push(target);
-  movq(target, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-  movq(target, FieldOperand(target, SharedFunctionInfo::kCodeOffset));
+  movq(target, FieldOperand(rdi, JSFunction::kCodeOffset));
   cmpq(target, Operand(rsp, 0));
   Assert(equal, "Builtin code object changed");
   pop(target);
@@ -2290,7 +2289,7 @@ void MacroAssembler::InvokeFunction(Register function,
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
   movsxlq(rbx,
           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
-  movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
+  movq(rdx, FieldOperand(rdi, JSFunction::kCodeOffset));
   // Advances rdx to the end of the Code object header, to the start of
   // the executable code.
   lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
6 deps/v8/test/cctest/test-api.cc vendored
@@ -27,6 +27,8 @@
 
 #include <limits.h>
 
+#define USE_NEW_QUERY_CALLBACKS
+
 #include "v8.h"
 
 #include "api.h"
@@ -1194,12 +1196,12 @@ v8::Handle<Value> CheckThisNamedPropertySetter(Local<String> property,
   return v8::Handle<Value>();
 }
 
-v8::Handle<v8::Boolean> CheckThisIndexedPropertyQuery(
+v8::Handle<v8::Integer> CheckThisIndexedPropertyQuery(
     uint32_t index,
     const AccessorInfo& info) {
   ApiTestFuzzer::Fuzz();
   CHECK(info.This()->Equals(bottom));
-  return v8::Handle<v8::Boolean>();
+  return v8::Handle<v8::Integer>();
 }
 
 
15 deps/v8/test/cctest/test-cpu-profiler.cc vendored
@@ -12,6 +12,7 @@ namespace i = v8::internal;
 
 using i::CodeEntry;
 using i::CpuProfile;
+using i::CpuProfiler;
 using i::CpuProfilesCollection;
 using i::ProfileGenerator;
 using i::ProfileNode;
@@ -225,4 +226,18 @@ TEST(TickEvents) {
   CHECK_EQ("bbb", bottom_up_ddd_stub_children->last()->entry()->name());
 }
 
+
+// http://crbug/51594
+// This test must not crash.
+TEST(CrashIfStoppingLastNonExistentProfile) {
+  InitializeVM();
+  TestSetup test_setup;
+  CpuProfiler::Setup();
+  CpuProfiler::StartProfiling("1");
+  CpuProfiler::StopProfiling("2");
+  CpuProfiler::StartProfiling("1");
+  CpuProfiler::StopProfiling("");
+  CpuProfiler::TearDown();
+}
+
 #endif  // ENABLE_LOGGING_AND_PROFILING
236
deps/v8/test/cctest/test-heap-profiler.cc
vendored
236
deps/v8/test/cctest/test-heap-profiler.cc
vendored
@ -396,20 +396,17 @@ class NamedEntriesDetector {
|
|||||||
has_A2(false), has_B2(false), has_C2(false) {
|
has_A2(false), has_B2(false), has_C2(false) {
|
||||||
}
|
}
|
||||||
|
|
||||||
void Apply(i::HeapEntry* entry) {
|
void Apply(i::HeapEntry** entry_ptr) {
|
||||||
const char* node_name = entry->name();
|
if (IsReachableNodeWithName(*entry_ptr, "A1")) has_A1 = true;
|
||||||
if (strcmp("A1", node_name) == 0
|
if (IsReachableNodeWithName(*entry_ptr, "B1")) has_B1 = true;
|
||||||
&& entry->GetRetainingPaths()->length() > 0) has_A1 = true;
|
if (IsReachableNodeWithName(*entry_ptr, "C1")) has_C1 = true;
|
||||||
if (strcmp("B1", node_name) == 0
|
if (IsReachableNodeWithName(*entry_ptr, "A2")) has_A2 = true;
|
||||||
&& entry->GetRetainingPaths()->length() > 0) has_B1 = true;
|
if (IsReachableNodeWithName(*entry_ptr, "B2")) has_B2 = true;
|
||||||
if (strcmp("C1", node_name) == 0
|
if (IsReachableNodeWithName(*entry_ptr, "C2")) has_C2 = true;
|
||||||
&& entry->GetRetainingPaths()->length() > 0) has_C1 = true;
|
}
|
||||||
if (strcmp("A2", node_name) == 0
|
|
||||||
&& entry->GetRetainingPaths()->length() > 0) has_A2 = true;
|
static bool IsReachableNodeWithName(i::HeapEntry* entry, const char* name) {
|
||||||
if (strcmp("B2", node_name) == 0
|
return strcmp(name, entry->name()) == 0 && entry->painted_reachable();
|
||||||
&& entry->GetRetainingPaths()->length() > 0) has_B2 = true;
|
|
||||||
if (strcmp("C2", node_name) == 0
|
|
||||||
&& entry->GetRetainingPaths()->length() > 0) has_C2 = true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool has_A1;
|
bool has_A1;
|
||||||
@ -460,7 +457,7 @@ static bool HasString(const v8::HeapGraphNode* node, const char* contents) {
|
|||||||
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
|
for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
|
||||||
const v8::HeapGraphEdge* prop = node->GetChild(i);
|
const v8::HeapGraphEdge* prop = node->GetChild(i);
|
||||||
const v8::HeapGraphNode* node = prop->GetToNode();
|
const v8::HeapGraphNode* node = prop->GetToNode();
|
||||||
if (node->GetType() == v8::HeapGraphNode::STRING) {
|
if (node->GetType() == v8::HeapGraphNode::kString) {
|
||||||
v8::String::AsciiValue node_name(node->GetName());
|
v8::String::AsciiValue node_name(node->GetName());
|
||||||
if (strcmp(contents, *node_name) == 0) return true;
|
if (strcmp(contents, *node_name) == 0) return true;
|
||||||
}
|
}
|
||||||
@ -496,26 +493,34 @@ TEST(HeapSnapshot) {
       "var c2 = new C2(a2);");
   const v8::HeapSnapshot* snapshot_env2 =
       v8::HeapProfiler::TakeSnapshot(v8::String::New("env2"));
-  const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
-
-  // Verify, that JS global object of env2 doesn't have '..1'
-  // properties, but has '..2' properties.
-  CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a1"));
-  CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_1"));
-  CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b1_2"));
-  CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c1"));
-  const v8::HeapGraphNode* a2_node =
-      GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "a2");
-  CHECK_NE(NULL, a2_node);
-  CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_1"));
-  CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "b2_2"));
-  CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::PROPERTY, "c2"));
-
-  // Verify that anything related to '[ABC]1' is not reachable.
-  NamedEntriesDetector det;
   i::HeapSnapshot* i_snapshot_env2 =
       const_cast<i::HeapSnapshot*>(
           reinterpret_cast<const i::HeapSnapshot*>(snapshot_env2));
+  const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
+  // Paint all nodes reachable from global object.
+  i_snapshot_env2->ClearPaint();
+  const_cast<i::HeapEntry*>(
+      reinterpret_cast<const i::HeapEntry*>(global_env2))->PaintAllReachable();
+
+  // Verify, that JS global object of env2 doesn't have '..1'
+  // properties, but has '..2' properties.
+  CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "a1"));
+  CHECK_EQ(
+      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b1_1"));
+  CHECK_EQ(
+      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b1_2"));
+  CHECK_EQ(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c1"));
+  const v8::HeapGraphNode* a2_node =
+      GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "a2");
+  CHECK_NE(NULL, a2_node);
+  CHECK_NE(
+      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_1"));
+  CHECK_NE(
+      NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "b2_2"));
+  CHECK_NE(NULL, GetProperty(global_env2, v8::HeapGraphEdge::kProperty, "c2"));
+
+  // Verify that anything related to '[ABC]1' is not reachable.
+  NamedEntriesDetector det;
   i_snapshot_env2->IterateEntries(&det);
   CHECK(!det.has_A1);
   CHECK(!det.has_B1);
@ -539,7 +544,7 @@ TEST(HeapSnapshot) {
     const v8::HeapGraphEdge* last_edge = path->GetEdge(edges_count - 1);
     v8::String::AsciiValue last_edge_name(last_edge->GetName());
     if (strcmp("a2", *last_edge_name) == 0
-        && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY) {
+        && last_edge->GetType() == v8::HeapGraphEdge::kProperty) {
      has_global_obj_a2_ref = true;
      continue;
    }
@ -547,19 +552,19 @@ TEST(HeapSnapshot) {
     const v8::HeapGraphEdge* prev_edge = path->GetEdge(edges_count - 2);
     v8::String::AsciiValue prev_edge_name(prev_edge->GetName());
     if (strcmp("x1", *last_edge_name) == 0
-        && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
+        && last_edge->GetType() == v8::HeapGraphEdge::kProperty
         && strcmp("c2", *prev_edge_name) == 0) has_c2_x1_ref = true;
     if (strcmp("x2", *last_edge_name) == 0
-        && last_edge->GetType() == v8::HeapGraphEdge::PROPERTY
+        && last_edge->GetType() == v8::HeapGraphEdge::kProperty
         && strcmp("c2", *prev_edge_name) == 0) has_c2_x2_ref = true;
     if (strcmp("1", *last_edge_name) == 0
-        && last_edge->GetType() == v8::HeapGraphEdge::ELEMENT
+        && last_edge->GetType() == v8::HeapGraphEdge::kElement
         && strcmp("c2", *prev_edge_name) == 0) has_c2_1_ref = true;
     if (strcmp("x", *last_edge_name) == 0
-        && last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
+        && last_edge->GetType() == v8::HeapGraphEdge::kContextVariable
         && strcmp("b2_1", *prev_edge_name) == 0) has_b2_1_x_ref = true;
     if (strcmp("x", *last_edge_name) == 0
-        && last_edge->GetType() == v8::HeapGraphEdge::CONTEXT_VARIABLE
+        && last_edge->GetType() == v8::HeapGraphEdge::kContextVariable
         && strcmp("b2_2", *prev_edge_name) == 0) has_b2_2_x_ref = true;
   }
   CHECK(has_global_obj_a2_ref);
@ -571,6 +576,73 @@ TEST(HeapSnapshot) {
 }


+TEST(HeapSnapshotObjectSizes) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  //   -a-> X1 --a
+  // x -b-> X2 <-|
+  CompileAndRunScript(
+      "function X(a, b) { this.a = a; this.b = b; }\n"
+      "x = new X(new X(), new X());\n"
+      "x.a.a = x.b;");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8::String::New("sizes"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* x =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "x");
+  CHECK_NE(NULL, x);
+  const v8::HeapGraphNode* x_prototype =
+      GetProperty(x, v8::HeapGraphEdge::kProperty, "prototype");
+  CHECK_NE(NULL, x_prototype);
+  const v8::HeapGraphNode* x1 =
+      GetProperty(x, v8::HeapGraphEdge::kProperty, "a");
+  CHECK_NE(NULL, x1);
+  const v8::HeapGraphNode* x2 =
+      GetProperty(x, v8::HeapGraphEdge::kProperty, "b");
+  CHECK_NE(NULL, x2);
+  CHECK_EQ(
+      x->GetSelfSize() * 3,
+      x->GetReachableSize() - x_prototype->GetReachableSize());
+  CHECK_EQ(
+      x->GetSelfSize() * 3 + x_prototype->GetSelfSize(), x->GetRetainedSize());
+  CHECK_EQ(
+      x1->GetSelfSize() * 2,
+      x1->GetReachableSize() - x_prototype->GetReachableSize());
+  CHECK_EQ(
+      x1->GetSelfSize(), x1->GetRetainedSize());
+  CHECK_EQ(
+      x2->GetSelfSize(),
+      x2->GetReachableSize() - x_prototype->GetReachableSize());
+  CHECK_EQ(
+      x2->GetSelfSize(), x2->GetRetainedSize());
+}
+
+
+TEST(HeapSnapshotEntryChildren) {
+  v8::HandleScope scope;
+  LocalContext env;
+
+  CompileAndRunScript(
+      "function A() { }\n"
+      "a = new A;");
+  const v8::HeapSnapshot* snapshot =
+      v8::HeapProfiler::TakeSnapshot(v8::String::New("children"));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  for (int i = 0, count = global->GetChildrenCount(); i < count; ++i) {
+    const v8::HeapGraphEdge* prop = global->GetChild(i);
+    CHECK_EQ(global, prop->GetFromNode());
+  }
+  const v8::HeapGraphNode* a =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "a");
+  CHECK_NE(NULL, a);
+  for (int i = 0, count = a->GetChildrenCount(); i < count; ++i) {
+    const v8::HeapGraphEdge* prop = a->GetChild(i);
+    CHECK_EQ(a, prop->GetFromNode());
+  }
+}
+
+
 TEST(HeapSnapshotCodeObjects) {
   v8::HandleScope scope;
   LocalContext env;
@ -584,20 +656,20 @@ TEST(HeapSnapshotCodeObjects) {

   const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
   const v8::HeapGraphNode* compiled =
-      GetProperty(global, v8::HeapGraphEdge::PROPERTY, "compiled");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "compiled");
   CHECK_NE(NULL, compiled);
-  CHECK_EQ(v8::HeapGraphNode::CLOSURE, compiled->GetType());
+  CHECK_EQ(v8::HeapGraphNode::kClosure, compiled->GetType());
   const v8::HeapGraphNode* lazy =
-      GetProperty(global, v8::HeapGraphEdge::PROPERTY, "lazy");
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "lazy");
   CHECK_NE(NULL, lazy);
-  CHECK_EQ(v8::HeapGraphNode::CLOSURE, lazy->GetType());
+  CHECK_EQ(v8::HeapGraphNode::kClosure, lazy->GetType());

   // Find references to code.
   const v8::HeapGraphNode* compiled_code =
-      GetProperty(compiled, v8::HeapGraphEdge::INTERNAL, "code");
+      GetProperty(compiled, v8::HeapGraphEdge::kInternal, "code");
   CHECK_NE(NULL, compiled_code);
   const v8::HeapGraphNode* lazy_code =
-      GetProperty(lazy, v8::HeapGraphEdge::INTERNAL, "code");
+      GetProperty(lazy, v8::HeapGraphEdge::kInternal, "code");
   CHECK_NE(NULL, lazy_code);

   // Verify that non-compiled code doesn't contain references to "x"
@ -607,7 +679,7 @@ TEST(HeapSnapshotCodeObjects) {
   for (int i = 0, count = compiled_code->GetChildrenCount(); i < count; ++i) {
     const v8::HeapGraphEdge* prop = compiled_code->GetChild(i);
     const v8::HeapGraphNode* node = prop->GetToNode();
-    if (node->GetType() == v8::HeapGraphNode::ARRAY) {
+    if (node->GetType() == v8::HeapGraphNode::kArray) {
       if (HasString(node, "x")) {
         compiled_references_x = true;
         break;
@ -617,7 +689,7 @@ TEST(HeapSnapshotCodeObjects) {
   for (int i = 0, count = lazy_code->GetChildrenCount(); i < count; ++i) {
     const v8::HeapGraphEdge* prop = lazy_code->GetChild(i);
     const v8::HeapGraphNode* node = prop->GetToNode();
-    if (node->GetType() == v8::HeapGraphNode::ARRAY) {
+    if (node->GetType() == v8::HeapGraphNode::kArray) {
       if (HasString(node, "x")) {
         lazy_references_x = true;
         break;
@ -634,11 +706,8 @@ TEST(HeapSnapshotCodeObjects) {
 // them to a signed type.
 #define CHECK_EQ_UINT64_T(a, b) \
   CHECK_EQ(static_cast<int64_t>(a), static_cast<int64_t>(b))
-#define CHECK_NE_UINT64_T(a, b) do \
-{ \
-  bool ne = a != b; \
-  CHECK(ne); \
-} while (false)
+#define CHECK_NE_UINT64_T(a, b) \
+  CHECK((a) != (b))  // NOLINT

 TEST(HeapEntryIdsAndGC) {
   v8::HandleScope scope;
@ -662,27 +731,35 @@ TEST(HeapEntryIdsAndGC) {
   CHECK_NE_UINT64_T(0, global1->GetId());
   CHECK_EQ_UINT64_T(global1->GetId(), global2->GetId());
   const v8::HeapGraphNode* A1 =
-      GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "A");
+      GetProperty(global1, v8::HeapGraphEdge::kProperty, "A");
+  CHECK_NE(NULL, A1);
   const v8::HeapGraphNode* A2 =
-      GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "A");
+      GetProperty(global2, v8::HeapGraphEdge::kProperty, "A");
+  CHECK_NE(NULL, A2);
   CHECK_NE_UINT64_T(0, A1->GetId());
   CHECK_EQ_UINT64_T(A1->GetId(), A2->GetId());
   const v8::HeapGraphNode* B1 =
-      GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "B");
+      GetProperty(global1, v8::HeapGraphEdge::kProperty, "B");
+  CHECK_NE(NULL, B1);
   const v8::HeapGraphNode* B2 =
-      GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "B");
+      GetProperty(global2, v8::HeapGraphEdge::kProperty, "B");
+  CHECK_NE(NULL, B2);
   CHECK_NE_UINT64_T(0, B1->GetId());
   CHECK_EQ_UINT64_T(B1->GetId(), B2->GetId());
   const v8::HeapGraphNode* a1 =
-      GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "a");
+      GetProperty(global1, v8::HeapGraphEdge::kProperty, "a");
+  CHECK_NE(NULL, a1);
   const v8::HeapGraphNode* a2 =
-      GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "a");
+      GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
+  CHECK_NE(NULL, a2);
   CHECK_NE_UINT64_T(0, a1->GetId());
   CHECK_EQ_UINT64_T(a1->GetId(), a2->GetId());
   const v8::HeapGraphNode* b1 =
-      GetProperty(global1, v8::HeapGraphEdge::PROPERTY, "b");
+      GetProperty(global1, v8::HeapGraphEdge::kProperty, "b");
+  CHECK_NE(NULL, b1);
   const v8::HeapGraphNode* b2 =
-      GetProperty(global2, v8::HeapGraphEdge::PROPERTY, "b");
+      GetProperty(global2, v8::HeapGraphEdge::kProperty, "b");
+  CHECK_NE(NULL, b2);
   CHECK_NE_UINT64_T(0, b1->GetId());
   CHECK_EQ_UINT64_T(b1->GetId(), b2->GetId());
 }
@ -717,15 +794,15 @@ TEST(HeapSnapshotsDiff) {
   for (int i = 0, count = additions_root->GetChildrenCount(); i < count; ++i) {
     const v8::HeapGraphEdge* prop = additions_root->GetChild(i);
     const v8::HeapGraphNode* node = prop->GetToNode();
-    if (node->GetType() == v8::HeapGraphNode::OBJECT) {
+    if (node->GetType() == v8::HeapGraphNode::kObject) {
       v8::String::AsciiValue node_name(node->GetName());
       if (strcmp(*node_name, "A") == 0) {
-        CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
+        CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::kProperty, "a"));
         CHECK(!found_A);
         found_A = true;
         s1_A_id = node->GetId();
       } else if (strcmp(*node_name, "B") == 0) {
-        CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "b2"));
+        CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::kProperty, "b2"));
         CHECK(!found_B);
         found_B = true;
       }
@ -741,10 +818,10 @@ TEST(HeapSnapshotsDiff) {
   for (int i = 0, count = deletions_root->GetChildrenCount(); i < count; ++i) {
     const v8::HeapGraphEdge* prop = deletions_root->GetChild(i);
     const v8::HeapGraphNode* node = prop->GetToNode();
-    if (node->GetType() == v8::HeapGraphNode::OBJECT) {
+    if (node->GetType() == v8::HeapGraphNode::kObject) {
       v8::String::AsciiValue node_name(node->GetName());
       if (strcmp(*node_name, "A") == 0) {
-        CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::PROPERTY, "a"));
+        CHECK(IsNodeRetainedAs(node, v8::HeapGraphEdge::kProperty, "a"));
         CHECK(!found_A_del);
         found_A_del = true;
         s2_A_id = node->GetId();
@ -756,4 +833,35 @@ TEST(HeapSnapshotsDiff) {
   CHECK(s1_A_id != s2_A_id);
 }
+
+
+namespace v8 {
+namespace internal {
+
+class HeapSnapshotTester {
+ public:
+  static int CalculateNetworkSize(JSObject* obj) {
+    return HeapSnapshot::CalculateNetworkSize(obj);
+  }
+};
+
+} }  // namespace v8::internal
+
+// http://code.google.com/p/v8/issues/detail?id=822
+// Trying to call CalculateNetworkSize on an object with elements set
+// to non-FixedArray may cause an assertion error in debug builds.
+TEST(Issue822) {
+  v8::HandleScope scope;
+  LocalContext context;
+  const int kElementCount = 260;
+  uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
+  i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
+                                                              pixel_data);
+  v8::Handle<v8::Object> obj = v8::Object::New();
+  // Set the elements to be the pixels.
+  obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
+  i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
+  // This call must not cause an assertion error in debug builds.
+  i::HeapSnapshotTester::CalculateNetworkSize(*jsobj);
+}

 #endif  // ENABLE_LOGGING_AND_PROFILING
22
deps/v8/test/mjsunit/object-literal.js
vendored
@ -188,25 +188,3 @@ function testKeywordProperty(keyword) {
 for (var i = 0; i < keywords.length; i++) {
   testKeywordProperty(keywords[i]);
 }
-
-// Test getter and setter properties with string/number literal names.
-
-var obj = {get 42() { return 42; },
-           get 3.14() { return "PI"; },
-           get "PI"() { return 3.14; },
-           readback: 0,
-           set 37(v) { this.readback = v; },
-           set 1.44(v) { this.readback = v; },
-           set "Poo"(v) { this.readback = v; }}
-
-assertEquals(42, obj[42]);
-assertEquals("PI", obj[3.14]);
-assertEquals(3.14, obj["PI"]);
-obj[37] = "t1";
-assertEquals("t1", obj.readback);
-obj[1.44] = "t2";
-assertEquals("t2", obj.readback);
-obj["Poo"] = "t3";
-assertEquals("t3", obj.readback);
-
-
31
deps/v8/test/mjsunit/regress/bitops-register-alias.js
vendored
Normal file
@ -0,0 +1,31 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that the code generator can cope with left and right being in
+// the same register for bitops.
+function f() { for (var i = 10; i < 100; i++) { return i | i; } }
+assertEquals(10, f());
2
deps/v8/tools/gyp/v8.gyp
vendored
@ -108,8 +108,6 @@
       'conditions': [
         [ 'gcc_version==44', {
           'cflags': [
-            # Avoid gcc 4.4 strict aliasing issues in dtoa.c
-            '-fno-strict-aliasing',
             # Avoid crashes with gcc 4.4 in the v8 test suite.
             '-fno-tree-vrp',
           ],