From 4ed66880106781b502ca02509c7c544d6c476369 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Sat, 19 Feb 2011 20:56:20 -0800 Subject: [PATCH 01/24] Add V8 options to man page --- doc/node.1 | 322 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 322 insertions(+) diff --git a/doc/node.1 b/doc/node.1 index 1a3a98f158f..758cd2ffd85 100644 --- a/doc/node.1 +++ b/doc/node.1 @@ -53,6 +53,328 @@ If set to 1 then modules will load in their own global contexts. .IP NODE_DISABLE_COLORS If set to 1 then colors will not be used in the REPL. +.SH V8 OPTIONS + + --crankshaft (use crankshaft) + type: bool default: false + --hydrogen_filter (hydrogen use/trace filter) + type: string default: + --use_hydrogen (use generated hydrogen for compilation) + type: bool default: true + --build_lithium (use lithium chunk builder) + type: bool default: true + --alloc_lithium (use lithium register allocator) + type: bool default: true + --use_lithium (use lithium code generator) + type: bool default: true + --use_range (use hydrogen range analysis) + type: bool default: true + --eliminate_dead_phis (eliminate dead phis) + type: bool default: true + --use_gvn (use hydrogen global value numbering) + type: bool default: true + --use_peeling (use loop peeling) + type: bool default: false + --use_canonicalizing (use hydrogen instruction canonicalizing) + type: bool default: true + --use_inlining (use function inlining) + type: bool default: true + --limit_inlining (limit code size growth from inlining) + type: bool default: true + --eliminate_empty_blocks (eliminate empty blocks) + type: bool default: true + --loop_invariant_code_motion (loop invariant code motion) + type: bool default: true + --time_hydrogen (timing for hydrogen) + type: bool default: false + --trace_hydrogen (trace generated hydrogen to file) + type: bool default: false + --trace_inlining (trace inlining decisions) + type: bool default: false + --trace_alloc (trace register allocator) + type: bool default: false + --trace_range (trace range analysis) + type: bool default: false + --trace_gvn (trace global value numbering) + type: bool default: false + --trace_representation (trace representation types) + type: bool default: false + --stress_pointer_maps (pointer map for every instruction) + type: bool default: false + --stress_environments (environment for every instruction) + type: bool default: false + --deopt_every_n_times (deoptimize every n times a deopt point is passed) + type: int default: 0 + --process_arguments_object (try to deal with arguments object) + type: bool default: true + --trap_on_deopt (put a break point before deoptimizing) + type: bool default: false + --deoptimize_uncommon_cases (deoptimize uncommon cases) + type: bool default: true + --polymorphic_inlining (polymorphic inlining) + type: bool default: true + --aggressive_loop_invariant_motion (aggressive motion of instructions out of loops) + type: bool default: true + --use_osr (use on-stack replacement) + type: bool default: true + --trace_osr (trace on-stack replacement) + type: bool default: false + --stress_runs (number of stress runs) + type: int default: 0 + --optimize_closures (optimize closures) + type: bool default: true + --debug_code (generate extra code (assertions) for debugging) + type: bool default: false + --code_comments (emit comments in code disassembly) + type: bool default: false + --emit_branch_hints (emit branch hints) + type: bool default: false + --peephole_optimization (perform peephole optimizations in assembly code) + type: bool 
default: true + --print_peephole_optimization (print peephole optimizations in assembly code) + type: bool default: false + --enable_sse2 (enable use of SSE2 instructions if available) + type: bool default: true + --enable_sse3 (enable use of SSE3 instructions if available) + type: bool default: true + --enable_sse4_1 (enable use of SSE4.1 instructions if available) + type: bool default: true + --enable_cmov (enable use of CMOV instruction if available) + type: bool default: true + --enable_rdtsc (enable use of RDTSC instruction if available) + type: bool default: true + --enable_sahf (enable use of SAHF instruction if available (X64 only)) + type: bool default: true + --enable_vfp3 (enable use of VFP3 instructions if available (ARM only)) + type: bool default: true + --enable_armv7 (enable use of ARMv7 instructions if available (ARM only)) + type: bool default: true + --expose_natives_as (expose natives in global object) + type: string default: NULL + --expose_debug_as (expose debug in global object) + type: string default: NULL + --expose_gc (expose gc extension) + type: bool default: false + --expose_externalize_string (expose externalize string extension) + type: bool default: false + --stack_trace_limit (number of stack frames to capture) + type: int default: 10 + --disable_native_files (disable builtin natives files) + type: bool default: false + --inline_new (use fast inline allocation) + type: bool default: true + --stack_trace_on_abort (print a stack trace if an assertion failure occurs) + type: bool default: true + --trace (trace function calls) + type: bool default: false + --defer_negation (defer negation operation) + type: bool default: true + --mask_constants_with_cookie (use random jit cookie to mask large constants) + type: bool default: true + --lazy (use lazy compilation) + type: bool default: true + --trace_opt (trace lazy optimization) + type: bool default: false + --trace_opt_stats (trace lazy optimization statistics) + type: bool default: false + --opt (use adaptive optimizations) + type: bool default: true + --opt_eagerly (be more eager when adaptively optimizing) + type: bool default: false + --always_opt (always try to optimize functions) + type: bool default: false + --prepare_always_opt (prepare for turning on always opt) + type: bool default: false + --debug_info (add debug information to compiled functions) + type: bool default: true + --deopt (support deoptimization) + type: bool default: true + --trace_deopt (trace deoptimization) + type: bool default: false + --strict (strict error checking) + type: bool default: false + --min_preparse_length (minimum length for automatic enable preparsing) + type: int default: 1024 + --full_compiler (enable dedicated backend for run-once code) + type: bool default: true + --always_full_compiler (try to use the dedicated run-once backend for all code) + type: bool default: false + --trace_bailout (print reasons for falling back to using the classic V8 backend) + type: bool default: false + --safe_int32_compiler (enable optimized side-effect-free int32 expressions.) + type: bool default: true + --use_flow_graph (perform flow-graph based optimizations) + type: bool default: false + --compilation_cache (enable compilation cache) + type: bool default: true + --loop_peeling (Peel off the first iteration of loops.) 
+ type: bool default: false + --remote_debugging (enable remote debugging) + type: bool default: false + --trace_debug_json (trace debugging JSON request/response) + type: bool default: false + --debugger_auto_break (automatically set the debug break flag when debugger commands are in the queue) + type: bool default: true + --enable_liveedit (enable liveedit experimental feature) + type: bool default: true + --stack_size (default size of stack region v8 is allowed to use (in kBytes)) + type: int default: 1024 + --max_stack_trace_source_length (maximum length of function source code printed in a stack trace.) + type: int default: 300 + --always_inline_smi_code (always inline smi code in non-opt code) + type: bool default: false + --max_new_space_size (max size of the new generation (in kBytes)) + type: int default: 0 + --max_old_space_size (max size of the old generation (in Mbytes)) + type: int default: 0 + --max_executable_size (max size of executable memory (in Mbytes)) + type: int default: 0 + --gc_global (always perform global GCs) + type: bool default: false + --gc_interval (garbage collect after allocations) + type: int default: -1 + --trace_gc (print one trace line following each garbage collection) + type: bool default: false + --trace_gc_nvp (print one detailed trace line in name=value format after each garbage collection) + type: bool default: false + --print_cumulative_gc_stat (print cumulative GC statistics in name=value format on exit) + type: bool default: false + --trace_gc_verbose (print more details following each garbage collection) + type: bool default: false + --collect_maps (garbage collect maps from which no objects can be reached) + type: bool default: true + --flush_code (flush code that we expect not to use again before full gc) + type: bool default: true + --use_idle_notification (Use idle notification to reduce memory footprint.) + type: bool default: true + --use_ic (use inline caching) + type: bool default: true + --native_code_counters (generate extra code for manipulating stats counters) + type: bool default: false + --always_compact (Perform compaction on every full GC) + type: bool default: false + --never_compact (Never perform compaction on full GC - testing only) + type: bool default: false + --cleanup_ics_at_gc (Flush inline caches prior to mark compact collection.) + type: bool default: true + --cleanup_caches_in_maps_at_gc (Flush code caches in maps during mark compact cycle.) + type: bool default: true + --random_seed (Default seed for initializing random generator (0, the default, means to use system random).) + type: int default: 0 + --canonicalize_object_literal_maps (Canonicalize maps for object literals.) + type: bool default: true + --use_big_map_space (Use big map space, but don't compact if it grew too big.) + type: bool default: true + --max_map_space_pages (Maximum number of pages in map space which still allows to encode forwarding pointers. That's actually a constant, but it's useful to control it with a flag for better testing.) 
+ type: int default: 65535 + --h (print this message) + type: bool default: false + --new_snapshot (use new snapshot implementation) + type: bool default: true + --use_verbose_printer (allows verbose printing) + type: bool default: true + --allow_natives_syntax (allow natives syntax) + type: bool default: false + --strict_mode (allow strict mode directives) + type: bool default: true + --optimize_ast (optimize the ast) + type: bool default: true + --trace_sim (Trace simulator execution) + type: bool default: false + --check_icache (Check icache flushes in ARM simulator) + type: bool default: false + --stop_sim_at (Simulator stop after x number of instructions) + type: int default: 0 + --sim_stack_alignment (Stack alignment in bytes in simulator (4 or 8, 8 is default)) + type: int default: 8 + --trace_exception (print stack trace when throwing exceptions) + type: bool default: false + --preallocate_message_memory (preallocate some memory to build stack traces.) + type: bool default: false + --preemption (activate a 100ms timer that switches between V8 threads) + type: bool default: false + --trace_regexps (trace regexp execution) + type: bool default: false + --regexp_optimization (generate optimized regexp code) + type: bool default: true + --regexp_entry_native (use native code to enter regexp) + type: bool default: true + --testing_bool_flag (testing_bool_flag) + type: bool default: true + --testing_int_flag (testing_int_flag) + type: int default: 13 + --testing_float_flag (float-flag) + type: float default: 2.500000 + --testing_string_flag (string-flag) + type: string default: Hello, world! + --testing_prng_seed (Seed used for threading test randomness) + type: int default: 42 + --testing_serialization_file (file in which to serialize heap) + type: string default: /tmp/serdes + --help (Print usage message, including flags, on console) + type: bool default: true + --dump_counters (Dump counters on exit) + type: bool default: false + --debugger (Enable JavaScript debugger) + type: bool default: false + --remote_debugger (Connect JavaScript debugger to the debugger agent in another process) + type: bool default: false + --debugger_agent (Enable debugger agent) + type: bool default: false + --debugger_port (Port to use for remote debugging) + type: int default: 5858 + --map_counters (Map counters to a file) + type: string default: NULL + --js_arguments (Pass all remaining arguments to the script. Alias for "--".) + type: arguments default: + --debug_compile_events (Enable debugger compile events) + type: bool default: true + --debug_script_collected_events (Enable debugger script collected events) + type: bool default: true + --gdbjit (enable GDBJIT interface (disables compacting GC)) + type: bool default: false + --gdbjit_full (enable GDBJIT interface for all code objects) + type: bool default: false + --gdbjit_dump (dump elf objects with debug info to disk) + type: bool default: false + --log (Minimal logging (no API, code, GC, suspect, or handles samples).) + type: bool default: false + --log_all (Log all events to the log file.) + type: bool default: false + --log_runtime (Activate runtime system %Log call.) + type: bool default: false + --log_api (Log API events to the log file.) + type: bool default: false + --log_code (Log code events to the log file without profiling.) + type: bool default: false + --log_gc (Log heap samples on garbage collection for the hp2ps tool.) + type: bool default: false + --log_handles (Log global handle events.) 
+ type: bool default: false + --log_snapshot_positions (log positions of (de)serialized objects in the snapshot.) + type: bool default: false + --log_suspect (Log suspect operations.) + type: bool default: false + --log_producers (Log stack traces of JS objects allocations.) + type: bool default: false + --prof (Log statistical profiling information (implies --log-code).) + type: bool default: false + --prof_auto (Used with --prof, starts profiling automatically) + type: bool default: true + --prof_lazy (Used with --prof, only does sampling and logging when profiler is active (implies --noprof_auto).) + type: bool default: false + --prof_browser_mode (Used with --prof, turns on browser-compatible mode for profiling.) + type: bool default: true + --log_regexp (Log regular expression execution.) + type: bool default: false + --sliding_state_window (Update sliding state window counters.) + type: bool default: false + --logfile (Specify the name of the log file.) + type: string default: v8.log + --ll_prof (Enable low-level linux profiler.) + type: bool default: false + + .SH RESOURCES AND DOCUMENTATION See the website for documentation http://nodejs.org/ From df215a67d4ee53be749693909aae266d41723aa7 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Sun, 20 Feb 2011 13:11:53 -0800 Subject: [PATCH 02/24] Document path.existsSync() Closes GH-677. --- doc/api/path.markdown | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/api/path.markdown b/doc/api/path.markdown index 36544f6c17b..adef4b39ed6 100644 --- a/doc/api/path.markdown +++ b/doc/api/path.markdown @@ -112,3 +112,8 @@ with either true or false. Example: path.exists('/etc/passwd', function (exists) { util.debug(exists ? "it's there" : "no passwd!"); }); + + +### path.existsSync(p) + +Synchronous version of `path.exists`. From 47f5fd01e1f91d875aecf1f7290ab9f2423c75b4 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Sun, 20 Feb 2011 13:53:40 -0800 Subject: [PATCH 03/24] Add example to TTY documentation --- doc/api/tty.markdown | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/doc/api/tty.markdown b/doc/api/tty.markdown index a1bd1433637..83cad064b2f 100644 --- a/doc/api/tty.markdown +++ b/doc/api/tty.markdown @@ -2,6 +2,19 @@ Use `require('tty')` to access this module. +Example: + + var tty = require('tty'); + tty.setRawMode(true); + process.stdin.resume(); + process.stdin.on('keypress', function(char, key) { + if (key && key.ctrl && key.name == 'c') { + console.log('graceful exit'); + process.exit() + } + }); + + ### tty.open(path, args=[]) From ab144f4843c2c7630564f26cfbdd69d01d8fbd00 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 23 Feb 2011 04:43:13 -0800 Subject: [PATCH 04/24] Add TODO item --- lib/tls.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tls.js b/lib/tls.js index e27d1aa0cea..db6807e06ce 100644 --- a/lib/tls.js +++ b/lib/tls.js @@ -770,6 +770,8 @@ Server.prototype.setOptions = function(options) { // s.end("hello world\n"); // }); // +// +// TODO: make port, host part of options! 
exports.connect = function(port /* host, options, cb */) { // parse args var host, options = {}, cb; From 0248c87ec7c76617af3a3de11791c6f6cbb90aac Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 23 Feb 2011 12:56:50 -0800 Subject: [PATCH 05/24] improve hello-world example on website --- doc/index.html | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/index.html b/doc/index.html index 7ae60221873..9b88f44d252 100644 --- a/doc/index.html +++ b/doc/index.html @@ -65,12 +65,13 @@ Server running at http://127.0.0.1:8124/
 var net = require('net');
-net.createServer(function (socket) {
+
+var server = net.createServer(function (socket) {
   socket.write("Echo server\r\n");
-  socket.on("data", function (data) {
-    socket.write(data);
-  });
-}).listen(8124, "127.0.0.1");
+  socket.pipe(socket);
+})
+
+server.listen(8124, "127.0.0.1");
 

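
Put together, the patched example is this standalone script (a minimal sketch assuming the node 0.4-era `net` API; the filename echo.js is hypothetical). `socket.pipe(socket)` replaces the manual 'data' handler because a TCP socket is both a readable and a writable stream, so piping it to itself echoes input back:

    // echo.js -- the echo server as it reads after this patch.
    var net = require('net');

    var server = net.createServer(function (socket) {
      socket.write("Echo server\r\n");
      // A socket is a duplex stream; piping it to itself sends every
      // chunk a client writes straight back to that client.
      socket.pipe(socket);
    });

    server.listen(8124, "127.0.0.1");

Start it with `node echo.js` and try it with `telnet 127.0.0.1 8124`; each line typed comes back.
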
From a91b1409636db20e931e14e1be03261f57ab3f7c Mon Sep 17 00:00:00 2001 From: cloudhead Date: Mon, 21 Feb 2011 19:31:01 -0500 Subject: [PATCH 06/24] fix process.on edge case with signal event When adding a listener for a signal event, removing it, and adding it back again, it triggers a condition with an undefined variable. --- src/node.js | 2 +- test/simple/test-signal-handler.js | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/node.js b/src/node.js index 789cb9b0e1d..6b0aa6f53ea 100644 --- a/src/node.js +++ b/src/node.js @@ -232,7 +232,7 @@ w.start(); } else if (this.listeners(type).length === 1) { - signalWatchers[event].start(); + signalWatchers[type].start(); } } diff --git a/test/simple/test-signal-handler.js b/test/simple/test-signal-handler.js index 906573a3aeb..9866fd66939 100644 --- a/test/simple/test-signal-handler.js +++ b/test/simple/test-signal-handler.js @@ -6,6 +6,8 @@ console.log('process.pid: ' + process.pid); var first = 0, second = 0; +var sighup = false; + process.addListener('SIGUSR1', function() { console.log('Interrupted by SIGUSR1'); first += 1; @@ -28,8 +30,15 @@ setInterval(function() { } }, 1); +// Test addListener condition where a watcher for SIGNAL +// has been previously registered, and `process.listeners(SIGNAL).length === 1` +process.addListener('SIGHUP', function () {}); +process.removeAllListeners('SIGHUP'); +process.addListener('SIGHUP', function () { sighup = true }); +process.kill(process.pid, 'SIGHUP'); process.addListener('exit', function() { assert.equal(1, first); assert.equal(1, second); + assert.equal(true, sighup); }); From 340291c085f629050bd66668dba7cb006f896d89 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 23 Feb 2011 16:19:41 -0800 Subject: [PATCH 07/24] Add extra debug print statement to tls.js --- lib/tls.js | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tls.js b/lib/tls.js index db6807e06ce..d5542583baa 100644 --- a/lib/tls.js +++ b/lib/tls.js @@ -773,6 +773,7 @@ Server.prototype.setOptions = function(options) { // // TODO: make port, host part of options! exports.connect = function(port /* host, options, cb */) { + debug('tls.connect called with ' + JSON.stringify(arguments)); // parse args var host, options = {}, cb; for (var i = 1; i < arguments.length; i++) { From 45adc907c9b3eff0bc560d0c6a0f904ff9ee0c39 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Wed, 23 Feb 2011 16:19:13 -0800 Subject: [PATCH 08/24] ry/node -> joyent/node --- doc/api/addons.markdown | 2 +- doc/api/appendix_1.markdown | 4 ++-- doc/index.html | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/api/addons.markdown b/doc/api/addons.markdown index 315015f571e..847d4144b5a 100644 --- a/doc/api/addons.markdown +++ b/doc/api/addons.markdown @@ -77,4 +77,4 @@ All Node addons must export a function called `init` with this signature: extern 'C' void init (Handle target) For the moment, that is all the documentation on addons. Please see - for a real example. + for a real example. diff --git a/doc/api/appendix_1.markdown b/doc/api/appendix_1.markdown index 395a9414f47..ea2700dea96 100644 --- a/doc/api/appendix_1.markdown +++ b/doc/api/appendix_1.markdown @@ -2,7 +2,7 @@ There are many third party modules for Node. At the time of writing, August 2010, the master repository of modules is -[the wiki page](http://github.com/ry/node/wiki/modules). +[the wiki page](http://github.com/joyent/node/wiki/modules). 
This appendix is intended as a SMALL guide to new-comers to help them quickly find what are considered to be quality modules. It is not intended @@ -21,7 +21,7 @@ elsewhere. - [mDNS/Zeroconf/Bonjour](http://github.com/agnat/node_mdns) -- [RabbitMQ, AMQP](http://github.com/ry/node-amqp) +- [RabbitMQ, AMQP](http://github.com/joyent/node-amqp) - [mysql](http://github.com/felixge/node-mysql) diff --git a/doc/index.html b/doc/index.html index 9b88f44d252..34dc236eba3 100644 --- a/doc/index.html +++ b/doc/index.html @@ -18,11 +18,11 @@
@@ -81,13 +81,13 @@ server.listen(8124, "127.0.0.1");

- Go to the Wiki for lots more information. + Go to the Wiki for lots more information.

Download

- git repo + git repo

@@ -100,7 +100,7 @@ server.listen(8124, "127.0.0.1");

For build instructions see - https://github.com/ry/node/wiki/Installation + https://github.com/joyent/node/wiki/Installation

From 5287703cce59e2ee09c73a851bfdcd018a67926c Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Thu, 24 Feb 2011 11:24:19 -0800 Subject: [PATCH 09/24] Pragma HTTP header comma separation Closes GH-715, GH-716. --- lib/http.js | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/http.js b/lib/http.js index af6a40932b0..8313c8cc648 100644 --- a/lib/http.js +++ b/lib/http.js @@ -261,6 +261,7 @@ IncomingMessage.prototype._addHeaderLine = function(field, value) { case 'accept-language': case 'connection': case 'cookie': + case 'pragma': if (field in dest) { dest[field] += ', ' + value; } else { From f423ec90fc6cfa47baf48485240dd338d61e9307 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisendo=CC=88rfer?= Date: Sun, 20 Feb 2011 13:31:37 +0100 Subject: [PATCH 10/24] In addition to 'aborted' emit 'close' from incoming requests Closes GH-160. --- lib/http.js | 1 + test/simple/test-http-response-close.js | 34 +++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 test/simple/test-http-response-close.js diff --git a/lib/http.js b/lib/http.js index 8313c8cc648..5e86a33f0da 100644 --- a/lib/http.js +++ b/lib/http.js @@ -952,6 +952,7 @@ function connectionListener(socket) { while (incoming.length) { var req = incoming.shift(); req.emit('aborted'); + req.emit('close'); } // abort socket._httpMessage ? } diff --git a/test/simple/test-http-response-close.js b/test/simple/test-http-response-close.js new file mode 100644 index 00000000000..7a1b76e2993 --- /dev/null +++ b/test/simple/test-http-response-close.js @@ -0,0 +1,34 @@ +var common = require('../common'); +var assert = require('assert'); +var http = require('http'); + +var gotEnd = false; + +var server = http.createServer(function(req, res) { + res.writeHead(200); + res.write('a'); + + req.on('close', function() { + console.error("aborted"); + gotEnd = true; + }); +}); +server.listen(common.PORT); + +server.addListener('listening', function() { + console.error("make req"); + http.get({ + port: common.PORT + }, function(res) { + console.error("got res"); + res.on('data', function(data) { + console.error("destroy res"); + res.destroy(); + server.close(); + }); + }); +}); + +process.on('exit', function() { + assert.ok(gotEnd); +}); From 06ac129903d9b71782743dc77ec79689f4eab1a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisendo=CC=88rfer?= Date: Sun, 20 Feb 2011 13:41:14 +0100 Subject: [PATCH 11/24] Document existence of `fd` property for ReadStream Closes GH-194. Closes GH-701. --- doc/api/fs.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/api/fs.markdown b/doc/api/fs.markdown index 39ffcbdf279..18f12d9584e 100644 --- a/doc/api/fs.markdown +++ b/doc/api/fs.markdown @@ -355,6 +355,7 @@ Returns a new ReadStream object (See `Readable Stream`). { flags: 'r', encoding: null, + fd: null, mode: 0666, bufferSize: 4096 } From 2680522d3a71fb5f12aa052555f3d04806338821 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Geisendo=CC=88rfer?= Date: Sun, 20 Feb 2011 13:45:25 +0100 Subject: [PATCH 12/24] Fix ReadStream bufferSize docs The default is 64kb buffer, not 4kb. Closes GH-702. --- doc/api/fs.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/api/fs.markdown b/doc/api/fs.markdown index 18f12d9584e..437ba732f84 100644 --- a/doc/api/fs.markdown +++ b/doc/api/fs.markdown @@ -357,7 +357,8 @@ Returns a new ReadStream object (See `Readable Stream`). 
encoding: null, fd: null, mode: 0666, - bufferSize: 4096 } + bufferSize: 64 * 1024 + } `options` can include `start` and `end` values to read a range of bytes from the file instead of the entire file. Both `start` and `end` are inclusive and From e33e7d1a3712add02cd08c0069bb56ba76e49aa5 Mon Sep 17 00:00:00 2001 From: Ryan Dahl Date: Thu, 24 Feb 2011 13:14:59 -0800 Subject: [PATCH 13/24] Upgrade V8 to 3.1.6 --- deps/v8/ChangeLog | 13 + deps/v8/SConstruct | 122 +- deps/v8/src/SConscript | 4 +- deps/v8/src/accessors.cc | 16 +- deps/v8/src/arm/assembler-arm.cc | 110 +- deps/v8/src/arm/assembler-arm.h | 28 +- deps/v8/src/arm/code-stubs-arm.cc | 145 ++- deps/v8/src/arm/code-stubs-arm.h | 26 +- deps/v8/src/arm/codegen-arm.cc | 4 +- deps/v8/src/arm/cpu-arm.cc | 5 + deps/v8/src/arm/deoptimizer-arm.cc | 14 +- deps/v8/src/arm/full-codegen-arm.cc | 192 +-- deps/v8/src/arm/lithium-arm.cc | 57 +- deps/v8/src/arm/lithium-arm.h | 105 +- deps/v8/src/arm/lithium-codegen-arm.cc | 473 +++----- deps/v8/src/arm/lithium-codegen-arm.h | 77 +- deps/v8/src/arm/lithium-gap-resolver-arm.cc | 303 +++++ deps/v8/src/arm/lithium-gap-resolver-arm.h | 84 ++ deps/v8/src/arm/macro-assembler-arm.cc | 42 +- deps/v8/src/arm/macro-assembler-arm.h | 24 +- deps/v8/src/arm/stub-cache-arm.cc | 15 +- deps/v8/src/assembler.cc | 8 +- deps/v8/src/assembler.h | 8 + deps/v8/src/compiler.cc | 72 +- deps/v8/src/compiler.h | 5 +- deps/v8/src/cpu-profiler-inl.h | 7 +- deps/v8/src/cpu-profiler.cc | 140 +-- deps/v8/src/cpu-profiler.h | 48 +- deps/v8/src/execution.cc | 5 + deps/v8/src/flag-definitions.h | 5 + deps/v8/src/gdb-jit.cc | 18 +- deps/v8/src/handles.cc | 34 +- deps/v8/src/heap.cc | 204 ++-- deps/v8/src/heap.h | 67 +- deps/v8/src/hydrogen-instructions.cc | 212 ++-- deps/v8/src/hydrogen-instructions.h | 1046 +++++++++-------- deps/v8/src/hydrogen.cc | 974 ++++++++------- deps/v8/src/hydrogen.h | 195 +-- deps/v8/src/ia32/assembler-ia32.h | 11 +- deps/v8/src/ia32/code-stubs-ia32.cc | 4 +- deps/v8/src/ia32/codegen-ia32.cc | 15 +- deps/v8/src/ia32/deoptimizer-ia32.cc | 14 +- deps/v8/src/ia32/full-codegen-ia32.cc | 31 +- deps/v8/src/ia32/lithium-codegen-ia32.cc | 138 ++- deps/v8/src/ia32/lithium-codegen-ia32.h | 14 + deps/v8/src/ia32/lithium-ia32.cc | 51 +- deps/v8/src/ia32/lithium-ia32.h | 65 +- deps/v8/src/ia32/macro-assembler-ia32.cc | 22 + deps/v8/src/ia32/macro-assembler-ia32.h | 15 +- deps/v8/src/ia32/stub-cache-ia32.cc | 19 +- deps/v8/src/ic.cc | 93 +- deps/v8/src/ic.h | 2 +- deps/v8/src/lithium-allocator.cc | 20 +- deps/v8/src/lithium-allocator.h | 2 - deps/v8/src/lithium.h | 8 +- deps/v8/src/log-utils.cc | 2 + deps/v8/src/log.cc | 151 +-- deps/v8/src/log.h | 26 +- deps/v8/src/mark-compact.cc | 12 +- deps/v8/src/objects.cc | 81 +- deps/v8/src/objects.h | 5 + deps/v8/src/platform-cygwin.cc | 42 +- deps/v8/src/platform-freebsd.cc | 2 +- deps/v8/src/platform-linux.cc | 2 +- deps/v8/src/platform-macos.cc | 2 +- deps/v8/src/platform-openbsd.cc | 2 +- deps/v8/src/platform-solaris.cc | 2 +- deps/v8/src/platform-win32.cc | 2 +- deps/v8/src/platform.h | 14 +- deps/v8/src/profile-generator-inl.h | 11 +- deps/v8/src/profile-generator.cc | 78 +- deps/v8/src/profile-generator.h | 12 +- deps/v8/src/runtime.cc | 82 +- deps/v8/src/top.cc | 12 +- deps/v8/src/v8.cc | 5 + deps/v8/src/v8natives.js | 4 +- deps/v8/src/version.cc | 2 +- deps/v8/src/x64/assembler-x64.cc | 14 +- deps/v8/src/x64/assembler-x64.h | 10 +- deps/v8/src/x64/code-stubs-x64.cc | 152 ++- deps/v8/src/x64/code-stubs-x64.h | 25 +- deps/v8/src/x64/codegen-x64.cc | 15 +- 
deps/v8/src/x64/deoptimizer-x64.cc | 14 +- deps/v8/src/x64/full-codegen-x64.cc | 30 +- deps/v8/src/x64/lithium-codegen-x64.cc | 829 +++++++++++-- deps/v8/src/x64/lithium-codegen-x64.h | 12 +- deps/v8/src/x64/lithium-x64.cc | 234 +++- deps/v8/src/x64/lithium-x64.h | 178 ++- deps/v8/src/x64/macro-assembler-x64.cc | 67 +- deps/v8/src/x64/macro-assembler-x64.h | 53 +- deps/v8/src/x64/stub-cache-x64.cc | 15 +- deps/v8/test/cctest/cctest.status | 21 - deps/v8/test/cctest/test-api.cc | 90 +- deps/v8/test/cctest/test-cpu-profiler.cc | 10 +- deps/v8/test/cctest/test-decls.cc | 8 +- deps/v8/test/cctest/test-log-stack-tracer.cc | 32 +- deps/v8/test/cctest/test-log.cc | 23 +- deps/v8/test/cctest/test-parsing.cc | 11 +- deps/v8/test/cctest/test-profile-generator.cc | 6 +- deps/v8/test/cctest/test-serialize.cc | 5 +- .../regress-valueof.js} | 15 +- deps/v8/test/mjsunit/mjsunit.js | 7 + deps/v8/test/mjsunit/regress/regress-1151.js | 17 + deps/v8/test/mjsunit/regress/regress-1166.js | 35 + deps/v8/test/mjsunit/regress/regress-1167.js | 72 ++ deps/v8/test/mjsunit/regress/regress-1170.js | 66 ++ deps/v8/test/mjsunit/regress/regress-1172.js | 39 + deps/v8/test/mjsunit/regress/regress-1174.js | 43 + deps/v8/test/mjsunit/regress/regress-1176.js | 33 + deps/v8/test/mjsunit/regress/regress-1184.js | 47 + deps/v8/test/mjsunit/strict-mode.js | 224 +++- deps/v8/test/sputnik/README | 4 +- deps/v8/test/sputnik/sputnik.status | 49 +- deps/v8/test/sputnik/testcfg.py | 3 +- deps/v8/tools/disasm.py | 92 ++ deps/v8/tools/grokdump.py | 840 +++++++++++++ deps/v8/tools/gyp/v8.gyp | 2 + deps/v8/tools/linux-tick-processor.py | 78 -- deps/v8/tools/ll_prof.py | 82 +- deps/v8/tools/profile.js | 139 ++- deps/v8/tools/splaytree.py | 226 ---- deps/v8/tools/tickprocessor.js | 75 +- deps/v8/tools/tickprocessor.py | 571 --------- deps/v8/tools/v8.xcodeproj/project.pbxproj | 6 + deps/v8/tools/windows-tick-processor.py | 137 --- 125 files changed, 6670 insertions(+), 4068 deletions(-) create mode 100644 deps/v8/src/arm/lithium-gap-resolver-arm.cc create mode 100644 deps/v8/src/arm/lithium-gap-resolver-arm.h rename deps/v8/test/mjsunit/{regress/regress-1105.js => compiler/regress-valueof.js} (84%) create mode 100644 deps/v8/test/mjsunit/regress/regress-1166.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1167.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1170.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1172.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1174.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1176.js create mode 100644 deps/v8/test/mjsunit/regress/regress-1184.js create mode 100644 deps/v8/tools/disasm.py create mode 100755 deps/v8/tools/grokdump.py delete mode 100755 deps/v8/tools/linux-tick-processor.py delete mode 100644 deps/v8/tools/splaytree.py delete mode 100644 deps/v8/tools/tickprocessor.py delete mode 100755 deps/v8/tools/windows-tick-processor.py diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog index f69be973f04..1d91fcded11 100644 --- a/deps/v8/ChangeLog +++ b/deps/v8/ChangeLog @@ -1,3 +1,16 @@ +2011-02-24: Version 3.1.6 + + Fixed a number of crash bugs. + + Added support for Cygwin (issue 64). + + Improved Crankshaft for x64 and ARM. + + Added Crankshaft support for stores to pixel arrays. + + Fixed issue in CPU profiler with Crankshaft. + + 2011-02-16: Version 3.1.5 Change RegExp parsing to disallow /(*)/. 
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct index 436581a9baf..84707e98475 100644 --- a/deps/v8/SConstruct +++ b/deps/v8/SConstruct @@ -27,7 +27,6 @@ import platform import re -import subprocess import sys import os from os.path import join, dirname, abspath @@ -143,9 +142,6 @@ LIBRARY_FLAGS = { # Use visibility=default to disable this. 'CXXFLAGS': ['-fvisibility=hidden'] }, - 'strictaliasing:off': { - 'CCFLAGS': ['-fno-strict-aliasing'] - }, 'mode:debug': { 'CCFLAGS': ['-g', '-O0'], 'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'], @@ -306,6 +302,7 @@ V8_EXTRA_FLAGS = { 'gcc': { 'all': { 'WARNINGFLAGS': ['-Wall', + '-Werror', '-W', '-Wno-unused-parameter', '-Wnon-virtual-dtor'] @@ -655,16 +652,8 @@ def Abort(message): sys.exit(1) -def GuessOS(env): - return utils.GuessOS() - - -def GuessArch(env): - return utils.GuessArchitecture() - - -def GuessToolchain(env): - tools = env['TOOLS'] +def GuessToolchain(os): + tools = Environment()['TOOLS'] if 'gcc' in tools: return 'gcc' elif 'msvc' in tools: @@ -673,9 +662,7 @@ def GuessToolchain(env): return None -def GuessVisibility(env): - os = env['os'] - toolchain = env['toolchain']; +def GuessVisibility(os, toolchain): if (os == 'win32' or os == 'cygwin') and toolchain == 'gcc': # MinGW / Cygwin can't do it. return 'default' @@ -685,35 +672,27 @@ def GuessVisibility(env): return 'hidden' -def GuessStrictAliasing(env): - # There seems to be a problem with gcc 4.5.x - # see http://code.google.com/p/v8/issues/detail?id=884 - # it can be worked around by disabling strict aliasing - toolchain = env['toolchain']; - if toolchain == 'gcc': - env = Environment(tools=['gcc']) - version = subprocess.Popen([env['CC'], '-dumpversion'], - stdout=subprocess.PIPE).communicate()[0] - if version.find('4.5.') == 0: - return 'off' - return 'default' +OS_GUESS = utils.GuessOS() +TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS) +ARCH_GUESS = utils.GuessArchitecture() +VISIBILITY_GUESS = GuessVisibility(OS_GUESS, TOOLCHAIN_GUESS) SIMPLE_OPTIONS = { 'toolchain': { 'values': ['gcc', 'msvc'], - 'guess': GuessToolchain, - 'help': 'the toolchain to use' + 'default': TOOLCHAIN_GUESS, + 'help': 'the toolchain to use (%s)' % TOOLCHAIN_GUESS }, 'os': { 'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'], - 'guess': GuessOS, - 'help': 'the os to build for' + 'default': OS_GUESS, + 'help': 'the os to build for (%s)' % OS_GUESS }, 'arch': { 'values':['arm', 'ia32', 'x64', 'mips'], - 'guess': GuessArch, - 'help': 'the architecture to build for' + 'default': ARCH_GUESS, + 'help': 'the architecture to build for (%s)' % ARCH_GUESS }, 'regexp': { 'values': ['native', 'interpreted'], @@ -822,15 +801,8 @@ SIMPLE_OPTIONS = { }, 'visibility': { 'values': ['default', 'hidden'], - 'guess': GuessVisibility, - 'depends': ['os', 'toolchain'], - 'help': 'shared library symbol visibility' - }, - 'strictaliasing': { - 'values': ['default', 'off'], - 'guess': GuessStrictAliasing, - 'depends': ['toolchain'], - 'help': 'assume strict aliasing while optimizing' + 'default': VISIBILITY_GUESS, + 'help': 'shared library symbol visibility (%s)' % VISIBILITY_GUESS }, 'pgo': { 'values': ['off', 'instrument', 'optimize'], @@ -840,26 +812,6 @@ SIMPLE_OPTIONS = { } -def AddOption(result, name, option): - if 'guess' in option: - # Option has a guess function - guess = option.get('guess') - guess_env = Environment(options=result) - # Check if all options that the guess function depends on are set - if 'depends' in option: - for dependency in option.get('depends'): - if not 
dependency in guess_env: - return False - default = guess(guess_env) - else: - # Option has a fixed default - default = option.get('default') - - help = '%s (%s)' % (option.get('help'), ", ".join(option['values'])) - result.Add(name, help, default) - return True - - def GetOptions(): result = Options() result.Add('mode', 'compilation mode (debug, release)', 'release') @@ -867,28 +819,12 @@ def GetOptions(): result.Add('cache', 'directory to use for scons build cache', '') result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '') result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '') - options = SIMPLE_OPTIONS - while len(options): - postpone = {} - for (name, option) in options.iteritems(): - if not AddOption(result, name, option): - postpone[name] = option - options = postpone + for (name, option) in SIMPLE_OPTIONS.iteritems(): + help = '%s (%s)' % (name, ", ".join(option['values'])) + result.Add(name, help, option.get('default')) return result -def GetTools(opts): - env = Environment(options=opts) - os = env['os'] - toolchain = env['toolchain'] - if os == 'win32' and toolchain == 'gcc': - return ['mingw'] - elif os == 'win32' and toolchain == 'msvc': - return ['msvc', 'mslink', 'mslib', 'msvs'] - else: - return ['default'] - - def GetVersionComponents(): MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)") MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)") @@ -969,7 +905,7 @@ def VerifyOptions(env): print env['simulator'] Abort("Option unalignedaccesses only supported for the ARM architecture.") for (name, option) in SIMPLE_OPTIONS.iteritems(): - if (not name in env): + if (not option.get('default')) and (name not in ARGUMENTS): message = ("A value for option %s must be specified (%s)." 
% (name, ", ".join(option['values']))) Abort(message) @@ -1097,7 +1033,7 @@ def ParseEnvOverrides(arg, imports): return overrides -def BuildSpecific(env, mode, env_overrides, tools): +def BuildSpecific(env, mode, env_overrides): options = {'mode': mode} for option in SIMPLE_OPTIONS: options[option] = env[option] @@ -1150,7 +1086,7 @@ def BuildSpecific(env, mode, env_overrides, tools): (object_files, shell_files, mksnapshot) = env.SConscript( join('src', 'SConscript'), build_dir=join('obj', target_id), - exports='context tools', + exports='context', duplicate=False ) @@ -1170,21 +1106,21 @@ def BuildSpecific(env, mode, env_overrides, tools): library = env.SharedLibrary(library_name, object_files, PDB=pdb_name) context.library_targets.append(library) - d8_env = Environment(tools=tools) + d8_env = Environment() d8_env.Replace(**context.flags['d8']) context.ApplyEnvOverrides(d8_env) shell = d8_env.Program('d8' + suffix, object_files + shell_files) context.d8_targets.append(shell) for sample in context.samples: - sample_env = Environment(tools=tools) + sample_env = Environment() sample_env.Replace(**context.flags['sample']) sample_env.Prepend(LIBS=[library_name]) context.ApplyEnvOverrides(sample_env) sample_object = sample_env.SConscript( join('samples', 'SConscript'), build_dir=join('obj', 'sample', sample, target_id), - exports='sample context tools', + exports='sample context', duplicate=False ) sample_name = sample + suffix @@ -1197,7 +1133,7 @@ def BuildSpecific(env, mode, env_overrides, tools): cctest_program = cctest_env.SConscript( join('test', 'cctest', 'SConscript'), build_dir=join('obj', 'test', target_id), - exports='context object_files tools', + exports='context object_files', duplicate=False ) context.cctest_targets.append(cctest_program) @@ -1207,9 +1143,7 @@ def BuildSpecific(env, mode, env_overrides, tools): def Build(): opts = GetOptions() - tools = GetTools(opts) - env = Environment(options=opts, tools=tools) - + env = Environment(options=opts) Help(opts.GenerateHelpText(env)) VerifyOptions(env) env_overrides = ParseEnvOverrides(env['env'], env['importenv']) @@ -1223,7 +1157,7 @@ def Build(): d8s = [] modes = SplitList(env['mode']) for mode in modes: - context = BuildSpecific(env.Copy(), mode, env_overrides, tools) + context = BuildSpecific(env.Copy(), mode, env_overrides) libraries += context.library_targets mksnapshots += context.mksnapshot_targets cctests += context.cctest_targets diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 2e54295e888..34ca91ca65b 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -31,7 +31,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath) sys.path.append(join(root_dir, 'tools')) import js2c Import('context') -Import('tools') SOURCES = { @@ -154,6 +153,7 @@ SOURCES = { arm/jump-target-arm.cc arm/lithium-arm.cc arm/lithium-codegen-arm.cc + arm/lithium-gap-resolver-arm.cc arm/macro-assembler-arm.cc arm/regexp-macro-assembler-arm.cc arm/register-allocator-arm.cc @@ -305,7 +305,7 @@ def Abort(message): def ConfigureObjectFiles(): - env = Environment(tools=tools) + env = Environment() env.Replace(**context.flags['v8']) context.ApplyEnvOverrides(env) env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C) diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index f6d1daf67a2..18264254b83 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -446,8 +446,15 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { bool found_it = false; JSFunction* function = 
FindInPrototypeChain(object, &found_it); if (!found_it) return Heap::undefined_value(); + while (!function->should_have_prototype()) { + found_it = false; + function = FindInPrototypeChain(object->GetPrototype(), + &found_it); + // There has to be one because we hit the getter. + ASSERT(found_it); + } + if (!function->has_prototype()) { - if (!function->should_have_prototype()) return Heap::undefined_value(); Object* prototype; { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function); if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; @@ -467,6 +474,13 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object, bool found_it = false; JSFunction* function = FindInPrototypeChain(object, &found_it); if (!found_it) return Heap::undefined_value(); + if (!function->should_have_prototype()) { + // Since we hit this accessor, object will have no prototype property. + return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(), + value, + NONE); + } + if (function->has_initial_map()) { // If the function has allocated the initial map // replace it with a copy containing the new prototype. diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index fb9bb488c97..c91d4ba2bc6 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); + ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vldr(const DwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; dst.split_code(&sd, &d); + ASSERT(offset >= 0); + + if ((offset % 4) == 0 && (offset / 4) < 256) { emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 | 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. 
+ ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vldr(const SwVfpRegister dst, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(dst, operand.rn(), operand.offset(), cond); } @@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); ASSERT(offset >= 0); - emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | + 0xB*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8); + } +} + + +void Assembler::vstr(const DwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vstr(src, operand.rn(), operand.offset(), cond); } @@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src, offset = -offset; u = 0; } - ASSERT(offset % 4 == 0); - ASSERT((offset / 4) < 256); - ASSERT(offset >= 0); int sd, d; src.split_code(&sd, &d); - emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | - 0xA*B8 | ((offset / 4) & 255)); + ASSERT(offset >= 0); + if ((offset % 4) == 0 && (offset / 4) < 256) { + emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 | + 0xA*B8 | ((offset / 4) & 255)); + } else { + // Larger offsets must be handled by computing the correct address + // in the ip register. + ASSERT(!base.is(ip)); + if (u == 1) { + add(ip, base, Operand(offset)); + } else { + sub(ip, base, Operand(offset)); + } + emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); + } +} + + +void Assembler::vstr(const SwVfpRegister src, + const MemOperand& operand, + const Condition cond) { + ASSERT(!operand.rm().is_valid()); + ASSERT(operand.am_ == Offset); + vldr(src, operand.rn(), operand.offset(), cond); } diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index 3941c84b34b..954b9cff337 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -387,7 +387,7 @@ class Operand BASE_EMBEDDED { // Return true if this is a register operand. INLINE(bool is_reg() const); - // Return true of this operand fits in one instruction so that no + // Return true if this operand fits in one instruction so that no // 2-instruction solution with a load into the ip register is necessary. bool is_single_instruction() const; bool must_use_constant_pool() const; @@ -439,7 +439,7 @@ class MemOperand BASE_EMBEDDED { offset_ = offset; } - uint32_t offset() { + uint32_t offset() const { ASSERT(rm_.is(no_reg)); return offset_; } @@ -447,6 +447,10 @@ class MemOperand BASE_EMBEDDED { Register rn() const { return rn_; } Register rm() const { return rm_; } + bool OffsetIsUint12Encodable() const { + return offset_ >= 0 ? 
is_uint12(offset_) : is_uint12(-offset_); + } + private: Register rn_; // base Register rm_; // register offset @@ -902,22 +906,34 @@ class Assembler : public Malloced { void vldr(const DwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const DwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vldr(const SwVfpRegister dst, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vldr(const SwVfpRegister dst, + const MemOperand& src, const Condition cond = al); void vstr(const DwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const DwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vstr(const SwVfpRegister src, const Register base, - int offset, // Offset must be a multiple of 4. + int offset, + const Condition cond = al); + void vstr(const SwVfpRegister src, + const MemOperand& dst, const Condition cond = al); void vmov(const DwVfpRegister dst, diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index cc49f7e4e55..87fa87df0c7 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -2661,8 +2661,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. Register result = r5; - __ AllocateHeapNumber( - result, scratch1, scratch2, heap_number_map, gc_required); + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); // Load the operands. if (smi_operands) { @@ -2811,8 +2811,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Allocate new heap number for result. __ bind(&result_not_a_smi); - __ AllocateHeapNumber( - r5, scratch1, scratch2, heap_number_map, gc_required); + Register result = r5; + if (smi_operands) { + __ AllocateHeapNumber( + result, scratch1, scratch2, heap_number_map, gc_required); + } else { + GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required); + } // r2: Answer as signed int32. // r5: Heap number to write answer into. @@ -2934,45 +2940,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime; + Label call_runtime, call_string_add_or_runtime; GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); - // If all else fails, use the runtime system to get the correct - // result. - __ bind(&call_runtime); + GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); - // Try to add strings before calling runtime. + __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { GenerateAddStrings(masm); } - GenericBinaryOpStub stub(op_, mode_, r1, r0); - __ TailCallStub(&stub); + __ bind(&call_runtime); + GenerateCallRuntime(masm); } void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { ASSERT(op_ == Token::ADD); + Label left_not_string, call_runtime; Register left = r1; Register right = r0; - Label call_runtime; - // Check if first argument is a string. - __ JumpIfSmi(left, &call_runtime); + // Check if left argument is a string. 
+ __ JumpIfSmi(left, &left_not_string); __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); - __ b(ge, &call_runtime); + __ b(ge, &left_not_string); - // First argument is a a string, test second. + StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); + GenerateRegisterArgsPush(masm); + __ TailCallStub(&string_add_left_stub); + + // Left operand is not a string, test right. + __ bind(&left_not_string); __ JumpIfSmi(right, &call_runtime); __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); __ b(ge, &call_runtime); - // First and second argument are strings. - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); + StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); GenerateRegisterArgsPush(masm); - __ TailCallStub(&string_add_stub); + __ TailCallStub(&string_add_right_stub); // At least one argument is not a string. __ bind(&call_runtime); @@ -3706,7 +3714,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) { // The offset was stored in r4 safepoint slot. // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) - __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); + __ LoadFromSafepointRegisterSlot(scratch, r4); __ sub(inline_site, lr, scratch); // Get the map location in scratch and patch it. __ GetRelocatedValueLocation(inline_site, scratch); @@ -5438,18 +5446,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void StringAddStub::Generate(MacroAssembler* masm) { - Label string_add_runtime; + Label string_add_runtime, call_builtin; + Builtins::JavaScript builtin_id = Builtins::ADD; + // Stack on entry: - // sp[0]: second argument. - // sp[4]: first argument. + // sp[0]: second argument (right). + // sp[4]: first argument (left). // Load the two arguments. __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. // Make sure that both arguments are strings if not known in advance. - if (string_check_) { - STATIC_ASSERT(kSmiTag == 0); + if (flags_ == NO_STRING_ADD_FLAGS) { __ JumpIfEitherSmi(r0, r1, &string_add_runtime); // Load instance types. __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); @@ -5461,13 +5470,27 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r4, Operand(kIsNotStringMask)); __ tst(r5, Operand(kIsNotStringMask), eq); __ b(ne, &string_add_runtime); + } else { + // Here at least one of the arguments is definitely a string. + // We convert the one that is not known to be a string. + if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_RIGHT; + } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { + ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); + GenerateConvertArgument( + masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); + builtin_id = Builtins::STRING_ADD_LEFT; + } } // Both arguments are strings. // r0: first string // r1: second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) { Label strings_not_empty; // Check if either of the strings are empty. In that case return the other. 
@@ -5495,8 +5518,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // Look at the length of the result of adding the two strings. Label string_add_flat_result, longer_than_two; // Adding two lengths can't overflow. @@ -5508,7 +5531,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(ne, &longer_than_two); // Check that both strings are non-external ascii strings. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5556,7 +5579,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // If result is not supposed to be flat, allocate a cons string object. // If both strings are ascii the result is an ascii cons string. - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5604,11 +5627,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { // r1: second string // r2: length of first string // r3: length of second string - // r4: first string instance type (if string_check_) - // r5: second string instance type (if string_check_) + // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) + // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) // r6: sum of lengths. __ bind(&string_add_flat_result); - if (!string_check_) { + if (flags_ != NO_STRING_ADD_FLAGS) { __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); @@ -5706,6 +5729,60 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Just jump to runtime to add the two strings. __ bind(&string_add_runtime); __ TailCallRuntime(Runtime::kStringAdd, 2, 1); + + if (call_builtin.is_linked()) { + __ bind(&call_builtin); + __ InvokeBuiltin(builtin_id, JUMP_JS); + } +} + + +void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow) { + // First check if the argument is already a string. + Label not_string, done; + __ JumpIfSmi(arg, ¬_string); + __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE); + __ b(lt, &done); + + // Check the number to string cache. + Label not_cached; + __ bind(¬_string); + // Puts the cached result into scratch1. + NumberToStringStub::GenerateLookupNumberStringCache(masm, + arg, + scratch1, + scratch2, + scratch3, + scratch4, + false, + ¬_cached); + __ mov(arg, scratch1); + __ str(arg, MemOperand(sp, stack_offset)); + __ jmp(&done); + + // Check if the argument is a safe string wrapper. + __ bind(¬_cached); + __ JumpIfSmi(arg, slow); + __ CompareObjectType( + arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1. 
+ __ b(ne, slow); + __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); + __ and_(scratch2, + scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ cmp(scratch2, + Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); + __ b(ne, slow); + __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); + __ str(arg, MemOperand(sp, stack_offset)); + + __ bind(&done); } diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index baaa2f2bdab..475fbd70e89 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -335,24 +335,36 @@ class TypeRecordingBinaryOpStub: public CodeStub { // Flag that indicates how to generate code for the stub StringAddStub. enum StringAddFlags { NO_STRING_ADD_FLAGS = 0, - NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. + // Omit left string check in stub (left is definitely a string). + NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0, + // Omit right string check in stub (right is definitely a string). + NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1, + // Omit both string checks in stub. + NO_STRING_CHECK_IN_STUB = + NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB }; class StringAddStub: public CodeStub { public: - explicit StringAddStub(StringAddFlags flags) { - string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0); - } + explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} private: Major MajorKey() { return StringAdd; } - int MinorKey() { return string_check_ ? 0 : 1; } + int MinorKey() { return flags_; } void Generate(MacroAssembler* masm); - // Should the stub check whether arguments are strings? - bool string_check_; + void GenerateConvertArgument(MacroAssembler* masm, + int stack_offset, + Register arg, + Register scratch1, + Register scratch2, + Register scratch3, + Register scratch4, + Label* slow); + + const StringAddFlags flags_; }; diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index a3921d8efcc..3e125a33fcb 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -5850,8 +5850,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { } else if (variable != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); Slot* slot = variable->AsSlot(); if (variable->is_global()) { LoadGlobal(); diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc index 507954d9e12..51c84b33546 100644 --- a/deps/v8/src/arm/cpu-arm.cc +++ b/deps/v8/src/arm/cpu-arm.cc @@ -50,6 +50,11 @@ void CPU::Setup() { void CPU::FlushICache(void* start, size_t size) { + // Nothing to do flushing no instructions. + if (size == 0) { + return; + } + #if defined (USE_SIMULATOR) // Not generating ARM instructions for C-code. This means that we are // building an ARM emulator based target. We should notify the simulator diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 9af7a8d1907..9a5aa902b8f 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -429,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator, fp_value, output_offset, value); } - // The context can be gotten from the function so long as we don't - // optimize functions that need local contexts. 
+ // For the bottommost output frame the context can be gotten from the input + // frame. For all subsequent output frames it can be gotten from the function + // so long as we don't inline functions that need local contexts. output_offset -= kPointerSize; input_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(function->context()); - // The context for the bottommost output frame should also agree with the - // input frame. - ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); + if (is_bottommost) { + value = input_->GetFrameSlot(input_offset); + } else { + value = reinterpret_cast<uint32_t>(function->context()); + } output_frame->SetFrameSlot(output_offset, value); if (is_topmost) { output_frame->SetRegister(cp.code(), value); diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index f04a00e0523..fea9a8cfb73 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) { Move(dot_arguments_slot, r3, r1, r2); } - { Comment cmnt(masm_, "[ Declarations"); - // For named function expressions, declare the function name as a - // constant. - if (scope()->is_function_scope() && scope()->function() != NULL) { - EmitDeclaration(scope()->function(), Variable::CONST, NULL); - } - // Visit all the explicit declarations unless there is an illegal - // redeclaration. - if (scope()->HasIllegalRedeclaration()) { - scope()->VisitIllegalRedeclaration(this); - } else { - VisitDeclarations(scope()->declarations()); - } - } - if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); } - // Check the stack for overflow or break request. - { Comment cmnt(masm_, "[ Stack check"); - PrepareForBailout(info->function(), NO_REGISTERS); - Label ok; - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); - StackCheckStub stub; - __ CallStub(&stub); - __ bind(&ok); - } - - { Comment cmnt(masm_, "[ Body"); - ASSERT(loop_depth() == 0); - VisitStatements(function()->body()); - ASSERT(loop_depth() == 0); + // Visit the declarations and body unless there is an illegal + // redeclaration. + if (scope()->HasIllegalRedeclaration()) { + Comment cmnt(masm_, "[ Declarations"); + scope()->VisitIllegalRedeclaration(this); + + } else { + { Comment cmnt(masm_, "[ Declarations"); + // For named function expressions, declare the function name as a + // constant. + if (scope()->is_function_scope() && scope()->function() != NULL) { + EmitDeclaration(scope()->function(), Variable::CONST, NULL); + } + VisitDeclarations(scope()->declarations()); + } + + { Comment cmnt(masm_, "[ Stack check"); + PrepareForBailout(info->function(), NO_REGISTERS); + Label ok; + __ LoadRoot(ip, Heap::kStackLimitRootIndex); + __ cmp(sp, Operand(ip)); + __ b(hs, &ok); + StackCheckStub stub; + __ CallStub(&stub); + __ bind(&ok); + } + + { Comment cmnt(masm_, "[ Body"); + ASSERT(loop_depth() == 0); + VisitStatements(function()->body()); + ASSERT(loop_depth() == 0); + } } + // Always emit a 'return undefined' in case control fell off the end of + // the body. { Comment cmnt(masm_, "[ return ;"); - // Emit a 'return undefined' in case control fell off the end of the - // body. __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); } EmitReturnSequence(); @@ -694,10 +695,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable, // We bypass the general EmitSlotSearch because we know more about // this specific context.
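The Generate() rewrite above changes what an illegal redeclaration skips: previously only the declarations were bypassed while the stack check and body were still emitted; now declarations, stack check and body all sit under one test, and only the trailing 'return undefined' remains unconditional. In outline (a sketch with illustrative helper names, not the patch's actual API):

    void Generate() {
      if (scope()->HasIllegalRedeclaration()) {
        scope()->VisitIllegalRedeclaration(this);  // emits the runtime error;
                                                   // the body is never compiled
      } else {
        EmitDeclarations();  // function name (if any) plus explicit decls
        EmitStackCheck();    // calls StackCheckStub when the limit is hit
        EmitBody();
      }
      EmitReturnUndefined();  // both paths: control may fall off the end
    }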
- // The variable in the decl always resides in the current context. + // The variable in the decl always resides in the current function + // context. ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (FLAG_debug_code) { - // Check if we have the correct context pointer. + // Check that we're not inside a 'with'. __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); __ cmp(r1, cp); __ Check(eq, "Unexpected declaration in current context."); @@ -1037,7 +1039,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( Slot* slot, Label* slow) { ASSERT(slot->type() == Slot::CONTEXT); - Register current = cp; + Register context = cp; Register next = r3; Register temp = r4; @@ -1045,22 +1047,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions( if (s->num_heap_slots() > 0) { if (s->calls_eval()) { // Check that extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); } - __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX)); + __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX)); __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset)); // Walk the rest of the chain without clobbering cp. - current = next; + context = next; } } // Check that last extension is NULL. - __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); + __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); __ tst(temp, temp); __ b(ne, slow); - __ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX)); - return ContextOperand(temp, slot->index()); + + // This function is used only for loads, not stores, so it's safe to + // return a cp-based operand (the write barrier cannot be allowed to + // destroy the cp register). + return ContextOperand(context, slot->index()); } @@ -2004,34 +2009,60 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, : Builtins::StoreIC_Initialize)); EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT); - } else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) { - // Perform the assignment for non-const variables and for initialization - // of const variables. Const assignments are simply skipped. - Label done; + } else if (op == Token::INIT_CONST) { + // Like var declarations, const declarations are hoisted to function + // scope. However, unlike var initializers, const initializers are able + // to drill a hole to that function context, even from inside a 'with' + // context. We thus bypass the normal static scope lookup. + Slot* slot = var->AsSlot(); + Label skip; + switch (slot->type()) { + case Slot::PARAMETER: + // No const parameters. + UNREACHABLE(); + break; + case Slot::LOCAL: + // Detect const reinitialization by checking for the hole value. + __ ldr(r1, MemOperand(fp, SlotOffset(slot))); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r1, ip); + __ b(ne, &skip); + __ str(result_register(), MemOperand(fp, SlotOffset(slot))); + break; + case Slot::CONTEXT: { + __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX)); + __ ldr(r2, ContextOperand(r1, slot->index())); + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); + __ cmp(r2, ip); + __ b(ne, &skip); + __ str(r0, ContextOperand(r1, slot->index())); + int offset = Context::SlotOffset(slot->index()); + __ mov(r3, r0); // Preserve the stored value in r0.
+ __ RecordWrite(r1, Operand(offset), r3, r2); + break; + } + case Slot::LOOKUP: + __ push(r0); + __ mov(r0, Operand(slot->var()->name())); + __ Push(cp, r0); // Context and name. + __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); + break; + } + __ bind(&skip); + + } else if (var->mode() != Variable::CONST) { + // Perform the assignment for non-const variables. Const assignments + // are simply skipped. Slot* slot = var->AsSlot(); switch (slot->type()) { case Slot::PARAMETER: case Slot::LOCAL: - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r1, MemOperand(fp, SlotOffset(slot))); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r1, ip); - __ b(ne, &done); - } // Perform the assignment. __ str(result_register(), MemOperand(fp, SlotOffset(slot))); break; case Slot::CONTEXT: { MemOperand target = EmitSlotSearch(slot, r1); - if (op == Token::INIT_CONST) { - // Detect const reinitialization by checking for the hole value. - __ ldr(r2, target); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r2, ip); - __ b(ne, &done); - } // Perform the assignment and issue the write barrier. __ str(result_register(), target); // RecordWrite may destroy all its register arguments. @@ -2042,20 +2073,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, } case Slot::LOOKUP: - // Call the runtime for the assignment. The runtime will ignore - // const reinitialization. + // Call the runtime for the assignment. __ push(r0); // Value. __ mov(r0, Operand(slot->var()->name())); __ Push(cp, r0); // Context and name. - if (op == Token::INIT_CONST) { - // The runtime will ignore const redeclaration. - __ CallRuntime(Runtime::kInitializeConstContextSlot, 3); - } else { - __ CallRuntime(Runtime::kStoreContextSlot, 3); - } + __ CallRuntime(Runtime::kStoreContextSlot, 3); break; } - __ bind(&done); } } @@ -3373,8 +3397,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { } } else if (var != NULL) { // Delete of an unqualified identifier is disallowed in strict mode - // so this code can only be reached in non-strict mode. - ASSERT(strict_mode_flag() == kNonStrictMode); + // but "delete this" is. + ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this()); if (var->is_global()) { __ ldr(r2, GlobalObjectOperand()); __ mov(r1, Operand(var->name())); @@ -3414,17 +3438,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { case Token::NOT: { Comment cmnt(masm_, "[ UnaryOperation (NOT)"); - Label materialize_true, materialize_false; - Label* if_true = NULL; - Label* if_false = NULL; - Label* fall_through = NULL; + if (context()->IsEffect()) { + // Unary NOT has no side effects so it's only necessary to visit the + // subexpression. Match the optimizing compiler by not branching. + VisitForEffect(expr->expression()); + } else { + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; - // Notice that the labels are swapped. - context()->PrepareTest(&materialize_true, &materialize_false, - &if_false, &if_true, &fall_through); - if (context()->IsTest()) ForwardBailoutToChild(expr); - VisitForControl(expr->expression(), if_true, if_false, fall_through); - context()->Plug(if_false, if_true); // Labels swapped. + // Notice that the labels are swapped. 
+ context()->PrepareTest(&materialize_true, &materialize_false, + &if_false, &if_true, &fall_through); + if (context()->IsTest()) ForwardBailoutToChild(expr); + VisitForControl(expr->expression(), if_true, if_false, fall_through); + context()->Plug(if_false, if_true); // Labels swapped. + } break; } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 903f77bbf04..d3c9fee8e32 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { } -void LStoreNamed::PrintDataTo(StringStream* stream) { +void LStoreNamedField::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("."); stream->Add(*String::cast(*name())->ToCString()); @@ -355,7 +355,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) { } -void LStoreKeyed::PrintDataTo(StringStream* stream) { +void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("."); + stream->Add(*String::cast(*name())->ToCString()); + stream->Add(" <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { + object()->PrintTo(stream); + stream->Add("["); + key()->PrintTo(stream); + stream->Add("] <- "); + value()->PrintTo(stream); +} + + +void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) { object()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); @@ -1204,8 +1222,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { case kMathSqrt: return DefineSameAsFirst(result); case kMathRound: - Abort("MathRound LUnaryMathOperation not implemented"); - return NULL; + return AssignEnvironment(DefineAsRegister(result)); case kMathPowHalf: Abort("MathPowHalf LUnaryMathOperation not implemented"); return NULL; @@ -1418,8 +1435,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { LInstruction* LChunkBuilder::DoPower(HPower* instr) { - Abort("LPower instruction not implemented on ARM"); - return NULL; + ASSERT(instr->representation().IsDouble()); + // We call a C function for double power. It can't trigger a GC. + // We need to use fixed result register for the call. + Representation exponent_type = instr->right()->representation(); + ASSERT(instr->left()->representation().IsDouble()); + LOperand* left = UseFixedDouble(instr->left(), d1); + LOperand* right = exponent_type.IsDouble() ? 
+ UseFixedDouble(instr->right(), d2) : + UseFixed(instr->right(), r0); + LPower* result = new LPower(left, right); + return MarkAsCall(DefineFixedDouble(result, d3), + instr, + CAN_DEOPTIMIZE_EAGERLY); } @@ -1709,11 +1737,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) { LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) { - LOperand* context = UseTempRegister(instr->context()); + LOperand* context; LOperand* value; if (instr->NeedsWriteBarrier()) { + context = UseTempRegister(instr->context()); value = UseTempRegister(instr->value()); } else { + context = UseRegister(instr->context()); value = UseRegister(instr->value()); } return new LStoreContextSlot(context, value); @@ -1806,6 +1836,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement( } +LInstruction* LChunkBuilder::DoStorePixelArrayElement( + HStorePixelArrayElement* instr) { + Abort("DoStorePixelArrayElement not implemented"); + return NULL; +} + + LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LOperand* obj = UseFixed(instr->object(), r2); LOperand* key = UseFixed(instr->key(), r1); @@ -1911,8 +1948,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) { LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) { - // There are no real uses of the arguments object (we bail out in all other - // cases). + // There are no real uses of the arguments object. + // arguments.length and element access are supported directly on + // stack arguments, and any real arguments object use causes a bailout. + // So this value is never used. return NULL; } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index 57338f16d55..77d6b71a93a 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -42,8 +42,6 @@ class LCodeGen; #define LITHIUM_ALL_INSTRUCTION_LIST(V) \ V(ControlInstruction) \ V(Call) \ - V(StoreKeyed) \ - V(StoreNamed) \ LITHIUM_CONCRETE_INSTRUCTION_LIST(V) @@ -135,6 +133,7 @@ class LCodeGen; V(OuterContext) \ V(Parameter) \ V(PixelArrayLength) \ + V(Power) \ V(PushArgument) \ V(RegExpLiteral) \ V(Return) \ @@ -1058,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> { }; +class LPower: public LTemplateInstruction<1, 2, 0> { + public: + LPower(LOperand* left, LOperand* right) { + inputs_[0] = left; + inputs_[1] = right; + } + + DECLARE_CONCRETE_INSTRUCTION(Power, "power") + DECLARE_HYDROGEN_ACCESSOR(Power) +}; + + class LArithmeticD: public LTemplateInstruction<1, 2, 0> { public: LArithmeticD(Token::Value op, LOperand* left, LOperand* right) @@ -1510,15 +1521,38 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> { }; -class LStoreNamed: public LTemplateInstruction<0, 2, 0> { +class LStoreNamedField: public LTemplateInstruction<0, 2, 0> { public: - LStoreNamed(LOperand* obj, LOperand* val) { + LStoreNamedField(LOperand* obj, LOperand* val) { inputs_[0] = obj; inputs_[1] = val; } - DECLARE_INSTRUCTION(StoreNamed) - DECLARE_HYDROGEN_ACCESSOR(StoreNamed) + DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* value() { return inputs_[1]; } + + Handle<Object> name() const { return hydrogen()->name(); } + bool is_in_object() { return hydrogen()->is_in_object(); } + int offset() { return hydrogen()->offset(); } + bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } + Handle<Map> transition()
const { return hydrogen()->transition(); } +}; + + +class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { + public: + LStoreNamedGeneric(LOperand* obj, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = val; + } + + DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") + DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) virtual void PrintDataTo(StringStream* stream); @@ -1528,40 +1562,17 @@ class LStoreNamed: public LTemplateInstruction<0, 2, 0> { }; -class LStoreNamedField: public LStoreNamed { +class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { public: - LStoreNamedField(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedField) - - bool is_in_object() { return hydrogen()->is_in_object(); } - int offset() { return hydrogen()->offset(); } - bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); } - Handle transition() const { return hydrogen()->transition(); } -}; - - -class LStoreNamedGeneric: public LStoreNamed { - public: - LStoreNamedGeneric(LOperand* obj, LOperand* val) - : LStoreNamed(obj, val) { } - - DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic") - DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric) -}; - - -class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - DECLARE_INSTRUCTION(StoreKeyed) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, + "store-keyed-fast-element") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) virtual void PrintDataTo(StringStream* stream); @@ -1571,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LStoreKeyed { +class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) {} - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) -}; - - -class LStoreKeyedGeneric: public LStoreKeyed { - public: - LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) - : LStoreKeyed(obj, key, val) { } + LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) { + inputs_[0] = obj; + inputs_[1] = key; + inputs_[2] = val; + } DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic") + + virtual void PrintDataTo(StringStream* stream); + + LOperand* object() { return inputs_[0]; } + LOperand* key() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } }; diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 1bfb3ad943a..ca644422702 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -26,6 +26,7 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
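Taken together, the lithium-arm.h hunks above flatten the store-instruction hierarchy: the LStoreNamed and LStoreKeyed bases are gone, and each concrete instruction derives directly from LTemplateInstruction, owning its operands, accessors and DECLARE_CONCRETE_INSTRUCTION mnemonic (which is what the new per-class PrintDataTo implementations in lithium-arm.cc key off). Schematically (a sketch; only the operand counts are taken from the patch):

    // LTemplateInstruction<results, inputs, temps>
    class LStoreNamedField      : public LTemplateInstruction<0, 2, 0> {};  // obj, value
    class LStoreNamedGeneric    : public LTemplateInstruction<0, 2, 0> {};  // obj, value
    class LStoreKeyedFastElement : public LTemplateInstruction<0, 3, 0> {};  // obj, key, value
    class LStoreKeyedGeneric     : public LTemplateInstruction<0, 3, 0> {};  // obj, key, value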
#include "arm/lithium-codegen-arm.h" +#include "arm/lithium-gap-resolver-arm.h" #include "code-stubs.h" #include "stub-cache.h" @@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator { }; -class LGapNode: public ZoneObject { - public: - explicit LGapNode(LOperand* operand) - : operand_(operand), resolved_(false), visited_id_(-1) { } - - LOperand* operand() const { return operand_; } - bool IsResolved() const { return !IsAssigned() || resolved_; } - void MarkResolved() { - ASSERT(!IsResolved()); - resolved_ = true; - } - int visited_id() const { return visited_id_; } - void set_visited_id(int id) { - ASSERT(id > visited_id_); - visited_id_ = id; - } - - bool IsAssigned() const { return assigned_from_.is_set(); } - LGapNode* assigned_from() const { return assigned_from_.get(); } - void set_assigned_from(LGapNode* n) { assigned_from_.set(n); } - - private: - LOperand* operand_; - SetOncePointer assigned_from_; - bool resolved_; - int visited_id_; -}; - - -LGapResolver::LGapResolver() - : nodes_(32), - identified_cycles_(4), - result_(16), - next_visited_id_(0) { -} - - -const ZoneList* LGapResolver::Resolve( - const ZoneList* moves, - LOperand* marker_operand) { - nodes_.Rewind(0); - identified_cycles_.Rewind(0); - result_.Rewind(0); - next_visited_id_ = 0; - - for (int i = 0; i < moves->length(); ++i) { - LMoveOperands move = moves->at(i); - if (!move.IsRedundant()) RegisterMove(move); - } - - for (int i = 0; i < identified_cycles_.length(); ++i) { - ResolveCycle(identified_cycles_[i], marker_operand); - } - - int unresolved_nodes; - do { - unresolved_nodes = 0; - for (int j = 0; j < nodes_.length(); j++) { - LGapNode* node = nodes_[j]; - if (!node->IsResolved() && node->assigned_from()->IsResolved()) { - AddResultMove(node->assigned_from(), node); - node->MarkResolved(); - } - if (!node->IsResolved()) ++unresolved_nodes; - } - } while (unresolved_nodes > 0); - return &result_; -} - - -void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) { - AddResultMove(from->operand(), to->operand()); -} - - -void LGapResolver::AddResultMove(LOperand* from, LOperand* to) { - result_.Add(LMoveOperands(from, to)); -} - - -void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) { - ZoneList cycle_operands(8); - cycle_operands.Add(marker_operand); - LGapNode* cur = start; - do { - cur->MarkResolved(); - cycle_operands.Add(cur->operand()); - cur = cur->assigned_from(); - } while (cur != start); - cycle_operands.Add(marker_operand); - - for (int i = cycle_operands.length() - 1; i > 0; --i) { - LOperand* from = cycle_operands[i]; - LOperand* to = cycle_operands[i - 1]; - AddResultMove(from, to); - } -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) { - ASSERT(a != b); - LGapNode* cur = a; - while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) { - cur->set_visited_id(visited_id); - cur = cur->assigned_from(); - } - - return cur == b; -} - - -bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) { - ASSERT(a != b); - return CanReach(a, b, next_visited_id_++); -} - - -void LGapResolver::RegisterMove(LMoveOperands move) { - if (move.source()->IsConstantOperand()) { - // Constant moves should be last in the machine code. Therefore add them - // first to the result set. 
- AddResultMove(move.source(), move.destination()); - } else { - LGapNode* from = LookupNode(move.source()); - LGapNode* to = LookupNode(move.destination()); - if (to->IsAssigned() && to->assigned_from() == from) { - move.Eliminate(); - return; - } - ASSERT(!to->IsAssigned()); - if (CanReach(from, to)) { - // This introduces a cycle. Save. - identified_cycles_.Add(from); - } - to->set_assigned_from(from); - } -} - - -LGapNode* LGapResolver::LookupNode(LOperand* operand) { - for (int i = 0; i < nodes_.length(); ++i) { - if (nodes_[i]->operand()->Equals(operand)) return nodes_[i]; - } - - // No node found => create a new one. - LGapNode* result = new LGapNode(operand); - nodes_.Add(result); - return result; -} - - #define __ masm()-> bool LCodeGen::GenerateCode() { @@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() { } } + // Possibly allocate a local context. + int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + if (heap_slots > 0) { + Comment(";;; Allocate local context"); + // Argument to NewContext is the function, which is in r1. + __ push(r1); + if (heap_slots <= FastNewContextStub::kMaximumSlots) { + FastNewContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kNewContext, 1); + } + RecordSafepoint(Safepoint::kNoDeoptimizationIndex); + // Context is returned in both r0 and cp. It replaces the context + // passed to us. It's saved in the stack and kept live in cp. + __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); + // Copy any necessary parameters into the context. + int num_parameters = scope()->num_parameters(); + for (int i = 0; i < num_parameters; i++) { + Slot* slot = scope()->parameter(i)->AsSlot(); + if (slot != NULL && slot->type() == Slot::CONTEXT) { + int parameter_offset = StandardFrameConstants::kCallerSPOffset + + (num_parameters - 1 - i) * kPointerSize; + // Load parameter from stack. + __ ldr(r0, MemOperand(fp, parameter_offset)); + // Store it in the context. + __ mov(r1, Operand(Context::SlotOffset(slot->index()))); + __ str(r0, MemOperand(cp, r1)); + // Update the write barrier. This clobbers all involved + // registers, so we have to use two more registers to avoid + // clobbering cp. + __ mov(r2, Operand(cp)); + __ RecordWrite(r2, Operand(r1), r3, r0); + } + } + Comment(";;; End allocate local context"); + } + // Trace the call. if (FLAG_trace) { __ CallRuntime(Runtime::kTraceEnter, 0); @@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) { MemOperand LCodeGen::ToMemOperand(LOperand* op) const { - // TODO(regis): Revisit. ASSERT(!op->IsRegister()); ASSERT(!op->IsDoubleRegister()); ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); @@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const { } +MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { + ASSERT(op->IsDoubleStackSlot()); + int index = op->index(); + if (index >= 0) { + // Local or spill slot. Skip the frame pointer, function, context, + // and the first word of the double in the fixed part of the frame. + return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); + } else { + // Incoming parameter. Skip the return address and the first word of + // the double. 
+ return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); + } +} + + void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation) { if (environment == NULL) return; @@ -751,6 +653,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, } +void LCodeGen::RecordSafepoint(int deoptimization_index) { + LPointerMap empty_pointers(RelocInfo::kNoPosition); + RecordSafepoint(&empty_pointers, deoptimization_index); +} + + void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index) { @@ -787,116 +695,7 @@ void LCodeGen::DoLabel(LLabel* label) { void LCodeGen::DoParallelMove(LParallelMove* move) { - // d0 must always be a scratch register. - DoubleRegister dbl_scratch = d0; - LUnallocated marker_operand(LUnallocated::NONE); - - Register core_scratch = scratch0(); - bool destroys_core_scratch = false; - - const ZoneList* moves = - resolver_.Resolve(move->move_operands(), &marker_operand); - for (int i = moves->length() - 1; i >= 0; --i) { - LMoveOperands move = moves->at(i); - LOperand* from = move.source(); - LOperand* to = move.destination(); - ASSERT(!from->IsDoubleRegister() || - !ToDoubleRegister(from).is(dbl_scratch)); - ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch)); - ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch)); - ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch)); - if (from == &marker_operand) { - if (to->IsRegister()) { - __ mov(ToRegister(to), core_scratch); - ASSERT(destroys_core_scratch); - } else if (to->IsStackSlot()) { - __ str(core_scratch, ToMemOperand(to)); - ASSERT(destroys_core_scratch); - } else if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), dbl_scratch); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } else if (to == &marker_operand) { - if (from->IsRegister() || from->IsConstantOperand()) { - __ mov(core_scratch, ToOperand(from)); - destroys_core_scratch = true; - } else if (from->IsStackSlot()) { - __ ldr(core_scratch, ToMemOperand(from)); - destroys_core_scratch = true; - } else if (from->IsDoubleRegister()) { - __ vmov(dbl_scratch, ToDoubleRegister(from)); - } else { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - } - } else if (from->IsConstantOperand()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ mov(ip, ToOperand(from)); - __ str(ip, ToMemOperand(to)); - } - } else if (from->IsRegister()) { - if (to->IsRegister()) { - __ mov(ToRegister(to), ToOperand(from)); - } else { - ASSERT(to->IsStackSlot()); - __ str(ToRegister(from), ToMemOperand(to)); - } - } else if (to->IsRegister()) { - ASSERT(from->IsStackSlot()); - __ ldr(ToRegister(to), ToMemOperand(from)); - } else if (from->IsStackSlot()) { - ASSERT(to->IsStackSlot()); - __ ldr(ip, ToMemOperand(from)); - __ str(ip, ToMemOperand(to)); - } else if (from->IsDoubleRegister()) { - if (to->IsDoubleRegister()) { - __ vmov(ToDoubleRegister(to), ToDoubleRegister(from)); - } else { - ASSERT(to->IsDoubleStackSlot()); - // TODO(regis): Why is vstr not taking a MemOperand? 
- // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset()); - } - } else if (to->IsDoubleRegister()) { - ASSERT(from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(ToDoubleRegister(to), ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset()); - } else { - ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot()); - // TODO(regis): Why is vldr not taking a MemOperand? - // __ vldr(dbl_scratch, ToMemOperand(from)); - MemOperand from_operand = ToMemOperand(from); - __ vldr(dbl_scratch, from_operand.rn(), from_operand.offset()); - // TODO(regis): Why is vstr not taking a MemOperand? - // __ vstr(dbl_scratch, ToMemOperand(to)); - MemOperand to_operand = ToMemOperand(to); - __ vstr(dbl_scratch, to_operand.rn(), to_operand.offset()); - } - } - - if (destroys_core_scratch) { - __ ldr(core_scratch, MemOperand(fp, -kPointerSize)); - } - - LInstruction* next = GetNextInstruction(); - if (next != NULL && next->IsLazyBailout()) { - int pc = masm()->pc_offset(); - safepoints_.SetPcAfterGap(pc); - } + resolver_.Resolve(move); } @@ -987,7 +786,7 @@ void LCodeGen::DoModI(LModI* instr) { DeferredModI(LCodeGen* codegen, LModI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD); + codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD); } private: LModI* instr_; @@ -1016,7 +815,7 @@ void LCodeGen::DoModI(LModI* instr) { __ bind(&ok); } - // Try a few common cases before using the generic stub. + // Try a few common cases before using the stub. Label call_stub; const int kUnfolds = 3; // Skip if either side is negative. @@ -1044,7 +843,7 @@ void LCodeGen::DoModI(LModI* instr) { __ and_(result, scratch, Operand(left)); __ bind(&call_stub); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. DeferredModI* deferred = new DeferredModI(this, instr); __ TrySmiTag(left, &deoptimize, scratch); @@ -1070,7 +869,7 @@ void LCodeGen::DoDivI(LDivI* instr) { DeferredDivI(LCodeGen* codegen, LDivI* instr) : LDeferredCode(codegen), instr_(instr) { } virtual void Generate() { - codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV); + codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV); } private: LDivI* instr_; @@ -1123,7 +922,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ mov(result, Operand(left, ASR, 2), LeaveCC, eq); __ b(eq, &done); - // Call the generic stub. The numbers in r0 and r1 have + // Call the stub. The numbers in r0 and r1 have // to be tagged to Smis. If that is not possible, deoptimize. DeferredDivI* deferred = new DeferredDivI(this, instr); @@ -1145,19 +944,33 @@ void LCodeGen::DoDivI(LDivI* instr) { template -void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op) { +void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op) { Register left = ToRegister(instr->InputAt(0)); Register right = ToRegister(instr->InputAt(1)); __ PushSafepointRegistersAndDoubles(); - GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); + // Move left to r1 and right to r0 for the stub call. 
+ if (left.is(r1)) { + __ Move(r0, right); + } else if (left.is(r0) && right.is(r1)) { + __ Swap(r0, r1, r2); + } else if (left.is(r0)) { + ASSERT(!right.is(r1)); + __ mov(r1, r0); + __ mov(r0, right); + } else { + ASSERT(!left.is(r0) && !right.is(r0)); + __ mov(r0, right); + __ mov(r1, left); + } + TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT); __ CallStub(&stub); RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); // Overwrite the stored value of r0 with the result of the stub. - __ StoreToSafepointRegistersAndDoublesSlot(r0); + __ StoreToSafepointRegistersAndDoublesSlot(r0, r0); __ PopSafepointRegistersAndDoubles(); } @@ -1413,7 +1226,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { __ vmov(r2, r3, right); __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); // Move the result in the double result register. - __ vmov(ToDoubleRegister(instr->result()), r0, r1); + __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result())); // Restore r0-r3. __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); @@ -1431,10 +1244,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->InputAt(1)).is(r0)); ASSERT(ToRegister(instr->result()).is(r0)); - // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current - // GenericBinaryOpStub: - // TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); - GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0); + TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE); CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); } @@ -2174,7 +1984,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, __ bind(&before_push_delta); __ BlockConstPoolFor(kAdditionalDelta); __ mov(temp, Operand(delta * kPointerSize)); - __ StoreToSafepointRegisterSlot(temp); + __ StoreToSafepointRegisterSlot(temp, temp); __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); ASSERT_EQ(kAdditionalDelta, masm_->InstructionsGeneratedSince(&before_push_delta)); @@ -2182,7 +1992,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); // Put the result value into the result register slot and // restore all registers. - __ StoreToSafepointRegisterSlot(result); + __ StoreToSafepointRegisterSlot(result, result); __ PopSafepointRegisters(); } @@ -2302,17 +2112,13 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { Register context = ToRegister(instr->context()); Register result = ToRegister(instr->result()); - __ ldr(result, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); - __ ldr(result, ContextOperand(result, instr->slot_index())); + __ ldr(result, ContextOperand(context, instr->slot_index())); } void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { Register context = ToRegister(instr->context()); Register value = ToRegister(instr->value()); - __ ldr(context, - MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); __ str(value, ContextOperand(context, instr->slot_index())); if (instr->needs_write_barrier()) { int offset = Context::SlotOffset(instr->slot_index()); @@ -2715,7 +2521,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { // Set the pointer to the new heap number in tmp. if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); // Restore input_reg after call to runtime. 
- __ LoadFromSafepointRegisterSlot(input); + __ LoadFromSafepointRegisterSlot(input, input); __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); __ bind(&allocated); @@ -2726,7 +2532,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); - __ str(tmp1, masm()->SafepointRegisterSlot(input)); + __ StoreToSafepointRegisterSlot(tmp1, input); __ PopSafepointRegisters(); __ bind(&done); @@ -2843,6 +2649,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { } +void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { + DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); + Register result = ToRegister(instr->result()); + Register scratch1 = scratch0(); + Register scratch2 = result; + EmitVFPTruncate(kRoundToNearest, + double_scratch0().low(), + input, + scratch1, + scratch2); + DeoptimizeIf(ne, instr->environment()); + __ vmov(result, double_scratch0().low()); + + // Test for -0. + Label done; + __ cmp(result, Operand(0)); + __ b(ne, &done); + __ vmov(scratch1, input.high()); + __ tst(scratch1, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(ne, instr->environment()); + __ bind(&done); +} + + void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); ASSERT(ToDoubleRegister(instr->result()).is(input)); @@ -2850,6 +2680,64 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { } +void LCodeGen::DoPower(LPower* instr) { + LOperand* left = instr->InputAt(0); + LOperand* right = instr->InputAt(1); + Register scratch = scratch0(); + DoubleRegister result_reg = ToDoubleRegister(instr->result()); + Representation exponent_type = instr->hydrogen()->right()->representation(); + if (exponent_type.IsDouble()) { + // Prepare arguments and call C function. + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, ToDoubleRegister(right)); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } else if (exponent_type.IsInteger32()) { + ASSERT(ToRegister(right).is(r0)); + // Prepare arguments and call C function. + __ PrepareCallCFunction(4, scratch); + __ mov(r2, ToRegister(right)); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ CallCFunction(ExternalReference::power_double_int_function(), 4); + } else { + ASSERT(exponent_type.IsTagged()); + ASSERT(instr->hydrogen()->left()->representation().IsDouble()); + + Register right_reg = ToRegister(right); + + // Check for smi on the right hand side. + Label non_smi, call; + __ JumpIfNotSmi(right_reg, &non_smi); + + // Untag smi and convert it to a double. + __ SmiUntag(right_reg); + SwVfpRegister single_scratch = double_scratch0().low(); + __ vmov(single_scratch, right_reg); + __ vcvt_f64_s32(result_reg, single_scratch); + __ jmp(&call); + + // Heap number map check. + __ bind(&non_smi); + __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, Operand(ip)); + DeoptimizeIf(ne, instr->environment()); + int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag; + __ add(scratch, right_reg, Operand(value_offset)); + __ vldr(result_reg, scratch, 0); + + // Prepare arguments and call C function. 
+ __ bind(&call); + __ PrepareCallCFunction(4, scratch); + __ vmov(r0, r1, ToDoubleRegister(left)); + __ vmov(r2, r3, result_reg); + __ CallCFunction(ExternalReference::power_double_double_function(), 4); + } + // Store the result in the result register. + __ GetCFunctionDoubleResult(result_reg); +} + + void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { switch (instr->op()) { case kMathAbs: @@ -2858,6 +2746,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { case kMathFloor: DoMathFloor(instr); break; + case kMathRound: + DoMathRound(instr); + break; case kMathSqrt: DoMathSqrt(instr); break; @@ -3157,8 +3048,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { __ AbortIfNotSmi(r0); } __ SmiUntag(r0); - MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result); - __ str(r0, result_stack_slot); + __ StoreToSafepointRegisterSlot(r0, result); __ PopSafepointRegisters(); } @@ -3239,9 +3129,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { // register is stored, as this register is in the pointer map, but contains an // integer value. __ mov(ip, Operand(0)); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(ip, MemOperand(sp, reg_stack_index * kPointerSize)); - + __ StoreToSafepointRegisterSlot(ip, reg); __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); @@ -3252,7 +3140,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) { __ bind(&done); __ sub(ip, reg, Operand(kHeapObjectTag)); __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset); - __ str(reg, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(reg, reg); __ PopSafepointRegisters(); } @@ -3297,8 +3185,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); RecordSafepointWithRegisters( instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex); - int reg_stack_index = __ SafepointRegisterStackIndex(reg.code()); - __ str(r0, MemOperand(sp, reg_stack_index * kPointerSize)); + __ StoreToSafepointRegisterSlot(r0, reg); __ PopSafepointRegisters(); } diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 732db445170..2d9c6edcb6c 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -29,7 +29,7 @@ #define V8_ARM_LITHIUM_CODEGEN_ARM_H_ #include "arm/lithium-arm.h" - +#include "arm/lithium-gap-resolver-arm.h" #include "deoptimizer.h" #include "safepoint-table.h" #include "scopes.h" @@ -39,31 +39,8 @@ namespace internal { // Forward declarations. 
class LDeferredCode; -class LGapNode; class SafepointGenerator; -class LGapResolver BASE_EMBEDDED { - public: - LGapResolver(); - const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves, - LOperand* marker_operand); - - private: - LGapNode* LookupNode(LOperand* operand); - bool CanReach(LGapNode* a, LGapNode* b, int visited_id); - bool CanReach(LGapNode* a, LGapNode* b); - void RegisterMove(LMoveOperands move); - void AddResultMove(LOperand* from, LOperand* to); - void AddResultMove(LGapNode* from, LGapNode* to); - void ResolveCycle(LGapNode* start, LOperand* marker_operand); - - ZoneList<LGapNode*> nodes_; - ZoneList<LGapNode*> identified_cycles_; - ZoneList<LMoveOperands> result_; - int next_visited_id_; -}; - - class LCodeGen BASE_EMBEDDED { public: LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) @@ -79,10 +56,35 @@ class LCodeGen BASE_EMBEDDED { scope_(chunk->graph()->info()->scope()), status_(UNUSED), deferred_(8), - osr_pc_offset_(-1) { + osr_pc_offset_(-1), + resolver_(this) { PopulateDeoptimizationLiteralsWithInlinedFunctions(); } + + // Simple accessors. + MacroAssembler* masm() const { return masm_; } + + // Support for converting LOperands to assembler types. + // LOperand must be a register. + Register ToRegister(LOperand* op) const; + + // LOperand is loaded into scratch, unless already a register. + Register EmitLoadRegister(LOperand* op, Register scratch); + + // LOperand must be a double register. + DoubleRegister ToDoubleRegister(LOperand* op) const; + + // LOperand is loaded into dbl_scratch, unless already a double register. + DoubleRegister EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DoubleRegister dbl_scratch); + int ToInteger32(LConstantOperand* op) const; + Operand ToOperand(LOperand* op); + MemOperand ToMemOperand(LOperand* op) const; + // Returns a MemOperand pointing to the high word of a DoubleStackSlot. + MemOperand ToHighMemOperand(LOperand* op) const; + // Try to generate code for the entire chunk, but it may fail if the // chunk contains constructs we cannot handle. Returns true if the // code generation attempt succeeded. @@ -94,8 +96,8 @@ class LCodeGen BASE_EMBEDDED { // Deferred code support. template<class T> - void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr, - Token::Value op); + void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr, + Token::Value op); void DoDeferredNumberTagD(LNumberTagD* instr); void DoDeferredNumberTagI(LNumberTagI* instr); void DoDeferredTaggedToI(LTaggedToI* instr); @@ -136,7 +138,6 @@ class LCodeGen BASE_EMBEDDED { LChunk* chunk() const { return chunk_; } Scope* scope() const { return scope_; } HGraph* graph() const { return chunk_->graph(); } - MacroAssembler* masm() const { return masm_; } Register scratch0() { return r9; } DwVfpRegister double_scratch0() { return d0; } @@ -202,24 +203,6 @@ class LCodeGen BASE_EMBEDDED { Register ToRegister(int index) const; DoubleRegister ToDoubleRegister(int index) const; - // LOperand must be a register. - Register ToRegister(LOperand* op) const; - - // LOperand is loaded into scratch, unless already a register. - Register EmitLoadRegister(LOperand* op, Register scratch); - - // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; - - // LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch); - - int ToInteger32(LConstantOperand* op) const; - Operand ToOperand(LOperand* op); - MemOperand ToMemOperand(LOperand* op) const; - // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); @@ -229,6 +212,7 @@ class LCodeGen BASE_EMBEDDED { Register scratch1, Register scratch2); void DoMathFloor(LUnaryMathOperation* instr); + void DoMathRound(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); // Support for recording safepoint and position information. @@ -237,6 +221,7 @@ class LCodeGen BASE_EMBEDDED { int arguments, int deoptimization_index); void RecordSafepoint(LPointerMap* pointers, int deoptimization_index); + void RecordSafepoint(int deoptimization_index); void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments, int deoptimization_index); diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc new file mode 100644 index 00000000000..1a2326b748d --- /dev/null +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -0,0 +1,303 @@ +// Copyright 2011 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "arm/lithium-gap-resolver-arm.h" +#include "arm/lithium-codegen-arm.h" + +namespace v8 { +namespace internal { + +static const Register kSavedValueRegister = { 9 }; +static const DoubleRegister kSavedDoubleValueRegister = { 0 }; + +LGapResolver::LGapResolver(LCodeGen* owner) + : cgen_(owner), moves_(32), root_index_(0), in_cycle_(false), + saved_destination_(NULL) { } + + +void LGapResolver::Resolve(LParallelMove* parallel_move) { + ASSERT(moves_.is_empty()); + // Build up a worklist of moves. + BuildInitialMoveList(parallel_move); + + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands move = moves_[i]; + // Skip constants to perform them last. 
They don't block other moves + // and skipping such moves with register destinations keeps those + // registers free for the whole algorithm. + if (!move.IsEliminated() && !move.source()->IsConstantOperand()) { + root_index_ = i; // Any cycle is found when we reach this move again. + PerformMove(i); + if (in_cycle_) { + RestoreValue(); + } + } + } + + // Perform the moves with constant sources. + for (int i = 0; i < moves_.length(); ++i) { + if (!moves_[i].IsEliminated()) { + ASSERT(moves_[i].source()->IsConstantOperand()); + EmitMove(i); + } + } + + moves_.Rewind(0); +} + + +void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) { + // Perform a linear sweep of the moves to add them to the initial list of + // moves to perform, ignoring any move that is redundant (the source is + // the same as the destination, the destination is ignored and + // unallocated, or the move was already eliminated). + const ZoneList<LMoveOperands>* moves = parallel_move->move_operands(); + for (int i = 0; i < moves->length(); ++i) { + LMoveOperands move = moves->at(i); + if (!move.IsRedundant()) moves_.Add(move); + } + Verify(); +} + + +void LGapResolver::PerformMove(int index) { + // Each call to this function performs a move and deletes it from the move + // graph. We first recursively perform any move blocking this one. We + // mark a move as "pending" on entry to PerformMove in order to detect + // cycles in the move graph. + + // We can only find a cycle, when doing a depth-first traversal of moves, + // by encountering the starting move again. So by spilling the source of + // the starting move, we break the cycle. All moves are then unblocked, + // and the starting move is completed by writing the spilled value to + // its destination. All other moves from the spilled source have been + // completed prior to breaking the cycle. + // An additional complication is that moves to MemOperands with large + // offsets (more than 1K or 4K) require us to spill this spilled value to + // the stack, to free up the register. + ASSERT(!moves_[index].IsPending()); + ASSERT(!moves_[index].IsRedundant()); + + // Clear this move's destination to indicate a pending move. The actual + // destination is saved in a stack allocated local. Multiple moves can + // be pending because this function is recursive. + ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated. + LOperand* destination = moves_[index].destination(); + moves_[index].set_destination(NULL); + + // Perform a depth-first traversal of the move graph to resolve + // dependencies. Any unperformed, unpending move with a source the same + // as this one's destination blocks this one so recursively perform all + // such moves. + for (int i = 0; i < moves_.length(); ++i) { + LMoveOperands other_move = moves_[i]; + if (other_move.Blocks(destination) && !other_move.IsPending()) { + PerformMove(i); + // If there is a blocking, pending move it must be moves_[root_index_] + // and all other moves with the same source as moves_[root_index_] are + // successfully executed (because they are cycle-free) by this loop. + } + } + + // We are about to resolve this move and don't need it marked as + // pending, so restore its destination. + moves_[index].set_destination(destination); + + // The move may be blocked on a pending move, which must be the starting move. + // In this case, we have a cycle, and we save the source of this move to + // a scratch register to break it.
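To see the cycle rule concretely: a parallel move that permutes values through registers has no valid serial order until one value is spilled, which is exactly what the BreakCycle/RestoreValue pair below implements. A toy model in plain C++ (independent of the V8 types; ints stand in for registers):

    #include <cstdio>

    int main() {
      // Parallel move {r0 <- r1, r1 <- r2, r2 <- r0}: every source is also
      // some move's destination, so the move graph is a single cycle.
      int r0 = 10, r1 = 11, r2 = 12;
      int saved = r1;  // BreakCycle: spill the value blocking the root move
      r1 = r2;         // the remaining moves are now unblocked and can be
      r2 = r0;         // emitted in dependency order
      r0 = saved;      // RestoreValue: finish the root move from the spill
      std::printf("%d %d %d\n", r0, r1, r2);  // prints "11 12 10"
      return 0;
    }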
+ LMoveOperands other_move = moves_[root_index_]; + if (other_move.Blocks(destination)) { + ASSERT(other_move.IsPending()); + BreakCycle(index); + return; + } + + // This move is no longer blocked. + EmitMove(index); +} + + +void LGapResolver::Verify() { +#ifdef ENABLE_SLOW_ASSERTS + // No operand should be the destination for more than one move. + for (int i = 0; i < moves_.length(); ++i) { + LOperand* destination = moves_[i].destination(); + for (int j = i + 1; j < moves_.length(); ++j) { + SLOW_ASSERT(!destination->Equals(moves_[j].destination())); + } + } +#endif +} + +#define __ ACCESS_MASM(cgen_->masm()) + +void LGapResolver::BreakCycle(int index) { + // We save in a register the value that should end up in the source of + // moves_[root_index]. After performing all moves in the tree rooted + // in that move, we save the value to that source. + ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source())); + ASSERT(!in_cycle_); + in_cycle_ = true; + LOperand* source = moves_[index].source(); + saved_destination_ = moves_[index].destination(); + if (source->IsRegister()) { + __ mov(kSavedValueRegister, cgen_->ToRegister(source)); + } else if (source->IsStackSlot()) { + __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); + } else if (source->IsDoubleRegister()) { + __ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source)); + } else if (source->IsDoubleStackSlot()) { + __ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source)); + } else { + UNREACHABLE(); + } + // This move will be done by restoring the saved value to the destination. + moves_[index].Eliminate(); +} + + +void LGapResolver::RestoreValue() { + ASSERT(in_cycle_); + ASSERT(saved_destination_ != NULL); + + // Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister. + if (saved_destination_->IsRegister()) { + __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister); + } else if (saved_destination_->IsStackSlot()) { + __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); + } else if (saved_destination_->IsDoubleRegister()) { + __ vmov(cgen_->ToDoubleRegister(saved_destination_), + kSavedDoubleValueRegister); + } else if (saved_destination_->IsDoubleStackSlot()) { + __ vstr(kSavedDoubleValueRegister, + cgen_->ToMemOperand(saved_destination_)); + } else { + UNREACHABLE(); + } + + in_cycle_ = false; + saved_destination_ = NULL; +} + + +void LGapResolver::EmitMove(int index) { + LOperand* source = moves_[index].source(); + LOperand* destination = moves_[index].destination(); + + // Dispatch on the source and destination operand kinds. Not all + // combinations are possible. + + if (source->IsRegister()) { + Register source_register = cgen_->ToRegister(source); + if (destination->IsRegister()) { + __ mov(cgen_->ToRegister(destination), source_register); + } else { + ASSERT(destination->IsStackSlot()); + __ str(source_register, cgen_->ToMemOperand(destination)); + } + + } else if (source->IsStackSlot()) { + MemOperand source_operand = cgen_->ToMemOperand(source); + if (destination->IsRegister()) { + __ ldr(cgen_->ToRegister(destination), source_operand); + } else { + ASSERT(destination->IsStackSlot()); + MemOperand destination_operand = cgen_->ToMemOperand(destination); + if (in_cycle_) { + if (!destination_operand.OffsetIsUint12Encodable()) { + // ip is overwritten while saving the value to the destination. + // Therefore we can't use ip. It is OK if the read from the source + // destroys ip, since that happens before the value is read. 
+          __ vldr(kSavedDoubleValueRegister.low(), source_operand);
+          __ vstr(kSavedDoubleValueRegister.low(), destination_operand);
+        } else {
+          __ ldr(ip, source_operand);
+          __ str(ip, destination_operand);
+        }
+      } else {
+        __ ldr(kSavedValueRegister, source_operand);
+        __ str(kSavedValueRegister, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    Operand source_operand = cgen_->ToOperand(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsStackSlot());
+      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ mov(kSavedValueRegister, source_operand);
+      __ str(kSavedValueRegister, destination_operand);
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ vstr(source_register, destination_operand);
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      ASSERT(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kSavedDoubleValueRegister was used to break the cycle,
+        // but kSavedValueRegister is free.
+        MemOperand source_high_operand =
+            cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ ldr(kSavedValueRegister, source_operand);
+        __ str(kSavedValueRegister, destination_operand);
+        __ ldr(kSavedValueRegister, source_high_operand);
+        __ str(kSavedValueRegister, destination_high_operand);
+      } else {
+        __ vldr(kSavedDoubleValueRegister, source_operand);
+        __ vstr(kSavedDoubleValueRegister, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.h b/deps/v8/src/arm/lithium-gap-resolver-arm.h
new file mode 100644
index 00000000000..334d2920b6e
--- /dev/null
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.h
@@ -0,0 +1,84 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index eb850cd948f..65c92f9e134 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -485,18 +485,19 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
   PopSafepointRegisters();
 }
 
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) {
-  str(reg, SafepointRegistersAndDoublesSlot(reg));
+void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
+                                                             Register dst) {
+  str(src, SafepointRegistersAndDoublesSlot(dst));
 }
 
-void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
-  str(reg, SafepointRegisterSlot(reg));
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  str(src, SafepointRegisterSlot(dst));
 }
 
-void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) {
-  ldr(reg, SafepointRegisterSlot(reg));
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  ldr(dst, SafepointRegisterSlot(src));
 }
 
@@ -745,6 +746,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
   }
 }
 
+void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+#if !defined(USE_ARM_EABI)
+  UNREACHABLE();
+#else
+  vmov(dst, r0, r1);
+#endif
+}
+
 
 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                     const ParameterCount& actual,
@@ -2154,11 +2163,22 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
     ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
     ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
   }
-    // The context may be an intermediate context, not a function context.
-    ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
-  } else {  // Slot is in the current function context.
-    // The context may be an intermediate context, not a function context.
-    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+    mov(dst, cp);
+  }
+
+  // We should not have found a 'with' context by walking the context chain
+  // (i.e., the static scope chain and runtime context chain do not agree).
+  // A variable occurring in such a scope should have slot type LOOKUP and
+  // not CONTEXT.
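+  // (Editorial note: the FCONTEXT_INDEX slot of a genuine function context
+  // points back to the context itself, so the comparison below should only
+  // succeed when dst really is a function context.)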
+  if (FLAG_debug_code) {
+    ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+    cmp(dst, ip);
+    Check(eq, "Yo dawg, I heard you liked function contexts "
+              "so I put function contexts in all your contexts");
   }
 }
 
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 354662da32a..83c59a6f654 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -240,12 +240,13 @@ class MacroAssembler: public Assembler {
   void PopSafepointRegisters();
   void PushSafepointRegistersAndDoubles();
   void PopSafepointRegistersAndDoubles();
-  void StoreToSafepointRegisterSlot(Register reg);
-  void StoreToSafepointRegistersAndDoublesSlot(Register reg);
-  void LoadFromSafepointRegisterSlot(Register reg);
-  static int SafepointRegisterStackIndex(int reg_code);
-  static MemOperand SafepointRegisterSlot(Register reg);
-  static MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
   // Load two consecutive registers with two consecutive memory locations.
   void Ldrd(Register dst1,
@@ -683,6 +684,8 @@ class MacroAssembler: public Assembler {
   void CallCFunction(ExternalReference function, int num_arguments);
   void CallCFunction(Register function, int num_arguments);
 
+  void GetCFunctionDoubleResult(const DoubleRegister dst);
+
   // Calls an API function. Allocates HandleScope, extracts returned value
   // from handle and propagates exceptions. Restores context.
   // stack_space - space to be unwound on exit (includes the call js
@@ -883,10 +886,19 @@ class MacroAssembler: public Assembler {
                 Register scratch1,
                 Register scratch2);
 
+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
   bool generating_stub_;
   bool allow_stub_calls_;
   // This handle will be patched with the code object on installation.
   Handle<Code> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for optimized frame
+  // traversal.
+  friend class OptimizedFrame;
 };
 
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 675fdf49b23..e623ea1914f 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -2332,8 +2332,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
       break;
 
     case STRING_CHECK:
-      if (!function->IsBuiltin()) {
-        // Calling non-builtins with a value as receiver requires boxing.
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
        __ jmp(&miss);
      } else {
        // Check that the object is a two-byte string or a symbol.
@@ -2348,8 +2349,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
      break;
 
    case NUMBER_CHECK: {
-      if (!function->IsBuiltin()) {
-        // Calling non-builtins with a value as receiver requires boxing.
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
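+        // (Editorial note: strict-mode callees receive primitive receivers
+        // unwrapped, which is why the strict_mode() check above skips this
+        // bailout.)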
        __ jmp(&miss);
      } else {
        Label fast;
@@ -2369,8 +2371,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
    }
 
    case BOOLEAN_CHECK: {
-      if (!function->IsBuiltin()) {
-        // Calling non-builtins with a value as receiver requires boxing.
+      if (!function->IsBuiltin() && !function_info->strict_mode()) {
+        // Calling non-strict non-builtins with a value as the receiver
+        // requires boxing.
        __ jmp(&miss);
      } else {
        Label fast;
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 42a61c2b8d0..a323ecaa44c 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -228,6 +228,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
     WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
   } else if (rmode == RelocInfo::CODE_TARGET) {
     WriteTaggedPC(pc_delta, kCodeTargetTag);
+    ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
   } else if (RelocInfo::IsPosition(rmode)) {
     // Use signed delta-encoding for data.
     intptr_t data_delta = rinfo->data() - last_data_;
@@ -251,6 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
     WriteExtraTaggedPC(pc_delta, kPCJumpTag);
     WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
     last_data_ = rinfo->data();
+    ASSERT(begin_pos - pos_ == RelocInfo::kRelocCommentSize);
   } else {
     // For all other modes we simply use the mode as the extra tag.
     // None of these modes need a data component.
@@ -850,12 +852,14 @@ double power_double_double(double x, double y) {
 
 ExternalReference ExternalReference::power_double_double_function() {
-  return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double)));
+  return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double),
+                                    FP_RETURN_CALL));
 }
 
 ExternalReference ExternalReference::power_double_int_function() {
-  return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int)));
+  return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int),
+                                    FP_RETURN_CALL));
 }
 
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 1b71dfc5a19..095859840ef 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -184,6 +184,14 @@ class RelocInfo BASE_EMBEDDED {
   // we do not normally record relocation info.
   static const char* kFillerCommentString;
 
+  // The size of a comment is equal to three bytes for the extra tagged pc +
+  // the tag for the data, and kPointerSize for the actual pointer to the
+  // comment.
+  static const int kRelocCommentSize = 3 + kPointerSize;
+
+  // The maximum size for a call instruction including pc-jump.
+  static const int kMaxCallSize = 6;
+
   enum Mode {
     // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
     CONSTRUCT_CALL,  // code target that is a call to a JavaScript constructor.
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index ae7b2b9f98e..f392cceb3cd 100755
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -261,10 +261,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
       Handle<SharedFunctionInfo> shared = info->shared_info();
       shared->EnableDeoptimizationSupport(*unoptimized.code());
       // The existing unoptimized code was replaced with the new one.
-      Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
-                                          Handle<String>(shared->DebugName()),
-                                          shared->start_position(),
-                                          &unoptimized);
+      Compiler::RecordFunctionCompilation(
+          Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
     }
   }
 
@@ -273,7 +271,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   // optimizable marker in the code object and optimize anyway.  This
   // is safe as long as the unoptimized code has deoptimization
   // support.
-  ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable());
+  ASSERT(FLAG_always_opt || code->optimizable());
   ASSERT(info->shared_info()->has_deoptimization_support());
 
   if (FLAG_trace_hydrogen) {
@@ -283,8 +281,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   }
 
   TypeFeedbackOracle oracle(
-      Handle<Code>(info->shared_info()->code()),
-      Handle<Context>(info->closure()->context()->global_context()));
+      code, Handle<Context>(info->closure()->context()->global_context()));
   HGraphBuilder builder(&oracle);
   HPhase phase(HPhase::kTotal);
   HGraph* graph = builder.CreateGraph(info);
@@ -294,9 +291,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   }
 
   if (graph != NULL && FLAG_build_lithium) {
-    Handle<Code> code = graph->Compile();
-    if (!code.is_null()) {
-      info->SetCode(code);
+    Handle<Code> optimized_code = graph->Compile();
+    if (!optimized_code.is_null()) {
+      info->SetCode(optimized_code);
       FinishOptimization(info->closure(), start);
       return true;
     }
@@ -415,13 +412,25 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
     return Handle<SharedFunctionInfo>::null();
   }
 
+  // Allocate function.
   ASSERT(!info->code().is_null());
+  Handle<SharedFunctionInfo> result =
+      Factory::NewSharedFunctionInfo(
+          lit->name(),
+          lit->materialized_literal_count(),
+          info->code(),
+          SerializedScopeInfo::Create(info->scope()));
+
+  ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+  Compiler::SetFunctionInfo(result, lit, true, script);
+
   if (script->name()->IsString()) {
     PROFILE(CodeCreateEvent(
        info->is_eval()
            ? Logger::EVAL_TAG
            : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
        *info->code(),
+        *result,
        String::cast(script->name())));
     GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
                    script,
@@ -432,21 +441,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
            ? Logger::EVAL_TAG
            : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
        *info->code(),
-        ""));
+        *result,
+        Heap::empty_string()));
     GDBJIT(AddCode(Handle<String>(), script, info->code()));
   }
 
-  // Allocate function.
-  Handle<SharedFunctionInfo> result =
-      Factory::NewSharedFunctionInfo(
-          lit->name(),
-          lit->materialized_literal_count(),
-          info->code(),
-          SerializedScopeInfo::Create(info->scope()));
-
-  ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-  Compiler::SetFunctionInfo(result, lit, true, script);
-
   // Hint to the runtime system used when allocating space for initial
   // property space by setting the expected number of properties for
   // the instances of the function.
@@ -613,10 +612,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
   ASSERT(!info->code().is_null());
   Handle<Code> code = info->code();
   Handle<JSFunction> function = info->closure();
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
-                            Handle<String>(shared->DebugName()),
-                            shared->start_position(),
-                            info);
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
 
   if (info->IsOptimizing()) {
     function->ReplaceCode(*code);
@@ -724,10 +720,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
     ASSERT(!info.code().is_null());
 
     // Function compilation complete.
-    RecordFunctionCompilation(Logger::FUNCTION_TAG,
-                              literal->debug_name(),
-                              literal->start_position(),
-                              &info);
     scope_info = SerializedScopeInfo::Create(info.scope());
   }
 
@@ -738,6 +730,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
                                      info.code(),
                                      scope_info);
   SetFunctionInfo(result, literal, false, script);
+  RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
   result->set_allows_lazy_compilation(allow_lazy);
 
   // Set the expected number of properties for instances and return
@@ -776,28 +769,31 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
 
 
 void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
-                                         Handle<String> name,
-                                         int start_position,
-                                         CompilationInfo* info) {
+                                         CompilationInfo* info,
+                                         Handle<SharedFunctionInfo> shared) {
+  // The SharedFunctionInfo is passed separately, because a CompilationInfo
+  // created from a Script object does not have one.
+
   // Log the code generation.  If source information is available include
   // script name and line number.  Check explicitly whether logging is
   // enabled as finding the line number is not free.
-  if (Logger::is_logging() ||
-      CpuProfiler::is_profiling()) {
+  if (Logger::is_logging() || CpuProfiler::is_profiling()) {
    Handle