benchmark: remove unused files

These files are very old and undocumented. Most likely nobody is using them.

PR-URL: https://github.com/nodejs/node/pull/7094
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Reviewed-By: Jeremiah Senkpiel <fishrock123@rocketmail.com>
Reviewed-By: Brian White <mscdex@mscdex.net>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
This commit is contained in:
parent adc74b42ca
commit ee2843b4ea
Makefile (8 deletions)
@@ -669,14 +669,6 @@ bench: bench-net bench-http bench-fs bench-tls

bench-ci: bench

bench-http-simple:
	benchmark/http_simple_bench.sh

bench-idle:
	$(NODE) benchmark/idle_server.js &
	sleep 1
	$(NODE) benchmark/idle_clients.js &

jslint:
	$(NODE) tools/jslint.js -J benchmark lib src test tools

@@ -1,95 +0,0 @@
// If there are no args, then this is the root. Run all the benchmarks!
'use strict';
if (!process.argv[2])
  parent();
else
  runTest(+process.argv[2], +process.argv[3], process.argv[4]);

function parent() {
  var types = [ 'string', 'buffer' ];
  var durs = [ 1, 5 ];
  var sizes = [ 1, 10, 100, 2048, 10240 ];
  var queue = [];
  types.forEach(function(t) {
    durs.forEach(function(d) {
      sizes.forEach(function(s) {
        queue.push([__filename, d, s, t]);
      });
    });
  });

  var spawn = require('child_process').spawn;
  var node = process.execPath;

  run();

  function run() {
    var args = queue.shift();
    if (!args)
      return;
    var child = spawn(node, args, { stdio: 'inherit' });
    child.on('close', function(code, signal) {
      if (code)
        throw new Error('Benchmark failed: ' + args.slice(1));
      run();
    });
  }
}

function runTest(dur, size, type) {
  if (type !== 'string')
    type = 'buffer';
  var chunk;
  switch (type) {
    case 'string':
      chunk = new Array(size + 1).join('a');
      break;
    case 'buffer':
      chunk = Buffer.alloc(size, 'a');
      break;
  }

  var fs = require('fs');
  try { fs.unlinkSync('write_stream_throughput'); } catch (e) {}

  var start;
  var end;
  function done() {
    var time = end[0] + end[1] / 1E9;
    var written = fs.statSync('write_stream_throughput').size / 1024;
    var rate = (written / time).toFixed(2);
    console.log('fs_write_stream_dur_%d_size_%d_type_%s: %d',
                dur, size, type, rate);

    try { fs.unlinkSync('write_stream_throughput'); } catch (e) {}
  }

  var f = require('fs').createWriteStream('write_stream_throughput');
  f.on('drain', write);
  f.on('open', write);
  f.on('close', done);

  // streams2 fs.WriteStreams will let you send a lot of writes into the
  // buffer before returning false, so capture the *actual* end time when
  // all the bytes have been written to the disk, indicated by 'finish'
  f.on('finish', function() {
    end = process.hrtime(start);
  });

  var ending = false;
  function write() {
    // don't try to write after we end, even if a 'drain' event comes.
    // v0.8 streams are so sloppy!
    if (ending)
      return;

    start = start || process.hrtime();
    while (false !== f.write(chunk));
    end = process.hrtime(start);

    if (end[0] >= dur) {
      ending = true;
      f.end();
    }
  }
}

@@ -1,116 +0,0 @@
#!/bin/bash
cd "$(dirname "$(dirname $0)")"

node=${NODE:-./node}

name=${NAME:-stacks}

if type sysctl &>/dev/null; then
  # darwin and linux
  sudo sysctl -w net.inet.ip.portrange.first=12000
  sudo sysctl -w net.inet.tcp.msl=1000
  sudo sysctl -w kern.maxfiles=1000000 kern.maxfilesperproc=1000000
elif type /usr/sbin/ndd &>/dev/null; then
  # sunos
  /usr/sbin/ndd -set /dev/tcp tcp_smallest_anon_port 12000
  /usr/sbin/ndd -set /dev/tcp tcp_largest_anon_port 65535
  /usr/sbin/ndd -set /dev/tcp tcp_max_buf 2097152
  /usr/sbin/ndd -set /dev/tcp tcp_xmit_hiwat 1048576
  /usr/sbin/ndd -set /dev/tcp tcp_recv_hiwat 1048576
fi

ulimit -n 100000
$node benchmark/http_simple.js &
nodepid=$!
echo "node pid = $nodepid"
sleep 1

# has to stay alive until dtrace exits
dtrace -n 'profile-97/pid == '$nodepid' && arg1/{ @[jstack(150, 8000)] = count(); } tick-60s { exit(0); }' \
  | grep -v _ZN2v88internalL21Builtin_HandleApiCallENS0_12_GLOBAL__N_116BuiltinA \
  > "$name".src &

dtracepid=$!

echo "dtrace pid = $dtracepid"

sleep 1

test () {
  c=$1
  t=$2
  l=$3
  k=$4
  ab $k -t 10 -c $c http://127.0.0.1:8000/$t/$l \
    2>&1 | grep Req
}

#test 100 bytes 1024
#test 10 bytes 100 -k
#test 100 bytes 1024 -k
#test 100 bytes 1024 -k
#test 100 bytes 1024 -k

echo 'Keep going until dtrace stops listening...'
while pargs $dtracepid &>/dev/null; do
  test 100 bytes ${LENGTH:-1} -k
done

kill $nodepid

echo 'Turn the stacks into a svg'
stackvis dtrace flamegraph-svg < "$name".src > "$name".raw.svg

echo 'Prune tiny stacks out of the graph'
node -e '
var infile = process.argv[1];
var outfile = process.argv[2];
var output = "";
var fs = require("fs");
var input = fs.readFileSync(infile, "utf8");

input = input.split("id=\"details\" > </text>");
var head = input.shift() + "id=\"details\" > </text>";
input = input.join("id=\"details\" > </text>");

var tail = "</svg>";
input = input.split("</svg>")[0];

var minyKept = Infinity;
var minyOverall = Infinity;
var rects = input.trim().split(/\n/).filter(function(rect) {
  var my = rect.match(/y="([0-9\.]+)"/);

  if (!my)
    return false;
  var y = +my[1];
  if (!y)
    return false;
  minyOverall = Math.min(minyOverall, y);

  // pluck off everything that will be less than one pixel.
  var mw = rect.match(/width="([0-9\.]+)"/)
  if (mw) {
    var width = +mw[1];
    if (!(width >= 1))
      return false;
  }
  minyKept = Math.min(minyKept, y);
  return true;
});

// move everything up to the top of the page.
var ydiff = minyKept - minyOverall;
rects = rects.map(function(rect) {
  var my = rect.match(/y="([0-9\.]+)"/);
  var y = +my[1];
  var newy = y - ydiff;
  rect = rect.replace(/y="([0-9\.]+)"/, "y=\"" + newy + "\"");
  return rect;
});

fs.writeFileSync(outfile, head + "\n" + rects.join("\n") + "\n" + tail);
' "$name".raw.svg "$name".svg

echo ''
echo 'done. Results in '"$name"'.svg'

@@ -1,43 +0,0 @@
#!/bin/bash
cd "$(dirname "$(dirname $0)")"

if type sysctl &>/dev/null; then
  # darwin and linux
  sudo sysctl -w net.ipv4.ip_local_port_range="12000 65535"
  sudo sysctl -w net.inet.ip.portrange.first=12000
  sudo sysctl -w net.inet.tcp.msl=1000
  sudo sysctl -w kern.maxfiles=1000000 kern.maxfilesperproc=1000000
elif type /usr/sbin/ndd &>/dev/null; then
  # sunos
  /usr/sbin/ndd -set /dev/tcp tcp_smallest_anon_port 12000
  /usr/sbin/ndd -set /dev/tcp tcp_largest_anon_port 65535
  /usr/sbin/ndd -set /dev/tcp tcp_max_buf 2097152
  /usr/sbin/ndd -set /dev/tcp tcp_xmit_hiwat 1048576
  /usr/sbin/ndd -set /dev/tcp tcp_recv_hiwat 1048576
fi

ulimit -n 100000

k=${KEEPALIVE}
if [ "$k" = "no" ]; then
  k=""
else
  k="-k"
fi
node=${NODE:-./node}

$node benchmark/http_simple.js &
npid=$!

sleep 1

if [ "$k" = "-k" ]; then
  echo "using keepalive"
fi

for i in a a a a a a a a a a a a a a a a a a a a; do
  ab $k -t 10 -c 100 http://127.0.0.1:8000/${TYPE:-bytes}/${LENGTH:-1024} \
    2>&1 | grep Req | egrep -o '[0-9\.]+'
done

kill $npid

@@ -1,120 +0,0 @@
'use strict';
var spawn = require('child_process').spawn;
var cluster = require('cluster');
var http = require('http');

var options = {
  mode: 'master',
  host: '127.0.0.1',
  port: 22344,
  path: '/',
  servers: 1,
  clients: 1,
  clientConcurrentRequests: 2
};

for (var i = 2; i < process.argv.length; ++i) {
  var args = process.argv[i].split('=', 2);
  var key = args[0];
  var val = args[1];
  options[key] = val;
}

switch (options.mode) {
  case 'master': startMaster(); break;
  case 'server': startServer(); break;
  case 'client': startClient(); break;
  default: throw new Error('Bad mode: ' + options.mode);
}

process.title = 'http_bench[' + options.mode + ']';

// monkey-patch the log functions so they include name + pid
console.log = patch(console.log);
console.trace = patch(console.trace);
console.error = patch(console.error);

function patch(fun) {
  var prefix = process.title + '[' + process.pid + '] ';
  return function() {
    var args = Array.prototype.slice.call(arguments);
    args[0] = prefix + args[0];
    return fun.apply(console, args);
  };
}

function startMaster() {
  if (!cluster.isMaster) return startServer();

  var forkCount = 0;

  cluster.on('online', function() {
    forkCount = forkCount + 1;
    if (forkCount === ~~options.servers) {
      var args = [
        __filename,
        'mode=client',
        'clientConcurrentRequests=' + options.clientConcurrentRequests
      ];
      for (var i = ~~options.clients; i > 0; --i) {
        var cp = spawn(process.execPath, args);
        cp.stdout.pipe(process.stdout);
        cp.stderr.pipe(process.stderr);
      }
    }
  });

  for (var i = ~~options.servers; i > 0; --i) cluster.fork();
}

function startServer() {
  http.createServer(onRequest).listen(options.port, options.host);

  var body = Array(1024).join('x');
  var headers = {'Content-Length': '' + body.length};

  function onRequest(req, res) {
    req.on('error', onError);
    res.on('error', onError);
    res.writeHead(200, headers);
    res.end(body);
  }

  function onError(err) {
    console.error(err.stack);
  }
}

function startClient() {
  // send off a bunch of concurrent requests
  for (var i = ~~options.clientConcurrentRequests; i > 0; --i) {
    sendRequest();
  }

  function sendRequest() {
    var req = http.request(options, onConnection);
    req.on('error', onError);
    req.end();
  }

  // add a little back-off to prevent EADDRNOTAVAIL errors, it's pretty easy
  // to exhaust the available port range
  function relaxedSendRequest() {
    setTimeout(sendRequest, 1);
  }

  function onConnection(res) {
    res.on('error', onError);
    res.on('data', onData);
    res.on('end', relaxedSendRequest);
  }

  function onError(err) {
    console.error(err.stack);
    relaxedSendRequest();
  }

  function onData(data) {
    // this space intentionally left blank
  }
}

@@ -1,15 +0,0 @@
'use strict';

var http = require('http');
var port = parseInt(process.env.PORT, 10) || 8000;
var defaultLag = parseInt(process.argv[2], 10) || 100;

http.createServer(function(req, res) {
  res.writeHead(200, { 'content-type': 'text/plain',
                       'content-length': '2' });

  var lag = parseInt(req.url.split('/').pop(), 10) || defaultLag;
  setTimeout(function() {
    res.end('ok');
  }, lag);
}).listen(port, 'localhost');

@@ -1,95 +0,0 @@
DIR = File.dirname(__FILE__)

def fib(n)
  return 1 if n <= 1
  fib(n-1) + fib(n-2)
end

def wait(seconds)
  n = (seconds / 0.01).to_i
  n.times do
    sleep(0.01)
    #File.read(DIR + '/yahoo.html')
  end
end

class SimpleApp
  @@responses = {}

  def initialize
    @count = 0
  end

  def deferred?(env)
    false
  end

  def call(env)
    path = env['PATH_INFO'] || env['REQUEST_URI']
    commands = path.split('/')

    @count += 1
    if commands.include?('periodical_activity') and @count % 10 != 1
      return [200, {'Content-Type'=>'text/plain'}, "quick response!\r\n"]
    end

    if commands.include?('fibonacci')
      n = commands.last.to_i
      raise "fibonacci called with n <= 0" if n <= 0
      body = (1..n).to_a.map { |i| fib(i).to_s }.join(' ')
      status = 200

    elsif commands.include?('wait')
      n = commands.last.to_f
      raise "wait called with n <= 0" if n <= 0
      wait(n)
      body = "waited about #{n} seconds"
      status = 200

    elsif commands.include?('bytes')
      n = commands.last.to_i
      raise "bytes called with n <= 0" if n <= 0
      body = @@responses[n] || "C"*n
      status = 200

    elsif commands.include?('fixed')
      n = 20 * 1024;
      body = @@responses[n] || "C"*n
      status = 200

    elsif commands.include?('test_post_length')
      input_body = ""
      while chunk = env['rack.input'].read(512)
        input_body << chunk
      end
      if env['CONTENT_LENGTH'].to_i == input_body.length
        body = "Content-Length matches input length"
        status = 200
      else
        body = "Content-Length doesn't matches input length!
        content_length = #{env['CONTENT_LENGTH'].to_i}
        input_body.length = #{input_body.length}"
        status = 500
      end
    else
      status = 404
      body = "Undefined url"
    end

    body += "\r\n"
    headers = {'Content-Type' => 'text/plain', 'Content-Length' => body.length.to_s }
    [status, headers, [body]]
  end
end


if $0 == __FILE__
  #require DIR + '/../lib/ebb'
  require 'rubygems'
  require 'rack'
  require 'thin'
  require 'ebb'
  # Rack::Handler::Mongrel.run(SimpleApp.new, :Port => 8000)
  Thin::Server.start("0.0.0.0", 8000, SimpleApp.new)
  # Ebb::start_server(SimpleApp.new, :port => 8000)
end

@@ -1,123 +0,0 @@
//
// Usage:
// node benchmark/http_simple_auto.js <args> <target>
//
// Where:
// <args> Arguments to pass to `ab`.
// <target> Target to benchmark, e.g. `bytes/1024` or `buffer/8192`.
//
'use strict';

var http = require('http');
var spawn = require('child_process').spawn;

var port = parseInt(process.env.PORT || 8000);

var fixed = 'C'.repeat(20 * 1024);

var stored = {};
var storedBuffer = {};

var server = http.createServer(function(req, res) {
  var commands = req.url.split('/');
  var command = commands[1];
  var body = '';
  var arg = commands[2];
  var n_chunks = parseInt(commands[3], 10);
  var status = 200;
  var n;
  var i;

  if (command == 'bytes') {
    n = parseInt(arg, 10);
    if (n <= 0)
      throw new Error('bytes called with n <= 0');
    if (stored[n] === undefined) {
      stored[n] = 'C'.repeat(n);
    }
    body = stored[n];

  } else if (command == 'buffer') {
    n = parseInt(arg, 10);
    if (n <= 0) throw new Error('bytes called with n <= 0');
    if (storedBuffer[n] === undefined) {
      storedBuffer[n] = Buffer.allocUnsafe(n);
      for (i = 0; i < n; i++) {
        storedBuffer[n][i] = 'C'.charCodeAt(0);
      }
    }
    body = storedBuffer[n];

  } else if (command == 'quit') {
    res.connection.server.close();
    body = 'quitting';

  } else if (command == 'fixed') {
    body = fixed;

  } else if (command == 'echo') {
    res.writeHead(200, { 'Content-Type': 'text/plain',
                         'Transfer-Encoding': 'chunked' });
    req.pipe(res);
    return;

  } else {
    status = 404;
    body = 'not found\n';
  }

  // example: http://localhost:port/bytes/512/4
  // sends a 512 byte body in 4 chunks of 128 bytes
  if (n_chunks > 0) {
    res.writeHead(status, { 'Content-Type': 'text/plain',
                            'Transfer-Encoding': 'chunked' });
    // send body in chunks
    var len = body.length;
    var step = Math.floor(len / n_chunks) || 1;

    for (i = 0, n = (n_chunks - 1); i < n; ++i) {
      res.write(body.slice(i * step, i * step + step));
    }
    res.end(body.slice((n_chunks - 1) * step));
  } else {
    var content_length = body.length.toString();

    res.writeHead(status, { 'Content-Type': 'text/plain',
                            'Content-Length': content_length });
    res.end(body);
  }

});

server.listen(port, function() {
  var url = 'http://127.0.0.1:' + port + '/';

  var n = process.argv.length - 1;
  process.argv[n] = url + process.argv[n];

  var cp = spawn('ab', process.argv.slice(2));
  cp.stdout.pipe(process.stdout);
  cp.stderr.pipe(process.stderr);
  cp.on('exit', function() {
    server.close();
    process.nextTick(dump_mm_stats);
  });
});

function dump_mm_stats() {
  if (typeof global.gc != 'function') return;

  var before = process.memoryUsage();
  for (var i = 0; i < 10; ++i) global.gc();
  var after = process.memoryUsage();
  setTimeout(print_stats, 250); // give GC time to settle

  function print_stats() {
    console.log('\nBEFORE / AFTER GC');
    ['rss', 'heapTotal', 'heapUsed'].forEach(function(key) {
      var a = before[key] / (1024 * 1024);
      var b = after[key] / (1024 * 1024);
      console.log('%sM / %sM %s', a.toFixed(2), b.toFixed(2), key);
    });
  }
}

@@ -1,74 +0,0 @@
#!/bin/bash

SERVER=127.0.0.1
PORT=${PORT:=8000}

# You may want to configure your TCP settings to make many ports available
# to node and ab. On macintosh use:
# sudo sysctl -w net.inet.ip.portrange.first=32768
# sudo sysctl -w net.inet.tcp.msl=1000

if [ ! -d benchmark/ ]; then
  echo "Run this script from the node root directory"
  exit 1
fi

if [ $SERVER == "127.0.0.1" ]; then
  ./node benchmark/http_simple.js &
  node_pid=$!
  sleep 1
fi

date=`date "+%Y%m%d%H%M%S"`

ab_hello_world() {
  local type="$1"
  local ressize="$2"
  if [ $type == "string" ]; then
    local uri="bytes/$ressize"
  else
    local uri="buffer/$ressize"
  fi


  name="ab-hello-world-$type-$ressize"

  dir=".benchmark_reports/$name/$rev/"
  if [ ! -d $dir ]; then
    mkdir -p $dir
  fi

  summary_fn="$dir/$date.summary"
  data_fn="$dir/$date.data"

  echo "Bench $name starts in 3 seconds..."
  # let things calm down
  sleep 3

  # hammer that as hard as it can for 10 seconds.
  ab -g $data_fn -c 100 -t 10 http://$SERVER:$PORT/$uri > $summary_fn

  # add our data about the server
  echo >> $summary_fn
  echo >> $summary_fn
  echo "webserver-rev: $rev" >> $summary_fn
  echo "webserver-uname: $uname" >> $summary_fn

  grep Req $summary_fn

  echo "Summary: $summary_fn"
  echo
}

# 1k
ab_hello_world 'string' '1024'
ab_hello_world 'buffer' '1024'

# 100k
ab_hello_world 'string' '102400'
ab_hello_world 'buffer' '102400'


if [ ! -z $node_pid ]; then
  kill -9 $node_pid
fi

@@ -1,11 +0,0 @@
'use strict';
const cluster = require('cluster');
const os = require('os');
const path = require('path');

if (cluster.isMaster) {
  console.log('master running on pid %d', process.pid);
  for (var i = 0, n = os.cpus().length; i < n; ++i) cluster.fork();
} else {
  require(path.join(__dirname, 'http_simple.js'));
}

@@ -1,47 +0,0 @@
'use strict';
const net = require('net');

var errors = 0, connections = 0;

function connect() {
  process.nextTick(function() {
    var s = net.Stream();
    var gotConnected = false;
    s.connect(9000);

    s.on('connect', function() {
      gotConnected = true;
      connections++;
      connect();
    });

    s.on('close', function() {
      if (gotConnected) connections--;
    });

    s.on('error', function() {
      errors++;
    });
  });
}

connect();


var oldConnections, oldErrors;

// Try to start new connections every so often
setInterval(connect, 5000);

setInterval(function() {
  if (oldConnections != connections) {
    oldConnections = connections;
    console.log('CLIENT %d connections: %d', process.pid, connections);
  }

  if (oldErrors != errors) {
    oldErrors = errors;
    console.log('CLIENT %d errors: %d', process.pid, errors);
  }
}, 1000);

@@ -1,30 +0,0 @@
'use strict';

const net = require('net');
var errors = 0;

var server = net.Server(function(socket) {

  socket.on('error', function() {
    errors++;
  });

});

//server.maxConnections = 128;

server.listen(9000);

var oldConnections, oldErrors;

setInterval(function() {
  if (oldConnections != server.connections) {
    oldConnections = server.connections;
    console.log('SERVER %d connections: %d', process.pid, server.connections);
  }

  if (oldErrors != errors) {
    oldErrors = errors;
    console.log('SERVER %d errors: %d', process.pid, errors);
  }
}, 1000);

benchmark/io.c (123 deletions)
@@ -1,123 +0,0 @@
/**
 * gcc -o iotest io.c
 */

#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <sys/time.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>

static int c = 0;
static int tsize = 1000 * 1048576;
static const char* path = "/tmp/wt.dat";
static char buf[65536];

static uint64_t now(void) {
  struct timeval tv;

  if (gettimeofday(&tv, NULL))
    abort();

  return tv.tv_sec * 1000000ULL + tv.tv_usec;
}

static void writetest(int size, size_t bsize)
{
  int i;
  uint64_t start, end;
  double elapsed;
  double mbps;

  assert(bsize <= sizeof buf);

  int fd = open(path, O_CREAT|O_WRONLY, 0644);
  if (fd < 0) {
    perror("open failed");
    exit(254);
  }

  start = now();

  for (i = 0; i < size; i += bsize) {
    int rv = write(fd, buf, bsize);
    if (rv < 0) {
      perror("write failed");
      exit(254);
    }
  }

#ifndef NSYNC
# ifdef __linux__
  fdatasync(fd);
# else
  fsync(fd);
# endif
#endif /* SYNC */

  close(fd);

  end = now();
  elapsed = (end - start) / 1e6;
  mbps = ((tsize/elapsed)) / 1048576;

  fprintf(stderr, "Wrote %d bytes in %03fs using %ld byte buffers: %03f\n", size, elapsed, bsize, mbps);
}

void readtest(int size, size_t bsize)
{
  int i;
  uint64_t start, end;
  double elapsed;
  double mbps;

  assert(bsize <= sizeof buf);

  int fd = open(path, O_RDONLY, 0644);
  if (fd < 0) {
    perror("open failed");
    exit(254);
  }

  start = now();

  for (i = 0; i < size; i += bsize) {
    int rv = read(fd, buf, bsize);
    if (rv < 0) {
      perror("write failed");
      exit(254);
    }
  }
  close(fd);

  end = now();
  elapsed = (end - start) / 1e6;
  mbps = ((tsize/elapsed)) / 1048576;

  fprintf(stderr, "Read %d bytes in %03fs using %ld byte buffers: %03fmB/s\n", size, elapsed, bsize, mbps);
}

void cleanup() {
  unlink(path);
}

int main(int argc, char** argv)
{
  int i;
  int bsizes[] = {1024, 4096, 8192, 16384, 32768, 65536, 0};

  if (argc > 1) path = argv[1];

  for (i = 0; bsizes[i] != 0; i++) {
    writetest(tsize, bsizes[i]);
  }
  for (i = 0; bsizes[i] != 0; i++) {
    readtest(tsize, bsizes[i]);
  }
  atexit(cleanup);
  return 0;
}

@@ -1,86 +0,0 @@
#!/usr/bin/env Rscript

# To use this script you'll need to install R: http://www.r-project.org/
# and a library for R called ggplot2
# Which can be done by starting R and typing install.packages("ggplot2")
# like this:
#
# shell% R
# R version 2.11.0 beta (2010-04-12 r51689)
# > install.packages("ggplot2")
# (follow prompt)
#
# Then you can try this script by providing a full path to .data file
# outputed from 'make bench'
#
# > cd ~/src/node
# > make bench
# ...
# > ./benchmark/plot.R .benchmark_reports/ab-hello-world-buffer-1024/ff456b38862de3fd0118c6ac6b3f46edb1fbb87f/20101013162056.data
#
# This will generate a PNG file which you can view
#
#
# Hopefully these steps will be automated in the future.



library(ggplot2)

args <- commandArgs(TRUE)

ab.load <- function (filename, name) {
  raw <- data.frame(read.csv(filename, sep="\t", header=T), server=name)
  raw <- data.frame(raw, time=raw$seconds-min(raw$seconds))
  raw <- data.frame(raw, time_s=raw$time/1000000)
  raw
}

#ab.tsPoint <- function (d) {
#  qplot(time_s, ttime, data=d, facets=server~.,
#    geom="point", alpha=I(1/15), ylab="response time (ms)",
#    xlab="time (s)", main="c=30, res=26kb",
#    ylim=c(0,100))
#}
#
#ab.tsLine <- function (d) {
#  qplot(time_s, ttime, data=d, facets=server~.,
#    geom="line", ylab="response time (ms)",
#    xlab="time (s)", main="c=30, res=26kb",
#    ylim=c(0,100))
#}


filename <- args[0:1]
data <- ab.load(filename, "node")


# histogram

#hist_png_filename <- gsub(".data", "_hist.png", filename)
hist_png_filename <- "hist.png"

png(filename = hist_png_filename, width = 480, height = 380, units = "px")

qplot(ttime, data=data, geom="histogram",
      main="xxx",
      binwidth=1, xlab="response time (ms)",
      xlim=c(0,100))

print(hist_png_filename)



# time series

#ts_png_filename <- gsub(".data", "_ts.png", filename)
ts_png_filename = "ts.png"

png(filename = ts_png_filename, width = 480, height = 380, units = "px")

qplot(time, ttime, data=data, facets=server~.,
      geom="point", alpha=I(1/15), ylab="response time (ms)",
      xlab="time (s)", main="xxx",
      ylim=c(0,100))

print(ts_png_filename)

@@ -1,38 +0,0 @@
#!/usr/bin/env Rscript

# To use this to graph some benchmarks, install R (http://www.r-project.org/)
# and ggplot (http://ggplot2.org/).
#
# Once installed, you can generate some CSV output with a command like this:
#
# $ OUTPUT_FORMAT=csv node benchmark/http/client-request-body.js > data.csv
# $ ./benchmark/plot_csv.R data.csv data.png bytes type
#
# Where the 3rd argument to this script is the graph's X coordinate, the 4th is
# how the output is grouped, and the Y coordinate defaults to result.

library(methods)
library(ggplot2)

# get info from arguments
args <- commandArgs(TRUE)

csvFilename <- args[1]
graphFilename <- args[2]

xCoordinate <- args[3]
groupBy <- args[4]

# read data
data <- read.csv(file = csvFilename, head = TRUE)

# plot and save
plot <- ggplot(data = data, aes_string(x = xCoordinate, y = 'result', col = groupBy)) +
  geom_point(size = 5) +
  ggtitle(data$filename)

png(filename = graphFilename, width = 560, height = 480, units = 'px')
print(plot)
graphics.off()

cat(paste('Saved to', graphFilename, '\n'))

@@ -1,2 +0,0 @@
'use strict';
console.log(process.memoryUsage().rss);

@@ -1,40 +0,0 @@
'use strict';
var http = require('http');

var concurrency = 30;
var port = 12346;
var n = 700;
var bytes = 1024 * 5;

var responses = 0;

var body = 'C'.repeat(bytes);

var server = http.createServer(function(req, res) {
  res.writeHead(200, {
    'Content-Type': 'text/plain',
    'Content-Length': body.length
  });
  res.end(body);
});

server.listen(port, function() {
  var agent = new http.Agent();
  agent.maxSockets = concurrency;

  for (var i = 0; i < n; i++) {
    var req = http.get({
      port: port,
      path: '/',
      agent: agent
    }, function(res) {
      res.resume();
      res.on('end', function() {
        if (++responses === n) {
          server.close();
        }
      });
    });
    req.id = i;
  }
});