JSONRPC: use a bigger default buffer.
This potentially saves us some reads (not measurably, though), at the cost of less fairness. It's important to measure, though, because a single large request will increase the buffer size for successive requests, so we can see this pattern in real usage. tests/test_coinmoves.py::test_generate_coinmoves (2,000,000, sqlite3): Time (from start to end of l2 node): 227 seconds (was 239). Worst latency: 62.4 seconds (was 56.9). Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
This commit is contained in:
@@ -8,7 +8,7 @@
|
||||
#include <errno.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#define READ_CHUNKSIZE 64
|
||||
#define READ_CHUNKSIZE 2048
|
||||
|
||||
struct jsonrpc_io {
|
||||
MEMBUF(char) membuf;
|
||||
@@ -22,13 +22,14 @@ struct jsonrpc_io {
|
||||
struct jsonrpc_io *jsonrpc_io_new(const tal_t *ctx)
|
||||
{
|
||||
struct jsonrpc_io *json_in;
|
||||
const size_t bufsize = READ_CHUNKSIZE * 2;
|
||||
|
||||
json_in = tal(ctx, struct jsonrpc_io);
|
||||
json_in->bytes_read = 0;
|
||||
|
||||
membuf_init(&json_in->membuf,
|
||||
tal_arr(json_in, char, READ_CHUNKSIZE),
|
||||
READ_CHUNKSIZE, membuf_tal_resize);
|
||||
tal_arr(json_in, char, bufsize),
|
||||
bufsize, membuf_tal_resize);
|
||||
json_in->toks = toks_alloc(json_in);
|
||||
jsmn_init(&json_in->parser);
|
||||
|
||||
|
||||
Reference in New Issue
Block a user