2021-12-04 21:53:56 +10:30
|
|
|
#include "config.h"
|
2026-01-20 18:01:54 +10:30
|
|
|
#include <ccan/cast/cast.h>
|
2018-08-22 19:36:40 +09:30
|
|
|
#include <ccan/err/err.h>
|
|
|
|
|
#include <ccan/io/io.h>
|
2016-01-22 06:41:47 +10:30
|
|
|
#include <ccan/read_write_all/read_write_all.h>
|
|
|
|
|
#include <ccan/str/hex/hex.h>
|
2018-02-18 23:26:46 +10:30
|
|
|
#include <ccan/tal/link/link.h>
|
2016-01-22 06:41:47 +10:30
|
|
|
#include <ccan/tal/str/str.h>
|
2025-11-13 16:02:35 +10:30
|
|
|
#include <common/clock_time.h>
|
2023-06-01 16:12:37 +09:30
|
|
|
#include <common/configvar.h>
|
2018-12-08 11:09:28 +10:30
|
|
|
#include <common/json_command.h>
|
2017-12-15 20:59:03 +10:30
|
|
|
#include <common/memleak.h>
|
2016-01-22 06:41:47 +10:30
|
|
|
#include <errno.h>
|
2016-01-22 06:41:47 +10:30
|
|
|
#include <fcntl.h>
|
2021-12-04 21:53:56 +10:30
|
|
|
#include <lightningd/log.h>
|
2019-06-06 16:26:42 +08:00
|
|
|
#include <lightningd/notification.h>
|
2016-01-22 06:41:47 +10:30
|
|
|
#include <stdio.h>
|
|
|
|
|
|
2019-11-18 10:57:17 +10:30
|
|
|
/* What logging level to use if they didn't specify */
#define DEFAULT_LOGLEVEL LOG_INFORM

/* Once we're up and running, this is set up: the logger used to record
 * details if lightningd crashes. */
struct logger *crashlog;
|
2018-03-29 12:36:45 +10:30
|
|
|
|
2026-01-20 15:08:07 +10:30
|
|
|
/* Reference counted log_prefix. Log entries keep a pointer, and they
 * can outlast the log entry point which created them. */
struct log_prefix {
	/* Number of holders (loggers plus ring-buffer entries); freed
	 * when this drops to zero (see log_prefix_drop). */
	size_t refcnt;
	/* The prefix string itself, owned by this structure. */
	const char *prefix;
};
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Fixed-size header stored in the ring buffer before each entry's
 * message and I/O bytes. */
struct log_hdr {
	/* When the entry was created. */
	struct timeabs time;
	/* Severity of the entry. */
	enum log_level level;
	/* Refcounted peer id this entry is attributed to, or NULL. */
	struct node_id_cache *nc;
	/* Refcounted prefix of the logger which produced the entry. */
	struct log_prefix *prefix;
	/* Lengths of the message and raw I/O data which follow. */
	size_t msglen, iolen;
	/* Followed by msglen then iolen bytes! */
};
|
|
|
|
|
|
2019-11-18 10:57:17 +10:30
|
|
|
/* One user-configured "log this prefix/node at this level" rule. */
struct print_filter {
	/* In list log_book->print_filters / log_file->print_filters */
	struct list_node list;
	/* Substring matched against logger prefix or node id hex. */
	const char *prefix;
	/* Minimum level to print when the filter matches. */
	enum log_level level;
};
|
|
|
|
|
|
2023-07-19 14:26:20 +09:30
|
|
|
/* One output file, with its own (possibly empty) set of filters. */
struct log_file {
	/* Per-file print_filter entries. */
	struct list_head print_filters;
	/* The stream we write entries to. */
	FILE *f;
};
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* log2 of the in-memory ring buffer size (24 -> 16MB). */
#define RING_BITS 24
/* The shared, in-memory record of log entries, plus output configuration.
 * All loggers feed into one of these. */
struct log_book {
	/* Global print_filter rules (see also per-file ones). */
	struct list_head print_filters;

	/* Ring buffer: 16MB should be enough for anyone! */
	char ringbuf[1 << RING_BITS];
	/* These free-run, so use modulus */
	size_t ringbuf_start, ringbuf_end;

	/* Non-null once it's been initialized */
	enum log_level *default_print_level;
	/* When this log book was created; entry times are reported
	 * relative to this. */
	struct timeabs init_time;

	/* Our loggers */
	struct list_head loggers;

	/* Array of log files: one per ld->logfiles[] */
	struct log_file **log_files;
	/* Whether to prepend an ISO8601 timestamp to each printed line. */
	bool print_timestamps;

	/* Prefix this to every entry as you output */
	const char *prefix;

	/* Although log_book will copy log entries to parent log_book
	 * (the log_book belongs to lightningd), a pointer to lightningd
	 * is more direct, because notifications need ld->plugins. */
	struct lightningd *ld;
	/* Cache of all node_ids, to avoid multiple copies. */
	struct node_id_map *cache;
};
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
/* One named entry point into the shared log_book (e.g. per subdaemon
 * or per peer). */
struct logger {
	/* Inside log_book->loggers. */
	struct list_node list;
	/* The shared book we record entries into (tal_link'd). */
	struct log_book *log_book;
	/* Node id used when callers don't supply one, or NULL. */
	const struct node_id *default_node_id;
	/* Refcounted prefix attached to every entry from this logger. */
	struct log_prefix *prefix;

	/* Print log message at >= this level */
	enum log_level print_level;
	/* For non-trivial setups, we might need to test filters again
	 * when actually producing output. */
	bool need_refiltering;
};
|
|
|
|
|
|
2021-11-25 06:29:18 +10:30
|
|
|
/* Allocate a new refcounted prefix (refcnt starts at 1), copying the
 * string (which may be take()n). */
static struct log_prefix *log_prefix_new(const tal_t *ctx,
					 const char *prefix TAKES)
{
	struct log_prefix *lp = tal(ctx, struct log_prefix);
	lp->refcnt = 1;
	lp->prefix = tal_strdup(lp, prefix);
	return lp;
}
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Map a [start, start+len) span of the free-running ring buffer onto at
 * most two contiguous memory regions: buf1 up to the physical end of the
 * array, and buf2 for any wrap-around remainder (NULL/0 if none).
 * start is a free-running offset; only start % size matters. */
static void ringbuf_span(const struct log_book *log,
			 size_t start, size_t len,
			 void **buf1, size_t *buf1len,
			 void **buf2, size_t *buf2len)
{
	size_t size = sizeof(log->ringbuf);
	size_t off = start % size;
	/* Contiguous bytes available before we hit the physical end. */
	size_t first = size - off;

	assert(len <= size);

	if (first > len)
		first = len;

	*buf1 = (void *)(log->ringbuf + off);
	*buf1len = first;

	if (first == len) {
		/* No wrap: second region is empty. */
		*buf2 = NULL;
		*buf2len = 0;
	} else {
		/* Wrapped: remainder starts at the physical beginning. */
		*buf2 = (void *)log->ringbuf;
		*buf2len = len - first;
	}
}
|
|
|
|
|
|
|
|
|
|
/* Gather the two ring-buffer regions buf1/buf1len then buf2/buf2len into
 * the single contiguous buffer dest/destlen.  Returns destlen (the number
 * of bytes copied), so callers can advance their offset. */
static size_t copy_from(void *dest, size_t destlen,
			const void *buf1, size_t buf1len,
			const void *buf2, size_t buf2len)
{
	char *out = dest;

	assert(destlen == buf1len + buf2len);
	/* Skip empty regions: their pointers may be NULL. */
	if (buf1len != 0)
		memcpy(out, buf1, buf1len);
	if (buf2len != 0)
		memcpy(out + buf1len, buf2, buf2len);
	return destlen;
}
|
|
|
|
|
|
|
|
|
|
/* Scatter the contiguous buffer dest/destlen into the two ring-buffer
 * regions buf1/buf1len then buf2/buf2len.  Returns destlen (bytes
 * written), so callers can advance the ring end pointer. */
static size_t copy_to(void *buf1, size_t buf1len,
		      void *buf2, size_t buf2len,
		      const void *dest, size_t destlen)
{
	const char *in = dest;

	assert(destlen == buf1len + buf2len);
	/* Skip empty regions: their pointers may be NULL. */
	if (buf1len != 0)
		memcpy(buf1, in, buf1len);
	if (buf2len != 0)
		memcpy(buf2, in + buf1len, buf2len);
	return destlen;
}
|
|
|
|
|
|
|
|
|
|
/* Bytes currently stored in the ring.  Start/end free-run, so plain
 * subtraction is correct even across size_t wrap. */
static size_t ringbuf_used(const struct log_book *log)
{
	return log->ringbuf_end - log->ringbuf_start;
}
|
|
|
|
|
|
|
|
|
|
/* Bytes of free space remaining in the ring. */
static size_t ringbuf_avail(const struct log_book *log)
{
	return sizeof(log->ringbuf) - ringbuf_used(log);
}
|
|
|
|
|
|
2021-11-25 06:29:18 +10:30
|
|
|
/* Release one reference; frees the prefix when the last holder drops it. */
static void log_prefix_drop(struct log_prefix *lp)
{
	if (--lp->refcnt == 0)
		tal_free(lp);
}
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Returns in-place, but copies if it has to. Updates *off. */
/* Read one entry (header, message, io) from the ring at offset *off past
 * ringbuf_start.  msg/io point directly into the ring when the data is
 * contiguous; otherwise a linearized copy is allocated off ctx.  Returns
 * false when fewer than sizeof(hdr) bytes remain (end of entries). */
static bool get_log_entry(const tal_t *ctx,
			  const struct log_book *log,
			  struct log_hdr *hdr,
			  const char **msg,
			  const u8 **io,
			  size_t *off)
{
	void *buf1, *buf2;
	size_t buf1len, buf2len;

	/* No complete header left: we're done iterating. */
	if (ringbuf_used(log) < *off + sizeof(*hdr))
		return false;

	/* Header is always copied out (it may straddle the wrap point). */
	ringbuf_span(log, log->ringbuf_start + *off, sizeof(*hdr),
		     &buf1, &buf1len, &buf2, &buf2len);
	*off += copy_from(hdr, sizeof(*hdr), buf1, buf1len, buf2, buf2len);
	/* Message: copy only if it wraps, otherwise point into the ring. */
	ringbuf_span(log, log->ringbuf_start + *off, hdr->msglen,
		     &buf1, &buf1len, &buf2, &buf2len);
	if (buf2len != 0) {
		char *bytes = tal_arr(ctx, char, buf1len + buf2len);
		*off += copy_from(bytes, tal_bytelen(bytes), buf1, buf1len, buf2, buf2len);
		*msg = bytes;
	} else {
		*msg = buf1;
		*off += buf1len;
	}
	/* I/O bytes: same strategy, but NULL when there are none. */
	ringbuf_span(log, log->ringbuf_start + *off, hdr->iolen,
		     &buf1, &buf1len, &buf2, &buf2len);
	if (buf2len != 0) {
		u8 *bytes = tal_arr(ctx, u8, buf1len + buf2len);
		*off += copy_from(bytes, tal_bytelen(bytes), buf1, buf1len, buf2, buf2len);
		*io = bytes;
	} else {
		if (buf1len == 0)
			*io = NULL;
		else {
			*io = buf1;
			*off += buf1len;
		}
	}
	return true;
}
|
|
|
|
|
|
2021-11-25 06:29:18 +10:30
|
|
|
/* Take an additional reference on a live prefix. */
static struct log_prefix *log_prefix_get(struct log_prefix *lp)
{
	assert(lp->refcnt);
	lp->refcnt++;
	return lp;
}
|
|
|
|
|
|
2019-11-18 10:57:17 +10:30
|
|
|
/* Avoids duplicate node_id entries. */
struct node_id_cache {
	/* Number of ring-buffer entries referencing this id. */
	size_t count;
	/* The cached peer id itself. */
	struct node_id node_id;
};

/* Hashtable key accessor. */
static const struct node_id *node_cache_id(const struct node_id_cache *nc)
{
	return &nc->node_id;
}

/* Hashtable key comparison. */
static bool node_id_cache_eq(const struct node_id_cache *nc,
			     const struct node_id *node_id)
{
	return node_id_eq(&nc->node_id, node_id);
}

/* Defines struct node_id_map and its node_id_map_* operations. */
HTABLE_DEFINE_NODUPS_TYPE(struct node_id_cache,
			  node_cache_id, node_id_hash, node_id_cache_eq,
			  node_id_map);
|
2019-11-18 10:57:17 +10:30
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Evict the oldest entry from the ring buffer, releasing its node_id
 * cache count and its prefix reference. */
static void del_front_log(struct log_book *log)
{
	struct log_hdr hdr;
	void *buf1, *buf2;
	size_t buf1len, buf2len;

	/* Read the oldest header so we know how much to skip. */
	ringbuf_span(log, log->ringbuf_start, sizeof(hdr),
		     &buf1, &buf1len, &buf2, &buf2len);
	copy_from(&hdr, sizeof(hdr), buf1, buf1len, buf2, buf2len);
	assert(ringbuf_used(log) >= sizeof(hdr) + hdr.msglen + hdr.iolen);
	log->ringbuf_start += sizeof(hdr) + hdr.msglen + hdr.iolen;

	/* Drop this entry's hold on the node_id cache entry... */
	if (hdr.nc && --hdr.nc->count == 0)
		tal_free(hdr.nc);
	/* ...and on the prefix. */
	log_prefix_drop(hdr.prefix);
}
|
|
|
|
|
|
|
|
|
|
/* We truncate genuinely giant messages */
/* Caps hdr->msglen/iolen at 1/64 of the ring size, rewriting the message
 * to note the truncation.  NOTE: when truncation happens the returned
 * pointer is a *new* tal allocation on ctx, not the msg passed in —
 * callers must not assume the same allocator owns the result. */
static char *cap_header(const tal_t *ctx, struct log_hdr *hdr, const char *msg)
{
	const size_t max = sizeof(((struct log_book *)0)->ringbuf) / 64;
	if (hdr->msglen > max) {
		msg = tal_fmt(ctx, "[TRUNCATED message from %zu bytes]: %.*s",
			      hdr->msglen, (int)max, msg);
		hdr->msglen = strlen(msg);
	}
	if (hdr->iolen > max) {
		/* Keep the (possibly already truncated) message text, but
		 * record that the io bytes were cut down. */
		msg = tal_fmt(ctx, "[TRUNCATED IO from %zu bytes]: %.*s",
			      hdr->iolen, (int)hdr->msglen, msg);
		hdr->msglen = strlen(msg);
		hdr->iolen = max;
	}
	return cast_const(char *, msg);
}
|
|
|
|
|
|
|
|
|
|
/* Append header + message + io bytes to the ring, evicting old entries
 * until the new one fits.  hdr must already be capped (see cap_header)
 * so it can never exceed the ring size. */
static void add_entry(struct log_book *log,
		      const struct log_hdr *hdr,
		      const char *msg,
		      const u8 *io)
{
	void *buf1, *buf2;
	size_t buf1len, buf2len;
	size_t needed = sizeof(*hdr) + hdr->msglen + hdr->iolen;
	assert(needed < sizeof(log->ringbuf));

	/* Make room by dropping oldest entries. */
	while (ringbuf_avail(log) < needed)
		del_front_log(log);

	/* Write the three parts back-to-back, handling wrap each time. */
	ringbuf_span(log, log->ringbuf_end, sizeof(*hdr), &buf1, &buf1len, &buf2, &buf2len);
	log->ringbuf_end += copy_to(buf1, buf1len, buf2, buf2len, hdr, sizeof(*hdr));
	ringbuf_span(log, log->ringbuf_end, hdr->msglen, &buf1, &buf1len, &buf2, &buf2len);
	log->ringbuf_end += copy_to(buf1, buf1len, buf2, buf2len, msg, hdr->msglen);
	ringbuf_span(log, log->ringbuf_end, hdr->iolen, &buf1, &buf1len, &buf2, &buf2len);
	log->ringbuf_end += copy_to(buf1, buf1len, buf2, buf2len, io, hdr->iolen);
}
|
|
|
|
|
|
2019-06-30 10:12:43 +09:30
|
|
|
/* Human-readable tag printed for each log level.  Switch has no default
 * so the compiler warns if a level is added; abort() catches bad values. */
static const char *level_prefix(enum log_level level)
{
	switch (level) {
	case LOG_IO_OUT:
	case LOG_IO_IN:
		return "IO ";
	case LOG_TRACE:
		return "TRACE ";
	case LOG_DBG:
		return "DEBUG ";
	case LOG_INFORM:
		return "INFO ";
	case LOG_UNUSUAL:
		return "UNUSUAL";
	case LOG_BROKEN:
		return "**BROKEN**";
	}
	abort();
}
|
|
|
|
|
|
2023-07-19 14:26:20 +09:30
|
|
|
/* What do these filters say about level to log this entry at? */
/* First filter whose prefix substring-matches either the logger prefix
 * or the node id hex wins; returns false if none match. */
static bool filter_level(const struct list_head *print_filters,
			 const char *prefix,
			 const char *node_id_str,
			 enum log_level *level)
{
	struct print_filter *i;

	list_for_each(print_filters, i, list) {
		if (strstr(prefix, i->prefix) || strstr(node_id_str, i->prefix)) {
			*level = i->level;
			return true;
		}
	}
	return false;
}
|
|
|
|
|
|
2023-07-19 14:26:22 +09:30
|
|
|
/* What's the lowest filtering which could possibly apply? */
/* Walks all filters and lowers *level to the minimum any matching filter
 * requests.  With no node_id, a filter that looks like a pubkey prefix
 * ("02"/"03") is conservatively assumed to match. */
static void lowest_filter(const struct list_head *print_filters,
			  const char *prefix,
			  const struct node_id *node_id,
			  enum log_level *level)
{
	struct print_filter *i;
	const char *node_id_str;

	if (node_id)
		node_id_str = fmt_node_id(tmpctx, node_id);
	else
		node_id_str = NULL;

	list_for_each(print_filters, i, list) {
		bool match;

		if (strstr(prefix, i->prefix))
			match = true;
		else if (node_id_str) {
			match = (strstr(node_id_str, i->prefix) != NULL);
		} else {
			/* Could this possibly match a node_id? */
			match = strstarts(i->prefix, "02") || strstarts(i->prefix, "03");
		}

		if (match && i->level < *level) {
			*level = i->level;
		}
	}
}
|
|
|
|
|
|
2022-07-08 19:27:11 +09:30
|
|
|
/* Format one entry and write it to stdout (when no files configured) or
 * to each configured log file that passes its filters.
 *
 * log_prefix:  global prefix for every line (log_book->prefix).
 * entry_prefix: the logger's own prefix.
 * str/str_len:  message text (not necessarily NUL-delimited at str_len).
 * io/io_len:    raw bytes for IO-level entries, hex-dumped.
 * print_filters: if non-NULL, re-check filters and drop the entry if a
 *                matching filter's level exceeds this entry's level. */
static void log_to_files(const char *log_prefix,
			 const char *entry_prefix,
			 enum log_level level,
			 /* The node_id to log under. */
			 const struct node_id *node_id,
			 /* Filters to apply, if non-NULL */
			 const struct list_head *print_filters,
			 const struct timeabs *time,
			 const char *str, size_t str_len,
			 const u8 *io, size_t io_len,
			 bool print_timestamps,
			 const enum log_level *default_print_level,
			 struct log_file **log_files)
{
	char tstamp[sizeof("YYYY-mm-ddTHH:MM:SS.nnnZ ")];
	char *entry, nodestr[hex_str_size(PUBKEY_CMPR_LEN)];
	/* Stack buffer sized for the worst-case non-IO line. */
	char buf[sizeof("%s%s%s %s-%s: %s\n")
		 + strlen(log_prefix)
		 + sizeof(tstamp)
		 + strlen(level_prefix(level))
		 + sizeof(nodestr)
		 + strlen(entry_prefix)
		 + str_len];
	bool filtered;

	if (print_timestamps) {
		/* Build a format with the msec placeholder, then fill it. */
		char iso8601_msec_fmt[sizeof("YYYY-mm-ddTHH:MM:SS.%03dZ ")];
		strftime(iso8601_msec_fmt, sizeof(iso8601_msec_fmt), "%FT%T.%%03dZ ", gmtime(&time->ts.tv_sec));
		snprintf(tstamp, sizeof(tstamp), iso8601_msec_fmt, (int) time->ts.tv_nsec / 1000000);
	} else
		tstamp[0] = '\0';

	if (node_id)
		hex_encode(node_id->k, sizeof(node_id->k),
			   nodestr, sizeof(nodestr));
	else
		nodestr[0] = '\0';
	if (level == LOG_IO_IN || level == LOG_IO_OUT) {
		/* IO entries append a hex dump, so they need heap space. */
		const char *dir = level == LOG_IO_IN ? "[IN]" : "[OUT]";
		char *hex = tal_hexstr(NULL, io, io_len);
		if (!node_id)
			entry = tal_fmt(tmpctx, "%s%s%s: %.*s%s %s\n",
					log_prefix, tstamp, entry_prefix, (int)str_len, str, dir, hex);
		else
			entry = tal_fmt(tmpctx, "%s%s%s-%s: %.*s%s %s\n",
					log_prefix, tstamp,
					nodestr,
					entry_prefix, (int)str_len, str, dir, hex);
		tal_free(hex);
	} else {
		size_t len;
		entry = buf;
		if (!node_id)
			len = snprintf(buf, sizeof(buf),
				       "%s%s%s %s: %.*s\n",
				       log_prefix, tstamp, level_prefix(level), entry_prefix, (int)str_len, str);
		else
			len = snprintf(buf, sizeof(buf), "%s%s%s %s-%s: %.*s\n",
				       log_prefix, tstamp, level_prefix(level),
				       nodestr,
				       entry_prefix, (int)str_len, str);
		assert(len < sizeof(buf));
	}

	/* In complex configurations, we tell loggers to overshare: then we
	 * need to filter here to see if we really want it. */
	filtered = false;
	if (print_filters) {
		enum log_level filter;
		if (filter_level(print_filters,
				 entry_prefix, nodestr, &filter)) {
			if (level < filter)
				return;
			/* Even if they specify a default filter level of 'INFO', this overrides */
			filtered = true;
		}
	}

	/* Default if nothing set is stdout */
	if (!log_files) {
		fwrite(entry, strlen(entry), 1, stdout);
		fflush(stdout);
	}

	/* We may have to apply per-file filters. */
	for (size_t i = 0; i < tal_count(log_files); i++) {
		enum log_level filter;
		if (!filter_level(&log_files[i]->print_filters,
				  entry_prefix, nodestr, &filter)) {
			/* If we haven't set default yet, only log UNUSUAL */
			if (!default_print_level)
				filter = LOG_UNUSUAL;
			else {
				/* If we've filtered it already, it passes */
				if (filtered)
					filter = level;
				else
					filter = *default_print_level;
			}
		}
		if (level < filter)
			continue;
		fwrite(entry, strlen(entry), 1, log_files[i]->f);
		fflush(log_files[i]->f);
	}
}
|
|
|
|
|
|
2019-11-18 10:57:18 +10:30
|
|
|
/* tal destructor: drain every entry so node_id/prefix refcounts are
 * released before the book itself goes away. */
static void destroy_log_book(struct log_book *log)
{
	while (ringbuf_used(log) > 0)
		del_front_log(log);
}
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Create the (linkable) shared log book.  Loggers tal_link it, so it
 * lives until the last logger is gone. */
struct log_book *new_log_book(struct lightningd *ld)
{
	struct log_book *log_book = tal_linkable(tal(NULL, struct log_book));

	/* Empty ring. */
	log_book->ringbuf_start = log_book->ringbuf_end = 0;
	log_book->log_files = NULL;
	/* NULL means "still initializing": see new_logger(). */
	log_book->default_print_level = NULL;
	/* We have to allocate this, since we tal_free it on resetting */
	log_book->prefix = tal_strdup(log_book, "");
	list_head_init(&log_book->print_filters);
	list_head_init(&log_book->loggers);
	log_book->init_time = clock_time();
	log_book->ld = ld;
	log_book->cache = tal(log_book, struct node_id_map);
	node_id_map_init(log_book->cache);
	log_book->print_timestamps = true;
	tal_add_destructor(log_book, destroy_log_book);

	return log_book;
}
|
|
|
|
|
|
2023-07-19 14:26:22 +09:30
|
|
|
/* What's the minimum level to print this prefix and node_id for this
 * log book? Saves us marshalling long print lines in most cases. */
static enum log_level print_level(struct log_book *log_book,
				  const struct log_prefix *lp,
				  const struct node_id *node_id,
				  bool *need_refiltering)
{
	enum log_level level = *log_book->default_print_level;
	bool have_filters = false;

	lowest_filter(&log_book->print_filters, lp->prefix, node_id, &level);
	if (!list_empty(&log_book->print_filters))
		have_filters = true;

	/* We need to look into per-file filters as well: might give a
	 * lower filter! */
	for (size_t i = 0; i < tal_count(log_book->log_files); i++) {
		lowest_filter(&log_book->log_files[i]->print_filters,
			      lp->prefix, node_id, &level);
		if (!list_empty(&log_book->log_files[i]->print_filters))
			have_filters = true;
	}

	/* Almost any complex array of filters can mean we want to re-check
	 * when logging. */
	if (need_refiltering)
		*need_refiltering = have_filters;

	return level;
}
|
|
|
|
|
|
2023-07-17 17:06:24 +09:30
|
|
|
/* tal destructor: unlink this logger from its book's list. */
static void destroy_logger(struct logger *log)
{
	list_del_from(&log->log_book->loggers, &log->list);
}
|
|
|
|
|
|
2016-01-22 06:41:47 +10:30
|
|
|
/* With different entry points */
/* Create a logger feeding log_book, with a printf-style prefix and an
 * optional default node_id attributed to its entries. */
struct logger *
new_logger(const tal_t *ctx, struct log_book *log_book,
	   const struct node_id *default_node_id,
	   const char *fmt, ...)
{
	struct logger *log = tal(ctx, struct logger);
	va_list ap;

	log->log_book = tal_link(log, log_book);
	va_start(ap, fmt);
	/* Owned by the log book itself, since it can be referenced
	 * by log entries, too */
	log->prefix = log_prefix_new(log->log_book, take(tal_vfmt(NULL, fmt, ap)));
	va_end(ap);
	log->default_node_id = tal_dup_or_null(log, struct node_id,
					       default_node_id);

	/* Still initializing? Print UNUSUAL / BROKEN messages only */
	if (!log->log_book->default_print_level) {
		log->print_level = LOG_UNUSUAL;
		log->need_refiltering = false;
	} else {
		log->print_level = print_level(log->log_book,
					       log->prefix,
					       default_node_id,
					       &log->need_refiltering);
	}
	list_add(&log->log_book->loggers, &log->list);
	tal_add_destructor(log, destroy_logger);
	return log;
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
/* Accessor: the string prefix this logger attaches to its entries. */
const char *log_prefix(const struct logger *log)
{
	return log->prefix->prefix;
}
|
2019-11-18 10:57:17 +10:30
|
|
|
|
2023-07-17 17:05:22 +09:30
|
|
|
/* True if this logger would actually print IO-level entries (so callers
 * can skip producing expensive hex dumps otherwise). */
bool log_has_io_logging(const struct logger *log)
{
	return print_level(log->log_book, log->prefix, log->default_node_id, NULL) < LOG_TRACE;
}
|
2019-11-18 10:57:18 +10:30
|
|
|
|
2024-12-13 15:07:51 +10:30
|
|
|
/* True if this logger would print TRACE-level entries. */
bool log_has_trace_logging(const struct logger *log)
{
	return print_level(log->log_book, log->prefix, log->default_node_id, NULL) < LOG_DBG;
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
/* tal destructor: remove the cached node_id from the book's hashtable. */
static void destroy_node_id_cache(struct node_id_cache *nc, struct log_book *log_book)
{
	node_id_map_del(log_book->cache, nc);
}
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Print the entry to the configured outputs, if it meets this logger's
 * print level. */
static void maybe_print(struct logger *log,
			const struct log_hdr *l,
			const char *logmsg,
			const u8 *iomsg)
{
	if (l->level >= log->print_level)
		log_to_files(log->log_book->prefix, log->prefix->prefix, l->level,
			     l->nc ? &l->nc->node_id : NULL,
			     log->need_refiltering ? &log->log_book->print_filters : NULL,
			     &l->time, logmsg, strlen(logmsg),
			     iomsg, l->iolen,
			     log->log_book->print_timestamps,
			     log->log_book->default_print_level,
			     log->log_book->log_files);
}
|
2016-01-22 06:41:47 +10:30
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Forward the entry to plugins via the "log" notification, if it meets
 * this logger's print level. */
static void maybe_notify_log(struct logger *log,
			     const struct log_hdr *l,
			     const char *logmsg)
{
	if (l->level >= log->print_level)
		notify_log(log->log_book->ld,
			   l->level,
			   l->time,
			   l->prefix->prefix,
			   logmsg);
}
|
2019-11-18 10:57:18 +10:30
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* Fill in a new entry header: timestamp, level, prefix reference, sizes,
 * and the (shared, refcounted) node_id cache entry. */
static void init_log_hdr(const struct logger *log,
			 struct log_hdr *l,
			 enum log_level level,
			 const struct node_id *node_id,
			 size_t msglen, size_t iolen)
{
	l->time = clock_time();
	l->level = level;
	/* Entry holds its own reference on the prefix. */
	l->prefix = log_prefix_get(log->prefix);
	l->msglen = msglen;
	l->iolen = iolen;
	/* Fall back to the logger's default peer attribution. */
	if (!node_id)
		node_id = log->default_node_id;
	if (node_id) {
		/* Share one cache entry per distinct node_id. */
		l->nc = node_id_map_get(log->log_book->cache, node_id);
		if (!l->nc) {
			l->nc = tal(log->log_book->cache, struct node_id_cache);
			l->nc->count = 0;
			l->nc->node_id = *node_id;
			node_id_map_add(log->log_book->cache, l->nc);
			tal_add_destructor2(l->nc, destroy_node_id_cache,
					    log->log_book);
		}
		l->nc->count++;
	} else
		l->nc = NULL;
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
void logv(struct logger *log, enum log_level level,
|
2019-11-17 22:11:33 +10:30
|
|
|
const struct node_id *node_id,
|
|
|
|
|
bool call_notifier,
|
|
|
|
|
const char *fmt, va_list ap)
|
2016-01-22 06:41:47 +10:30
|
|
|
{
|
2018-02-05 14:39:27 +10:30
|
|
|
int save_errno = errno;
|
2026-01-20 18:01:54 +10:30
|
|
|
struct log_hdr l;
|
|
|
|
|
size_t log_len;
|
|
|
|
|
char *logmsg;
|
2016-01-22 06:41:47 +10:30
|
|
|
|
2019-11-18 10:57:18 +10:30
|
|
|
/* This is WARN_UNUSED_RESULT, because everyone should somehow deal
|
|
|
|
|
* with OOM, even though nobody does. */
|
2026-01-20 18:01:54 +10:30
|
|
|
if (vasprintf(&logmsg, fmt, ap) == -1)
|
2019-11-18 10:57:18 +10:30
|
|
|
abort();
|
2018-02-08 21:05:29 +01:00
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
log_len = strlen(logmsg);
|
2018-05-22 01:30:22 +09:00
|
|
|
|
2018-02-08 21:05:29 +01:00
|
|
|
/* Sanitize any non-printable characters, and replace with '?' */
|
2018-05-22 01:30:22 +09:00
|
|
|
for (size_t i=0; i<log_len; i++)
|
2026-01-20 18:01:54 +10:30
|
|
|
if (logmsg[i] < ' ' || logmsg[i] >= 0x7f)
|
|
|
|
|
logmsg[i] = '?';
|
2018-02-08 21:05:29 +01:00
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
init_log_hdr(log, &l, level, node_id, log_len, 0);
|
|
|
|
|
maybe_print(log, &l, logmsg, NULL);
|
|
|
|
|
maybe_notify_log(log, &l, logmsg);
|
2016-01-22 06:41:47 +10:30
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
logmsg = cap_header(tmpctx, &l, logmsg);
|
|
|
|
|
add_entry(log->log_book, &l, logmsg, NULL);
|
2019-06-06 16:26:42 +08:00
|
|
|
|
|
|
|
|
if (call_notifier)
|
2026-01-20 15:08:07 +10:30
|
|
|
notify_warning(log->log_book->ld,
|
2026-01-20 18:01:54 +10:30
|
|
|
l.level,
|
|
|
|
|
l.time,
|
|
|
|
|
l.prefix->prefix,
|
|
|
|
|
logmsg);
|
|
|
|
|
free(logmsg);
|
2019-06-06 16:26:42 +08:00
|
|
|
|
2018-02-05 14:39:27 +10:30
|
|
|
errno = save_errno;
|
2016-01-22 06:41:47 +10:30
|
|
|
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
/* Record an IO-level entry: the message plus raw wire bytes.
 * errno is preserved.
 * NOTE(review): str and data are marked TAKES but this function never
 * checks taken()/tal_free()s them — confirm callers don't pass take(),
 * or ownership will leak. */
void log_io(struct logger *log, enum log_level dir,
	    const struct node_id *node_id,
	    const char *str TAKES,
	    const void *data TAKES, size_t len)
{
	int save_errno = errno;
	struct log_hdr l;

	assert(dir == LOG_IO_IN || dir == LOG_IO_OUT);

	init_log_hdr(log, &l, dir, node_id, strlen(str), len);

	/* Print first (before any truncation by cap_header below). */
	if (l.level >= log->print_level)
		log_to_files(log->log_book->prefix, log->prefix->prefix, l.level,
			     l.nc ? &l.nc->node_id : NULL,
			     log->need_refiltering ? &log->log_book->print_filters : NULL,
			     &l.time, str, strlen(str),
			     data, len,
			     log->log_book->print_timestamps,
			     log->log_book->default_print_level,
			     log->log_book->log_files);

	/* May replace str with a (tmpctx-owned) truncated copy. */
	str = cap_header(tmpctx, &l, str);
	add_entry(log->log_book, &l, str, data);
	errno = save_errno;
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
/* Varargs wrapper around logv(). */
void log_(struct logger *log, enum log_level level,
	  const struct node_id *node_id,
	  bool call_notifier,
	  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	logv(log, level, node_id, call_notifier, fmt, ap);
	va_end(ap);
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
/* Iterate every entry in @log_book, invoking @func on each.  The
 * typesafe_cb_preargs() wrapper lets @func declare @arg at its real
 * type while log_each_line_() passes it through as void *. */
#define log_each_line(log_book, func, arg)				\
	log_each_line_((log_book),					\
		       typesafe_cb_preargs(void, void *, (func), (arg),	\
					   struct timerel,		\
					   enum log_level,		\
					   const struct node_id *,	\
					   const char *,		\
					   const char *, size_t,	\
					   const u8 *, size_t), (arg))
|
2019-11-18 10:57:15 +10:30
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
static void log_each_line_(const struct log_book *log_book,
|
2026-01-20 18:01:54 +10:30
|
|
|
void (*func)(struct timerel time,
|
2019-11-18 10:57:15 +10:30
|
|
|
enum log_level level,
|
|
|
|
|
const struct node_id *node_id,
|
|
|
|
|
const char *prefix,
|
|
|
|
|
const char *log,
|
2026-01-20 18:01:54 +10:30
|
|
|
size_t loglen,
|
2019-11-18 10:57:15 +10:30
|
|
|
const u8 *io,
|
2026-01-20 18:01:54 +10:30
|
|
|
size_t iolen,
|
2019-11-18 10:57:15 +10:30
|
|
|
void *arg),
|
|
|
|
|
void *arg)
|
2016-01-22 06:41:47 +10:30
|
|
|
{
|
2026-01-20 18:01:54 +10:30
|
|
|
size_t off = 0;
|
|
|
|
|
const char *msg;
|
|
|
|
|
const u8 *io;
|
|
|
|
|
struct log_hdr l;
|
2016-01-22 06:41:47 +10:30
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
while (get_log_entry(tmpctx, log_book, &l, &msg, &io, &off)) {
|
|
|
|
|
func(time_between(l.time, log_book->init_time),
|
|
|
|
|
l.level, l.nc ? &l.nc->node_id : NULL,
|
|
|
|
|
l.prefix->prefix, msg, l.msglen, io, l.iolen, arg);
|
2016-01-22 06:41:47 +10:30
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* State threaded through log_one_line() when dumping a log book to a
 * file descriptor. */
struct log_data {
	/* Destination file descriptor. */
	int fd;
	/* Written before each entry: "" for the first, "\n" thereafter. */
	const char *prefix;
};
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* log_each_line() callback: write one formatted entry to data->fd.
 * Deliberately avoids allocation (only the fixed stack buffer), since
 * this can run from the crash/signal path via log_dump_to_file(). */
static void log_one_line(struct timerel diff,
			 enum log_level level,
			 const struct node_id *node_id,
			 const char *prefix,
			 const char *log,
			 size_t loglen,
			 const u8 *io,
			 size_t iolen,
			 struct log_data *data)
{
	char buf[101];

	/* Header: previous-line separator, age, source prefix, level name. */
	snprintf(buf, sizeof(buf), "%s+%lu.%09u %s%s: ",
		 data->prefix,
		 (unsigned long)diff.ts.tv_sec,
		 (unsigned)diff.ts.tv_nsec,
		 prefix,
		 level == LOG_IO_IN ? "IO_IN"
		 : level == LOG_IO_OUT ? "IO_OUT"
		 : level == LOG_TRACE ? "TRACE"
		 : level == LOG_DBG ? "DEBUG"
		 : level == LOG_INFORM ? "INFO"
		 : level == LOG_UNUSUAL ? "UNUSUAL"
		 : level == LOG_BROKEN ? "BROKEN"
		 : "**INVALID**");

	write_all(data->fd, buf, strlen(buf));
	write_all(data->fd, log, loglen);
	/* I/O entries carry raw bytes: hex-dump them in buf-sized chunks. */
	if (level == LOG_IO_IN || level == LOG_IO_OUT) {
		size_t off, used;

		/* No allocations, may be in signal handler. */
		for (off = 0; off < iolen; off += used) {
			used = iolen - off;
			/* Shrink chunk so its hex form fits in buf. */
			if (hex_str_size(used) > sizeof(buf))
				used = hex_data_size(sizeof(buf));
			hex_encode(io + off, used, buf, hex_str_size(used));
			write_all(data->fd, buf, strlen(buf));
		}
	}

	/* Subsequent entries get a leading newline. */
	data->prefix = "\n";
}
|
|
|
|
|
|
2023-07-19 14:26:22 +09:30
|
|
|
static struct log_file *find_log_file(struct log_book *log_book,
|
|
|
|
|
const char *fname)
|
|
|
|
|
{
|
|
|
|
|
assert(tal_count(log_book->log_files)
|
|
|
|
|
== tal_count(log_book->ld->logfiles));
|
|
|
|
|
for (size_t i = 0; i < tal_count(log_book->log_files); i++) {
|
|
|
|
|
if (streq(log_book->ld->logfiles[i], fname))
|
|
|
|
|
return log_book->log_files[i];
|
|
|
|
|
}
|
|
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2023-07-17 17:06:07 +09:30
|
|
|
/* Option callback for --log-level.  Accepts "level",
 * "level:prefix" (filter by log prefix) or "level:prefix:filename"
 * (filter applying only to one --log-file).  Returns NULL on success,
 * or an error string allocated off tmpctx. */
char *opt_log_level(const char *arg, struct log_book *log_book)
{
	enum log_level level;
	int len;

	/* Level is everything up to the first ':'. */
	len = strcspn(arg, ":");
	if (!log_level_parse(arg, len, &level))
		return tal_fmt(tmpctx, "unknown log level %.*s", len, arg);

	if (arg[len]) {
		/* A prefix filter: applies only to matching log sources. */
		struct print_filter *f = tal(log_book, struct print_filter);
		f->prefix = arg + len + 1;
		f->level = level;

		/* :<filename> */
		len = strcspn(f->prefix, ":");
		if (f->prefix[len]) {
			/* Filter scoped to one particular --log-file. */
			struct log_file *lf;
			lf = find_log_file(log_book, f->prefix + len + 1);
			if (!lf)
				return tal_fmt(tmpctx,
					       "unknown log file %s",
					       f->prefix + len + 1);
			/* Detach prefix from the filename suffix. */
			f->prefix = tal_strndup(f, f->prefix, len);
			list_add_tail(&lf->print_filters, &f->list);
		} else {
			list_add_tail(&log_book->print_filters, &f->list);
		}
	} else {
		/* No prefix: this sets the global default level. */
		tal_free(log_book->default_print_level);
		log_book->default_print_level = tal(log_book, enum log_level);
		*log_book->default_print_level = level;
	}
	return NULL;
}
|
|
|
|
|
|
2023-07-17 17:06:07 +09:30
|
|
|
void json_add_opt_log_levels(struct json_stream *response, struct log_book *log_book)
|
2018-01-29 11:00:15 +10:30
|
|
|
{
|
2019-11-18 10:57:17 +10:30
|
|
|
struct print_filter *i;
|
2018-01-29 11:00:15 +10:30
|
|
|
|
2023-07-17 17:06:07 +09:30
|
|
|
list_for_each(&log_book->print_filters, i, list) {
|
2022-07-04 13:22:35 +09:30
|
|
|
json_add_str_fmt(response, "log-level", "%s:%s",
|
|
|
|
|
log_level_name(i->level), i->prefix);
|
2018-01-29 11:00:15 +10:30
|
|
|
}
|
2019-11-18 10:57:17 +10:30
|
|
|
}
|
|
|
|
|
|
2023-07-17 17:06:07 +09:30
|
|
|
static bool show_log_level(char *buf, size_t len, const struct log_book *log_book)
|
2019-11-18 10:57:17 +10:30
|
|
|
{
|
|
|
|
|
enum log_level l;
|
|
|
|
|
|
2023-07-17 17:06:07 +09:30
|
|
|
if (log_book->default_print_level)
|
|
|
|
|
l = *log_book->default_print_level;
|
2019-11-18 10:57:17 +10:30
|
|
|
else
|
|
|
|
|
l = DEFAULT_LOGLEVEL;
|
2023-06-01 13:46:21 +09:30
|
|
|
strncpy(buf, log_level_name(l), len);
|
|
|
|
|
return true;
|
2018-01-29 11:00:15 +10:30
|
|
|
}
|
|
|
|
|
|
2022-07-08 19:27:11 +09:30
|
|
|
static char *arg_log_prefix(const char *arg, struct log_book *log_book)
|
2016-01-22 06:41:47 +10:30
|
|
|
{
|
2022-07-08 19:27:11 +09:30
|
|
|
tal_free(log_book->prefix);
|
|
|
|
|
log_book->prefix = tal_strdup(log_book, arg);
|
2016-01-22 06:41:47 +10:30
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2023-06-01 13:46:21 +09:30
|
|
|
static bool show_log_prefix(char *buf, size_t len, const struct log_book *log_book)
|
2018-01-29 11:00:15 +10:30
|
|
|
{
|
2023-06-01 13:46:21 +09:30
|
|
|
strncpy(buf, log_book->prefix, len);
|
2023-06-02 12:06:04 +09:30
|
|
|
/* Default is empty, so don't print that! */
|
|
|
|
|
return !streq(log_book->prefix, "");
|
2018-01-29 11:00:15 +10:30
|
|
|
}
|
|
|
|
|
|
2018-08-22 19:36:40 +09:30
|
|
|
/* Self-pipe used to forward SIGHUP to the main loop: [0] is the read
 * end watched by setup_read(), [1] is written by handle_sighup(). */
static int signalfds[2];
|
|
|
|
|
|
|
|
|
|
static void handle_sighup(int sig)
|
|
|
|
|
{
|
|
|
|
|
/* Writes a single 0x00 byte to the signalfds pipe. This may fail if
|
|
|
|
|
* we're hammered with SIGHUP. We don't care. */
|
|
|
|
|
if (write(signalfds[1], "", 1))
|
|
|
|
|
;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Mutual recursion */
|
|
|
|
|
static struct io_plan *setup_read(struct io_conn *conn, struct lightningd *ld);
|
|
|
|
|
|
|
|
|
|
static struct io_plan *rotate_log(struct io_conn *conn, struct lightningd *ld)
|
|
|
|
|
{
|
|
|
|
|
log_info(ld->log, "Ending log due to SIGHUP");
|
2023-07-19 14:26:20 +09:30
|
|
|
for (size_t i = 0; i < tal_count(ld->log->log_book->log_files); i++) {
|
2022-06-26 13:55:01 +09:30
|
|
|
if (streq(ld->logfiles[i], "-"))
|
|
|
|
|
continue;
|
2023-07-19 14:26:20 +09:30
|
|
|
fclose(ld->log->log_book->log_files[i]->f);
|
|
|
|
|
ld->log->log_book->log_files[i]->f = fopen(ld->logfiles[i], "a");
|
|
|
|
|
if (!ld->log->log_book->log_files[i]->f)
|
2022-06-26 13:55:01 +09:30
|
|
|
err(1, "failed to reopen log file %s", ld->logfiles[i]);
|
|
|
|
|
}
|
2018-08-22 19:36:40 +09:30
|
|
|
|
|
|
|
|
log_info(ld->log, "Started log due to SIGHUP");
|
|
|
|
|
return setup_read(conn, ld);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct io_plan *setup_read(struct io_conn *conn, struct lightningd *ld)
|
|
|
|
|
{
|
|
|
|
|
/* We read and discard. */
|
|
|
|
|
static char discard;
|
|
|
|
|
return io_read(conn, &discard, 1, rotate_log, ld);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Install the SIGHUP-driven log rotation machinery: a self-pipe whose
 * read end is watched by the event loop, plus the signal handler that
 * writes to it.  Called once, the first time a --log-file is added. */
static void setup_log_rotation(struct lightningd *ld)
{
	struct sigaction act;
	if (pipe(signalfds) != 0)
		errx(1, "Pipe for signalfds");

	/* Event loop watches the read end; notleak since it lives forever. */
	notleak(io_new_conn(ld, signalfds[0], setup_read, ld));

	/* Non-blocking write end so the handler can never stall. */
	io_fd_block(signalfds[1], false);
	memset(&act, 0, sizeof(act));
	act.sa_handler = handle_sighup;
	/* We do not need any particular flags; the sigaction
	 * default behavior (EINTR any system calls, pass only
	 * the signo to the handler, retain the same signal
	 * handler throughout) is fine with us.
	 */
	act.sa_flags = 0;
	/* Block all signals while handling SIGHUP.
	 * Without this, e.g. an inopportune SIGCHLD while we
	 * are doing a `write` to the SIGHUP signal pipe could
	 * prevent us from sending the byte and performing the
	 * log rotation in the main loop.
	 *
	 * The SIGHUP handler does very little anyway, and
	 * the blocked signals will get delivered soon after
	 * the SIGHUP handler returns.
	 */
	sigfillset(&act.sa_mask);

	if (sigaction(SIGHUP, &act, NULL) != 0)
		err(1, "Setting up SIGHUP handler");
}
|
|
|
|
|
|
2018-01-29 11:00:15 +10:30
|
|
|
char *arg_log_to_file(const char *arg, struct lightningd *ld)
|
2016-01-22 06:41:47 +10:30
|
|
|
{
|
2019-02-21 22:54:39 +01:00
|
|
|
int size;
|
2023-07-19 14:26:20 +09:30
|
|
|
struct log_file *logf;
|
2018-01-29 11:00:15 +10:30
|
|
|
|
2022-06-26 13:55:01 +09:30
|
|
|
if (!ld->logfiles) {
|
2018-08-22 19:36:40 +09:30
|
|
|
setup_log_rotation(ld);
|
2022-06-26 13:55:01 +09:30
|
|
|
ld->logfiles = tal_arr(ld, const char *, 0);
|
2023-07-19 14:26:20 +09:30
|
|
|
ld->log_book->log_files = tal_arr(ld->log_book, struct log_file *, 0);
|
2022-06-26 13:55:01 +09:30
|
|
|
}
|
|
|
|
|
|
2023-07-19 14:26:20 +09:30
|
|
|
logf = tal(ld->log_book->log_files, struct log_file);
|
|
|
|
|
list_head_init(&logf->print_filters);
|
2022-06-26 13:55:01 +09:30
|
|
|
if (streq(arg, "-"))
|
2023-07-19 14:26:20 +09:30
|
|
|
logf->f = stdout;
|
2022-06-26 13:55:01 +09:30
|
|
|
else {
|
2023-07-19 14:26:20 +09:30
|
|
|
logf->f = fopen(arg, "a");
|
|
|
|
|
if (!logf->f)
|
2023-06-06 10:08:47 +09:30
|
|
|
return tal_fmt(tmpctx, "Failed to open: %s", strerror(errno));
|
2022-06-26 13:55:01 +09:30
|
|
|
}
|
2018-08-22 19:36:40 +09:30
|
|
|
|
2022-06-26 13:55:01 +09:30
|
|
|
tal_arr_expand(&ld->logfiles, tal_strdup(ld->logfiles, arg));
|
2023-07-19 14:26:20 +09:30
|
|
|
tal_arr_expand(&ld->log_book->log_files, logf);
|
2018-12-16 15:55:45 +10:30
|
|
|
|
2019-02-21 22:54:39 +01:00
|
|
|
/* For convenience make a block of empty lines just like Bitcoin Core */
|
2023-07-19 14:26:20 +09:30
|
|
|
size = ftell(logf->f);
|
2019-02-21 22:54:39 +01:00
|
|
|
if (size > 0)
|
2023-07-19 14:26:20 +09:30
|
|
|
fprintf(logf->f, "\n\n\n\n");
|
2019-02-21 22:54:39 +01:00
|
|
|
|
2018-12-16 15:55:45 +10:30
|
|
|
log_debug(ld->log, "Opened log file %s", arg);
|
2016-01-22 06:41:47 +10:30
|
|
|
return NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-29 11:00:15 +10:30
|
|
|
/* Register all logging-related command-line options.  These are all
 * OPT_EARLY so logging is configured before most other startup work. */
void opt_register_logging(struct lightningd *ld)
{
	opt_register_early_arg("--log-level",
			       opt_log_level, show_log_level, ld->log_book,
			       "log level (io, debug, info, unusual, broken) [:prefix]");
	clnopt_witharg("--log-timestamps", OPT_EARLY|OPT_SHOWBOOL,
		       opt_set_bool_arg, opt_show_bool,
		       &ld->log_book->print_timestamps,
		       "prefix log messages with timestamp");
	/* OPT_KEEP_WHITESPACE: a prefix may deliberately end in a space. */
	clnopt_witharg("--log-prefix", OPT_EARLY|OPT_KEEP_WHITESPACE,
		       arg_log_prefix, show_log_prefix, ld->log_book, "log prefix");
	/* OPT_MULTI: multiple log files are allowed. */
	clnopt_witharg("--log-file=<file>",
		       OPT_EARLY|OPT_MULTI,
		       arg_log_to_file, NULL, ld,
		       "Also log to file (- for stdout)");
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
/* Called once option parsing completes: finalize default level, apply
 * filters to every existing logger, then replay the buffered early
 * entries to the log files under the now-known levels. */
void logging_options_parsed(struct log_book *log_book)
{
	struct logger *log;
	size_t off;
	const char *msg;
	const u8 *io;
	struct log_hdr l;

	/* If they didn't set an explicit level, set to info */
	if (!log_book->default_print_level) {
		log_book->default_print_level = tal(log_book, enum log_level);
		*log_book->default_print_level = DEFAULT_LOGLEVEL;
	}

	/* Set print_levels for each log, depending on filters. */
	list_for_each(&log_book->loggers, log, list) {
		log->print_level = print_level(log_book,
					       log->prefix,
					       log->default_node_id,
					       &log->need_refiltering);
	}

	/* Catch up, since before we were only printing BROKEN msgs */
	off = 0;
	while (get_log_entry(tmpctx, log_book, &l, &msg, &io, &off)) {
		/* Re-evaluate each buffered entry against final levels. */
		if (l.level >= print_level(log_book, l.prefix, l.nc ? &l.nc->node_id : NULL, NULL))
			log_to_files(log_book->prefix, l.prefix->prefix, l.level,
				     l.nc ? &l.nc->node_id : NULL,
				     &log_book->print_filters,
				     &l.time, msg, l.msglen,
				     io, l.iolen,
				     log_book->print_timestamps,
				     log_book->default_print_level,
				     log_book->log_files);
	}
}
|
|
|
|
|
|
2018-03-29 12:36:45 +10:30
|
|
|
void log_backtrace_print(const char *fmt, ...)
|
2016-01-22 06:41:47 +10:30
|
|
|
{
|
2018-03-29 12:36:45 +10:30
|
|
|
va_list ap;
|
2016-01-22 06:41:47 +10:30
|
|
|
|
2018-03-29 12:36:45 +10:30
|
|
|
if (!crashlog)
|
|
|
|
|
return;
|
2016-01-22 06:41:48 +10:30
|
|
|
|
2018-03-29 12:36:45 +10:30
|
|
|
va_start(ap, fmt);
|
2019-11-17 22:11:33 +10:30
|
|
|
logv(crashlog, LOG_BROKEN, NULL, false, fmt, ap);
|
2018-03-29 12:36:45 +10:30
|
|
|
va_end(ap);
|
2016-01-22 06:41:47 +10:30
|
|
|
}
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
static void log_dump_to_file(int fd, const struct log_book *log_book)
|
2016-01-22 06:41:47 +10:30
|
|
|
{
|
|
|
|
|
char buf[100];
|
2018-03-10 14:02:33 -05:00
|
|
|
int len;
|
2016-01-22 06:41:47 +10:30
|
|
|
struct log_data data;
|
|
|
|
|
time_t start;
|
|
|
|
|
|
2023-07-16 15:26:52 +09:30
|
|
|
start = log_book->init_time.ts.tv_sec;
|
2026-01-20 18:01:54 +10:30
|
|
|
len = snprintf(buf, sizeof(buf), "%zu bytes, %s", ringbuf_used(log_book), ctime(&start));
|
2018-03-10 14:02:33 -05:00
|
|
|
write_all(fd, buf, len);
|
2016-01-22 06:41:47 +10:30
|
|
|
|
|
|
|
|
/* ctime includes \n... WTF? */
|
|
|
|
|
data.prefix = "";
|
|
|
|
|
data.fd = fd;
|
2023-07-16 15:26:52 +09:30
|
|
|
log_each_line(log_book, log_one_line, &data);
|
2016-01-22 06:41:47 +10:30
|
|
|
write_all(fd, "\n\n", strlen("\n\n"));
|
|
|
|
|
}
|
2016-01-22 06:41:48 +10:30
|
|
|
|
2018-03-29 12:36:45 +10:30
|
|
|
void log_backtrace_exit(void)
|
|
|
|
|
{
|
2018-08-22 19:36:42 +09:30
|
|
|
int fd;
|
2018-08-22 13:17:20 +02:00
|
|
|
char timebuf[sizeof("YYYYmmddHHMMSS")];
|
|
|
|
|
char logfile[sizeof("/tmp/lightning-crash.log.") + sizeof(timebuf)];
|
2025-11-13 16:02:35 +10:30
|
|
|
struct timeabs time = clock_time();
|
2018-08-22 13:17:20 +02:00
|
|
|
|
|
|
|
|
strftime(timebuf, sizeof(timebuf), "%Y%m%d%H%M%S", gmtime(&time.ts.tv_sec));
|
2018-08-22 19:36:42 +09:30
|
|
|
|
2018-03-29 12:36:45 +10:30
|
|
|
if (!crashlog)
|
|
|
|
|
return;
|
|
|
|
|
|
2018-08-22 19:36:42 +09:30
|
|
|
/* We expect to be in config dir. */
|
2018-08-22 13:17:20 +02:00
|
|
|
snprintf(logfile, sizeof(logfile), "crash.log.%s", timebuf);
|
2018-03-29 12:36:45 +10:30
|
|
|
|
2018-08-22 19:36:42 +09:30
|
|
|
fd = open(logfile, O_WRONLY|O_CREAT|O_TRUNC, 0600);
|
|
|
|
|
if (fd < 0) {
|
|
|
|
|
snprintf(logfile, sizeof(logfile),
|
2018-08-22 13:17:20 +02:00
|
|
|
"/tmp/lightning-crash.log.%s", timebuf);
|
2018-08-22 19:36:42 +09:30
|
|
|
fd = open(logfile, O_WRONLY|O_CREAT|O_TRUNC, 0600);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Dump entire log. */
|
|
|
|
|
if (fd >= 0) {
|
2023-07-16 15:26:52 +09:30
|
|
|
log_dump_to_file(fd, crashlog->log_book);
|
2018-08-22 19:36:42 +09:30
|
|
|
close(fd);
|
|
|
|
|
fprintf(stderr, "Log dumped in %s\n", logfile);
|
2018-03-29 12:36:45 +10:30
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2022-03-01 11:22:15 -06:00
|
|
|
void fatal_vfmt(const char *fmt, va_list ap)
|
2016-01-22 06:41:48 +10:30
|
|
|
{
|
2022-07-25 13:33:59 +09:30
|
|
|
va_list ap2;
|
|
|
|
|
|
|
|
|
|
/* You are not allowed to re-use va_lists, so make a copy. */
|
|
|
|
|
va_copy(ap2, ap);
|
2016-01-22 06:41:48 +10:30
|
|
|
vfprintf(stderr, fmt, ap);
|
|
|
|
|
fprintf(stderr, "\n");
|
|
|
|
|
|
2018-03-29 12:36:45 +10:30
|
|
|
if (!crashlog)
|
|
|
|
|
exit(1);
|
|
|
|
|
|
2022-07-25 13:33:59 +09:30
|
|
|
logv(crashlog, LOG_BROKEN, NULL, true, fmt, ap2);
|
2017-10-29 21:48:13 +10:30
|
|
|
abort();
|
2022-07-25 13:33:59 +09:30
|
|
|
/* va_copy() must be matched with va_end(), even if unreachable. */
|
|
|
|
|
va_end(ap2);
|
2016-01-22 06:41:48 +10:30
|
|
|
}
|
2018-02-05 14:39:28 +10:30
|
|
|
|
2022-03-01 11:22:15 -06:00
|
|
|
/* printf-style front-end for fatal_vfmt(); never returns. */
void fatal(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	fatal_vfmt(fmt, args);
	va_end(args);
}
|
|
|
|
|
|
2018-02-05 14:39:28 +10:30
|
|
|
/* Parameters threaded through log_to_json() by json_add_log(). */
struct log_info {
	/* Minimum level to include in the output. */
	enum log_level level;
	/* Stream the JSON entries are appended to. */
	struct json_stream *response;
	/* If non-null, only show messages about this peer */
	const struct node_id *node_id;
};
|
|
|
|
|
|
2026-01-20 18:01:54 +10:30
|
|
|
/* log_each_line() callback: append one log entry as a JSON object to
 * info->response, skipping entries below info->level or (when
 * info->node_id is set) entries about other peers. */
static void log_to_json(struct timerel diff,
			enum log_level level,
			const struct node_id *node_id,
			const char *prefix,
			const char *log,
			size_t loglen,
			const u8 *io,
			size_t iolen,
			struct log_info *info)
{
	/* Peer filter: entry must be attributed to the requested node. */
	if (info->node_id) {
		if (!node_id || !node_id_eq(node_id, info->node_id))
			return;
	}

	/* Level filter. */
	if (level < info->level) {
		return;
	}

	json_object_start(info->response, NULL);
	json_add_string(info->response, "type",
			level == LOG_BROKEN ? "BROKEN"
			: level == LOG_UNUSUAL ? "UNUSUAL"
			: level == LOG_INFORM ? "INFO"
			: level == LOG_DBG ? "DEBUG"
			: level == LOG_TRACE ? "TRACE"
			: level == LOG_IO_IN ? "IO_IN"
			: level == LOG_IO_OUT ? "IO_OUT"
			: "UNKNOWN");
	/* Time is relative to log book creation. */
	json_add_timestr(info->response, "time", diff.ts);
	if (node_id)
		json_add_node_id(info->response, "node_id", node_id);
	json_add_string(info->response, "source", prefix);
	json_add_stringn(info->response, "log", log, loglen);
	/* Raw bytes (I/O entries only) go out hex-encoded. */
	if (io)
		json_add_hex(info->response, "data", io, iolen);

	json_object_end(info->response);
}
|
|
|
|
|
|
2018-10-19 11:47:49 +10:30
|
|
|
void json_add_log(struct json_stream *response,
|
2023-07-16 15:26:52 +09:30
|
|
|
const struct log_book *log_book,
|
2019-11-18 10:57:15 +10:30
|
|
|
const struct node_id *node_id,
|
|
|
|
|
enum log_level minlevel)
|
2018-02-05 14:39:28 +10:30
|
|
|
{
|
|
|
|
|
struct log_info info;
|
|
|
|
|
|
|
|
|
|
info.level = minlevel;
|
|
|
|
|
info.response = response;
|
2019-11-18 10:57:15 +10:30
|
|
|
info.node_id = node_id;
|
2018-02-05 14:39:28 +10:30
|
|
|
|
|
|
|
|
json_array_start(info.response, "log");
|
2023-07-16 15:26:52 +09:30
|
|
|
log_each_line(log_book, log_to_json, &info);
|
2018-02-05 14:39:28 +10:30
|
|
|
json_array_end(info.response);
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-16 15:20:06 +10:30
|
|
|
/* JSON-RPC parameter parser for a log level token.  On success fills
 * *level (allocated off cmd) and returns NULL; otherwise returns a
 * bad-parameter failure. */
struct command_result *param_loglevel(struct command *cmd,
				      const char *name,
				      const char *buffer,
				      const jsmntok_t *tok,
				      enum log_level **level)
{
	*level = tal(cmd, enum log_level);
	if (log_level_parse(buffer + tok->start, tok->end - tok->start, *level))
		return NULL;

	/* NOTE(review): this message seems to omit levels (e.g. trace,
	 * broken) that log_level_parse() may accept -- confirm against
	 * log_level_parse() and update the wording if so. */
	return command_fail_badparam(cmd, name, buffer, tok,
				     "should be 'io', 'debug', 'info', or "
				     "'unusual'");
}
|
|
|
|
|
|
2018-12-16 15:22:06 +10:30
|
|
|
/* Handler for the "getlog" JSON-RPC command: return log book metadata
 * plus all entries at or above the optional "level" param (default
 * info). */
static struct command_result *json_getlog(struct command *cmd,
					  const char *buffer,
					  const jsmntok_t *obj UNNEEDED,
					  const jsmntok_t * params)
{
	struct json_stream *response;
	enum log_level *minlevel;
	struct log_book *log_book = cmd->ld->log_book;

	if (!param(cmd, buffer, params,
		   p_opt_def("level", param_loglevel, &minlevel, LOG_INFORM),
		   NULL))
		return command_param_failed();

	response = json_stream_success(cmd);
	/* Suppress logging for this stream, to not bloat io logs */
	json_stream_log_suppress_for_cmd(response, cmd);
	json_add_timestr(response, "created_at", log_book->init_time.ts);
	json_add_num(response, "bytes_used", (unsigned int)ringbuf_used(log_book));
	json_add_num(response, "bytes_max", sizeof(log_book->ringbuf));
	json_add_log(response, log_book, NULL, *minlevel);
	return command_success(cmd, response);
}
|
|
|
|
|
|
|
|
|
|
/* Registration of the "getlog" JSON-RPC command (collected via AUTODATA). */
static const struct json_command getlog_command = {
	"getlog",
	json_getlog,
};
AUTODATA(json_command, &getlog_command);
|