Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 4 additions & 20 deletions ldk-server/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,13 +121,10 @@ fn main() {
std::process::exit(-1);
}

let logger = match ServerLogger::init(config_file.log_level, &log_file_path) {
Ok(logger) => logger,
Err(e) => {
eprintln!("Failed to initialize logger: {e}");
std::process::exit(-1);
},
};
if let Err(e) = ServerLogger::init(config_file.log_level, &log_file_path) {
eprintln!("Failed to initialize logger: {e}");
std::process::exit(-1);
}

let api_key = match load_or_generate_api_key(&network_dir) {
Ok(key) => key,
Expand Down Expand Up @@ -254,14 +251,6 @@ fn main() {
}

runtime.block_on(async {
// Register SIGHUP handler for log rotation
let mut sighup_stream = match tokio::signal::unix::signal(SignalKind::hangup()) {
Ok(stream) => stream,
Err(e) => {
error!("Failed to register SIGHUP handler: {e}");
std::process::exit(-1);
}
};

let mut sigterm_stream = match tokio::signal::unix::signal(SignalKind::terminate()) {
Ok(stream) => stream,
Expand Down Expand Up @@ -517,11 +506,6 @@ fn main() {
let _ = shutdown_tx.send(true);
break;
}
_ = sighup_stream.recv() => {
if let Err(e) = logger.reopen() {
error!("Failed to reopen log file on SIGHUP: {e}");
}
}
_ = sigterm_stream.recv() => {
info!("Received SIGTERM, shutting down..");
let _ = shutdown_tx.send(true);
Expand Down
140 changes: 94 additions & 46 deletions ldk-server/src/util/logger.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,26 @@
// licenses.

use std::fs::{self, File, OpenOptions};
use std::io::{self, Write};
use std::io::{self, BufWriter, Write};
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::SystemTime;

use log::{Level, LevelFilter, Log, Metadata, Record};

/// Maximum size of the log file before it gets rotated (50 MB)
const MAX_LOG_SIZE_BYTES: usize = 50 * 1024 * 1024;
/// Maximum age of the log file before it gets rotated (24 hours)
const ROTATION_INTERVAL_SECS: u64 = 24 * 60 * 60;
Comment on lines +20 to +23
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

these should be configurable


/// Mutable logger state guarded by a single `Mutex` so the writer, the
/// byte counter, and the rotation clock can never be observed out of sync.
struct LoggerState {
	/// Buffered handle to the active log file; buffering batches small
	/// writes into fewer syscalls, so it must be flushed before rotation.
	file: BufWriter<File>,
	/// Running count of bytes appended to the current file, seeded from the
	/// on-disk size at startup; compared against MAX_LOG_SIZE_BYTES to
	/// trigger size-based rotation.
	bytes_written: usize,
	/// Creation time of the current file, seeded from filesystem metadata
	/// (created, falling back to modified) at startup; compared against
	/// ROTATION_INTERVAL_SECS to trigger age-based rotation.
	created_at: SystemTime,
}

/// A logger implementation that writes logs to both stderr and a file.
///
/// The logger formats log messages with RFC3339 timestamps and writes them to:
Expand All @@ -25,12 +39,12 @@ use log::{Level, LevelFilter, Log, Metadata, Record};
///
/// Example: `[2025-12-04T10:30:45Z INFO ldk_server:42] Starting up...`
///
/// The logger handles SIGHUP for log rotation by reopening the file handle when signaled.
/// The logger performs size- and time-based rotation itself, then compresses the rotated file on a background thread by invoking the system `gzip` binary (which must be installed and executable).
pub struct ServerLogger {
/// The maximum log level to display
level: LevelFilter,
/// The file to write logs to, protected by a mutex for thread-safe access
file: Mutex<File>,
/// Groups the file and state in a single Mutex
state: Mutex<LoggerState>,
/// Path to the active log file; used during rotation to rename the old file and reopen a fresh one
log_file_path: PathBuf,
}
Expand All @@ -52,30 +66,60 @@ impl ServerLogger {

let file = open_log_file(log_file_path)?;

// Check existing file metadata to persist size and age across node restarts
let metadata = fs::metadata(log_file_path);
let initial_size = metadata.as_ref().map(|m| m.len() as usize).unwrap_or(0);
let created_at = metadata
.and_then(|m| m.created().or_else(|_| m.modified()))
.unwrap_or_else(|_| SystemTime::now());

let logger = Arc::new(ServerLogger {
level,
file: Mutex::new(file),
log_file_path: log_file_path.to_path_buf(),
state: Mutex::new(LoggerState {
file: BufWriter::new(file),
bytes_written: initial_size,
created_at,
}),
});

log::set_boxed_logger(Box::new(LoggerWrapper(Arc::clone(&logger))))
.map_err(io::Error::other)?;
log::set_max_level(level);

Ok(logger)
}

/// Reopens the log file. Called on SIGHUP for log rotation.
pub fn reopen(&self) -> Result<(), io::Error> {
/// Flushes the current file, renames it with a timestamp, opens a fresh log,
/// and spawns a background thread to compress the old file.
fn rotate(&self, state: &mut LoggerState) -> Result<(), io::Error> {
state.file.flush()?;

let now = chrono::Utc::now().format("%Y-%m-%dT%H-%M-%SZ").to_string();
let mut new_path = self.log_file_path.to_path_buf().into_os_string();
new_path.push(".");
new_path.push(now);
let rotated_path = PathBuf::from(new_path);

fs::rename(&self.log_file_path, &rotated_path)?;

let new_file = open_log_file(&self.log_file_path)?;
match self.file.lock() {
Ok(mut file) => {
// Flush the old buffer before replacing with the new file
file.flush()?;
*file = new_file;
Ok(())
state.file = BufWriter::new(new_file);

// Reset our rotation triggers for the new file
state.bytes_written = 0;
state.created_at = SystemTime::now();

// Spawn independent OS thread to compress the old file using native gzip
thread::spawn(move || match Command::new("gzip").arg("-f").arg(&rotated_path).status() {
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is assuming they have gzip installed and we have permissions to run it. I would rather just have the rust code do this

Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It doesn't really seem worth taking a gzip dependency just to do log rotation. We can just rotate and delete as-is and tell people to use the system log (which handles this properly) otherwise. Really we shouldn't be doing logging ourselves anyway - log should go to stdout/err and systemd or some other logic should figure out what to do with it.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm happy to drop the compression so we do a rotate and delete. If that works for everyone, I'll update this to introduce a configurable max_rotated_files that will default to the last 5 uncompressed files. Does that work?

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm in favor of just doing deletion instead of compression but I would really rather keep the log file and not rely on just stdout/stderr. If we don't have an easy log file and rely on the user's systemd/docker/whatever to do and persist the logs, we will likely not have logs for lots of users and make it much harder to do support.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm in favor of just doing deletion instead of compression but I would really rather keep the log file and not rely on just stdout/stderr. If we don't have an easy log file and rely on the user's systemd/docker/whatever to do and persist the logs, we will likely not have logs for lots of users and make it much harder to do support.

I'm also leaning towards just not logging to file. However I see your point that it might lead to nothing being logged at all. In that case we should however at the very least add the option to disable logging to file, and also update our docs and checked-in systemd ldk-server.service to use that mode, so that everybody who runs it that way will just use journald rather than our custom filesystem logger.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah I like that

Ok(status) if status.success() => {},
Ok(status) => {
eprintln!("Failed to compress log {:?}: exited with {}", rotated_path, status)
},
Err(e) => Err(io::Error::other(format!("Failed to acquire lock: {e}"))),
}
Err(e) => eprintln!("Failed to execute gzip on {:?}: {}", rotated_path, e),
});

Ok(())
}
}

Expand All @@ -89,52 +133,56 @@ impl Log for ServerLogger {
let level_str = format_level(record.level());
let line = record.line().unwrap_or(0);

let log_line = format!(
"[{} {} {}:{}] {}",
format_timestamp(),
level_str,
record.target(),
line,
record.args()
);

// Log to console
let _ = match record.level() {
match record.level() {
Level::Error => {
writeln!(
io::stderr(),
"[{} {} {}:{}] {}",
format_timestamp(),
level_str,
record.target(),
line,
record.args()
)
let _ = writeln!(io::stderr(), "{}", log_line);
},
_ => {
writeln!(
io::stdout(),
"[{} {} {}:{}] {}",
format_timestamp(),
level_str,
record.target(),
line,
record.args()
)
let _ = writeln!(io::stdout(), "{}", log_line);
},
};

// Log to file
if let Ok(mut file) = self.file.lock() {
let _ = writeln!(
file,
"[{} {} {}:{}] {}",
format_timestamp(),
level_str,
record.target(),
line,
record.args()
);
let log_bytes = log_line.len() + 1;

if let Ok(mut state) = self.state.lock() {
let mut needs_rotation = false;

if state.bytes_written + log_bytes > MAX_LOG_SIZE_BYTES {
needs_rotation = true;
} else if let Ok(age) = SystemTime::now().duration_since(state.created_at) {
if age.as_secs() > ROTATION_INTERVAL_SECS {
needs_rotation = true;
}
}

if needs_rotation {
if let Err(e) = self.rotate(&mut state) {
eprintln!("Failed to rotate log file: {}", e);
}
}

let _ = writeln!(state.file, "{}", log_line);
state.bytes_written += log_bytes;
}
}
}

fn flush(&self) {
let _ = io::stdout().flush();
let _ = io::stderr().flush();
if let Ok(mut file) = self.file.lock() {
let _ = file.flush();
if let Ok(mut state) = self.state.lock() {
let _ = state.file.flush();
}
}
}
Expand Down