Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
name: CI

# Run the workspace test suite on pushes to the main branches and on all PRs.
on:
  push:
    branches:
      - main
      - tna_testnet
  pull_request:

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable

      # Cache cargo registry + target dir keyed on Cargo.lock to speed up CI.
      - name: Rust cache
        uses: Swatinem/rust-cache@v2

      # --locked ensures Cargo.lock is honored (fails if it would need updating).
      - name: Test (workspace)
        run: cargo test --workspace --locked

47 changes: 47 additions & 0 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
name: Release

# Build and publish a Linux release binary on version tags (v*),
# or on demand via workflow_dispatch (manual runs skip the upload step).
on:
  push:
    tags:
      - "v*"
  workflow_dispatch:

# Needed by softprops/action-gh-release to create/update the GitHub release.
permissions:
  contents: write

jobs:
  build-linux:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable

      - name: Rust cache
        uses: Swatinem/rust-cache@v2

      - name: Build catalyst-cli (release)
        run: cargo build -p catalyst-cli --release --locked

      # Tarball the binary plus an inner SHA256SUMS file, then checksum the
      # tarball itself so downloads can be verified end-to-end.
      - name: Package + checksums
        shell: bash
        run: |
          set -euo pipefail
          mkdir -p dist
          cp target/release/catalyst-cli dist/
          sha256sum dist/catalyst-cli > dist/SHA256SUMS
          VERSION="${GITHUB_REF_NAME:-manual}"
          TAR="catalyst-cli-${VERSION}-x86_64-unknown-linux-gnu.tar.gz"
          tar -czf "$TAR" -C dist catalyst-cli SHA256SUMS
          sha256sum "$TAR" > "${TAR}.sha256"

      # Only attach assets on tag builds; manual (workflow_dispatch) runs
      # produce the artifacts but do not publish a release.
      - name: Upload release assets (tag builds)
        if: startsWith(github.ref, 'refs/tags/')
        uses: softprops/action-gh-release@v2
        with:
          files: |
            catalyst-cli-${{ github.ref_name }}-x86_64-unknown-linux-gnu.tar.gz
            catalyst-cli-${{ github.ref_name }}-x86_64-unknown-linux-gnu.tar.gz.sha256

2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions crates/catalyst-cli/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,9 @@ bytes = "1.11.1"
tokio-util = { workspace = true, features = ["io"] }
hickory-resolver = "0.25.2"

[dev-dependencies]
# Test-only: scratch files/directories — presumably for node data dirs in
# storage/CLI tests; verify against the test modules.
tempfile = "3"

[features]
default = []
dev = []
46 changes: 46 additions & 0 deletions crates/catalyst-cli/src/commands.rs
Original file line number Diff line number Diff line change
Expand Up @@ -325,6 +325,52 @@ pub async fn db_backup(data_dir: &Path, out_dir: &Path, archive: Option<&Path>)
Ok(())
}

/// Print RocksDB/storage statistics for an existing node data directory.
///
/// Opens the storage at `data_dir` and writes a flat `key: value` report to
/// stdout: state root, pending-transaction count, per-column-family key
/// counts, memory-usage estimates, and the raw engine stats dump if present.
pub async fn db_stats(data_dir: &Path) -> Result<()> {
    let mut config = StorageConfigLib::default();
    config.data_dir = data_dir.to_path_buf();
    let manager = StorageManager::new(config).await?;

    let stats = manager.get_statistics().await?;

    // Head state root, or the literal string "null" when none is recorded.
    let root = match stats.current_state_root {
        Some(h) => format!("0x{}", hex::encode(h)),
        None => "null".to_string(),
    };
    println!("state_root: {}", root);
    println!("pending_transactions: {}", stats.pending_transactions);

    // Column-family key counts are a simple proxy for growth hotspots.
    // Sorted by name so the output is stable across runs.
    let mut cf_counts: Vec<(String, u64)> = stats.column_family_stats.into_iter().collect();
    cf_counts.sort_unstable_by(|lhs, rhs| lhs.0.cmp(&rhs.0));
    for (name, count) in cf_counts {
        println!("cf_keys.{}: {}", name, count);
    }

    // Memory usage estimates, also sorted by key for stable output.
    let mut mem_usage: Vec<(String, u64)> = stats.memory_usage.into_iter().collect();
    mem_usage.sort_unstable_by(|lhs, rhs| lhs.0.cmp(&rhs.0));
    for (component, value) in mem_usage {
        println!("mem.{}: {}", component, value);
    }

    // Raw engine stats dump, when the backend provides one.
    if let Some(dump) = stats.database_stats {
        println!("rocksdb_stats:\n{}", dump);
    }
    Ok(())
}

/// Run maintenance (flush + manual compaction + snapshot cleanup).
///
/// Opens the storage at `data_dir`, delegates to the storage manager's
/// maintenance routine, and prints a single confirmation line on success.
pub async fn db_maintenance(data_dir: &Path) -> Result<()> {
    let mut config = StorageConfigLib::default();
    config.data_dir = data_dir.to_path_buf();

    let manager = StorageManager::new(config).await?;
    manager.maintenance().await?;

    println!("maintenance_ok: true");
    Ok(())
}

pub async fn db_restore(data_dir: &Path, from_dir: &Path) -> Result<()> {
// Optional pre-flight: if metadata is present, load it for post-restore verification.
let meta_path = from_dir.join("catalyst_snapshot.json");
Expand Down
50 changes: 50 additions & 0 deletions crates/catalyst-cli/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,40 @@ pub struct StorageConfig {

/// Enable database compression
pub compression_enabled: bool,

/// Enable pruning of historical RPC/indexer metadata (blocks/tx history).
///
/// When enabled, the node will delete old `metadata` keys for cycles older than
/// `history_keep_cycles` behind the applied head. Authenticated account state is not affected.
#[serde(default)]
pub history_prune_enabled: bool,

/// Number of cycles (seconds) of history to retain behind head when pruning is enabled.
///
/// `0` means "keep all history" (no pruning), even if `history_prune_enabled=true`.
#[serde(default = "default_history_keep_cycles")]
pub history_keep_cycles: u64,

/// Minimum time between prune runs.
#[serde(default = "default_history_prune_interval_seconds")]
pub history_prune_interval_seconds: u64,

/// Maximum number of cycles to prune per run (bounds runtime overhead).
#[serde(default = "default_history_prune_batch_cycles")]
pub history_prune_batch_cycles: u64,
}

/// Default retention window for pruned nodes: 7 days at 1 cycle/sec.
fn default_history_keep_cycles() -> u64 {
    const SECONDS_PER_DAY: u64 = 24 * 60 * 60;
    7 * SECONDS_PER_DAY
}

/// Default minimum spacing between prune runs: 5 minutes.
fn default_history_prune_interval_seconds() -> u64 {
    5 * 60
}

/// Default cap on cycles pruned per run, bounding each run's work.
fn default_history_prune_batch_cycles() -> u64 {
    1000
}

/// Consensus configuration
Expand Down Expand Up @@ -433,6 +467,10 @@ impl Default for NodeConfig {
write_buffer_size_mb: 64,
max_open_files: 1000,
compression_enabled: true,
history_prune_enabled: false,
history_keep_cycles: default_history_keep_cycles(),
history_prune_interval_seconds: default_history_prune_interval_seconds(),
history_prune_batch_cycles: default_history_prune_batch_cycles(),
},
consensus: ConsensusConfig {
cycle_duration_seconds: 60,
Expand Down Expand Up @@ -649,6 +687,18 @@ impl NodeConfig {
if self.storage.capacity_gb == 0 && self.storage.enabled {
return Err(anyhow::anyhow!("Storage capacity must be > 0 when storage is enabled"));
}
if self.storage.history_prune_enabled {
if self.storage.history_prune_interval_seconds == 0 {
return Err(anyhow::anyhow!(
"storage.history_prune_interval_seconds must be > 0 when pruning is enabled"
));
}
if self.storage.history_prune_batch_cycles == 0 {
return Err(anyhow::anyhow!(
"storage.history_prune_batch_cycles must be > 0 when pruning is enabled"
));
}
}

// Validate RPC configuration
if self.rpc.enabled && self.rpc.port == 0 {
Expand Down
22 changes: 22 additions & 0 deletions crates/catalyst-cli/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ mod commands;
mod config;
mod tx;
mod sync;
mod pruning;
mod dfs_store;
mod identity;
mod evm;
Expand Down Expand Up @@ -154,6 +155,18 @@ enum Commands {
#[arg(long)]
from_dir: PathBuf,
},
/// Show local DB stats for a data directory
DbStats {
/// Data directory (same as config.storage.data_dir)
#[arg(long)]
data_dir: PathBuf,
},
/// Run DB maintenance (flush, compact, snapshot cleanup)
DbMaintenance {
/// Data directory (same as config.storage.data_dir)
#[arg(long)]
data_dir: PathBuf,
},
/// Publish snapshot metadata into the node DB (served via RPC for fast-sync tooling)
SnapshotPublish {
/// Data directory (same as config.storage.data_dir) of the RPC node
Expand Down Expand Up @@ -413,6 +426,9 @@ async fn main() -> Result<()> {
.collect();
}

// Enforce bounded, safe parameters before starting.
node_config.validate()?;

start_node(node_config, generate_txs, tx_interval_ms).await?;
}
Commands::GenerateIdentity { output } => {
Expand Down Expand Up @@ -441,6 +457,12 @@ async fn main() -> Result<()> {
Commands::DbRestore { data_dir, from_dir } => {
commands::db_restore(&data_dir, &from_dir).await?;
}
Commands::DbStats { data_dir } => {
commands::db_stats(&data_dir).await?;
}
Commands::DbMaintenance { data_dir } => {
commands::db_maintenance(&data_dir).await?;
}
Commands::SnapshotPublish { data_dir, snapshot_dir, archive_url, archive_path, ttl_seconds } => {
commands::snapshot_publish(&data_dir, &snapshot_dir, &archive_url, &archive_path, ttl_seconds).await?;
}
Expand Down
39 changes: 39 additions & 0 deletions crates/catalyst-cli/src/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -703,6 +703,12 @@ async fn apply_lsu_to_storage_without_root_check(
}
}

// Prune persisted mempool against updated nonces.
prune_persisted_mempool(store).await;

// Opportunistic disk-bounding: prune old historical metadata (if enabled).
crate::pruning::maybe_prune_history(store).await;

// Flush without recomputing.
for cf_name in store.engine().cf_names() {
let _ = store.engine().flush_cf(&cf_name);
Expand Down Expand Up @@ -1775,6 +1781,9 @@ async fn apply_lsu_to_storage(
// Prune persisted mempool against updated nonces.
prune_persisted_mempool(store).await;

// Opportunistic disk-bounding: prune old historical metadata (if enabled).
crate::pruning::maybe_prune_history(store).await;

Ok(state_root)
}

Expand Down Expand Up @@ -2111,6 +2120,36 @@ impl CatalystNode {
.map_err(|e| anyhow::anyhow!("genesis/identity initialization failed: {e}"))?;
}

// Persist storage pruning knobs into DB metadata so apply paths can enforce bounded disk
// growth without needing to thread config through every callsite.
if let Some(store) = &storage {
let enabled: u8 = if self.config.storage.history_prune_enabled { 1 } else { 0 };
let _ = store
.set_metadata("storage:history_prune_enabled", &[enabled])
.await;
let _ = store
.set_metadata(
"storage:history_keep_cycles",
&self.config.storage.history_keep_cycles.to_le_bytes(),
)
.await;
let _ = store
.set_metadata(
"storage:history_prune_interval_seconds",
&self.config
.storage
.history_prune_interval_seconds
.to_le_bytes(),
)
.await;
let _ = store
.set_metadata(
"storage:history_prune_batch_cycles",
&self.config.storage.history_prune_batch_cycles.to_le_bytes(),
)
.await;
}

// Auto-register as a worker (on-chain) for validator nodes.
if self.config.validator {
if let Some(store) = &storage {
Expand Down
Loading