Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 25 additions & 9 deletions mgmtd/src/bee_msg/common.rs
Original file line number Diff line number Diff line change
Expand Up @@ -122,14 +122,18 @@ client version < 8.0)"

// if this is a meta node, auto-add a corresponding meta target after the node.
if msg.node_type == NodeType::Meta {
// Convert the NodeID to a TargetID. Due to the difference in bitsize, meta
// node IDs are not allowed to be bigger than u16
let Ok(target_id) = TargetId::try_from(node.num_id()) else {
bail!(
"{} is not a valid numeric meta node id\
(must be between 1 and 65535)",
node.num_id()
);
// Either create a new TargetID/NodeID or use the given NodeID. In the latter
// case, convert it to a TargetID. Due to the difference in bitsize, meta node
// IDs are not allowed to be bigger than u16.
let target_id = match node.num_id() {
0 => db::misc::find_new_id(
tx,
"targets",
"target_id",
NodeType::Meta,
1..=0xFFFF,
)?,
n => TargetId::try_from(n).map_err(|_| anyhow!("{n} is not a valid numeric meta node id (must be between 1 and 65535)"))?,
};

// Do not set a registration token if the provided string is empty. This makes
Expand All @@ -141,7 +145,19 @@ client version < 8.0)"
None
};

db::target::insert_meta(tx, target_id, tk)?;
db::target::insert(
tx,
target_id,
tk,
NodeTypeServer::Meta,
Some(target_id.into()),
)?;

// If this is the first meta target, set it as meta root
tx.execute(
sql!("INSERT OR IGNORE INTO root_inode (target_id) VALUES (?1)"),
[target_id],
)?;
}

(node, true)
Expand Down
11 changes: 10 additions & 1 deletion mgmtd/src/db/import_v7.rs
Comment thread
iamjoemccormick marked this conversation as resolved.
Comment thread
iamjoemccormick marked this conversation as resolved.
Original file line number Diff line number Diff line change
Expand Up @@ -144,7 +144,13 @@ fn meta_nodes(tx: &Transaction, f: &Path) -> Result<(NodeId, bool)> {
);
};

target::insert_meta(tx, target_id, None)?;
target::insert(
tx,
target_id,
None,
NodeTypeServer::Meta,
Some(target_id.into()),
)?;
}

if root_id == 0 {
Expand Down Expand Up @@ -531,3 +537,6 @@ fn quota_limits(

Ok(())
}

#[cfg(test)]
mod test;
233 changes: 233 additions & 0 deletions mgmtd/src/db/import_v7/test.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,233 @@
use crate::db::{MIGRATIONS, initial_entries};
use crate::types::SqliteEnumExt;
use shared::types::{BuddyGroupId, NodeId, NodeType, PoolId, QuotaIdType, QuotaType, TargetId};
use sqlite::{TransactionExt, migrate_schema, open_in_memory};
use sqlite_check::sql;
use std::fs::{create_dir_all, remove_dir_all};
use std::panic::catch_unwind;
use std::path::Path;
use std::process::Command;

/// Location of the gzipped v7.4 management data directory used as the import
/// fixture. The archive is shipped with the sources next to this test module
/// and is unpacked into a temporary directory by the test below.
const TAR_PATH: &str = concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/src/db/import_v7/test_data.tar.gz"
);

/// Tests the v7 import function from a fixed v7 management data folder, created using v7.4
///
/// Unpacks the bundled tarball into a per-process temporary directory, runs
/// [`import_v7_inner`] against it and removes the directory afterwards —
/// including when the test body panics.
#[cfg(not(target_os = "windows"))]
#[test]
fn import_v7() {
    // Setup: a unique temp dir per process so concurrent test runs don't clash.
    let pid = std::process::id();
    let tmp_dir = std::env::temp_dir().join(format!(".beegfs_import_v7_test_{pid}"));

    create_dir_all(&tmp_dir).unwrap();

    // Run extraction AND the actual test inside catch_unwind so the temp dir is
    // removed even if the untar assertion panics. Previously the extraction ran
    // outside the guard, so a failed untar leaked the directory.
    let res = catch_unwind(|| {
        let out = Command::new("tar")
            .args([
                "-xzf",
                TAR_PATH,
                "-C",
                &tmp_dir.to_string_lossy(),
                "--strip-components",
                "1",
            ])
            .output()
            .unwrap();

        assert!(
            out.status.success(),
            "untaring v7 management archive failed: {}",
            String::from_utf8_lossy(&out.stderr)
        );

        import_v7_inner(&tmp_dir);
    });

    remove_dir_all(&tmp_dir).unwrap();

    // Re-raise any panic from the guarded section AFTER cleanup, preserving the
    // original panic payload (unwrap() would discard it).
    if let Err(payload) = res {
        std::panic::resume_unwind(payload);
    }
}

/// Runs the v7 import against a fresh in-memory database and verifies that all
/// imported entities — nodes, targets, buddy groups, the meta root, storage
/// pools and quota data — match the contents of the fixed v7.4 test data set.
fn import_v7_inner(base_path: &Path) {
    let mut conn = open_in_memory().unwrap();
    let tx = conn.transaction().unwrap();

    // Bring the empty database up to the current schema and seed it the same
    // way a normal management startup would, then run the importer under test.
    migrate_schema(&tx, MIGRATIONS).unwrap();
    initial_entries(&tx, None).unwrap();
    super::import_v7(&tx, base_path).unwrap();

    // Nodes: four meta nodes, two storage nodes and the management node itself.
    let nodes: Vec<(NodeType, NodeId)> = tx
        .query_map_collect(
            sql!("SELECT node_type, node_id FROM nodes ORDER BY node_type ASC, node_id ASC"),
            [],
            |row| Ok((NodeType::from_row(row, 0)?, row.get(1)?)),
        )
        .unwrap();

    let expected_nodes: [(NodeType, NodeId); 7] = [
        (NodeType::Meta, 1),
        (NodeType::Meta, 2),
        (NodeType::Meta, 3),
        (NodeType::Meta, 4),
        (NodeType::Storage, 1),
        (NodeType::Storage, 2),
        (NodeType::Management, 1),
    ];
    assert_eq!(nodes, expected_nodes);

    // Targets: each meta node gets a same-numbered meta target (no pool);
    // storage targets keep their v7 pool assignments.
    let targets: Vec<(NodeType, TargetId, NodeId, Option<PoolId>)> = tx
        .query_map_collect(
            sql!(
                "SELECT node_type, target_id, node_id, pool_id FROM targets
                ORDER BY node_type ASC, target_id ASC"
            ),
            [],
            |row| {
                Ok((
                    NodeType::from_row(row, 0)?,
                    row.get(1)?,
                    row.get(2)?,
                    row.get(3)?,
                ))
            },
        )
        .unwrap();

    let expected_targets: [(NodeType, TargetId, NodeId, Option<PoolId>); 10] = [
        (NodeType::Meta, 1, 1, None),
        (NodeType::Meta, 2, 2, None),
        (NodeType::Meta, 3, 3, None),
        (NodeType::Meta, 4, 4, None),
        (NodeType::Storage, 1, 1, Some(1)),
        (NodeType::Storage, 2, 1, Some(2)),
        (NodeType::Storage, 3, 1, Some(2)),
        (NodeType::Storage, 4, 2, Some(1)),
        (NodeType::Storage, 5, 2, Some(1)),
        (NodeType::Storage, 6, 2, Some(2)),
    ];
    assert_eq!(targets, expected_targets);

    // Buddy groups: one meta group plus two storage groups with their pools.
    let groups: Vec<(NodeType, BuddyGroupId, TargetId, TargetId, Option<PoolId>)> = tx
        .query_map_collect(
            sql!(
                "SELECT node_type, group_id, p_target_id, s_target_id, pool_id
                FROM buddy_groups ORDER BY node_type ASC, group_id ASC"
            ),
            [],
            |row| {
                Ok((
                    NodeType::from_row(row, 0)?,
                    row.get(1)?,
                    row.get(2)?,
                    row.get(3)?,
                    row.get(4)?,
                ))
            },
        )
        .unwrap();

    let expected_groups: [(NodeType, BuddyGroupId, TargetId, TargetId, Option<PoolId>); 3] = [
        (NodeType::Meta, 1, 1, 2, None),
        (NodeType::Storage, 1, 1, 4, Some(1)),
        (NodeType::Storage, 2, 3, 6, Some(2)),
    ];
    assert_eq!(groups, expected_groups);

    // Meta root: the mirrored setup must point at buddy group 1, not at a
    // single target.
    let root: (Option<TargetId>, Option<BuddyGroupId>) = tx
        .query_row(
            sql!("SELECT target_id, group_id FROM root_inode"),
            [],
            |row| Ok((row.get(0)?, row.get(1)?)),
        )
        .unwrap();

    assert_eq!(root, (None, Some(1)));

    // Storage pools: the implicit default pool plus the one user-created pool.
    let pools: Vec<(PoolId, String)> = tx
        .query_map_collect(
            sql!("SELECT pool_id, alias FROM pools_ext ORDER BY pool_id ASC"),
            [],
            |row| Ok((row.get(0)?, row.get(1)?)),
        )
        .unwrap();

    let expected_pools: [(PoolId, String); 2] =
        [(1, "Default".to_string()), (2, "pool2".to_string())];
    assert_eq!(pools, expected_pools);

    // Quota default limits, per pool and id type.
    let default_limits: Vec<(QuotaType, QuotaIdType, PoolId, u64)> = tx
        .query_map_collect(
            sql!(
                "SELECT quota_type, id_type, pool_id, value FROM quota_default_limits
                ORDER BY quota_type ASC, id_type ASC, pool_id ASC, value ASC"
            ),
            [],
            |row| {
                Ok((
                    QuotaType::from_row(row, 0)?,
                    QuotaIdType::from_row(row, 1)?,
                    row.get(2)?,
                    row.get(3)?,
                ))
            },
        )
        .unwrap();

    let expected_default_limits: [(QuotaType, QuotaIdType, PoolId, u64); 8] = [
        (QuotaType::Space, QuotaIdType::User, 1, 1000),
        (QuotaType::Space, QuotaIdType::User, 2, 2000),
        (QuotaType::Space, QuotaIdType::Group, 1, 0),
        (QuotaType::Space, QuotaIdType::Group, 2, 0),
        (QuotaType::Inode, QuotaIdType::User, 1, 100),
        (QuotaType::Inode, QuotaIdType::User, 2, 200),
        (QuotaType::Inode, QuotaIdType::Group, 1, 0),
        (QuotaType::Inode, QuotaIdType::Group, 2, 0),
    ];
    assert_eq!(default_limits, expected_default_limits);

    // Per-id quota limits.
    let limits: Vec<(QuotaType, QuotaIdType, u64, PoolId, u64)> = tx
        .query_map_collect(
            sql!(
                "SELECT quota_type, id_type, quota_id, pool_id, value FROM quota_limits
                ORDER BY quota_type ASC, id_type ASC, quota_id ASC, pool_id ASC, value ASC"
            ),
            [],
            |row| {
                Ok((
                    QuotaType::from_row(row, 0)?,
                    QuotaIdType::from_row(row, 1)?,
                    row.get(2)?,
                    row.get(3)?,
                    row.get(4)?,
                ))
            },
        )
        .unwrap();

    let expected_limits: [(QuotaType, QuotaIdType, u64, PoolId, u64); 6] = [
        (QuotaType::Space, QuotaIdType::User, 0, 1, 1000),
        (QuotaType::Space, QuotaIdType::User, 0, 2, 2000),
        (QuotaType::Space, QuotaIdType::User, 5000, 1, 5000),
        (QuotaType::Inode, QuotaIdType::User, 0, 1, 100),
        (QuotaType::Inode, QuotaIdType::User, 0, 2, 200),
        (QuotaType::Inode, QuotaIdType::User, 5000, 1, 500),
    ];
    assert_eq!(limits, expected_limits);

    tx.commit().unwrap();
}
Binary file added mgmtd/src/db/import_v7/test_data.tar.gz
Binary file not shown.
52 changes: 1 addition & 51 deletions mgmtd/src/db/target.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,38 +33,6 @@ pub(crate) fn validate_ids(
}
}

/// Inserts a new meta target.
///
/// BeeGFS doesn't really support meta targets at the moment, so there always must be exactly one
/// meta target per meta node with their IDs being the same.
pub(crate) fn insert_meta(
tx: &Transaction,
target_id: TargetId,
reg_token: Option<&str>,
) -> Result<()> {
let target_id = if target_id == 0 {
misc::find_new_id(tx, "targets", "target_id", NodeType::Meta, 1..=0xFFFF)?
} else {
target_id
};

insert(
tx,
target_id,
reg_token,
NodeTypeServer::Meta,
Some(target_id.into()),
)?;

// If this is the first meta target, set it as meta root
tx.execute(
sql!("INSERT OR IGNORE INTO root_inode (target_id) VALUES (?1)"),
[target_id],
)?;

Ok(())
}

/// Inserts a new storage target which may not exist yet.
///
/// Providing 0 for `target_id` chooses the ID automatically.
Expand All @@ -87,7 +55,7 @@ pub(crate) fn insert_storage(
Ok(target_id)
}

fn insert(
pub fn insert(
tx: &Transaction,
target_id: TargetId,
reg_token: Option<&str>,
Expand Down Expand Up @@ -272,24 +240,6 @@ pub(crate) fn delete_storage(tx: &Transaction, target_id: TargetId) -> Result<()
mod test {
use super::*;

#[test]
fn set_get_meta() {
with_test_data(|tx| {
super::insert_meta(tx, 1, None).unwrap_err();
super::insert_meta(tx, 99, None).unwrap();
// existing id
super::insert_meta(tx, 99, None).unwrap_err();

let targets: i64 = tx
.query_row(sql!("SELECT COUNT(*) FROM meta_targets"), [], |row| {
row.get(0)
})
.unwrap();

assert_eq!(5, targets);
})
}

#[test]
fn set_get_storage_and_map() {
with_test_data(|tx| {
Expand Down
Loading