diff --git a/src/api2/config/tape_backup_job.rs b/src/api2/config/tape_backup_job.rs index 770488be..e32841c0 100644 --- a/src/api2/config/tape_backup_job.rs +++ b/src/api2/config/tape_backup_job.rs @@ -1,15 +1,14 @@ -use anyhow::Error; -use serde_json::Value; use ::serde::{Deserialize, Serialize}; +use anyhow::Error; use hex::FromHex; +use serde_json::Value; -use proxmox_router::{http_bail, Router, RpcEnvironment, Permission}; +use proxmox_router::{http_bail, Permission, Router, RpcEnvironment}; use proxmox_schema::{api, param_bail}; use pbs_api_types::{ - Authid, TapeBackupJobConfig, TapeBackupJobConfigUpdater, - JOB_ID_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, - PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, + Authid, TapeBackupJobConfig, TapeBackupJobConfigUpdater, JOB_ID_SCHEMA, PRIV_TAPE_AUDIT, + PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, }; use pbs_config::CachedUserInfo; @@ -107,7 +106,6 @@ pub fn read_tape_backup_job( id: String, mut rpcenv: &mut dyn RpcEnvironment, ) -> Result { - let (config, digest) = pbs_config::tape_job::config()?; let job = config.lookup("backup", &id)?; @@ -119,7 +117,7 @@ pub fn read_tape_backup_job( #[api()] #[derive(Serialize, Deserialize)] -#[serde(rename_all="kebab-case")] +#[serde(rename_all = "kebab-case")] /// Deletable property name pub enum DeletableProperty { /// Delete the comment property. @@ -188,29 +186,61 @@ pub fn update_tape_backup_job( if let Some(delete) = delete { for delete_prop in delete { match delete_prop { - DeletableProperty::EjectMedia => { data.setup.eject_media = None; }, - DeletableProperty::ExportMediaSet => { data.setup.export_media_set = None; }, - DeletableProperty::LatestOnly => { data.setup.latest_only = None; }, - DeletableProperty::NotifyUser => { data.setup.notify_user = None; }, - DeletableProperty::Schedule => { data.schedule = None; }, - DeletableProperty::Comment => { data.comment = None; }, - DeletableProperty::GroupFilter => { data.setup.group_filter = None; }, + DeletableProperty::EjectMedia => { + data.setup.eject_media = None; + } + DeletableProperty::ExportMediaSet => { + data.setup.export_media_set = None; + } + DeletableProperty::LatestOnly => { + data.setup.latest_only = None; + } + DeletableProperty::NotifyUser => { + data.setup.notify_user = None; + } + DeletableProperty::Schedule => { + data.schedule = None; + } + DeletableProperty::Comment => { + data.comment = None; + } + DeletableProperty::GroupFilter => { + data.setup.group_filter = None; + } } } } - if let Some(store) = update.setup.store { data.setup.store = store; } - if let Some(pool) = update.setup.pool { data.setup.pool = pool; } - if let Some(drive) = update.setup.drive { data.setup.drive = drive; } + if let Some(store) = update.setup.store { + data.setup.store = store; + } + if let Some(pool) = update.setup.pool { + data.setup.pool = pool; + } + if let Some(drive) = update.setup.drive { + data.setup.drive = drive; + } - if update.setup.eject_media.is_some() { data.setup.eject_media = update.setup.eject_media; }; - if update.setup.export_media_set.is_some() { data.setup.export_media_set = update.setup.export_media_set; } - if update.setup.latest_only.is_some() { data.setup.latest_only = update.setup.latest_only; } - if update.setup.notify_user.is_some() { data.setup.notify_user = update.setup.notify_user; } - if update.setup.group_filter.is_some() { data.setup.group_filter = update.setup.group_filter; } + if update.setup.eject_media.is_some() { + data.setup.eject_media = update.setup.eject_media; + }; + if update.setup.export_media_set.is_some() { + 
data.setup.export_media_set = update.setup.export_media_set; + } + if update.setup.latest_only.is_some() { + data.setup.latest_only = update.setup.latest_only; + } + if update.setup.notify_user.is_some() { + data.setup.notify_user = update.setup.notify_user; + } + if update.setup.group_filter.is_some() { + data.setup.group_filter = update.setup.group_filter; + } let schedule_changed = data.schedule != update.schedule; - if update.schedule.is_some() { data.schedule = update.schedule; } + if update.schedule.is_some() { + data.schedule = update.schedule; + } if let Some(comment) = update.comment { let comment = comment.trim(); @@ -267,8 +297,10 @@ pub fn delete_tape_backup_job( match config.lookup::("backup", &id) { Ok(_job) => { config.sections.remove(&id); - }, - Err(_) => { http_bail!(NOT_FOUND, "job '{}' does not exist.", id) }, + } + Err(_) => { + http_bail!(NOT_FOUND, "job '{}' does not exist.", id) + } }; pbs_config::tape_job::save_config(&config)?; diff --git a/src/api2/config/tape_encryption_keys.rs b/src/api2/config/tape_encryption_keys.rs index 3e9a60d1..3146d0d7 100644 --- a/src/api2/config/tape_encryption_keys.rs +++ b/src/api2/config/tape_encryption_keys.rs @@ -1,15 +1,13 @@ -use anyhow::{format_err, bail, Error}; -use serde_json::Value; +use anyhow::{bail, format_err, Error}; use hex::FromHex; +use serde_json::Value; -use proxmox_router::{http_bail, ApiMethod, Router, RpcEnvironment, Permission}; +use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment}; use proxmox_schema::{api, param_bail}; use pbs_api_types::{ - Authid, Fingerprint, KeyInfo, Kdf, - TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA, - PROXMOX_CONFIG_DIGEST_SCHEMA, PASSWORD_HINT_SCHEMA, - PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, + Authid, Fingerprint, Kdf, KeyInfo, PASSWORD_HINT_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, + PROXMOX_CONFIG_DIGEST_SCHEMA, TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA, }; use pbs_config::CachedUserInfo; @@ -17,12 +15,7 @@ use pbs_config::CachedUserInfo; use pbs_config::key_config::KeyConfig; use pbs_config::open_backup_lockfile; use pbs_config::tape_encryption_keys::{ - TAPE_KEYS_LOCKFILE, - load_keys, - load_key_configs, - save_keys, - save_key_configs, - insert_key, + insert_key, load_key_configs, load_keys, save_key_configs, save_keys, TAPE_KEYS_LOCKFILE, }; #[api( @@ -44,7 +37,6 @@ pub fn list_keys( _info: &ApiMethod, mut rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { - let (key_map, digest) = load_key_configs()?; let mut list = Vec::new(); @@ -106,13 +98,15 @@ pub fn change_passphrase( force: bool, fingerprint: Fingerprint, digest: Option, - rpcenv: &mut dyn RpcEnvironment + rpcenv: &mut dyn RpcEnvironment, ) -> Result<(), Error> { - let kdf = kdf.unwrap_or_default(); if let Kdf::None = kdf { - param_bail!("kdf", format_err!("Please specify a key derivation function (none is not allowed here).")); + param_bail!( + "kdf", + format_err!("Please specify a key derivation function (none is not allowed here).") + ); } let _lock = open_backup_lockfile(TAPE_KEYS_LOCKFILE, None, true)?; @@ -126,7 +120,11 @@ pub fn change_passphrase( let key_config = match config_map.get(&fingerprint) { Some(key_config) => key_config, - None => http_bail!(NOT_FOUND, "tape encryption key configuration '{}' does not exist.", fingerprint), + None => http_bail!( + NOT_FOUND, + "tape encryption key configuration '{}' does not exist.", + fingerprint + ), }; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -137,15 +135,25 @@ pub fn change_passphrase( } let (key, created, fingerprint) = 
match (force, &password) { - (true, Some(_)) => param_bail!("password", format_err!("password is not allowed when using force")), + (true, Some(_)) => param_bail!( + "password", + format_err!("password is not allowed when using force") + ), (false, None) => param_bail!("password", format_err!("missing parameter: password")), (false, Some(pass)) => key_config.decrypt(&|| Ok(pass.as_bytes().to_vec()))?, (true, None) => { - let key = load_keys()?.0.get(&fingerprint).ok_or_else(|| { - format_err!("failed to reset passphrase, could not find key '{}'", fingerprint) - })?.key; + let key = load_keys()? + .0 + .get(&fingerprint) + .ok_or_else(|| { + format_err!( + "failed to reset passphrase, could not find key '{}'", + fingerprint + ) + })? + .key; - (key, key_config.created, fingerprint) + (key, key_config.created, fingerprint) } }; @@ -189,13 +197,15 @@ pub fn create_key( kdf: Option, password: String, hint: String, - _rpcenv: &mut dyn RpcEnvironment + _rpcenv: &mut dyn RpcEnvironment, ) -> Result { - let kdf = kdf.unwrap_or_default(); if let Kdf::None = kdf { - param_bail!("kdf", format_err!("Please specify a key derivation function (none is not allowed here).")); + param_bail!( + "kdf", + format_err!("Please specify a key derivation function (none is not allowed here).") + ); } let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?; @@ -208,7 +218,6 @@ pub fn create_key( Ok(fingerprint) } - #[api( input: { properties: { @@ -229,12 +238,15 @@ pub fn read_key( fingerprint: Fingerprint, _rpcenv: &mut dyn RpcEnvironment, ) -> Result { - let (config_map, _digest) = load_key_configs()?; let key_config = match config_map.get(&fingerprint) { Some(key_config) => key_config, - None => http_bail!(NOT_FOUND, "tape encryption key '{}' does not exist.", fingerprint), + None => http_bail!( + NOT_FOUND, + "tape encryption key '{}' does not exist.", + fingerprint + ), }; if key_config.kdf.is_none() { @@ -280,8 +292,14 @@ pub fn delete_key( } match config_map.get(&fingerprint) { - Some(_) => { config_map.remove(&fingerprint); }, - None => http_bail!(NOT_FOUND, "tape encryption key '{}' does not exist.", fingerprint), + Some(_) => { + config_map.remove(&fingerprint); + } + None => http_bail!( + NOT_FOUND, + "tape encryption key '{}' does not exist.", + fingerprint + ), } save_key_configs(config_map)?; diff --git a/src/tape/changer/mod.rs b/src/tape/changer/mod.rs index a6bc7aa8..0cb42486 100644 --- a/src/tape/changer/mod.rs +++ b/src/tape/changer/mod.rs @@ -9,15 +9,14 @@ use std::path::PathBuf; use anyhow::{bail, Error}; -use proxmox_sys::fs::{CreateOptions, replace_file, file_read_optional_string}; +use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions}; -use pbs_api_types::{ScsiTapeChanger, LtoTapeDrive}; +use pbs_api_types::{LtoTapeDrive, ScsiTapeChanger}; -use pbs_tape::{sg_pt_changer, MtxStatus, ElementStatus}; +use pbs_tape::{sg_pt_changer, ElementStatus, MtxStatus}; /// Interface to SCSI changer devices pub trait ScsiMediaChange { - fn status(&mut self, use_cache: bool) -> Result; fn load_slot(&mut self, from_slot: u64, drivenum: u64) -> Result; @@ -29,7 +28,6 @@ pub trait ScsiMediaChange { /// Interface to the media changer device for a single drive pub trait MediaChange { - /// Drive number inside changer fn drive_number(&self) -> u64; @@ -55,9 +53,11 @@ pub trait MediaChange { /// slots. Also, you cannot load cleaning units with this /// interface. 
fn load_media(&mut self, label_text: &str) -> Result { - if label_text.starts_with("CLN") { - bail!("unable to load media '{}' (seems to be a cleaning unit)", label_text); + bail!( + "unable to load media '{}' (seems to be a cleaning unit)", + label_text + ); } let mut status = self.status()?; @@ -69,17 +69,21 @@ pub trait MediaChange { if let ElementStatus::VolumeTag(ref tag) = drive_status.status { if *tag == label_text { if i as u64 != self.drive_number() { - bail!("unable to load media '{}' - media in wrong drive ({} != {})", - label_text, i, self.drive_number()); + bail!( + "unable to load media '{}' - media in wrong drive ({} != {})", + label_text, + i, + self.drive_number() + ); } - return Ok(status) // already loaded + return Ok(status); // already loaded } } if i as u64 == self.drive_number() { match drive_status.status { - ElementStatus::Empty => { /* OK */ }, + ElementStatus::Empty => { /* OK */ } _ => unload_drive = true, - } + } } } @@ -92,9 +96,12 @@ pub trait MediaChange { if let ElementStatus::VolumeTag(ref tag) = slot_info.status { if tag == label_text { if slot_info.import_export { - bail!("unable to load media '{}' - inside import/export slot", label_text); + bail!( + "unable to load media '{}' - inside import/export slot", + label_text + ); } - slot = Some(i+1); + slot = Some(i + 1); break; } } @@ -127,9 +134,13 @@ pub trait MediaChange { } for slot_info in status.slots.iter() { - if slot_info.import_export { continue; } + if slot_info.import_export { + continue; + } if let ElementStatus::VolumeTag(ref tag) = slot_info.status { - if tag.starts_with("CLN") { continue; } + if tag.starts_with("CLN") { + continue; + } list.push(tag.clone()); } } @@ -147,15 +158,19 @@ pub trait MediaChange { // Unload drive first. Note: This also unloads a loaded cleaning tape if let Some(drive_status) = status.drives.get(self.drive_number() as usize) { match drive_status.status { - ElementStatus::Empty => { /* OK */ }, - _ => { status = self.unload_to_free_slot(status)?; } + ElementStatus::Empty => { /* OK */ } + _ => { + status = self.unload_to_free_slot(status)?; + } } } let mut cleaning_cartridge_slot = None; for (i, slot_info) in status.slots.iter().enumerate() { - if slot_info.import_export { continue; } + if slot_info.import_export { + continue; + } if let ElementStatus::VolumeTag(ref tag) = slot_info.status { if tag.starts_with("CLN") { cleaning_cartridge_slot = Some(i + 1); @@ -169,7 +184,6 @@ pub trait MediaChange { Some(cleaning_cartridge_slot) => cleaning_cartridge_slot as u64, }; - self.load_media_from_slot(cleaning_cartridge_slot)?; self.unload_media(Some(cleaning_cartridge_slot)) @@ -197,7 +211,9 @@ pub trait MediaChange { for (i, slot_info) in status.slots.iter().enumerate() { if slot_info.import_export { - if to.is_some() { continue; } + if to.is_some() { + continue; + } if let ElementStatus::Empty = slot_info.status { to = Some(i as u64 + 1); } @@ -214,7 +230,7 @@ pub trait MediaChange { self.unload_media(Some(to))?; Ok(Some(to)) } - None => bail!("unable to find free export slot"), + None => bail!("unable to find free export slot"), } } else { match (from, to) { @@ -234,7 +250,6 @@ pub trait MediaChange { /// /// Note: This method consumes status - so please use returned status afterward. 
fn unload_to_free_slot(&mut self, status: MtxStatus) -> Result { - let drive_status = &status.drives[self.drive_number() as usize]; if let Some(slot) = drive_status.loaded_slot { // check if original slot is empty/usable @@ -248,7 +263,10 @@ pub trait MediaChange { if let Some(slot) = status.find_free_slot(false) { self.unload_media(Some(slot)) } else { - bail!("drive '{}' unload failure - no free slot", self.drive_name()); + bail!( + "drive '{}' unload failure - no free slot", + self.drive_name() + ); } } } @@ -256,8 +274,7 @@ pub trait MediaChange { const USE_MTX: bool = false; impl ScsiMediaChange for ScsiTapeChanger { - - fn status(&mut self, use_cache: bool) -> Result { + fn status(&mut self, use_cache: bool) -> Result { if use_cache { if let Some(state) = load_changer_state_cache(&self.name)? { return Ok(state); @@ -328,11 +345,7 @@ impl ScsiMediaChange for ScsiTapeChanger { } } -fn save_changer_state_cache( - changer: &str, - state: &MtxStatus, -) -> Result<(), Error> { - +fn save_changer_state_cache(changer: &str, state: &MtxStatus) -> Result<(), Error> { let mut path = PathBuf::from(crate::tape::CHANGER_STATE_DIR); path.push(changer); @@ -377,7 +390,6 @@ pub struct MtxMediaChanger { } impl MtxMediaChanger { - pub fn with_drive_config(drive_config: &LtoTapeDrive) -> Result { let (config, _digest) = pbs_config::drive::config()?; let changer_config: ScsiTapeChanger = match drive_config.changer { @@ -394,7 +406,6 @@ impl MtxMediaChanger { } impl MediaChange for MtxMediaChanger { - fn drive_number(&self) -> u64 { self.drive_number } diff --git a/src/tape/changer/mtx/mtx_wrapper.rs b/src/tape/changer/mtx/mtx_wrapper.rs index 5dbe4fa3..f4e370ed 100644 --- a/src/tape/changer/mtx/mtx_wrapper.rs +++ b/src/tape/changer/mtx/mtx_wrapper.rs @@ -1,14 +1,10 @@ use anyhow::Error; -use proxmox_sys::command::run_command; use pbs_api_types::ScsiTapeChanger; use pbs_tape::MtxStatus; +use proxmox_sys::command::run_command; -use crate::{ - tape::changer::{ - mtx::parse_mtx_status, - }, -}; +use crate::tape::changer::mtx::parse_mtx_status; /// Run 'mtx status' and return parsed result. 
pub fn mtx_status(config: &ScsiTapeChanger) -> Result { @@ -27,12 +23,7 @@ pub fn mtx_status(config: &ScsiTapeChanger) -> Result { } /// Run 'mtx load' -pub fn mtx_load( - path: &str, - slot: u64, - drivenum: u64, -) -> Result<(), Error> { - +pub fn mtx_load(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> { let mut command = std::process::Command::new("mtx"); command.args(&["-f", path, "load", &slot.to_string(), &drivenum.to_string()]); run_command(command, None)?; @@ -41,28 +32,30 @@ pub fn mtx_load( } /// Run 'mtx unload' -pub fn mtx_unload( - path: &str, - slot: u64, - drivenum: u64, -) -> Result<(), Error> { - +pub fn mtx_unload(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> { let mut command = std::process::Command::new("mtx"); - command.args(&["-f", path, "unload", &slot.to_string(), &drivenum.to_string()]); + command.args(&[ + "-f", + path, + "unload", + &slot.to_string(), + &drivenum.to_string(), + ]); run_command(command, None)?; Ok(()) } /// Run 'mtx transfer' -pub fn mtx_transfer( - path: &str, - from_slot: u64, - to_slot: u64, -) -> Result<(), Error> { - +pub fn mtx_transfer(path: &str, from_slot: u64, to_slot: u64) -> Result<(), Error> { let mut command = std::process::Command::new("mtx"); - command.args(&["-f", path, "transfer", &from_slot.to_string(), &to_slot.to_string()]); + command.args(&[ + "-f", + path, + "transfer", + &from_slot.to_string(), + &to_slot.to_string(), + ]); run_command(command, None)?; diff --git a/src/tape/changer/mtx/parse_mtx_status.rs b/src/tape/changer/mtx/parse_mtx_status.rs index 389c744d..40bfd714 100644 --- a/src/tape/changer/mtx/parse_mtx_status.rs +++ b/src/tape/changer/mtx/parse_mtx_status.rs @@ -1,17 +1,15 @@ use anyhow::Error; -use nom::bytes::complete::{take_while, tag}; +use nom::bytes::complete::{tag, take_while}; -use pbs_tape::{ElementStatus, MtxStatus, DriveStatus, StorageElementStatus}; +use pbs_tape::{DriveStatus, ElementStatus, MtxStatus, StorageElementStatus}; use pbs_tools::nom::{ - parse_complete, multispace0, multispace1, parse_u64, - parse_failure, parse_error, IResult, + multispace0, multispace1, parse_complete, parse_error, parse_failure, parse_u64, IResult, }; - // Recognizes one line -fn next_line(i: &str) -> IResult<&str, &str> { +fn next_line(i: &str) -> IResult<&str, &str> { let (i, line) = take_while(|c| (c != '\n'))(i)?; if i.is_empty() { Ok((i, line)) @@ -21,7 +19,6 @@ fn next_line(i: &str) -> IResult<&str, &str> { } fn parse_storage_changer(i: &str) -> IResult<&str, ()> { - let (i, _) = multispace0(i)?; let (i, _) = tag("Storage Changer")(i)?; let (i, _) = next_line(i)?; // skip @@ -30,7 +27,6 @@ fn parse_storage_changer(i: &str) -> IResult<&str, ()> { } fn parse_drive_status(i: &str, id: u64) -> IResult<&str, DriveStatus> { - let mut loaded_slot = None; if let Some(empty) = i.strip_prefix("Empty") { @@ -87,14 +83,13 @@ fn parse_drive_status(i: &str, id: u64) -> IResult<&str, DriveStatus> { fn parse_slot_status(i: &str) -> IResult<&str, ElementStatus> { if let Some(empty) = i.strip_prefix("Empty") { - return Ok((empty, ElementStatus::Empty)); + return Ok((empty, ElementStatus::Empty)); } if let Some(n) = i.strip_prefix("Full ") { if let Some(n) = n.strip_prefix(":VolumeTag=") { let (n, tag) = take_while(|c| !(c == ' ' || c == ':' || c == '\n'))(n)?; let (n, _) = take_while(|c| c != '\n')(n)?; // skip to eol return Ok((n, ElementStatus::VolumeTag(tag.to_string()))); - } let (n, _) = take_while(|c| c != '\n')(n)?; // skip @@ -105,7 +100,6 @@ fn parse_slot_status(i: &str) -> IResult<&str, 
ElementStatus> { } fn parse_data_transfer_element(i: &str) -> IResult<&str, (u64, DriveStatus)> { - let (i, _) = tag("Data Transfer Element")(i)?; let (i, _) = multispace1(i)?; let (i, id) = parse_u64(i)?; @@ -117,13 +111,12 @@ fn parse_data_transfer_element(i: &str) -> IResult<&str, (u64, DriveStatus)> { } fn parse_storage_element(i: &str) -> IResult<&str, (u64, bool, ElementStatus)> { - let (i, _) = multispace1(i)?; let (i, _) = tag("Storage Element")(i)?; let (i, _) = multispace1(i)?; let (i, id) = parse_u64(i)?; let (i, opt_ie) = nom::combinator::opt(tag(" IMPORT/EXPORT"))(i)?; - let import_export = opt_ie.is_some(); + let import_export = opt_ie.is_some(); let (i, _) = nom::character::complete::char(':')(i)?; let (i, element_status) = parse_slot_status(i)?; let (i, _) = nom::character::complete::newline(i)?; @@ -131,8 +124,7 @@ fn parse_storage_element(i: &str) -> IResult<&str, (u64, bool, ElementStatus)> { Ok((i, (id, import_export, element_status))) } -fn parse_status(i: &str) -> IResult<&str, MtxStatus> { - +fn parse_status(i: &str) -> IResult<&str, MtxStatus> { let (mut i, _) = parse_storage_changer(i)?; let mut drives = Vec::new(); @@ -158,14 +150,17 @@ fn parse_status(i: &str) -> IResult<&str, MtxStatus> { slots.push(status); } - let status = MtxStatus { drives, slots, transports: Vec::new() }; + let status = MtxStatus { + drives, + slots, + transports: Vec::new(), + }; Ok((i, status)) } /// Parses the output from 'mtx status' pub fn parse_mtx_status(i: &str) -> Result { - let status = parse_complete("mtx status", i, parse_status)?; Ok(status) @@ -173,7 +168,6 @@ pub fn parse_mtx_status(i: &str) -> Result { #[test] fn test_changer_status() -> Result<(), Error> { - let output = r###" Storage Changer /dev/tape/by-id/scsi-387408F60F0000:2 Drives, 24 Slots ( 4 Import/Export ) Data Transfer Element 0:Empty Data Transfer Element 1:Empty diff --git a/src/tape/changer/online_status_map.rs b/src/tape/changer/online_status_map.rs index 3284e0e1..a8c2e5e6 100644 --- a/src/tape/changer/online_status_map.rs +++ b/src/tape/changer/online_status_map.rs @@ -1,16 +1,16 @@ -use std::path::Path; use std::collections::{HashMap, HashSet}; +use std::path::Path; use anyhow::{bail, Error}; use proxmox_section_config::SectionConfigData; use proxmox_uuid::Uuid; -use pbs_api_types::{VirtualTapeDrive, ScsiTapeChanger}; +use pbs_api_types::{ScsiTapeChanger, VirtualTapeDrive}; use pbs_tape::{ElementStatus, MtxStatus}; -use crate::tape::Inventory; use crate::tape::changer::{MediaChange, ScsiMediaChange}; +use crate::tape::Inventory; /// Helper to update media online status /// @@ -23,13 +23,11 @@ pub struct OnlineStatusMap { } impl OnlineStatusMap { - /// Creates a new instance with one map entry for each configured /// changer (or 'VirtualTapeDrive', which has an internal /// changer). The map entry is set to 'None' to indicate that we /// do not have information about the online status. pub fn new(config: &SectionConfigData) -> Result { - let mut map = HashMap::new(); let changers: Vec = config.convert_to_typed_array("changer")?; @@ -42,7 +40,10 @@ impl OnlineStatusMap { map.insert(vtape.name.clone(), None); } - Ok(Self { map, changer_map: HashMap::new() }) + Ok(Self { + map, + changer_map: HashMap::new(), + }) } /// Returns the assiciated changer name for a media. 
@@ -61,11 +62,14 @@ impl OnlineStatusMap { } /// Update the online set for the specified changer - pub fn update_online_status(&mut self, changer_name: &str, online_set: HashSet) -> Result<(), Error> { - + pub fn update_online_status( + &mut self, + changer_name: &str, + online_set: HashSet, + ) -> Result<(), Error> { match self.map.get(changer_name) { None => bail!("no such changer '{}' device", changer_name), - Some(None) => { /* Ok */ }, + Some(None) => { /* Ok */ } Some(Some(_)) => { // do not allow updates to keep self.changer_map consistent bail!("update_online_status '{}' called twice", changer_name); @@ -73,7 +77,8 @@ impl OnlineStatusMap { } for uuid in online_set.iter() { - self.changer_map.insert(uuid.clone(), changer_name.to_string()); + self.changer_map + .insert(uuid.clone(), changer_name.to_string()); } self.map.insert(changer_name.to_string(), Some(online_set)); @@ -87,7 +92,6 @@ impl OnlineStatusMap { /// Returns a HashSet containing all found media Uuid. This only /// returns media found in Inventory. pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> HashSet { - let mut online_set = HashSet::new(); for drive_status in status.drives.iter() { @@ -99,7 +103,9 @@ pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> Ha } for slot_info in status.slots.iter() { - if slot_info.import_export { continue; } + if slot_info.import_export { + continue; + } if let ElementStatus::VolumeTag(ref label_text) = slot_info.status { if let Some(media_id) = inventory.find_media_by_label_text(label_text) { online_set.insert(media_id.label.uuid.clone()); @@ -113,8 +119,10 @@ pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> Ha /// Update online media status /// /// For a single 'changer', or else simply ask all changer devices. -pub fn update_online_status(state_path: &Path, changer: Option<&str>) -> Result { - +pub fn update_online_status( + state_path: &Path, + changer: Option<&str>, +) -> Result { let (config, _digest) = pbs_config::drive::config()?; let mut inventory = Inventory::load(state_path)?; @@ -135,7 +143,10 @@ pub fn update_online_status(state_path: &Path, changer: Option<&str>) -> Result< let status = match changer_config.status(false) { Ok(status) => status, Err(err) => { - eprintln!("unable to get changer '{}' status - {}", changer_config.name, err); + eprintln!( + "unable to get changer '{}' status - {}", + changer_config.name, err + ); continue; } }; @@ -172,7 +183,10 @@ pub fn update_online_status(state_path: &Path, changer: Option<&str>) -> Result< if let Some(changer) = changer { if !found_changer { - bail!("update_online_status failed - no such changer '{}'", changer); + bail!( + "update_online_status failed - no such changer '{}'", + changer + ); } } @@ -188,7 +202,6 @@ pub fn update_changer_online_status( changer_name: &str, label_text_list: &[String], ) -> Result<(), Error> { - let mut online_map = OnlineStatusMap::new(drive_config)?; let mut online_set = HashSet::new(); for label_text in label_text_list.iter() { diff --git a/src/tape/drive/lto/mod.rs b/src/tape/drive/lto/mod.rs index 5f4bc12e..95b7ee1f 100644 --- a/src/tape/drive/lto/mod.rs +++ b/src/tape/drive/lto/mod.rs @@ -11,30 +11,28 @@ //! //! 
- unability to detect EOT (you just get EIO) +use std::convert::TryInto; use std::fs::File; use std::os::unix::io::{AsRawFd, FromRawFd, RawFd}; -use std::convert::TryInto; use anyhow::{bail, format_err, Error}; use proxmox_uuid::Uuid; use pbs_api_types::{ - Fingerprint, MamAttribute, LtoDriveAndMediaStatus, LtoTapeDrive, Lp17VolumeStatistics, + Fingerprint, Lp17VolumeStatistics, LtoDriveAndMediaStatus, LtoTapeDrive, MamAttribute, }; use pbs_config::key_config::KeyConfig; -use proxmox_sys::command::run_command; use pbs_tape::{ - TapeWrite, TapeRead, BlockReadError, MediaContentHeader, - sg_tape::{SgTape, TapeAlertFlags}, linux_list_drives::open_lto_tape_device, + sg_tape::{SgTape, TapeAlertFlags}, + BlockReadError, MediaContentHeader, TapeRead, TapeWrite, }; +use proxmox_sys::command::run_command; -use crate::{ - tape::{ - drive::TapeDriver, - file_formats::{PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, MediaSetLabel}, - }, +use crate::tape::{ + drive::TapeDriver, + file_formats::{MediaSetLabel, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0}, }; /// Open a tape device @@ -46,7 +44,6 @@ use crate::{ /// - check block size /// - for autoloader only, try to reload ejected tapes pub fn open_lto_tape_drive(config: &LtoTapeDrive) -> Result { - proxmox_lang::try_block!({ let file = open_lto_tape_device(&config.path)?; @@ -64,7 +61,15 @@ pub fn open_lto_tape_drive(config: &LtoTapeDrive) -> Result Result { let sg_tape = SgTape::new(file)?; @@ -93,7 +97,8 @@ impl LtoTapeHandle { block_length: Option, buffer_mode: Option, ) -> Result<(), Error> { - self.sg_tape.set_drive_options(compression, block_length, buffer_mode) + self.sg_tape + .set_drive_options(compression, block_length, buffer_mode) } /// Write a single EOF mark without flushing buffers @@ -102,7 +107,7 @@ impl LtoTapeHandle { } /// Get Tape and Media status - pub fn get_drive_and_media_status(&mut self) -> Result { + pub fn get_drive_and_media_status(&mut self) -> Result { self.sg_tape.get_drive_and_media_status() } @@ -123,7 +128,7 @@ impl LtoTapeHandle { } /// Position the tape after filemark count. Count 0 means BOT. 
- pub fn locate_file(&mut self, position: u64) -> Result<(), Error> { + pub fn locate_file(&mut self, position: u64) -> Result<(), Error> { self.sg_tape.locate_file(position) } @@ -131,14 +136,14 @@ impl LtoTapeHandle { self.sg_tape.erase_media(fast) } - pub fn load(&mut self) -> Result<(), Error> { + pub fn load(&mut self) -> Result<(), Error> { self.sg_tape.load() } /// Read Cartridge Memory (MAM Attributes) pub fn cartridge_memory(&mut self) -> Result, Error> { self.sg_tape.cartridge_memory() - } + } /// Read Volume Statistics pub fn volume_statistics(&mut self) -> Result { @@ -146,21 +151,21 @@ impl LtoTapeHandle { } /// Lock the drive door - pub fn lock(&mut self) -> Result<(), Error> { - self.sg_tape.set_medium_removal(false) + pub fn lock(&mut self) -> Result<(), Error> { + self.sg_tape + .set_medium_removal(false) .map_err(|err| format_err!("lock door failed - {}", err)) } /// Unlock the drive door - pub fn unlock(&mut self) -> Result<(), Error> { - self.sg_tape.set_medium_removal(true) + pub fn unlock(&mut self) -> Result<(), Error> { + self.sg_tape + .set_medium_removal(true) .map_err(|err| format_err!("unlock door failed - {}", err)) } } - impl TapeDriver for LtoTapeHandle { - fn sync(&mut self) -> Result<(), Error> { self.sg_tape.sync()?; Ok(()) @@ -172,7 +177,6 @@ impl TapeDriver for LtoTapeHandle { } fn move_to_last_file(&mut self) -> Result<(), Error> { - self.move_to_eom(false)?; self.sg_tape.check_filemark()?; @@ -226,7 +230,6 @@ impl TapeDriver for LtoTapeHandle { media_set_label: &MediaSetLabel, key_config: Option<&KeyConfig>, ) -> Result<(), Error> { - let file_number = self.current_file_number()?; if file_number != 1 { self.rewind()?; @@ -235,12 +238,16 @@ impl TapeDriver for LtoTapeHandle { let file_number = self.current_file_number()?; if file_number != 1 { - bail!("write_media_set_label failed - got wrong file number ({} != 1)", file_number); + bail!( + "write_media_set_label failed - got wrong file number ({} != 1)", + file_number + ); } self.set_encryption(None)?; - { // limit handle scope + { + // limit handle scope let mut handle = self.write_file()?; let mut value = serde_json::to_value(media_set_label)?; @@ -257,7 +264,8 @@ impl TapeDriver for LtoTapeHandle { let raw = serde_json::to_string_pretty(&value)?; - let header = MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, raw.len() as u32); + let header = + MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, raw.len() as u32); handle.write_header(&header, raw.as_bytes())?; handle.finish(false)?; } @@ -285,15 +293,11 @@ impl TapeDriver for LtoTapeHandle { &mut self, key_fingerprint: Option<(Fingerprint, Uuid)>, ) -> Result<(), Error> { - if nix::unistd::Uid::effective().is_root() { - if let Some((ref key_fingerprint, ref uuid)) = key_fingerprint { - let (key_map, _digest) = pbs_config::tape_encryption_keys::load_keys()?; match key_map.get(key_fingerprint) { Some(item) => { - // derive specialized key for each media-set let mut tape_key = [0u8; 32]; @@ -305,7 +309,8 @@ impl TapeDriver for LtoTapeHandle { &uuid_bytes, 10, openssl::hash::MessageDigest::sha256(), - &mut tape_key)?; + &mut tape_key, + )?; return self.sg_tape.set_encryption(Some(tape_key)); } @@ -318,10 +323,11 @@ impl TapeDriver for LtoTapeHandle { let output = if let Some((fingerprint, uuid)) = key_fingerprint { let fingerprint = fingerprint.signature(); - run_sg_tape_cmd("encryption", &[ - "--fingerprint", &fingerprint, - "--uuid", &uuid.to_string(), - ], self.sg_tape.file_mut().as_raw_fd())? 
+ run_sg_tape_cmd( + "encryption", + &["--fingerprint", &fingerprint, "--uuid", &uuid.to_string()], + self.sg_tape.file_mut().as_raw_fd(), + )? } else { run_sg_tape_cmd("encryption", &[], self.sg_tape.file_mut().as_raw_fd())? }; @@ -331,12 +337,12 @@ impl TapeDriver for LtoTapeHandle { } fn run_sg_tape_cmd(subcmd: &str, args: &[&str], fd: RawFd) -> Result { - let mut command = std::process::Command::new( - "/usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd"); + let mut command = + std::process::Command::new("/usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd"); command.args(&[subcmd]); command.args(&["--stdin"]); command.args(args); let device_fd = nix::unistd::dup(fd)?; - command.stdin(unsafe { std::process::Stdio::from_raw_fd(device_fd)}); + command.stdin(unsafe { std::process::Stdio::from_raw_fd(device_fd) }); run_command(command, None) } diff --git a/src/tape/drive/mod.rs b/src/tape/drive/mod.rs index eb068c4e..d72021c1 100644 --- a/src/tape/drive/mod.rs +++ b/src/tape/drive/mod.rs @@ -8,55 +8,40 @@ pub use lto::*; use std::path::PathBuf; use anyhow::{bail, format_err, Error}; -use serde::Deserialize; -use serde_json::Value; use nix::fcntl::OFlag; use nix::sys::stat::Mode; +use serde::Deserialize; +use serde_json::Value; use proxmox_sys::fs::{ - lock_file, - atomic_open_or_create_file, - file_read_optional_string, - replace_file, - CreateOptions, + atomic_open_or_create_file, file_read_optional_string, lock_file, replace_file, CreateOptions, }; use proxmox_io::ReadExt; use proxmox_section_config::SectionConfigData; -use proxmox_uuid::Uuid; use proxmox_sys::{task_log, WorkerTaskContext}; +use proxmox_uuid::Uuid; -use pbs_api_types::{VirtualTapeDrive, LtoTapeDrive, Fingerprint}; +use pbs_api_types::{Fingerprint, LtoTapeDrive, VirtualTapeDrive}; use pbs_config::key_config::KeyConfig; -use pbs_tape::{ - TapeWrite, TapeRead, BlockReadError, MediaContentHeader, - sg_tape::TapeAlertFlags, -}; +use pbs_tape::{sg_tape::TapeAlertFlags, BlockReadError, MediaContentHeader, TapeRead, TapeWrite}; use crate::{ server::send_load_media_email, tape::{ - MediaId, - drive::{ - virtual_tape::open_virtual_tape_drive, - }, + changer::{MediaChange, MtxMediaChanger}, + drive::virtual_tape::open_virtual_tape_drive, file_formats::{ - PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, + MediaLabel, MediaSetLabel, PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, - MediaLabel, - MediaSetLabel, - }, - changer::{ - MediaChange, - MtxMediaChanger, }, + MediaId, }, }; /// Tape driver interface pub trait TapeDriver { - /// Flush all data to the tape fn sync(&mut self) -> Result<(), Error>; @@ -90,14 +75,14 @@ pub trait TapeDriver { /// Write label to tape (erase tape content) fn label_tape(&mut self, label: &MediaLabel) -> Result<(), Error> { - self.set_encryption(None)?; self.format_media(true)?; // this rewinds the tape let raw = serde_json::to_string_pretty(&serde_json::to_value(&label)?)?; - let header = MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, raw.len() as u32); + let header = + MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, raw.len() as u32); { let mut writer = self.write_file()?; @@ -125,7 +110,6 @@ pub trait TapeDriver { /// This tries to read both media labels (label and /// media_set_label). Also returns the optional encryption key configuration. fn read_label(&mut self) -> Result<(Option, Option), Error> { - self.rewind()?; let label = { @@ -143,7 +127,7 @@ pub trait TapeDriver { }; let header: MediaContentHeader = unsafe { reader.read_le_value()? 
}; - header.check(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, 1, 64*1024)?; + header.check(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, 1, 64 * 1024)?; let data = reader.read_exact_allocated(header.size as usize)?; let label: MediaLabel = serde_json::from_slice(&data) @@ -157,7 +141,10 @@ pub trait TapeDriver { label }; - let mut media_id = MediaId { label, media_set_label: None }; + let mut media_id = MediaId { + label, + media_set_label: None, + }; // try to read MediaSet label let mut reader = match self.read_next_file() { @@ -174,7 +161,7 @@ pub trait TapeDriver { }; let header: MediaContentHeader = unsafe { reader.read_le_value()? }; - header.check(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, 1, 64*1024)?; + header.check(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, 1, 64 * 1024)?; let data = reader.read_exact_allocated(header.size as usize)?; let mut data: Value = serde_json::from_slice(&data) @@ -238,28 +225,25 @@ pub fn media_changer( config: &SectionConfigData, drive: &str, ) -> Result, String)>, Error> { - match config.sections.get(drive) { - Some((section_type_name, config)) => { - match section_type_name.as_ref() { - "virtual" => { - let tape = VirtualTapeDrive::deserialize(config)?; - Ok(Some((Box::new(tape), drive.to_string()))) - } - "lto" => { - let drive_config = LtoTapeDrive::deserialize(config)?; - match drive_config.changer { - Some(ref changer_name) => { - let changer = MtxMediaChanger::with_drive_config(&drive_config)?; - let changer_name = changer_name.to_string(); - Ok(Some((Box::new(changer), changer_name))) - } - None => Ok(None), - } - } - ty => bail!("unknown drive type '{}' - internal error", ty), + Some((section_type_name, config)) => match section_type_name.as_ref() { + "virtual" => { + let tape = VirtualTapeDrive::deserialize(config)?; + Ok(Some((Box::new(tape), drive.to_string()))) } - } + "lto" => { + let drive_config = LtoTapeDrive::deserialize(config)?; + match drive_config.changer { + Some(ref changer_name) => { + let changer = MtxMediaChanger::with_drive_config(&drive_config)?; + let changer_name = changer_name.to_string(); + Ok(Some((Box::new(changer), changer_name))) + } + None => Ok(None), + } + } + ty => bail!("unknown drive type '{}' - internal error", ty), + }, None => { bail!("no such drive '{}'", drive); } @@ -274,40 +258,30 @@ pub fn required_media_changer( drive: &str, ) -> Result<(Box, String), Error> { match media_changer(config, drive) { - Ok(Some(result)) => { - Ok(result) - } + Ok(Some(result)) => Ok(result), Ok(None) => { bail!("drive '{}' has no associated changer device", drive); - }, - Err(err) => { - Err(err) } + Err(err) => Err(err), } } /// Opens a tape drive (this fails if there is no media loaded) -pub fn open_drive( - config: &SectionConfigData, - drive: &str, -) -> Result, Error> { - +pub fn open_drive(config: &SectionConfigData, drive: &str) -> Result, Error> { match config.sections.get(drive) { - Some((section_type_name, config)) => { - match section_type_name.as_ref() { - "virtual" => { - let tape = VirtualTapeDrive::deserialize(config)?; - let handle = open_virtual_tape_drive(&tape)?; - Ok(Box::new(handle)) - } - "lto" => { - let tape = LtoTapeDrive::deserialize(config)?; - let handle = open_lto_tape_drive(&tape)?; - Ok(Box::new(handle)) - } - ty => bail!("unknown drive type '{}' - internal error", ty), + Some((section_type_name, config)) => match section_type_name.as_ref() { + "virtual" => { + let tape = VirtualTapeDrive::deserialize(config)?; + let handle = open_virtual_tape_drive(&tape)?; + Ok(Box::new(handle)) } - } + "lto" => { + let 
tape = LtoTapeDrive::deserialize(config)?; + let handle = open_lto_tape_drive(&tape)?; + Ok(Box::new(handle)) + } + ty => bail!("unknown drive type '{}' - internal error", ty), + }, None => { bail!("no such drive '{}'", drive); } @@ -328,7 +302,7 @@ impl std::fmt::Display for TapeRequestError { match self { TapeRequestError::None => { write!(f, "no error") - }, + } TapeRequestError::OpenFailed(reason) => { write!(f, "tape open failed - {}", reason) } @@ -336,7 +310,10 @@ impl std::fmt::Display for TapeRequestError { write!(f, "wrong media label {}", label) } TapeRequestError::EmptyTape => { - write!(f, "found empty media without label (please label all tapes first)") + write!( + f, + "found empty media without label (please label all tapes first)" + ) } TapeRequestError::ReadFailed(reason) => { write!(f, "tape read failed - {}", reason) @@ -356,11 +333,7 @@ pub fn request_and_load_media( drive: &str, label: &MediaLabel, notify_email: &Option, -) -> Result<( - Box, - MediaId, -), Error> { - +) -> Result<(Box, MediaId), Error> { let check_label = |handle: &mut dyn TapeDriver, uuid: &proxmox_uuid::Uuid| { if let Ok((Some(media_id), _)) = handle.read_label() { task_log!( @@ -399,13 +372,18 @@ pub fn request_and_load_media( let label_text = label.label_text.clone(); if drive_config.changer.is_some() { - - task_log!(worker, "loading media '{}' into drive '{}'", label_text, drive); + task_log!( + worker, + "loading media '{}' into drive '{}'", + label_text, + drive + ); let mut changer = MtxMediaChanger::with_drive_config(&drive_config)?; changer.load_media(&label_text)?; - let mut handle: Box = Box::new(open_lto_tape_drive(&drive_config)?); + let mut handle: Box = + Box::new(open_lto_tape_drive(&drive_config)?); let media_id = check_label(handle.as_mut(), &label.uuid)?; @@ -415,34 +393,34 @@ pub fn request_and_load_media( let mut last_error = TapeRequestError::None; let update_and_log_request_error = - |old: &mut TapeRequestError, new: TapeRequestError| -> Result<(), Error> - { - if new != *old { - task_log!(worker, "{}", new); - task_log!( - worker, - "Please insert media '{}' into drive '{}'", - label_text, - drive - ); - if let Some(to) = notify_email { - send_load_media_email( - drive, - &label_text, - to, - Some(new.to_string()), - )?; + |old: &mut TapeRequestError, new: TapeRequestError| -> Result<(), Error> { + if new != *old { + task_log!(worker, "{}", new); + task_log!( + worker, + "Please insert media '{}' into drive '{}'", + label_text, + drive + ); + if let Some(to) = notify_email { + send_load_media_email( + drive, + &label_text, + to, + Some(new.to_string()), + )?; + } + *old = new; } - *old = new; - } - Ok(()) - }; + Ok(()) + }; loop { worker.check_abort()?; if last_error != TapeRequestError::None { - for _ in 0..50 { // delay 5 seconds + for _ in 0..50 { + // delay 5 seconds worker.check_abort()?; std::thread::sleep(std::time::Duration::from_millis(100)); } @@ -484,12 +462,8 @@ pub fn request_and_load_media( ); TapeRequestError::WrongLabel(label_string) } - Ok((None, _)) => { - TapeRequestError::EmptyTape - } - Err(err) => { - TapeRequestError::ReadFailed(err.to_string()) - } + Ok((None, _)) => TapeRequestError::EmptyTape, + Err(err) => TapeRequestError::ReadFailed(err.to_string()), }; update_and_log_request_error(&mut last_error, request_error)?; @@ -537,11 +511,7 @@ pub fn lock_tape_device( /// Writes the given state for the specified drive /// /// This function does not lock, so make sure the drive is locked -pub fn set_tape_device_state( - drive: &str, - state: &str, -) -> 
Result<(), Error> { - +pub fn set_tape_device_state(drive: &str, state: &str) -> Result<(), Error> { let mut path = PathBuf::from(crate::tape::DRIVE_STATE_DIR); path.push(drive); @@ -571,19 +541,12 @@ pub fn get_tape_device_state( } } -fn tape_device_path( - config: &SectionConfigData, - drive: &str, -) -> Result { +fn tape_device_path(config: &SectionConfigData, drive: &str) -> Result { match config.sections.get(drive) { Some((section_type_name, config)) => { let path = match section_type_name.as_ref() { - "virtual" => { - VirtualTapeDrive::deserialize(config)?.path - } - "lto" => { - LtoTapeDrive::deserialize(config)?.path - } + "virtual" => VirtualTapeDrive::deserialize(config)?.path, + "lto" => LtoTapeDrive::deserialize(config)?.path, ty => bail!("unknown drive type '{}' - internal error", ty), }; Ok(path) @@ -622,7 +585,7 @@ fn open_device_lock(device_path: &str) -> Result { // Acquires an exclusive lock on `device_path` // fn lock_device_path(device_path: &str) -> Result { - let mut file = open_device_lock(device_path)?; + let mut file = open_device_lock(device_path)?; let timeout = std::time::Duration::new(10, 0); if let Err(err) = lock_file(&mut file, true, Some(timeout)) { if err.kind() == std::io::ErrorKind::Interrupted { @@ -638,13 +601,12 @@ fn lock_device_path(device_path: &str) -> Result // Same logic as lock_device_path, but uses a timeout of 0, making it // non-blocking, and returning if the file is locked or not fn test_device_path_lock(device_path: &str) -> Result { - - let mut file = open_device_lock(device_path)?; + let mut file = open_device_lock(device_path)?; let timeout = std::time::Duration::new(0, 0); match lock_file(&mut file, true, Some(timeout)) { // file was not locked, continue - Ok(()) => {}, + Ok(()) => {} // file was locked, return true Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => return Ok(true), Err(err) => bail!("{}", err), diff --git a/src/tape/drive/virtual_tape.rs b/src/tape/drive/virtual_tape.rs index 21f91e2b..c325b931 100644 --- a/src/tape/drive/virtual_tape.rs +++ b/src/tape/drive/virtual_tape.rs @@ -4,40 +4,19 @@ use std::fs::File; use std::io; use anyhow::{bail, format_err, Error}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use proxmox_sys::{ - fs::{replace_file, CreateOptions}, -}; +use proxmox_sys::fs::{replace_file, CreateOptions}; use pbs_config::key_config::KeyConfig; use pbs_tape::{ - TapeWrite, - TapeRead, - BlockedReader, - BlockedWriter, - BlockReadError, - MtxStatus, - DriveStatus, - ElementStatus, - StorageElementStatus, - MediaContentHeader, - EmulateTapeReader, - EmulateTapeWriter, + BlockReadError, BlockedReader, BlockedWriter, DriveStatus, ElementStatus, EmulateTapeReader, + EmulateTapeWriter, MediaContentHeader, MtxStatus, StorageElementStatus, TapeRead, TapeWrite, }; -use crate::{ - tape::{ - drive::{ - VirtualTapeDrive, - TapeDriver, - MediaChange, - }, - file_formats::{ - MediaSetLabel, - PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, - }, - }, +use crate::tape::{ + drive::{MediaChange, TapeDriver, VirtualTapeDrive}, + file_formats::{MediaSetLabel, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0}, }; /// This needs to lock the drive @@ -53,24 +32,32 @@ pub fn open_virtual_tape_drive(config: &VirtualTapeDrive) -> Result, } -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] struct TapeIndex { files: usize, } @@ -83,7 +70,6 @@ pub struct VirtualTapeHandle { } impl VirtualTapeHandle { - fn status_file_path(&self) -> std::path::PathBuf { let mut path = self.path.clone(); 
path.push("drive-status.json"); @@ -121,11 +107,11 @@ impl VirtualTapeHandle { Ok(()) } - fn truncate_tape(&self, tape_name: &str, pos: usize) -> Result { + fn truncate_tape(&self, tape_name: &str, pos: usize) -> Result { let mut index = self.load_tape_index(tape_name)?; if index.files <= pos { - return Ok(index.files) + return Ok(index.files); } for i in pos..index.files { @@ -143,9 +129,7 @@ impl VirtualTapeHandle { fn load_status(&self) -> Result { let path = self.status_file_path(); - let default = serde_json::to_value(VirtualDriveStatus { - current_tape: None, - })?; + let default = serde_json::to_value(VirtualDriveStatus { current_tape: None })?; let data = proxmox_sys::fs::file_get_json(&path, Some(default))?; let status: VirtualDriveStatus = serde_json::from_value(data)?; @@ -183,9 +167,12 @@ impl VirtualTapeHandle { fn forward_space_count_files(&mut self, count: usize) -> Result<(), Error> { let mut status = self.load_status()?; match status.current_tape { - Some(VirtualTapeStatus { ref name, ref mut pos }) => { - - let index = self.load_tape_index(name) + Some(VirtualTapeStatus { + ref name, + ref mut pos, + }) => { + let index = self + .load_tape_index(name) .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; let new_pos = *pos + count; @@ -210,7 +197,6 @@ impl VirtualTapeHandle { let mut status = self.load_status()?; match status.current_tape { Some(VirtualTapeStatus { ref mut pos, .. }) => { - if count <= *pos { *pos = *pos - count; } else { @@ -225,28 +211,26 @@ impl VirtualTapeHandle { None => bail!("drive is empty (no tape loaded)."), } } - } impl TapeDriver for VirtualTapeHandle { - fn sync(&mut self) -> Result<(), Error> { Ok(()) // do nothing for now } fn current_file_number(&mut self) -> Result { - let status = self.load_status() + let status = self + .load_status() .map_err(|err| format_err!("current_file_number failed: {}", err.to_string()))?; match status.current_tape { - Some(VirtualTapeStatus { pos, .. }) => { Ok(pos as u64)}, + Some(VirtualTapeStatus { pos, .. }) => Ok(pos as u64), None => bail!("current_file_number failed: drive is empty (no tape loaded)."), } } /// Move to last file fn move_to_last_file(&mut self) -> Result<(), Error> { - self.move_to_eom(false)?; if self.current_file_number()? 
== 0 { @@ -261,9 +245,12 @@ impl TapeDriver for VirtualTapeHandle { fn move_to_file(&mut self, file: u64) -> Result<(), Error> { let mut status = self.load_status()?; match status.current_tape { - Some(VirtualTapeStatus { ref name, ref mut pos }) => { - - let index = self.load_tape_index(name) + Some(VirtualTapeStatus { + ref name, + ref mut pos, + }) => { + let index = self + .load_tape_index(name) .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; if file as usize > index.files { @@ -282,46 +269,55 @@ impl TapeDriver for VirtualTapeHandle { } fn read_next_file(&mut self) -> Result, BlockReadError> { - let mut status = self.load_status() - .map_err(|err| BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())))?; + let mut status = self.load_status().map_err(|err| { + BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())) + })?; match status.current_tape { - Some(VirtualTapeStatus { ref name, ref mut pos }) => { - - let index = self.load_tape_index(name) - .map_err(|err| BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())))?; + Some(VirtualTapeStatus { + ref name, + ref mut pos, + }) => { + let index = self.load_tape_index(name).map_err(|err| { + BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())) + })?; if *pos >= index.files { return Err(BlockReadError::EndOfStream); } let path = self.tape_file_path(name, *pos); - let file = std::fs::OpenOptions::new() - .read(true) - .open(path)?; + let file = std::fs::OpenOptions::new().read(true).open(path)?; *pos += 1; - self.store_status(&status) - .map_err(|err| BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())))?; + self.store_status(&status).map_err(|err| { + BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())) + })?; let reader = EmulateTapeReader::new(file); let reader = BlockedReader::open(reader)?; Ok(Box::new(reader)) } None => { - return Err(BlockReadError::Error(proxmox_lang::io_format_err!("drive is empty (no tape loaded)."))); + return Err(BlockReadError::Error(proxmox_lang::io_format_err!( + "drive is empty (no tape loaded)." 
+ ))); } } } fn write_file(&mut self) -> Result, io::Error> { - let mut status = self.load_status() + let mut status = self + .load_status() .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; match status.current_tape { - Some(VirtualTapeStatus { ref name, ref mut pos }) => { - - let mut index = self.load_tape_index(name) + Some(VirtualTapeStatus { + ref name, + ref mut pos, + }) => { + let mut index = self + .load_tape_index(name) .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; for i in *pos..index.files { @@ -333,7 +329,6 @@ impl TapeDriver for VirtualTapeHandle { for i in 0..*pos { let path = self.tape_file_path(name, i); used_space += path.metadata()?.len() as usize; - } index.files = *pos + 1; @@ -369,9 +364,12 @@ impl TapeDriver for VirtualTapeHandle { fn move_to_eom(&mut self, _write_missing_eof: bool) -> Result<(), Error> { let mut status = self.load_status()?; match status.current_tape { - Some(VirtualTapeStatus { ref name, ref mut pos }) => { - - let index = self.load_tape_index(name) + Some(VirtualTapeStatus { + ref name, + ref mut pos, + }) => { + let index = self + .load_tape_index(name) .map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?; *pos = index.files; @@ -400,7 +398,10 @@ impl TapeDriver for VirtualTapeHandle { fn format_media(&mut self, _fast: bool) -> Result<(), Error> { let mut status = self.load_status()?; match status.current_tape { - Some(VirtualTapeStatus { ref name, ref mut pos }) => { + Some(VirtualTapeStatus { + ref name, + ref mut pos, + }) => { *pos = self.truncate_tape(name, 0)?; self.store_status(&status)?; Ok(()) @@ -414,7 +415,6 @@ impl TapeDriver for VirtualTapeHandle { media_set_label: &MediaSetLabel, key_config: Option<&KeyConfig>, ) -> Result<(), Error> { - self.set_encryption(None)?; if key_config.is_some() { @@ -423,7 +423,10 @@ impl TapeDriver for VirtualTapeHandle { let mut status = self.load_status()?; match status.current_tape { - Some(VirtualTapeStatus { ref name, ref mut pos }) => { + Some(VirtualTapeStatus { + ref name, + ref mut pos, + }) => { *pos = self.truncate_tape(name, 1)?; let pos = *pos; self.store_status(&status)?; @@ -432,11 +435,17 @@ impl TapeDriver for VirtualTapeHandle { bail!("media is empty (no label)."); } if pos != 1 { - bail!("write_media_set_label: truncate failed - got wrong pos '{}'", pos); + bail!( + "write_media_set_label: truncate failed - got wrong pos '{}'", + pos + ); } let raw = serde_json::to_string_pretty(&serde_json::to_value(media_set_label)?)?; - let header = MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, raw.len() as u32); + let header = MediaContentHeader::new( + PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, + raw.len() as u32, + ); { let mut writer = self.write_file()?; @@ -451,15 +460,12 @@ impl TapeDriver for VirtualTapeHandle { } fn eject_media(&mut self) -> Result<(), Error> { - let status = VirtualDriveStatus { - current_tape: None, - }; + let status = VirtualDriveStatus { current_tape: None }; self.store_status(&status) } } impl MediaChange for VirtualTapeHandle { - fn drive_number(&self) -> u64 { 0 } @@ -469,7 +475,6 @@ impl MediaChange for VirtualTapeHandle { } fn status(&mut self) -> Result { - let drive_status = self.load_status()?; let mut drives = Vec::new(); @@ -482,7 +487,7 @@ impl MediaChange for VirtualTapeHandle { vendor: None, model: None, element_address: 0, - }); + }); } // This implementation is lame, because we do not have fixed @@ -490,7 +495,7 @@ impl MediaChange for VirtualTapeHandle { let mut slots = 
Vec::new(); let label_texts = self.online_media_label_texts()?; - let max_slots = ((label_texts.len() + 7)/8) * 8; + let max_slots = ((label_texts.len() + 7) / 8) * 8; for i in 0..max_slots { let status = if let Some(label_text) = label_texts.get(i) { @@ -505,7 +510,11 @@ impl MediaChange for VirtualTapeHandle { }); } - Ok(MtxStatus { drives, slots, transports: Vec::new() }) + Ok(MtxStatus { + drives, + slots, + transports: Vec::new(), + }) } fn transfer_media(&mut self, _from: u64, _to: u64) -> Result { @@ -568,7 +577,6 @@ impl MediaChange for VirtualTapeHandle { } impl MediaChange for VirtualTapeDrive { - fn drive_number(&self) -> u64 { 0 } diff --git a/src/tape/file_formats/catalog_archive.rs b/src/tape/file_formats/catalog_archive.rs index 4e8b8fdf..f9b099b6 100644 --- a/src/tape/file_formats/catalog_archive.rs +++ b/src/tape/file_formats/catalog_archive.rs @@ -4,19 +4,9 @@ use std::io::Read; use proxmox_sys::error::SysError; use proxmox_uuid::Uuid; -use pbs_tape::{ - PROXMOX_TAPE_BLOCK_SIZE, - TapeWrite, MediaContentHeader, -}; +use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE}; -use crate::{ - tape::{ - file_formats::{ - PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, - CatalogArchiveHeader, - }, - }, -}; +use crate::tape::file_formats::{CatalogArchiveHeader, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0}; /// Write a media catalog to the tape /// @@ -32,17 +22,20 @@ pub fn tape_write_catalog<'a>( seq_nr: usize, file: &mut File, ) -> Result, std::io::Error> { - let archive_header = CatalogArchiveHeader { uuid: uuid.clone(), media_set_uuid: media_set_uuid.clone(), seq_nr: seq_nr as u64, }; - let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec(); + let header_data = serde_json::to_string_pretty(&archive_header)? + .as_bytes() + .to_vec(); let header = MediaContentHeader::new( - PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, header_data.len() as u32); + PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, + header_data.len() as u32, + ); let content_uuid: Uuid = header.uuid.into(); let leom = writer.write_header(&header, &header_data)?; @@ -54,7 +47,6 @@ pub fn tape_write_catalog<'a>( let mut file_copy_buffer = proxmox_io::vec::undefined(PROXMOX_TAPE_BLOCK_SIZE); let result: Result<(), std::io::Error> = proxmox_lang::try_block!({ - let file_size = file.metadata()?.len(); let mut remaining = file_size; @@ -71,7 +63,7 @@ pub fn tape_write_catalog<'a>( } Ok(()) }); - + match result { Ok(()) => { writer.finish(false)?; diff --git a/src/tape/file_formats/chunk_archive.rs b/src/tape/file_formats/chunk_archive.rs index 827dc347..8ed2f61d 100644 --- a/src/tape/file_formats/chunk_archive.rs +++ b/src/tape/file_formats/chunk_archive.rs @@ -7,16 +7,11 @@ use proxmox_io::ReadExt; use proxmox_uuid::Uuid; use pbs_datastore::DataBlob; -use pbs_tape::{ - PROXMOX_TAPE_BLOCK_SIZE, - TapeWrite, MediaContentHeader, -}; +use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE}; use crate::tape::file_formats::{ + ChunkArchiveEntryHeader, ChunkArchiveHeader, PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, - PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0, - ChunkArchiveHeader, - ChunkArchiveEntryHeader, }; /// Writes chunk archives to tape. 
@@ -32,8 +27,7 @@ pub struct ChunkArchiveWriter<'a> { close_on_leom: bool, } -impl <'a> ChunkArchiveWriter<'a> { - +impl<'a> ChunkArchiveWriter<'a> { pub const MAGIC: [u8; 8] = PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1; /// Creates a new instance @@ -41,10 +35,13 @@ impl <'a> ChunkArchiveWriter<'a> { mut writer: Box, store: &str, close_on_leom: bool, - ) -> Result<(Self,Uuid), Error> { - - let archive_header = ChunkArchiveHeader { store: store.to_string() }; - let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec(); + ) -> Result<(Self, Uuid), Error> { + let archive_header = ChunkArchiveHeader { + store: store.to_string(), + }; + let header_data = serde_json::to_string_pretty(&archive_header)? + .as_bytes() + .to_vec(); let header = MediaContentHeader::new(Self::MAGIC, header_data.len() as u32); writer.write_header(&header, &header_data)?; @@ -69,8 +66,9 @@ impl <'a> ChunkArchiveWriter<'a> { fn write_all(&mut self, data: &[u8]) -> Result { match self.writer { Some(ref mut writer) => writer.write_all(data), - None => proxmox_lang::io_bail!( - "detected write after archive finished - internal error"), + None => { + proxmox_lang::io_bail!("detected write after archive finished - internal error") + } } } @@ -80,10 +78,9 @@ impl <'a> ChunkArchiveWriter<'a> { /// In that case the archive only contains parts of the last chunk. pub fn try_write_chunk( &mut self, - digest: &[u8;32], + digest: &[u8; 32], blob: &DataBlob, ) -> Result { - if self.writer.is_none() { return Ok(false); } @@ -95,9 +92,11 @@ impl <'a> ChunkArchiveWriter<'a> { }; let head = head.to_le(); - let data = unsafe { std::slice::from_raw_parts( - &head as *const ChunkArchiveEntryHeader as *const u8, - std::mem::size_of::()) + let data = unsafe { + std::slice::from_raw_parts( + &head as *const ChunkArchiveEntryHeader as *const u8, + std::mem::size_of::(), + ) }; self.write_all(data)?; @@ -150,8 +149,7 @@ pub struct ChunkArchiveDecoder { reader: R, } -impl ChunkArchiveDecoder { - +impl ChunkArchiveDecoder { /// Creates a new instance pub fn new(reader: R) -> Self { Self { reader } @@ -163,8 +161,7 @@ impl ChunkArchiveDecoder { } /// Returns the next chunk (if any). 
- pub fn next_chunk(&mut self) -> Result, Error> { - + pub fn next_chunk(&mut self) -> Result, Error> { let mut header = ChunkArchiveEntryHeader { magic: [0u8; 8], digest: [0u8; 32], @@ -173,11 +170,12 @@ impl ChunkArchiveDecoder { let data = unsafe { std::slice::from_raw_parts_mut( (&mut header as *mut ChunkArchiveEntryHeader) as *mut u8, - std::mem::size_of::()) + std::mem::size_of::(), + ) }; match self.reader.read_exact_or_eof(data) { - Ok(true) => {}, + Ok(true) => {} Ok(false) => { // last chunk is allowed to be incomplete - simply report EOD return Ok(None); @@ -189,7 +187,7 @@ impl ChunkArchiveDecoder { bail!("wrong magic number"); } - let raw_data = match self.reader.read_exact_allocated(header.size as usize) { + let raw_data = match self.reader.read_exact_allocated(header.size as usize) { Ok(data) => data, Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => { // last chunk is allowed to be incomplete - simply report EOD diff --git a/src/tape/file_formats/mod.rs b/src/tape/file_formats/mod.rs index 9a76646e..06c89617 100644 --- a/src/tape/file_formats/mod.rs +++ b/src/tape/file_formats/mod.rs @@ -37,7 +37,8 @@ pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0: [u8; 8] = [62, 173, 167, 95, 4 pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1: [u8; 8] = [109, 49, 99, 109, 215, 2, 131, 191]; // openssl::sha::sha256(b"Proxmox Backup Chunk Archive Entry v1.0")[0..8] -pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] = [72, 87, 109, 242, 222, 66, 143, 220]; +pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] = + [72, 87, 109, 242, 222, 66, 143, 220]; // openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.0")[0..8]; // only used in unreleased version - no longer supported @@ -46,9 +47,10 @@ pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0: [u8; 8] = [9, 182, 2, 31, 1 pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1: [u8; 8] = [218, 22, 21, 208, 17, 226, 154, 98]; // openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.0")[0..8]; -pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] = [183, 207, 199, 37, 158, 153, 30, 115]; +pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] = + [183, 207, 199, 37, 158, 153, 30, 115]; -lazy_static::lazy_static!{ +lazy_static::lazy_static! { // Map content magic numbers to human readable names. static ref PROXMOX_TAPE_CONTENT_NAME: HashMap<&'static [u8;8], &'static str> = { let mut map = HashMap::new(); @@ -65,10 +67,11 @@ lazy_static::lazy_static!{ /// Map content magic numbers to human readable names. pub fn proxmox_tape_magic_to_text(magic: &[u8; 8]) -> Option { - PROXMOX_TAPE_CONTENT_NAME.get(magic).map(|s| String::from(*s)) + PROXMOX_TAPE_CONTENT_NAME + .get(magic) + .map(|s| String::from(*s)) } - #[derive(Deserialize, Serialize)] /// Header for chunk archives pub struct ChunkArchiveHeader { @@ -77,7 +80,7 @@ pub struct ChunkArchiveHeader { } #[derive(Endian)] -#[repr(C,packed)] +#[repr(C, packed)] /// Header for data blobs inside a chunk archive pub struct ChunkArchiveEntryHeader { /// fixed value `PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0` @@ -108,7 +111,7 @@ pub struct CatalogArchiveHeader { pub seq_nr: u64, } -#[derive(Serialize,Deserialize,Clone,Debug)] +#[derive(Serialize, Deserialize, Clone, Debug)] /// Media Label /// /// Media labels are used to uniquely identify a media. 
They are @@ -122,8 +125,7 @@ pub struct MediaLabel { pub ctime: i64, } - -#[derive(Serialize,Deserialize,Clone,Debug)] +#[derive(Serialize, Deserialize, Clone, Debug)] /// `MediaSet` Label /// /// Used to uniquely identify a `MediaSet`. They are stored as second @@ -138,12 +140,11 @@ pub struct MediaSetLabel { /// Creation time stamp pub ctime: i64, /// Encryption key finkerprint (if encryped) - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub encryption_key_fingerprint: Option, } impl MediaSetLabel { - pub fn with_data( pool: &str, uuid: Uuid, @@ -160,4 +161,3 @@ impl MediaSetLabel { } } } - diff --git a/src/tape/file_formats/multi_volume_reader.rs b/src/tape/file_formats/multi_volume_reader.rs index fcc1d3c0..9692c121 100644 --- a/src/tape/file_formats/multi_volume_reader.rs +++ b/src/tape/file_formats/multi_volume_reader.rs @@ -1,33 +1,33 @@ -use std::io::{Read}; +use std::io::Read; use anyhow::{bail, Error}; use proxmox_io::ReadExt; -use pbs_tape::{TapeRead, MediaContentHeader}; +use pbs_tape::{MediaContentHeader, TapeRead}; /// Read multi volume data streams written by `MultiVolumeWriter` /// /// Note: We do not use this feature currently. pub struct MultiVolumeReader<'a> { reader: Option>, - next_reader_fn: Box Result, Error>>, + next_reader_fn: Box Result, Error>>, complete: bool, header: MediaContentHeader, } -impl <'a> MultiVolumeReader<'a> { - +impl<'a> MultiVolumeReader<'a> { /// Creates a new instance pub fn new( - reader: Box, + reader: Box, header: MediaContentHeader, - next_reader_fn: Box Result, Error>>, + next_reader_fn: Box Result, Error>>, ) -> Result { - if header.part_number != 0 { - bail!("MultiVolumeReader::new - got wrong header part_number ({} != 0)", - header.part_number); + bail!( + "MultiVolumeReader::new - got wrong header part_number ({} != 0)", + header.part_number + ); } Ok(Self { @@ -39,8 +39,7 @@ impl <'a> MultiVolumeReader<'a> { } } -impl <'a> Read for MultiVolumeReader<'a> { - +impl<'a> Read for MultiVolumeReader<'a> { fn read(&mut self, buf: &mut [u8]) -> Result { if self.complete { return Ok(0); @@ -57,43 +56,45 @@ impl <'a> Read for MultiVolumeReader<'a> { if part_header.uuid != self.header.uuid { proxmox_lang::io_bail!("got wrong part uuid"); } - if part_header.content_magic!= self.header.content_magic { + if part_header.content_magic != self.header.content_magic { proxmox_lang::io_bail!("got wrong part content magic"); } let expect_part_number = self.header.part_number + 1; if part_header.part_number != expect_part_number { - proxmox_lang::io_bail!("got wrong part number ({} != {})", - part_header.part_number, expect_part_number); + proxmox_lang::io_bail!( + "got wrong part number ({} != {})", + part_header.part_number, + expect_part_number + ); } self.header.part_number = expect_part_number; Ok(()) - }).map_err(|err| { + }) + .map_err(|err| { proxmox_lang::io_format_err!("multi-volume read content header failed: {}", err) })?; - } + } match self.reader { None => unreachable!(), - Some(ref mut reader) => { - match reader.read(buf) { - Ok(0) => { - if reader.is_incomplete()? { - self.reader = None; - self.read(buf) - } else { - self.reader = None; - self.complete = true; - Ok(0) - } + Some(ref mut reader) => match reader.read(buf) { + Ok(0) => { + if reader.is_incomplete()? 
{ + self.reader = None; + self.read(buf) + } else { + self.reader = None; + self.complete = true; + Ok(0) } - Ok(n) => Ok(n), - Err(err) => Err(err) } - } + Ok(n) => Ok(n), + Err(err) => Err(err), + }, } } } diff --git a/src/tape/file_formats/multi_volume_writer.rs b/src/tape/file_formats/multi_volume_writer.rs index bca2a27f..2106bae2 100644 --- a/src/tape/file_formats/multi_volume_writer.rs +++ b/src/tape/file_formats/multi_volume_writer.rs @@ -2,14 +2,14 @@ use anyhow::Error; use proxmox_uuid::Uuid; -use pbs_tape::{TapeWrite, MediaContentHeader}; +use pbs_tape::{MediaContentHeader, TapeWrite}; /// Writes data streams using multiple volumes /// /// Note: We do not use this feature currently. pub struct MultiVolumeWriter<'a> { writer: Option>, - next_writer_fn: Box Result, Error>>, + next_writer_fn: Box Result, Error>>, got_leom: bool, finished: bool, wrote_header: bool, @@ -18,16 +18,14 @@ pub struct MultiVolumeWriter<'a> { bytes_written: usize, // does not include bytes from current writer } -impl <'a> MultiVolumeWriter<'a> { - +impl<'a> MultiVolumeWriter<'a> { /// Creates a new instance pub fn new( - writer: Box, + writer: Box, content_magic: [u8; 8], header_data: Vec, next_writer_fn: Box Result, Error>>, ) -> Self { - let header = MediaContentHeader::new(content_magic, header_data.len() as u32); Self { @@ -48,21 +46,21 @@ impl <'a> MultiVolumeWriter<'a> { } } -impl <'a> TapeWrite for MultiVolumeWriter<'a> { - +impl<'a> TapeWrite for MultiVolumeWriter<'a> { fn write_all(&mut self, buf: &[u8]) -> Result { - if self.finished { proxmox_lang::io_bail!("multi-volume writer already finished: internal error"); } if self.got_leom { if !self.wrote_header { - proxmox_lang::io_bail!("multi-volume writer: got LEOM before writing anything - internal error"); + proxmox_lang::io_bail!( + "multi-volume writer: got LEOM before writing anything - internal error" + ); } let mut writer = match self.writer.take() { Some(writer) => writer, - None => proxmox_lang::io_bail!("multi-volume writer: no writer -internal error"), + None => proxmox_lang::io_bail!("multi-volume writer: no writer -internal error"), }; self.bytes_written = writer.bytes_written(); writer.finish(true)?; @@ -72,10 +70,9 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> { if self.header.part_number == u8::MAX { proxmox_lang::io_bail!("multi-volume writer: too many parts"); } - self.writer = Some( - (self.next_writer_fn)() - .map_err(|err| proxmox_lang::io_format_err!("multi-volume get next volume failed: {}", err))? 
- ); + self.writer = Some((self.next_writer_fn)().map_err(|err| { + proxmox_lang::io_format_err!("multi-volume get next volume failed: {}", err) + })?); self.got_leom = false; self.wrote_header = false; self.header.part_number += 1; @@ -92,7 +89,9 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> { } }; - if leom { self.got_leom = true; } + if leom { + self.got_leom = true; + } Ok(false) } @@ -108,12 +107,14 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> { fn finish(&mut self, incomplete: bool) -> Result { if incomplete { proxmox_lang::io_bail!( - "incomplete flag makes no sense for multi-volume stream: internal error"); + "incomplete flag makes no sense for multi-volume stream: internal error" + ); } match self.writer.take() { - None if self.finished => proxmox_lang::io_bail!( - "multi-volume writer already finished: internal error"), + None if self.finished => { + proxmox_lang::io_bail!("multi-volume writer already finished: internal error") + } None => Ok(false), Some(ref mut writer) => { self.finished = true; @@ -129,5 +130,4 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> { fn logical_end_of_media(&self) -> bool { self.got_leom } - } diff --git a/src/tape/file_formats/snapshot_archive.rs b/src/tape/file_formats/snapshot_archive.rs index 51ba6015..7085f4b3 100644 --- a/src/tape/file_formats/snapshot_archive.rs +++ b/src/tape/file_formats/snapshot_archive.rs @@ -5,17 +5,10 @@ use std::task::{Context, Poll}; use proxmox_sys::error::SysError; use proxmox_uuid::Uuid; -use pbs_tape::{ - PROXMOX_TAPE_BLOCK_SIZE, - TapeWrite, MediaContentHeader, -}; use pbs_datastore::SnapshotReader; +use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE}; -use crate::tape::file_formats::{ - PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, - SnapshotArchiveHeader, -}; - +use crate::tape::file_formats::{SnapshotArchiveHeader, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1}; /// Write a set of files as `pxar` archive to the tape /// @@ -29,17 +22,20 @@ pub fn tape_write_snapshot_archive<'a>( writer: &mut (dyn TapeWrite + 'a), snapshot_reader: &SnapshotReader, ) -> Result, std::io::Error> { - let snapshot = snapshot_reader.snapshot().to_string(); let store = snapshot_reader.datastore_name().to_string(); let file_list = snapshot_reader.file_list(); let archive_header = SnapshotArchiveHeader { snapshot, store }; - let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec(); + let header_data = serde_json::to_string_pretty(&archive_header)? 
+ .as_bytes() + .to_vec(); let header = MediaContentHeader::new( - PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, header_data.len() as u32); + PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, + header_data.len() as u32, + ); let content_uuid = header.uuid.into(); let root_metadata = pxar::Metadata::dir_builder(0o0664).build(); @@ -47,18 +43,20 @@ pub fn tape_write_snapshot_archive<'a>( let mut file_copy_buffer = proxmox_io::vec::undefined(PROXMOX_TAPE_BLOCK_SIZE); let result: Result<(), std::io::Error> = proxmox_lang::try_block!({ - let leom = writer.write_header(&header, &header_data)?; if leom { - return Err(std::io::Error::from_raw_os_error(nix::errno::Errno::ENOSPC as i32)); + return Err(std::io::Error::from_raw_os_error( + nix::errno::Errno::ENOSPC as i32, + )); } - let mut encoder = pxar::encoder::sync::Encoder::new(PxarTapeWriter::new(writer), &root_metadata)?; + let mut encoder = + pxar::encoder::sync::Encoder::new(PxarTapeWriter::new(writer), &root_metadata)?; for filename in file_list.iter() { - - let mut file = snapshot_reader.open_file(filename) - .map_err(|err| proxmox_lang::io_format_err!("open file '{}' failed - {}", filename, err))?; + let mut file = snapshot_reader.open_file(filename).map_err(|err| { + proxmox_lang::io_format_err!("open file '{}' failed - {}", filename, err) + })?; let metadata = file.metadata()?; let file_size = metadata.len(); @@ -77,7 +75,6 @@ pub fn tape_write_snapshot_archive<'a>( } out.write_all(&file_copy_buffer[..got])?; remaining -= got as u64; - } if remaining > 0 { proxmox_lang::io_bail!("file '{}' shrunk while reading", filename); @@ -117,7 +114,6 @@ impl<'a, T: TapeWrite + ?Sized> PxarTapeWriter<'a, T> { } impl<'a, T: TapeWrite + ?Sized> pxar::encoder::SeqWrite for PxarTapeWriter<'a, T> { - fn poll_seq_write( self: Pin<&mut Self>, _cx: &mut Context, @@ -127,7 +123,9 @@ impl<'a, T: TapeWrite + ?Sized> pxar::encoder::SeqWrite for PxarTapeWriter<'a, T Poll::Ready(match this.inner.write_all(buf) { Ok(leom) => { if leom { - Err(std::io::Error::from_raw_os_error(nix::errno::Errno::ENOSPC as i32)) + Err(std::io::Error::from_raw_os_error( + nix::errno::Errno::ENOSPC as i32, + )) } else { Ok(buf.len()) } diff --git a/src/tape/inventory.rs b/src/tape/inventory.rs index 229e8b8d..c3bd4606 100644 --- a/src/tape/inventory.rs +++ b/src/tape/inventory.rs @@ -22,19 +22,19 @@ //! restore, to make sure it is not reused for backups. //! 
-use std::collections::{HashMap, BTreeMap}; +use std::collections::{BTreeMap, HashMap}; use std::path::{Path, PathBuf}; use std::time::Duration; use anyhow::{bail, Error}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use serde_json::json; -use proxmox_sys::fs::{replace_file, file_get_json, CreateOptions}; +use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions}; use proxmox_uuid::Uuid; +use pbs_api_types::{MediaLocation, MediaSetPolicy, MediaStatus, RetentionPolicy}; use pbs_config::BackupLockGuard; -use pbs_api_types::{MediaSetPolicy, RetentionPolicy, MediaStatus, MediaLocation}; #[cfg(not(test))] use pbs_config::open_backup_lockfile; @@ -48,37 +48,28 @@ fn open_backup_lockfile>( Ok(unsafe { pbs_config::create_mocked_lock() }) } - -use crate::{ - tape::{ - TAPE_STATUS_DIR, - MediaSet, - MediaCatalog, - file_formats::{ - MediaLabel, - MediaSetLabel, - }, - changer::OnlineStatusMap, - }, +use crate::tape::{ + changer::OnlineStatusMap, + file_formats::{MediaLabel, MediaSetLabel}, + MediaCatalog, MediaSet, TAPE_STATUS_DIR, }; /// Unique Media Identifier /// /// This combines the label and media set label. -#[derive(Debug,Serialize,Deserialize,Clone)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct MediaId { pub label: MediaLabel, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub media_set_label: Option, } - -#[derive(Serialize,Deserialize)] +#[derive(Serialize, Deserialize)] struct MediaStateEntry { id: MediaId, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] location: Option, - #[serde(skip_serializing_if="Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] status: Option, } @@ -90,17 +81,15 @@ pub struct Inventory { lockfile_path: PathBuf, // helpers - media_set_start_times: HashMap + media_set_start_times: HashMap, } impl Inventory { - pub const MEDIA_INVENTORY_FILENAME: &'static str = "inventory.json"; pub const MEDIA_INVENTORY_LOCKFILE: &'static str = ".inventory.lck"; /// Create empty instance, no data loaded pub fn new(base_path: &Path) -> Self { - let mut inventory_path = base_path.to_owned(); inventory_path.push(Self::MEDIA_INVENTORY_FILENAME); @@ -129,7 +118,6 @@ impl Inventory { } fn update_helpers(&mut self) { - // recompute media_set_start_times let mut set_start_times = HashMap::new(); @@ -153,7 +141,6 @@ impl Inventory { } fn load_media_db(path: &Path) -> Result, Error> { - let data = file_get_json(path, Some(json!([])))?; let media_list: Vec = serde_json::from_value(data)?; @@ -188,11 +175,7 @@ impl Inventory { } /// Stores a single MediaID persistently - pub fn store( - &mut self, - mut media_id: MediaId, - clear_media_status: bool, - ) -> Result<(), Error> { + pub fn store(&mut self, mut media_id: MediaId, clear_media_status: bool) -> Result<(), Error> { let _lock = self.lock()?; self.map = Self::load_media_db(&self.inventory_path)?; @@ -202,7 +185,7 @@ impl Inventory { // do not overwrite unsaved pool assignments if media_id.media_set_label.is_none() { if let Some(ref set) = previous.id.media_set_label { - if set.uuid.as_ref() == [0u8;16] { + if set.uuid.as_ref() == [0u8; 16] { media_id.media_set_label = Some(set.clone()); } } @@ -218,7 +201,11 @@ impl Inventory { }; self.map.insert(uuid, entry); } else { - let entry = MediaStateEntry { id: media_id, location: None, status: None }; + let entry = MediaStateEntry { + id: media_id, + location: None, + status: None, + }; self.map.insert(uuid, 
entry); } @@ -228,7 +215,7 @@ impl Inventory { } /// Remove a single media persistently - pub fn remove_media(&mut self, uuid: &Uuid) -> Result<(), Error> { + pub fn remove_media(&mut self, uuid: &Uuid) -> Result<(), Error> { let _lock = self.lock()?; self.map = Self::load_media_db(&self.inventory_path)?; self.map.remove(uuid); @@ -268,7 +255,7 @@ impl Inventory { match entry.id.media_set_label { None => None, // not assigned to any pool Some(ref set) => { - let is_empty = set.uuid.as_ref() == [0u8;16]; + let is_empty = set.uuid.as_ref() == [0u8; 16]; Some((&set.pool, is_empty)) } } @@ -288,7 +275,7 @@ impl Inventory { continue; // belong to another pool } - if set.uuid.as_ref() == [0u8;16] { + if set.uuid.as_ref() == [0u8; 16] { list.push(MediaId { label: entry.id.label.clone(), media_set_label: None, @@ -311,7 +298,7 @@ impl Inventory { match entry.id.media_set_label { None => continue, // not assigned to any pool Some(ref set) => { - if set.uuid.as_ref() != [0u8;16] { + if set.uuid.as_ref() != [0u8; 16] { list.push(entry.id.clone()); } } @@ -323,13 +310,16 @@ impl Inventory { /// List media not assigned to any pool pub fn list_unassigned_media(&self) -> Vec { - self.map.values().filter_map(|entry| - if entry.id.media_set_label.is_none() { - Some(entry.id.clone()) - } else { - None - } - ).collect() + self.map + .values() + .filter_map(|entry| { + if entry.id.media_set_label.is_none() { + Some(entry.id.clone()) + } else { + None + } + }) + .collect() } pub fn media_set_start_time(&self, media_set_uuid: &Uuid) -> Option { @@ -338,14 +328,13 @@ impl Inventory { /// Lookup media set pool pub fn lookup_media_set_pool(&self, media_set_uuid: &Uuid) -> Result { - let mut last_pool = None; for entry in self.map.values() { match entry.id.media_set_label { None => continue, Some(MediaSetLabel { ref uuid, .. }) => { - if uuid != media_set_uuid { + if uuid != media_set_uuid { continue; } if let Some((pool, _)) = self.lookup_media_pool(&entry.id.label.uuid) { @@ -363,20 +352,24 @@ impl Inventory { match last_pool { Some(pool) => Ok(pool.to_string()), - None => bail!("media set {} is incomplete - unable to lookup pool", media_set_uuid), + None => bail!( + "media set {} is incomplete - unable to lookup pool", + media_set_uuid + ), } } /// Compute a single media sets pub fn compute_media_set_members(&self, media_set_uuid: &Uuid) -> Result { - let mut set = MediaSet::with_data(media_set_uuid.clone(), Vec::new()); for entry in self.map.values() { match entry.id.media_set_label { None => continue, - Some(MediaSetLabel { seq_nr, ref uuid, .. }) => { - if uuid != media_set_uuid { + Some(MediaSetLabel { + seq_nr, ref uuid, .. + }) => { + if uuid != media_set_uuid { continue; } set.insert_media(entry.id.label.uuid.clone(), seq_nr)?; @@ -389,17 +382,17 @@ impl Inventory { /// Compute all media sets pub fn compute_media_set_list(&self) -> Result, Error> { - let mut set_map: HashMap = HashMap::new(); for entry in self.map.values() { match entry.id.media_set_label { None => continue, - Some(MediaSetLabel { seq_nr, ref uuid, .. }) => { - - let set = set_map.entry(uuid.clone()).or_insert_with(|| { - MediaSet::with_data(uuid.clone(), Vec::new()) - }); + Some(MediaSetLabel { + seq_nr, ref uuid, .. 
+ }) => { + let set = set_map + .entry(uuid.clone()) + .or_insert_with(|| MediaSet::with_data(uuid.clone(), Vec::new())); set.insert_media(entry.id.label.uuid.clone(), seq_nr)?; } @@ -411,12 +404,13 @@ impl Inventory { /// Returns the latest media set for a pool pub fn latest_media_set(&self, pool: &str) -> Option { - let mut last_set: Option<(Uuid, i64)> = None; - let set_list = self.map.values() + let set_list = self + .map + .values() .filter_map(|entry| entry.id.media_set_label.as_ref()) - .filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8;16]); + .filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8; 16]); for set in set_list { match last_set { @@ -437,13 +431,19 @@ impl Inventory { }; // consistency check - must be the only set with that ctime - let set_list = self.map.values() + let set_list = self + .map + .values() .filter_map(|entry| entry.id.media_set_label.as_ref()) - .filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8;16]); + .filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8; 16]); for set in set_list { - if set.uuid != uuid && set.ctime >= ctime { // should not happen - eprintln!("latest_media_set: found set with equal ctime ({}, {})", set.uuid, uuid); + if set.uuid != uuid && set.ctime >= ctime { + // should not happen + eprintln!( + "latest_media_set: found set with equal ctime ({}, {})", + set.uuid, uuid + ); return None; } } @@ -454,8 +454,9 @@ impl Inventory { // Test if there is a media set (in the same pool) newer than this one. // Return the ctime of the nearest media set fn media_set_next_start_time(&self, media_set_uuid: &Uuid) -> Option { - - let (pool, ctime) = match self.map.values() + let (pool, ctime) = match self + .map + .values() .filter_map(|entry| entry.id.media_set_label.as_ref()) .find_map(|set| { if &set.uuid == media_set_uuid { @@ -464,11 +465,13 @@ impl Inventory { None } }) { - Some((pool, ctime)) => (pool, ctime), - None => return None, - }; + Some((pool, ctime)) => (pool, ctime), + None => return None, + }; - let set_list = self.map.values() + let set_list = self + .map + .values() .filter_map(|entry| entry.id.media_set_label.as_ref()) .filter(|set| (&set.uuid != media_set_uuid) && (set.pool == pool)); @@ -498,7 +501,6 @@ impl Inventory { media_set_policy: &MediaSetPolicy, retention_policy: &RetentionPolicy, ) -> i64 { - if let RetentionPolicy::KeepForever = retention_policy { return i64::MAX; } @@ -518,28 +520,22 @@ impl Inventory { }; let max_use_time = match self.media_set_next_start_time(&set.uuid) { - Some(next_start_time) => { - match media_set_policy { - MediaSetPolicy::AlwaysCreate => set_start_time, - _ => next_start_time, - } - } - None => { - match media_set_policy { - MediaSetPolicy::ContinueCurrent => { - return i64::MAX; - } - MediaSetPolicy::AlwaysCreate => { - set_start_time - } - MediaSetPolicy::CreateAt(ref event) => { - match event.compute_next_event(set_start_time) { - Ok(Some(next)) => next, - Ok(None) | Err(_) => return i64::MAX, - } + Some(next_start_time) => match media_set_policy { + MediaSetPolicy::AlwaysCreate => set_start_time, + _ => next_start_time, + }, + None => match media_set_policy { + MediaSetPolicy::ContinueCurrent => { + return i64::MAX; + } + MediaSetPolicy::AlwaysCreate => set_start_time, + MediaSetPolicy::CreateAt(ref event) => { + match event.compute_next_event(set_start_time) { + Ok(Some(next)) => next, + Ok(None) | Err(_) => return i64::MAX, } } - } + }, }; match retention_policy { @@ -560,7 +556,6 @@ impl Inventory { media_set_uuid: &Uuid, template: Option, ) -> Result { 
- if let Some(ctime) = self.media_set_start_time(media_set_uuid) { let mut template = template.unwrap_or_else(|| String::from("%c")); template = template.replace("%id%", &media_set_uuid.to_string()); @@ -575,7 +570,6 @@ impl Inventory { /// Generate and insert a new free tape (test helper) pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid { - let label = MediaLabel { label_text: label_text.to_string(), uuid: Uuid::generate(), @@ -583,20 +577,21 @@ impl Inventory { }; let uuid = label.uuid.clone(); - self.store(MediaId { label, media_set_label: None }, false).unwrap(); + self.store( + MediaId { + label, + media_set_label: None, + }, + false, + ) + .unwrap(); uuid } /// Generate and insert a new tape assigned to a specific pool /// (test helper) - pub fn generate_assigned_tape( - &mut self, - label_text: &str, - pool: &str, - ctime: i64, - ) -> Uuid { - + pub fn generate_assigned_tape(&mut self, label_text: &str, pool: &str, ctime: i64) -> Uuid { let label = MediaLabel { label_text: label_text.to_string(), uuid: Uuid::generate(), @@ -607,18 +602,20 @@ impl Inventory { let set = MediaSetLabel::with_data(pool, [0u8; 16].into(), 0, ctime, None); - self.store(MediaId { label, media_set_label: Some(set) }, false).unwrap(); + self.store( + MediaId { + label, + media_set_label: Some(set), + }, + false, + ) + .unwrap(); uuid } /// Generate and insert a used tape (test helper) - pub fn generate_used_tape( - &mut self, - label_text: &str, - set: MediaSetLabel, - ctime: i64, - ) -> Uuid { + pub fn generate_used_tape(&mut self, label_text: &str, set: MediaSetLabel, ctime: i64) -> Uuid { let label = MediaLabel { label_text: label_text.to_string(), uuid: Uuid::generate(), @@ -626,7 +623,14 @@ impl Inventory { }; let uuid = label.uuid.clone(); - self.store(MediaId { label, media_set_label: Some(set) }, false).unwrap(); + self.store( + MediaId { + label, + media_set_label: Some(set), + }, + false, + ) + .unwrap(); uuid } @@ -634,13 +638,11 @@ impl Inventory { // Status/location handling impl Inventory { - /// Returns status and location with reasonable defaults. /// /// Default status is 'MediaStatus::Unknown'. /// Default location is 'MediaLocation::Offline'. 
pub fn status_and_location(&self, uuid: &Uuid) -> (MediaStatus, MediaLocation) { - match self.map.get(uuid) { None => { // no info stored - assume media is writable/offline @@ -689,7 +691,11 @@ impl Inventory { } // Lock database, reload database, set location, store database - fn set_media_location(&mut self, uuid: &Uuid, location: Option) -> Result<(), Error> { + fn set_media_location( + &mut self, + uuid: &Uuid, + location: Option, + ) -> Result<(), Error> { let _lock = self.lock()?; self.map = Self::load_media_db(&self.inventory_path)?; if let Some(entry) = self.map.get_mut(uuid) { @@ -742,7 +748,6 @@ impl Inventory { Ok(()) } - } /// Lock a media pool @@ -755,7 +760,7 @@ pub fn lock_media_pool(base_path: &Path, name: &str) -> Result Result { +pub fn lock_unassigned_media_pool(base_path: &Path) -> Result { // lock artificial "__UNASSIGNED__" pool to avoid races lock_media_pool(base_path, "__UNASSIGNED__") } @@ -778,11 +783,7 @@ pub fn lock_media_set( // shell completion helper /// List of known media uuids -pub fn complete_media_uuid( - _arg: &str, - _param: &HashMap, -) -> Vec { - +pub fn complete_media_uuid(_arg: &str, _param: &HashMap) -> Vec { let inventory = match Inventory::load(Path::new(TAPE_STATUS_DIR)) { Ok(inventory) => inventory, Err(_) => return Vec::new(), @@ -792,33 +793,32 @@ pub fn complete_media_uuid( } /// List of known media sets -pub fn complete_media_set_uuid( - _arg: &str, - _param: &HashMap, -) -> Vec { - +pub fn complete_media_set_uuid(_arg: &str, _param: &HashMap) -> Vec { let inventory = match Inventory::load(Path::new(TAPE_STATUS_DIR)) { Ok(inventory) => inventory, Err(_) => return Vec::new(), }; - inventory.map.values() + inventory + .map + .values() .filter_map(|entry| entry.id.media_set_label.as_ref()) - .map(|set| set.uuid.to_string()).collect() + .map(|set| set.uuid.to_string()) + .collect() } /// List of known media labels (barcodes) -pub fn complete_media_label_text( - _arg: &str, - _param: &HashMap, -) -> Vec { - +pub fn complete_media_label_text(_arg: &str, _param: &HashMap) -> Vec { let inventory = match Inventory::load(Path::new(TAPE_STATUS_DIR)) { Ok(inventory) => inventory, Err(_) => return Vec::new(), }; - inventory.map.values().map(|entry| entry.id.label.label_text.clone()).collect() + inventory + .map + .values() + .map(|entry| entry.id.label.label_text.clone()) + .collect() } pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap) -> Vec { @@ -833,12 +833,14 @@ pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap) }; let mut res = Vec::new(); - let media_ids = inventory.list_used_media().into_iter().filter(|media| { - match &media.media_set_label { - Some(label) => label.uuid == media_set_uuid, - None => false, - } - }); + let media_ids = + inventory + .list_used_media() + .into_iter() + .filter(|media| match &media.media_set_label { + Some(label) => label.uuid == media_set_uuid, + None => false, + }); for media_id in media_ids { let catalog = match MediaCatalog::open(status_path, &media_id, false, false) { diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs index 31672f02..0e2d9d6f 100644 --- a/src/tape/media_catalog.rs +++ b/src/tape/media_catalog.rs @@ -1,38 +1,28 @@ +use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::fs::File; -use std::io::{Write, Read, BufReader, Seek, SeekFrom}; +use std::io::{BufReader, Read, Seek, SeekFrom, Write}; use std::os::unix::io::AsRawFd; -use std::path::{PathBuf, Path}; -use std::collections::{HashSet, HashMap}; +use std::path::{Path, PathBuf}; 
use anyhow::{bail, format_err, Error}; use endian_trait::Endian; -use proxmox_sys::fs::read_subdir; use pbs_datastore::backup_info::BackupDir; +use proxmox_sys::fs::read_subdir; -use proxmox_sys::fs::{ - fchown, - create_path, - CreateOptions, -}; -use proxmox_io::{WriteExt, ReadExt}; +use proxmox_io::{ReadExt, WriteExt}; +use proxmox_sys::fs::{create_path, fchown, CreateOptions}; use proxmox_uuid::Uuid; -use crate::{ - tape::{ - MediaId, - file_formats::MediaSetLabel, - }, -}; +use crate::tape::{file_formats::MediaSetLabel, MediaId}; pub struct DatastoreContent { pub snapshot_index: HashMap, // snapshot => file_nr - pub chunk_index: HashMap<[u8;32], u64>, // chunk => file_nr + pub chunk_index: HashMap<[u8; 32], u64>, // chunk => file_nr } impl DatastoreContent { - pub fn new() -> Self { Self { chunk_index: HashMap::new(), @@ -47,8 +37,7 @@ impl DatastoreContent { /// including the file position. /// /// We use a simple binary format to store data on disk. -pub struct MediaCatalog { - +pub struct MediaCatalog { uuid: Uuid, // BackupMedia uuid file: Option, @@ -65,14 +54,14 @@ pub struct MediaCatalog { } impl MediaCatalog { - /// Magic number for media catalog files. // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.0")[0..8] // Note: this version did not store datastore names (not supported anymore) pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0: [u8; 8] = [221, 29, 164, 1, 59, 69, 19, 40]; // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.1")[0..8] - pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] = [76, 142, 232, 193, 32, 168, 137, 113]; + pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] = + [76, 142, 232, 193, 32, 168, 137, 113]; /// List media with catalogs pub fn media_with_catalogs(base_path: &Path) -> Result, Error> { @@ -81,8 +70,10 @@ impl MediaCatalog { for entry in read_subdir(libc::AT_FDCWD, base_path)? 
{ let entry = entry?; let name = unsafe { entry.file_name_utf8_unchecked() }; - if !name.ends_with(".log") { continue; } - if let Ok(uuid) = Uuid::parse_str(&name[..(name.len()-4)]) { + if !name.ends_with(".log") { + continue; + } + if let Ok(uuid) = Uuid::parse_str(&name[..(name.len() - 4)]) { catalogs.insert(uuid); } } @@ -111,7 +102,6 @@ impl MediaCatalog { /// Destroy the media catalog (remove all files) pub fn destroy(base_path: &Path, uuid: &Uuid) -> Result<(), Error> { - let path = Self::catalog_path(base_path, uuid); match std::fs::remove_file(path) { @@ -122,11 +112,7 @@ impl MediaCatalog { } /// Destroy the media catalog if media_set uuid does not match - pub fn destroy_unrelated_catalog( - base_path: &Path, - media_id: &MediaId, - ) -> Result<(), Error> { - + pub fn destroy_unrelated_catalog(base_path: &Path, media_id: &MediaId) -> Result<(), Error> { let uuid = &media_id.label.uuid; let path = Self::catalog_path(base_path, uuid); @@ -144,8 +130,8 @@ impl MediaCatalog { let expected_media_set_id = match media_id.media_set_label { None => { std::fs::remove_file(path)?; - return Ok(()) - }, + return Ok(()); + } Some(ref set) => &set.uuid, }; @@ -197,13 +183,11 @@ impl MediaCatalog { write: bool, create: bool, ) -> Result { - let uuid = &media_id.label.uuid; let path = Self::catalog_path(base_path, uuid); let me = proxmox_lang::try_block!({ - Self::create_basedir(base_path)?; let mut file = std::fs::OpenOptions::new() @@ -213,8 +197,12 @@ impl MediaCatalog { .open(&path)?; let backup_user = pbs_config::backup_user()?; - fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid)) - .map_err(|err| format_err!("fchown failed - {}", err))?; + fchown( + file.as_raw_fd(), + Some(backup_user.uid), + Some(backup_user.gid), + ) + .map_err(|err| format_err!("fchown failed - {}", err))?; let mut me = Self { uuid: uuid.clone(), @@ -234,26 +222,22 @@ impl MediaCatalog { let (found_magic_number, _) = result?; if !found_magic_number { - me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1); + me.pending + .extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1); } if write { me.file = Some(file); } Ok(me) - }).map_err(|err: Error| { - format_err!("unable to open media catalog {:?} - {}", path, err) - })?; + }) + .map_err(|err: Error| format_err!("unable to open media catalog {:?} - {}", path, err))?; Ok(me) } /// Creates a temporary empty catalog file - pub fn create_temporary_database_file( - base_path: &Path, - uuid: &Uuid, - ) -> Result { - + pub fn create_temporary_database_file(base_path: &Path, uuid: &Uuid) -> Result { Self::create_basedir(base_path)?; let tmp_path = Self::tmp_catalog_path(base_path, uuid); @@ -271,8 +255,12 @@ impl MediaCatalog { } let backup_user = pbs_config::backup_user()?; - fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid)) - .map_err(|err| format_err!("fchown failed - {}", err))?; + fchown( + file.as_raw_fd(), + Some(backup_user.uid), + Some(backup_user.gid), + ) + .map_err(|err| format_err!("fchown failed - {}", err))?; Ok(file) } @@ -285,13 +273,11 @@ impl MediaCatalog { media_id: &MediaId, log_to_stdout: bool, ) -> Result { - let uuid = &media_id.label.uuid; let tmp_path = Self::tmp_catalog_path(base_path, uuid); let me = proxmox_lang::try_block!({ - let file = Self::create_temporary_database_file(base_path, uuid)?; let mut me = Self { @@ -306,7 +292,8 @@ impl MediaCatalog { me.log_to_stdout = log_to_stdout; - me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1); + me.pending + 
.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1); me.register_label(&media_id.label.uuid, 0, 0)?; @@ -317,8 +304,13 @@ impl MediaCatalog { me.commit()?; Ok(me) - }).map_err(|err: Error| { - format_err!("unable to create temporary media catalog {:?} - {}", tmp_path, err) + }) + .map_err(|err: Error| { + format_err!( + "unable to create temporary media catalog {:?} - {}", + tmp_path, + err + ) })?; Ok(me) @@ -333,7 +325,6 @@ impl MediaCatalog { uuid: &Uuid, commit: bool, ) -> Result<(), Error> { - let tmp_path = Self::tmp_catalog_path(base_path, uuid); if commit { @@ -365,7 +356,6 @@ impl MediaCatalog { /// /// Fixme: this should be atomic ... pub fn commit(&mut self) -> Result<(), Error> { - if self.pending.is_empty() { return Ok(()); } @@ -398,7 +388,7 @@ impl MediaCatalog { if self.current_archive.is_some() { bail!("can't commit catalog in the middle of an chunk archive"); } - if self.pending.len() > 1024*1024 { + if self.pending.len() > 1024 * 1024 { self.commit()?; } Ok(()) @@ -409,8 +399,7 @@ impl MediaCatalog { base_path: &Path, media_id: &MediaId, log_to_stdout: bool, - ) -> Result { - + ) -> Result { let uuid = &media_id.label.uuid; let me = Self::create_temporary_database(base_path, media_id, log_to_stdout)?; @@ -437,7 +426,7 @@ impl MediaCatalog { } /// Test if the catalog already contain a chunk - pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool { + pub fn contains_chunk(&self, store: &str, digest: &[u8; 32]) -> bool { match self.content.get(store) { None => false, Some(content) => content.chunk_index.contains_key(digest), @@ -445,7 +434,7 @@ impl MediaCatalog { } /// Returns the chunk archive file number - pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option { + pub fn lookup_chunk(&self, store: &str, digest: &[u8; 32]) -> Option { match self.content.get(store) { None => None, Some(content) => content.chunk_index.get(digest).copied(), @@ -453,9 +442,11 @@ impl MediaCatalog { } fn check_register_label(&self, file_number: u64, uuid: &Uuid) -> Result<(), Error> { - if file_number >= 2 { - bail!("register label failed: got wrong file number ({} >= 2)", file_number); + bail!( + "register label failed: got wrong file number ({} >= 2)", + file_number + ); } if file_number == 0 && uuid != &self.uuid { @@ -472,8 +463,11 @@ impl MediaCatalog { }; if file_number != expected_file_number { - bail!("register label failed: got unexpected file number ({} < {})", - file_number, expected_file_number); + bail!( + "register label failed: got unexpected file number ({} < {})", + file_number, + expected_file_number + ); } Ok(()) } @@ -485,7 +479,6 @@ impl MediaCatalog { seq_nr: u64, // onyl used for media set labels file_number: u64, ) -> Result<(), Error> { - self.check_register_label(file_number, uuid)?; if file_number == 0 && seq_nr != 0 { @@ -504,7 +497,9 @@ impl MediaCatalog { self.pending.push(b'L'); - unsafe { self.pending.write_le_value(entry)?; } + unsafe { + self.pending.write_le_value(entry)?; + } self.last_entry = Some((uuid.clone(), file_number)); @@ -530,11 +525,7 @@ impl MediaCatalog { /// Register a chunk /// /// Only valid after start_chunk_archive. 
- fn register_chunk( - &mut self, - digest: &[u8;32], - ) -> Result<(), Error> { - + fn register_chunk(&mut self, digest: &[u8; 32]) -> Result<(), Error> { let (file_number, store) = match self.current_archive { None => bail!("register_chunk failed: no archive started"), Some((_, file_number, ref store)) => (file_number, store), @@ -558,13 +549,15 @@ impl MediaCatalog { } fn check_start_chunk_archive(&self, file_number: u64) -> Result<(), Error> { - if self.current_archive.is_some() { bail!("start_chunk_archive failed: already started"); } if file_number < 2 { - bail!("start_chunk_archive failed: got wrong file number ({} < 2)", file_number); + bail!( + "start_chunk_archive failed: got wrong file number ({} < 2)", + file_number + ); } let expect_min_file_number = match self.last_entry { @@ -573,8 +566,11 @@ impl MediaCatalog { }; if file_number < expect_min_file_number { - bail!("start_chunk_archive: got unexpected file number ({} < {})", - file_number, expect_min_file_number); + bail!( + "start_chunk_archive: got unexpected file number ({} < {})", + file_number, + expect_min_file_number + ); } Ok(()) @@ -586,8 +582,7 @@ impl MediaCatalog { uuid: Uuid, // Uuid form MediaContentHeader file_number: u64, store: &str, - ) -> Result<(), Error> { - + ) -> Result<(), Error> { self.check_start_chunk_archive(file_number)?; let entry = ChunkArchiveStart { @@ -602,10 +597,14 @@ impl MediaCatalog { self.pending.push(b'A'); - unsafe { self.pending.write_le_value(entry)?; } + unsafe { + self.pending.write_le_value(entry)?; + } self.pending.extend(store.as_bytes()); - self.content.entry(store.to_string()).or_insert(DatastoreContent::new()); + self.content + .entry(store.to_string()) + .or_insert(DatastoreContent::new()); self.current_archive = Some((uuid, file_number, store.to_string())); @@ -613,7 +612,6 @@ impl MediaCatalog { } fn check_end_chunk_archive(&self, uuid: &Uuid, file_number: u64) -> Result<(), Error> { - match self.current_archive { None => bail!("end_chunk archive failed: not started"), Some((ref expected_uuid, expected_file_number, ..)) => { @@ -621,8 +619,11 @@ impl MediaCatalog { bail!("end_chunk_archive failed: got unexpected uuid"); } if file_number != expected_file_number { - bail!("end_chunk_archive failed: got unexpected file number ({} != {})", - file_number, expected_file_number); + bail!( + "end_chunk_archive failed: got unexpected file number ({} != {})", + file_number, + expected_file_number + ); } } } @@ -631,11 +632,9 @@ impl MediaCatalog { /// End a chunk archive section fn end_chunk_archive(&mut self) -> Result<(), Error> { - match self.current_archive.take() { None => bail!("end_chunk_archive failed: not started"), Some((uuid, file_number, ..)) => { - let entry = ChunkArchiveEnd { file_number, uuid: *uuid.as_bytes(), @@ -647,7 +646,9 @@ impl MediaCatalog { self.pending.push(b'E'); - unsafe { self.pending.write_le_value(entry)?; } + unsafe { + self.pending.write_le_value(entry)?; + } self.last_entry = Some((uuid, file_number)); } @@ -657,13 +658,15 @@ impl MediaCatalog { } fn check_register_snapshot(&self, file_number: u64, snapshot: &str) -> Result<(), Error> { - if self.current_archive.is_some() { bail!("register_snapshot failed: inside chunk_archive"); } if file_number < 2 { - bail!("register_snapshot failed: got wrong file number ({} < 2)", file_number); + bail!( + "register_snapshot failed: got wrong file number ({} < 2)", + file_number + ); } let expect_min_file_number = match self.last_entry { @@ -672,12 +675,19 @@ impl MediaCatalog { }; if file_number < 
expect_min_file_number { - bail!("register_snapshot failed: got unexpected file number ({} < {})", - file_number, expect_min_file_number); + bail!( + "register_snapshot failed: got unexpected file number ({} < {})", + file_number, + expect_min_file_number + ); } if let Err(err) = snapshot.parse::() { - bail!("register_snapshot failed: unable to parse snapshot '{}' - {}", snapshot, err); + bail!( + "register_snapshot failed: unable to parse snapshot '{}' - {}", + snapshot, + err + ); } Ok(()) @@ -691,7 +701,6 @@ impl MediaCatalog { store: &str, snapshot: &str, ) -> Result<(), Error> { - self.check_register_snapshot(file_number, snapshot)?; let entry = SnapshotEntry { @@ -702,20 +711,32 @@ impl MediaCatalog { }; if self.log_to_stdout { - println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, snapshot); + println!( + "S|{}|{}|{}:{}", + file_number, + uuid.to_string(), + store, + snapshot + ); } self.pending.push(b'S'); - unsafe { self.pending.write_le_value(entry)?; } + unsafe { + self.pending.write_le_value(entry)?; + } self.pending.extend(store.as_bytes()); self.pending.push(b':'); self.pending.extend(snapshot.as_bytes()); - let content = self.content.entry(store.to_string()) + let content = self + .content + .entry(store.to_string()) .or_insert(DatastoreContent::new()); - content.snapshot_index.insert(snapshot.to_string(), file_number); + content + .snapshot_index + .insert(snapshot.to_string(), file_number); self.last_entry = Some((uuid, file_number)); @@ -726,7 +747,6 @@ impl MediaCatalog { pub fn parse_catalog_header( reader: &mut R, ) -> Result<(bool, Option, Option), Error> { - // read/check magic number let mut magic = [0u8; 8]; if !reader.read_exact_or_eof(&mut magic)? { @@ -774,7 +794,6 @@ impl MediaCatalog { file: &mut File, media_set_label: Option<&MediaSetLabel>, ) -> Result<(bool, Option), Error> { - let mut file = BufReader::new(file); let mut found_magic_number = false; let mut media_set_uuid = None; @@ -782,10 +801,14 @@ impl MediaCatalog { loop { let pos = file.seek(SeekFrom::Current(0))?; // get current pos - if pos == 0 { // read/check magic number + if pos == 0 { + // read/check magic number let mut magic = [0u8; 8]; match file.read_exact_or_eof(&mut magic) { - Ok(false) => { /* EOF */ break; } + Ok(false) => { + /* EOF */ + break; + } Ok(true) => { /* OK */ } Err(err) => bail!("read failed - {}", err), } @@ -802,7 +825,10 @@ impl MediaCatalog { let mut entry_type = [0u8; 1]; match file.read_exact_or_eof(&mut entry_type) { - Ok(false) => { /* EOF */ break; } + Ok(false) => { + /* EOF */ + break; + } Ok(true) => { /* OK */ } Err(err) => bail!("read failed - {}", err), } @@ -833,11 +859,12 @@ impl MediaCatalog { self.check_start_chunk_archive(file_number)?; - self.content.entry(store.to_string()) + self.content + .entry(store.to_string()) .or_insert(DatastoreContent::new()); self.current_archive = Some((uuid, file_number, store.to_string())); - } + } b'E' => { let entry: ChunkArchiveEnd = unsafe { file.read_le_value()? 
}; let file_number = entry.file_number; @@ -867,10 +894,14 @@ impl MediaCatalog { self.check_register_snapshot(file_number, snapshot)?; - let content = self.content.entry(store.to_string()) + let content = self + .content + .entry(store.to_string()) .or_insert(DatastoreContent::new()); - content.snapshot_index.insert(snapshot.to_string(), file_number); + content + .snapshot_index + .insert(snapshot.to_string(), file_number); self.last_entry = Some((uuid, file_number)); } @@ -899,7 +930,6 @@ impl MediaCatalog { bail!("unknown entry type '{}'", entry_type[0]); } } - } Ok((found_magic_number, media_set_uuid)) @@ -909,12 +939,11 @@ impl MediaCatalog { /// Media set catalog /// /// Catalog for multiple media. -pub struct MediaSetCatalog { +pub struct MediaSetCatalog { catalog_list: HashMap, } impl MediaSetCatalog { - /// Creates a new instance pub fn new() -> Self { Self { @@ -924,7 +953,6 @@ impl MediaSetCatalog { /// Add a catalog pub fn append_catalog(&mut self, catalog: MediaCatalog) -> Result<(), Error> { - if self.catalog_list.get(&catalog.uuid).is_some() { bail!("MediaSetCatalog already contains media '{}'", catalog.uuid); } @@ -960,7 +988,7 @@ impl MediaSetCatalog { } /// Test if the catalog already contain a chunk - pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool { + pub fn contains_chunk(&self, store: &str, digest: &[u8; 32]) -> bool { for catalog in self.catalog_list.values() { if catalog.contains_chunk(store, digest) { return true; @@ -970,7 +998,7 @@ impl MediaSetCatalog { } /// Returns the media uuid and chunk archive file number - pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option<(&Uuid, u64)> { + pub fn lookup_chunk(&self, store: &str, digest: &[u8; 32]) -> Option<(&Uuid, u64)> { for (uuid, catalog) in self.catalog_list.iter() { if let Some(nr) = catalog.lookup_chunk(store, digest) { return Some((uuid, nr)); @@ -986,7 +1014,7 @@ impl MediaSetCatalog { #[repr(C)] struct LabelEntry { file_number: u64, - uuid: [u8;16], + uuid: [u8; 16], seq_nr: u64, // only used for media set labels } @@ -994,23 +1022,23 @@ struct LabelEntry { #[repr(C)] struct ChunkArchiveStart { file_number: u64, - uuid: [u8;16], + uuid: [u8; 16], store_name_len: u8, /* datastore name follows */ } #[derive(Endian)] #[repr(C)] -struct ChunkArchiveEnd{ +struct ChunkArchiveEnd { file_number: u64, - uuid: [u8;16], + uuid: [u8; 16], } #[derive(Endian)] #[repr(C)] -struct SnapshotEntry{ +struct SnapshotEntry { file_number: u64, - uuid: [u8;16], + uuid: [u8; 16], store_name_len: u8, name_len: u16, /* datastore name, ':', snapshot name follows */ diff --git a/src/tape/media_catalog_cache.rs b/src/tape/media_catalog_cache.rs index 00daa5b9..5d20101c 100644 --- a/src/tape/media_catalog_cache.rs +++ b/src/tape/media_catalog_cache.rs @@ -1,7 +1,7 @@ -use std::path::Path; use std::io::{BufRead, BufReader}; +use std::path::Path; -use anyhow::{format_err, bail, Error}; +use anyhow::{bail, format_err, Error}; use proxmox_sys::fs::CreateOptions; @@ -15,7 +15,6 @@ pub fn media_catalog_snapshot_list( base_path: &Path, media_id: &MediaId, ) -> Result, Error> { - let uuid = &media_id.label.uuid; let mut cache_path = base_path.to_owned(); @@ -29,7 +28,10 @@ pub fn media_catalog_snapshot_list( Err(err) => bail!("unable to stat media catalog {:?} - {}", catalog_path, err), }; - let cache_id = format!("{:016X}-{:016X}-{:016X}", stat.st_ino, stat.st_size as u64, stat.st_mtime as u64); + let cache_id = format!( + "{:016X}-{:016X}-{:016X}", + stat.st_ino, stat.st_size as u64, stat.st_mtime as u64 + ); 
match std::fs::OpenOptions::new().read(true).open(&cache_path) { Ok(file) => { @@ -38,7 +40,8 @@ pub fn media_catalog_snapshot_list( let mut lines = file.lines(); match lines.next() { Some(Ok(id)) => { - if id != cache_id { // cache is outdated - rewrite + if id != cache_id { + // cache is outdated - rewrite return write_snapshot_cache(base_path, media_id, &cache_path, &cache_id); } } @@ -71,8 +74,7 @@ fn write_snapshot_cache( media_id: &MediaId, cache_path: &Path, cache_id: &str, -) -> Result, Error> { - +) -> Result, Error> { // open normal catalog and write cache let catalog = MediaCatalog::open(base_path, media_id, false, false)?; @@ -98,12 +100,7 @@ fn write_snapshot_cache( .owner(backup_user.uid) .group(backup_user.gid); - proxmox_sys::fs::replace_file( - cache_path, - data.as_bytes(), - options, - false, - )?; + proxmox_sys::fs::replace_file(cache_path, data.as_bytes(), options, false)?; Ok(list) } diff --git a/src/tape/media_pool.rs b/src/tape/media_pool.rs index 6c2a21b7..c688693f 100644 --- a/src/tape/media_pool.rs +++ b/src/tape/media_pool.rs @@ -7,7 +7,7 @@ //! //! -use std::path::{PathBuf, Path}; +use std::path::{Path, PathBuf}; use anyhow::{bail, Error}; use serde::{Deserialize, Serialize}; @@ -15,28 +15,18 @@ use serde::{Deserialize, Serialize}; use proxmox_uuid::Uuid; use pbs_api_types::{ - Fingerprint, MediaStatus, MediaLocation, MediaSetPolicy, RetentionPolicy, - MediaPoolConfig, + Fingerprint, MediaLocation, MediaPoolConfig, MediaSetPolicy, MediaStatus, RetentionPolicy, }; use pbs_config::BackupLockGuard; use crate::tape::{ - MediaId, + file_formats::{MediaLabel, MediaSetLabel}, + lock_media_pool, lock_media_set, lock_unassigned_media_pool, Inventory, MediaCatalog, MediaId, MediaSet, - Inventory, - MediaCatalog, - lock_media_set, - lock_media_pool, - lock_unassigned_media_pool, - file_formats::{ - MediaLabel, - MediaSetLabel, - }, }; /// Media Pool pub struct MediaPool { - name: String, state_path: PathBuf, @@ -59,7 +49,6 @@ pub struct MediaPool { } impl MediaPool { - /// Creates a new instance /// /// If you specify a `changer_name`, only media accessible via @@ -75,8 +64,7 @@ impl MediaPool { changer_name: Option, encrypt_fingerprint: Option, no_media_set_locking: bool, // for list_media() - ) -> Result { - + ) -> Result { let _pool_lock = if no_media_set_locking { None } else { @@ -130,10 +118,17 @@ impl MediaPool { changer_name: Option, no_media_set_locking: bool, // for list_media() ) -> Result { + let allocation = config + .allocation + .clone() + .unwrap_or_else(|| String::from("continue")) + .parse()?; - let allocation = config.allocation.clone().unwrap_or_else(|| String::from("continue")).parse()?; - - let retention = config.retention.clone().unwrap_or_else(|| String::from("keep")).parse()?; + let retention = config + .retention + .clone() + .unwrap_or_else(|| String::from("keep")) + .parse()?; let encrypt_fingerprint = match config.encrypt { Some(ref fingerprint) => Some(fingerprint.parse()?), @@ -166,7 +161,6 @@ impl MediaPool { } fn compute_media_state(&self, media_id: &MediaId) -> (MediaStatus, MediaLocation) { - let (status, location) = self.inventory.status_and_location(&media_id.label.uuid); match status { @@ -183,10 +177,12 @@ impl MediaPool { Some(ref set) => set, }; - if set.pool != self.name { // should never trigger + if set.pool != self.name { + // should never trigger return (MediaStatus::Unknown, location); // belong to another pool } - if set.uuid.as_ref() == [0u8;16] { // not assigned to any pool + if set.uuid.as_ref() == [0u8; 16] { + // not 
assigned to any pool return (MediaStatus::Writable, location); } @@ -211,31 +207,28 @@ impl MediaPool { if let Some(ref set) = media_id.media_set_label { if set.pool != self.name { - bail!("media does not belong to pool ({} != {})", set.pool, self.name); + bail!( + "media does not belong to pool ({} != {})", + set.pool, + self.name + ); } } let (status, location) = self.compute_media_state(&media_id); - Ok(BackupMedia::with_media_id( - media_id, - location, - status, - )) + Ok(BackupMedia::with_media_id(media_id, location, status)) } /// List all media associated with this pool pub fn list_media(&self) -> Vec { let media_id_list = self.inventory.list_pool_media(&self.name); - media_id_list.into_iter() + media_id_list + .into_iter() .map(|media_id| { let (status, location) = self.compute_media_state(&media_id); - BackupMedia::with_media_id( - media_id, - location, - status, - ) + BackupMedia::with_media_id(media_id, location, status) }) .collect() } @@ -263,7 +256,6 @@ impl MediaPool { current_time: i64, force: bool, ) -> Result, Error> { - let _pool_lock = if self.no_media_set_locking { None } else { @@ -276,9 +268,7 @@ impl MediaPool { Some(String::from("forced")) } else { match self.current_set_usable() { - Err(err) => { - Some(err.to_string()) - } + Err(err) => Some(err.to_string()), Ok(_) => None, } }; @@ -289,10 +279,16 @@ impl MediaPool { create_new_set = Some(String::from("policy is AlwaysCreate")); } MediaSetPolicy::CreateAt(event) => { - if let Some(set_start_time) = self.inventory.media_set_start_time(self.current_media_set.uuid()) { - if let Ok(Some(alloc_time)) = event.compute_next_event(set_start_time as i64) { + if let Some(set_start_time) = self + .inventory + .media_set_start_time(self.current_media_set.uuid()) + { + if let Ok(Some(alloc_time)) = + event.compute_next_event(set_start_time as i64) + { if current_time >= alloc_time { - create_new_set = Some(String::from("policy CreateAt event triggered")); + create_new_set = + Some(String::from("policy CreateAt event triggered")); } } } @@ -335,8 +331,9 @@ impl MediaPool { return false; } - let expire_time = self.inventory.media_expire_time( - media.id(), &self.media_set_policy, &self.retention); + let expire_time = + self.inventory + .media_expire_time(media.id(), &self.media_set_policy, &self.retention); current_time >= expire_time } @@ -368,8 +365,11 @@ impl MediaPool { } } - fn add_media_to_current_set(&mut self, mut media_id: MediaId, current_time: i64) -> Result<(), Error> { - + fn add_media_to_current_set( + &mut self, + mut media_id: MediaId, + current_time: i64, + ) -> Result<(), Error> { if self.current_media_set_lock.is_none() { bail!("add_media_to_current_set: media set is not locked - internal error"); } @@ -406,16 +406,19 @@ impl MediaPool { let mut free_media = Vec::new(); for media_id in media_list { - let (status, location) = self.compute_media_state(media_id); - if media_id.media_set_label.is_some() { continue; } // should not happen + if media_id.media_set_label.is_some() { + continue; + } // should not happen if !self.location_is_available(&location) { continue; } // only consider writable media - if status != MediaStatus::Writable { continue; } + if status != MediaStatus::Writable { + continue; + } free_media.push(media_id); } @@ -462,7 +465,11 @@ impl MediaPool { } // Get next expired media - pub fn next_expired_media(&self, current_time: i64, media_list: &[BackupMedia]) -> Option { + pub fn next_expired_media( + &self, + current_time: i64, + media_list: &[BackupMedia], + ) -> Option { let mut 
expired_media = Vec::new(); for media in media_list.into_iter() { @@ -487,7 +494,11 @@ impl MediaPool { // sort expired_media, newest first -> oldest last expired_media.sort_unstable_by(|a, b| { - let mut res = b.media_set_label().unwrap().ctime.cmp(&a.media_set_label().unwrap().ctime); + let mut res = b + .media_set_label() + .unwrap() + .ctime + .cmp(&a.media_set_label().unwrap().ctime); if res == std::cmp::Ordering::Equal { res = b.label().label_text.cmp(&a.label().label_text); } @@ -541,13 +552,15 @@ impl MediaPool { return Ok(media_id); } - bail!("guess_next_writable_media in pool '{}' failed: no usable media found", self.name()); + bail!( + "guess_next_writable_media in pool '{}' failed: no usable media found", + self.name() + ); } /// Allocates a writable media to the current media set // Note: Please keep in sync with guess_next_writable_media() pub fn alloc_writable_media(&mut self, current_time: i64) -> Result { - if self.current_media_set_lock.is_none() { bail!("alloc_writable_media: media set is not locked - internal error"); } @@ -560,7 +573,8 @@ impl MediaPool { return Ok(media.uuid().clone()); } - { // limit pool lock scope + { + // limit pool lock scope let _pool_lock = lock_media_pool(&self.state_path, &self.name)?; self.inventory.reload()?; @@ -604,7 +618,10 @@ impl MediaPool { return Ok(uuid); } - bail!("alloc writable media in pool '{}' failed: no usable media found", self.name()); + bail!( + "alloc writable media in pool '{}' failed: no usable media found", + self.name() + ); } /// check if the current media set is usable for writing @@ -615,7 +632,6 @@ impl MediaPool { /// This return error when the media set must not be used any /// longer because of consistency errors. pub fn current_set_usable(&self) -> Result { - let media_list = self.current_media_set.media_list(); let media_count = media_list.len(); @@ -623,7 +639,7 @@ impl MediaPool { return Ok(false); } - let set_uuid = self.current_media_set.uuid(); + let set_uuid = self.current_media_set.uuid(); let mut last_is_writable = false; let mut last_enc: Option> = None; @@ -635,15 +651,20 @@ impl MediaPool { }; let media = self.lookup_media(uuid)?; match media.media_set_label() { - Some(MediaSetLabel { seq_nr, uuid, ..}) if *seq_nr == seq as u64 && uuid == set_uuid => { /* OK */ }, - Some(MediaSetLabel { seq_nr, uuid, ..}) if uuid == set_uuid => { + Some(MediaSetLabel { seq_nr, uuid, .. }) + if *seq_nr == seq as u64 && uuid == set_uuid => + { /* OK */ } + Some(MediaSetLabel { seq_nr, uuid, .. }) if uuid == set_uuid => { bail!("media sequence error ({} != {})", *seq_nr, seq); - }, - Some(MediaSetLabel { uuid, ..}) => bail!("media owner error ({} != {}", uuid, set_uuid), + } + Some(MediaSetLabel { uuid, .. 
}) => { + bail!("media owner error ({} != {})", uuid, set_uuid) + } None => bail!("media owner error (no owner)"), } - if let Some(set) = media.media_set_label() { // always true here + if let Some(set) = media.media_set_label() { + // always true here if set.encryption_key_fingerprint != self.encrypt_fingerprint { bail!("pool encryption key changed"); } @@ -660,8 +681,8 @@ } match media.status() { - MediaStatus::Full => { /* OK */ }, - MediaStatus::Writable if (seq + 1) == media_count => { + MediaStatus::Full => { /* OK */ } + MediaStatus::Writable if (seq + 1) == media_count => { let media_location = media.location(); if self.location_is_available(media_location) { last_is_writable = true; @@ -670,8 +691,11 @@ bail!("writable media offsite in vault '{}'", vault); } } - }, - _ => bail!("unable to use media set - wrong media status {:?}", media.status()), + } + _ => bail!( + "unable to use media set - wrong media status {:?}", + media.status() + ), } } @@ -684,16 +708,16 @@ media_set_uuid: &Uuid, template: Option<String>, ) -> Result<String, Error> { - self.inventory.generate_media_set_name(media_set_uuid, template) + self.inventory + .generate_media_set_name(media_set_uuid, template) } - } /// Backup media /// /// Combines 'MediaId' with 'MediaLocation' and 'MediaStatus' /// information. -#[derive(Debug,Serialize,Deserialize,Clone)] +#[derive(Debug, Serialize, Deserialize, Clone)] pub struct BackupMedia { /// Media ID id: MediaId, @@ -704,14 +728,13 @@ } impl BackupMedia { - /// Creates a new instance - pub fn with_media_id( - id: MediaId, - location: MediaLocation, - status: MediaStatus, - ) -> Self { - Self { id, location, status } + pub fn with_media_id(id: MediaId, location: MediaLocation, status: MediaStatus) -> Self { + Self { + id, + location, + status, + } } /// Returns the media location diff --git a/src/tape/media_set.rs b/src/tape/media_set.rs index 2c766339..ae349d54 100644 --- a/src/tape/media_set.rs +++ b/src/tape/media_set.rs @@ -1,5 +1,5 @@ use anyhow::{bail, Error}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use proxmox_uuid::Uuid; @@ -13,7 +13,6 @@ pub struct MediaSet { } impl MediaSet { - pub const MEDIA_SET_MAX_SEQ_NR: u64 = 100; pub fn new() -> Self { @@ -42,14 +41,21 @@ impl MediaSet { pub fn insert_media(&mut self, uuid: Uuid, seq_nr: u64) -> Result<(), Error> { if seq_nr > Self::MEDIA_SET_MAX_SEQ_NR { - bail!("media set sequence number to large in media set {} ({} > {})", - self.uuid.to_string(), seq_nr, Self::MEDIA_SET_MAX_SEQ_NR); + bail!( + "media set sequence number too large in media set {} ({} > {})", + self.uuid.to_string(), + seq_nr, + Self::MEDIA_SET_MAX_SEQ_NR + ); } let seq_nr = seq_nr as usize; if self.media_list.len() > seq_nr { if self.media_list[seq_nr].is_some() { - bail!("found duplicate sequence number in media set '{}/{}'", - self.uuid.to_string(), seq_nr); + bail!( + "found duplicate sequence number in media set '{}/{}'", + self.uuid.to_string(), + seq_nr + ); } } else { self.media_list.resize(seq_nr + 1, None); diff --git a/src/tape/mod.rs b/src/tape/mod.rs index 291d0b42..0ea0fdc1 100644 --- a/src/tape/mod.rs +++ b/src/tape/mod.rs @@ -2,10 +2,7 @@ use anyhow::{format_err, Error}; -use proxmox_sys::fs::{ - create_path, - CreateOptions, -}; +use proxmox_sys::fs::{create_path, CreateOptions}; use pbs_buildcfg::{PROXMOX_BACKUP_RUN_DIR_M, PROXMOX_BACKUP_STATE_DIR_M}; @@ -51,11 +48,10 @@ pub const CHANGER_STATE_DIR: &str = concat!(PROXMOX_BACKUP_RUN_DIR_M!(), "/chang
/// We limit chunk archive size, so that we can faster restore a /// specific chunk (The catalog only store file numbers, so we /// need to read the whole archive to restore a single chunk) -pub const MAX_CHUNK_ARCHIVE_SIZE: usize = 4*1024*1024*1024; // 4GB for now +pub const MAX_CHUNK_ARCHIVE_SIZE: usize = 4 * 1024 * 1024 * 1024; // 4GB for now /// To improve performance, we need to avoid tape drive buffer flush. -pub const COMMIT_BLOCK_SIZE: usize = 128*1024*1024*1024; // 128 GiB - +pub const COMMIT_BLOCK_SIZE: usize = 128 * 1024 * 1024 * 1024; // 128 GiB /// Create tape status dir with correct permission pub fn create_tape_status_dir() -> Result<(), Error> { diff --git a/src/tape/pool_writer/catalog_set.rs b/src/tape/pool_writer/catalog_set.rs index ca488835..76dc5f7b 100644 --- a/src/tape/pool_writer/catalog_set.rs +++ b/src/tape/pool_writer/catalog_set.rs @@ -2,12 +2,7 @@ use anyhow::{bail, Error}; use proxmox_uuid::Uuid; -use crate::{ - tape::{ - MediaCatalog, - MediaSetCatalog, - }, -}; +use crate::tape::{MediaCatalog, MediaSetCatalog}; /// Helper to build and query sets of catalogs /// @@ -20,7 +15,6 @@ pub struct CatalogSet { } impl CatalogSet { - /// Create empty instance pub fn new() -> Self { Self { @@ -45,7 +39,7 @@ impl CatalogSet { } /// Test if the catalog already contains a chunk - pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool { + pub fn contains_chunk(&self, store: &str, digest: &[u8; 32]) -> bool { if let Some(ref catalog) = self.catalog { if catalog.contains_chunk(store, digest) { return true; @@ -56,7 +50,6 @@ impl CatalogSet { /// Add a new catalog, move the old on to the read-only set pub fn append_catalog(&mut self, new_catalog: MediaCatalog) -> Result<(), Error> { - // append current catalog to read-only set if let Some(catalog) = self.catalog.take() { self.media_set_catalog.append_catalog(catalog)?; @@ -77,7 +70,7 @@ impl CatalogSet { file_number: u64, store: &str, snapshot: &str, - ) -> Result<(), Error> { + ) -> Result<(), Error> { match self.catalog { Some(ref mut catalog) => { catalog.register_snapshot(uuid, file_number, store, snapshot)?; diff --git a/src/tape/pool_writer/mod.rs b/src/tape/pool_writer/mod.rs index 29860b53..3ec8a9ba 100644 --- a/src/tape/pool_writer/mod.rs +++ b/src/tape/pool_writer/mod.rs @@ -4,47 +4,29 @@ pub use catalog_set::*; mod new_chunks_iterator; pub use new_chunks_iterator::*; -use std::path::Path; use std::fs::File; -use std::time::SystemTime; +use std::path::Path; use std::sync::{Arc, Mutex}; +use std::time::SystemTime; use anyhow::{bail, Error}; -use proxmox_uuid::Uuid; use proxmox_sys::{task_log, task_warn}; +use proxmox_uuid::Uuid; use pbs_config::tape_encryption_keys::load_key_configs; -use pbs_tape::{ - TapeWrite, - sg_tape::tape_alert_flags_critical, -}; use pbs_datastore::{DataStore, SnapshotReader}; +use pbs_tape::{sg_tape::tape_alert_flags_critical, TapeWrite}; use proxmox_rest_server::WorkerTask; -use crate::{ - tape::{ - TAPE_STATUS_DIR, - MAX_CHUNK_ARCHIVE_SIZE, - COMMIT_BLOCK_SIZE, - MediaPool, - MediaId, - MediaCatalog, - file_formats::{ - MediaSetLabel, - ChunkArchiveWriter, - tape_write_snapshot_archive, - tape_write_catalog, - }, - drive::{ - TapeDriver, - request_and_load_media, - media_changer, - }, +use crate::tape::{ + drive::{media_changer, request_and_load_media, TapeDriver}, + file_formats::{ + tape_write_catalog, tape_write_snapshot_archive, ChunkArchiveWriter, MediaSetLabel, }, + MediaCatalog, MediaId, MediaPool, COMMIT_BLOCK_SIZE, MAX_CHUNK_ARCHIVE_SIZE, TAPE_STATUS_DIR, }; - struct 
PoolWriterState { drive: Box, // Media Uuid from loaded media @@ -65,7 +47,6 @@ pub struct PoolWriter { } impl PoolWriter { - pub fn new( mut pool: MediaPool, drive_name: &str, @@ -73,16 +54,11 @@ impl PoolWriter { notify_email: Option, force_media_set: bool, ) -> Result { - let current_time = proxmox_time::epoch_i64(); let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?; if let Some(reason) = new_media_set_reason { - task_log!( - worker, - "starting new media set - reason: {}", - reason, - ); + task_log!(worker, "starting new media set - reason: {}", reason,); } let media_set_uuid = pool.current_media_set().uuid(); @@ -93,12 +69,8 @@ impl PoolWriter { // load all catalogs read-only at start for media_uuid in pool.current_media_list()? { let media_info = pool.lookup_media(media_uuid).unwrap(); - let media_catalog = MediaCatalog::open( - Path::new(TAPE_STATUS_DIR), - media_info.id(), - false, - false, - )?; + let media_catalog = + MediaCatalog::open(Path::new(TAPE_STATUS_DIR), media_info.id(), false, false)?; catalog_set.append_read_only_catalog(media_catalog)?; } @@ -108,7 +80,7 @@ impl PoolWriter { status: None, catalog_set: Arc::new(Mutex::new(catalog_set)), notify_email, - }) + }) } pub fn pool(&mut self) -> &mut MediaPool { @@ -122,7 +94,10 @@ impl PoolWriter { } pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool { - self.catalog_set.lock().unwrap().contains_snapshot(store, snapshot) + self.catalog_set + .lock() + .unwrap() + .contains_snapshot(store, snapshot) } /// Eject media and drop PoolWriterState (close drive) @@ -155,7 +130,6 @@ impl PoolWriter { let (drive_config, _digest) = pbs_config::drive::config()?; if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? { - if let Some(ref mut status) = status { task_log!(worker, "rewind media"); // rewind first so that the unload command later does not run into a timeout @@ -167,14 +141,25 @@ impl PoolWriter { let media = self.pool.lookup_media(media_uuid)?; let label_text = media.label_text(); if let Some(slot) = changer.export_media(label_text)? { - task_log!(worker, "exported media '{}' to import/export slot {}", label_text, slot); + task_log!( + worker, + "exported media '{}' to import/export slot {}", + label_text, + slot + ); } else { - task_warn!(worker, "export failed - media '{}' is not online or in different drive", label_text); + task_warn!( + worker, + "export failed - media '{}' is not online or in different drive", + label_text + ); } } - } else if let Some(mut status) = status { - task_log!(worker, "standalone drive - ejecting media instead of export"); + task_log!( + worker, + "standalone drive - ejecting media instead of export" + ); status.drive.eject_media()?; } @@ -186,7 +171,7 @@ impl PoolWriter { /// This is done automatically during a backupsession, but needs to /// be called explicitly before dropping the PoolWriter pub fn commit(&mut self) -> Result<(), Error> { - if let Some(PoolWriterState {ref mut drive, .. }) = self.status { + if let Some(PoolWriterState { ref mut drive, .. }) = self.status { drive.sync()?; // sync all data to the tape } self.catalog_set.lock().unwrap().commit()?; // then commit the catalog @@ -196,7 +181,7 @@ impl PoolWriter { /// Load a writable media into the drive pub fn load_writable_media(&mut self, worker: &WorkerTask) -> Result { let last_media_uuid = match self.status { - Some(PoolWriterState { ref media_uuid, ..}) => Some(media_uuid.clone()), + Some(PoolWriterState { ref media_uuid, .. 
}) => Some(media_uuid.clone()), None => None, }; @@ -214,9 +199,13 @@ impl PoolWriter { return Ok(media_uuid); } - task_log!(worker, "allocated new writable media '{}'", media.label_text()); + task_log!( + worker, + "allocated new writable media '{}'", + media.label_text() + ); - if let Some(PoolWriterState {mut drive, .. }) = self.status.take() { + if let Some(PoolWriterState { mut drive, .. }) = self.status.take() { if last_media_uuid.is_some() { task_log!(worker, "eject current media"); drive.eject_media()?; @@ -225,8 +214,13 @@ impl PoolWriter { let (drive_config, _digest) = pbs_config::drive::config()?; - let (mut drive, old_media_id) = - request_and_load_media(worker, &drive_config, &self.drive_name, media.label(), &self.notify_email)?; + let (mut drive, old_media_id) = request_and_load_media( + worker, + &drive_config, + &self.drive_name, + media.label(), + &self.notify_email, + )?; // test for critical tape alert flags if let Ok(alert_flags) = drive.tape_alert_flags() { @@ -234,7 +228,10 @@ impl PoolWriter { task_log!(worker, "TapeAlertFlags: {:?}", alert_flags); if tape_alert_flags_critical(alert_flags) { self.pool.set_media_status_damaged(&media_uuid)?; - bail!("aborting due to critical tape alert flags: {:?}", alert_flags); + bail!( + "aborting due to critical tape alert flags: {:?}", + alert_flags + ); } } } @@ -273,15 +270,12 @@ impl PoolWriter { } fn open_catalog_file(uuid: &Uuid) -> Result { - let status_path = Path::new(TAPE_STATUS_DIR); let mut path = status_path.to_owned(); path.push(uuid.to_string()); path.set_extension("log"); - let file = std::fs::OpenOptions::new() - .read(true) - .open(&path)?; + let file = std::fs::OpenOptions::new().read(true).open(&path)?; Ok(file) } @@ -289,11 +283,7 @@ impl PoolWriter { // Check it tape is loaded, then move to EOM (if not already there) // // Returns the tape position at EOM. - fn prepare_tape_write( - status: &mut PoolWriterState, - worker: &WorkerTask, - ) -> Result { - + fn prepare_tape_write(status: &mut PoolWriterState, worker: &WorkerTask) -> Result { if !status.at_eom { task_log!(worker, "moving to end of media"); status.drive.move_to_eom(true)?; @@ -302,7 +292,10 @@ impl PoolWriter { let current_file_number = status.drive.current_file_number()?; if current_file_number < 2 { - bail!("got strange file position number from drive ({})", current_file_number); + bail!( + "got strange file position number from drive ({})", + current_file_number + ); } Ok(current_file_number) @@ -315,11 +308,7 @@ impl PoolWriter { /// on the media (return value 'Ok(false, _)'). In that case, the /// archive is marked incomplete. The caller should mark the media /// as full and try again using another media. - pub fn append_catalog_archive( - &mut self, - worker: &WorkerTask, - ) -> Result { - + pub fn append_catalog_archive(&mut self, worker: &WorkerTask) -> Result { let status = match self.status { Some(ref mut status) => status, None => bail!("PoolWriter - no media loaded"), @@ -354,30 +343,21 @@ impl PoolWriter { let mut file = Self::open_catalog_file(uuid)?; - let done = tape_write_catalog( - writer.as_mut(), - uuid, - media_set.uuid(), - seq_nr, - &mut file, - )?.is_some(); + let done = tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)? 
+ .is_some(); Ok(done) } // Append catalogs for all previous media in set (without last) - fn append_media_set_catalogs( - &mut self, - worker: &WorkerTask, - ) -> Result<(), Error> { - + fn append_media_set_catalogs(&mut self, worker: &WorkerTask) -> Result<(), Error> { let media_set = self.pool.current_media_set(); let mut media_list = &media_set.media_list()[..]; if media_list.len() < 2 { return Ok(()); } - media_list = &media_list[..(media_list.len()-1)]; + media_list = &media_list[..(media_list.len() - 1)]; let status = match self.status { Some(ref mut status) => status, @@ -387,7 +367,6 @@ impl PoolWriter { Self::prepare_tape_write(status, worker)?; for (seq_nr, uuid) in media_list.iter().enumerate() { - let uuid = match uuid { None => bail!("got incomplete media list - internal error"), Some(uuid) => uuid, @@ -399,13 +378,9 @@ impl PoolWriter { task_log!(worker, "write catalog for previous media: {}", uuid); - if tape_write_catalog( - writer.as_mut(), - uuid, - media_set.uuid(), - seq_nr, - &mut file, - )?.is_none() { + if tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)? + .is_none() + { bail!("got EOM while writing start catalog"); } } @@ -428,7 +403,6 @@ impl PoolWriter { worker: &WorkerTask, snapshot_reader: &SnapshotReader, ) -> Result<(bool, usize), Error> { - let status = match self.status { Some(ref mut status) => status, None => bail!("PoolWriter - no media loaded"), @@ -474,7 +448,6 @@ impl PoolWriter { chunk_iter: &mut std::iter::Peekable, store: &str, ) -> Result<(bool, usize), Error> { - let status = match self.status { Some(ref mut status) => status, None => bail!("PoolWriter - no media loaded"), @@ -486,30 +459,29 @@ impl PoolWriter { let start_time = SystemTime::now(); - let (saved_chunks, content_uuid, leom, bytes_written) = write_chunk_archive( - worker, - writer, - chunk_iter, - store, - MAX_CHUNK_ARCHIVE_SIZE, - )?; + let (saved_chunks, content_uuid, leom, bytes_written) = + write_chunk_archive(worker, writer, chunk_iter, store, MAX_CHUNK_ARCHIVE_SIZE)?; status.bytes_written += bytes_written; - let elapsed = start_time.elapsed()?.as_secs_f64(); + let elapsed = start_time.elapsed()?.as_secs_f64(); task_log!( worker, "wrote {} chunks ({:.2} MB at {:.2} MB/s)", saved_chunks.len(), - bytes_written as f64 /1_000_000.0, - (bytes_written as f64)/(1_000_000.0*elapsed), + bytes_written as f64 / 1_000_000.0, + (bytes_written as f64) / (1_000_000.0 * elapsed), ); let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE; // register chunks in media_catalog - self.catalog_set.lock().unwrap() - .register_chunk_archive(content_uuid, current_file_number, store, &saved_chunks)?; + self.catalog_set.lock().unwrap().register_chunk_archive( + content_uuid, + current_file_number, + store, + &saved_chunks, + )?; if leom || request_sync { self.commit()?; @@ -523,11 +495,7 @@ impl PoolWriter { datastore: Arc, snapshot_reader: Arc>, ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> { - NewChunksIterator::spawn( - datastore, - snapshot_reader, - Arc::clone(&self.catalog_set), - ) + NewChunksIterator::spawn(datastore, snapshot_reader, Arc::clone(&self.catalog_set)) } } @@ -538,12 +506,11 @@ fn write_chunk_archive<'a>( chunk_iter: &mut std::iter::Peekable, store: &str, max_size: usize, -) -> Result<(Vec<[u8;32]>, Uuid, bool, usize), Error> { - +) -> Result<(Vec<[u8; 32]>, Uuid, bool, usize), Error> { let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, store, true)?; // we want to get the chunk list in correct order - let mut 
chunk_list: Vec<[u8;32]> = Vec::new(); + let mut chunk_list: Vec<[u8; 32]> = Vec::new(); let mut leom = false; @@ -589,7 +556,6 @@ fn update_media_set_label( old_set: Option<MediaSetLabel>, media_id: &MediaId, ) -> Result<(MediaCatalog, bool), Error> { - let media_catalog; let new_set = match media_id.media_set_label { @@ -602,7 +568,10 @@ match config_map.get(fingerprint) { Some(key_config) => Some(key_config.clone()), None => { - bail!("unable to find tape encryption key config '{}'", fingerprint); + bail!( + "unable to find tape encryption key config '{}'", + fingerprint + ); } } } else { @@ -621,10 +590,14 @@ Some(media_set_label) => { if new_set.uuid == media_set_label.uuid { if new_set.seq_nr != media_set_label.seq_nr { - bail!("got media with wrong media sequence number ({} != {}", - new_set.seq_nr,media_set_label.seq_nr); + bail!( + "got media with wrong media sequence number ({} != {})", + new_set.seq_nr, + media_set_label.seq_nr + ); } - if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint { + if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint + { bail!("detected changed encryption fingerprint - internal error"); } media_catalog = MediaCatalog::open(status_path, media_id, true, false)?; diff --git a/src/tape/pool_writer/new_chunks_iterator.rs b/src/tape/pool_writer/new_chunks_iterator.rs index 24df45d6..03b0fb37 100644 --- a/src/tape/pool_writer/new_chunks_iterator.rs +++ b/src/tape/pool_writer/new_chunks_iterator.rs @@ -3,7 +3,7 @@ use std::sync::{Arc, Mutex}; use anyhow::{format_err, Error}; -use pbs_datastore::{DataStore, DataBlob, SnapshotReader}; +use pbs_datastore::{DataBlob, DataStore, SnapshotReader}; use crate::tape::CatalogSet; @@ -16,7 +16,6 @@ pub struct NewChunksIterator { } impl NewChunksIterator { - /// Creates the iterator, spawning a new thread /// /// Make sure to join() the returnd thread handle.
@@ -25,19 +24,16 @@ impl NewChunksIterator { snapshot_reader: Arc>, catalog_set: Arc>, ) -> Result<(std::thread::JoinHandle<()>, Self), Error> { - let (tx, rx) = std::sync::mpsc::sync_channel(3); let reader_thread = std::thread::spawn(move || { - let snapshot_reader = snapshot_reader.lock().unwrap(); - let mut chunk_index: HashSet<[u8;32]> = HashSet::new(); + let mut chunk_index: HashSet<[u8; 32]> = HashSet::new(); let datastore_name = snapshot_reader.datastore_name().to_string(); let result: Result<(), Error> = proxmox_lang::try_block!({ - let mut chunk_iter = snapshot_reader.chunk_iterator(move |digest| { catalog_set .lock() @@ -61,7 +57,7 @@ impl NewChunksIterator { let blob = datastore.load_chunk(&digest)?; //println!("LOAD CHUNK {}", hex::encode(&digest)); match tx.send(Ok(Some((digest, blob)))) { - Ok(()) => {}, + Ok(()) => {} Err(err) => { eprintln!("could not send chunk to reader thread: {}", err); break; diff --git a/src/tape/test/alloc_writable_media.rs b/src/tape/test/alloc_writable_media.rs index 08de612f..d6e87beb 100644 --- a/src/tape/test/alloc_writable_media.rs +++ b/src/tape/test/alloc_writable_media.rs @@ -2,10 +2,10 @@ // // # cargo test --release tape::test::alloc_writable_media -use std::path::PathBuf; use anyhow::Error; +use std::path::PathBuf; -use pbs_api_types::{RetentionPolicy, MediaSetPolicy}; +use pbs_api_types::{MediaSetPolicy, RetentionPolicy}; use crate::tape::{Inventory, MediaPool}; @@ -22,7 +22,6 @@ fn create_testdir(name: &str) -> Result { #[test] fn test_alloc_writable_media_1() -> Result<(), Error> { - let testdir = create_testdir("test_alloc_writable_media_1")?; let mut ctime = 0; @@ -49,7 +48,6 @@ fn test_alloc_writable_media_1() -> Result<(), Error> { #[test] fn test_alloc_writable_media_2() -> Result<(), Error> { - let testdir = create_testdir("test_alloc_writable_media_2")?; let mut inventory = Inventory::load(&testdir)?; @@ -87,7 +85,6 @@ fn test_alloc_writable_media_2() -> Result<(), Error> { #[test] fn test_alloc_writable_media_3() -> Result<(), Error> { - let testdir = create_testdir("test_alloc_writable_media_3")?; let mut inventory = Inventory::load(&testdir)?; @@ -136,7 +133,6 @@ fn test_alloc_writable_media_3() -> Result<(), Error> { #[test] fn test_alloc_writable_media_4() -> Result<(), Error> { - let testdir = create_testdir("test_alloc_writable_media_4")?; let mut inventory = Inventory::load(&testdir)?; diff --git a/src/tape/test/compute_media_state.rs b/src/tape/test/compute_media_state.rs index 78e32190..4844202f 100644 --- a/src/tape/test/compute_media_state.rs +++ b/src/tape/test/compute_media_state.rs @@ -2,20 +2,14 @@ // // # cargo test --release tape::test::compute_media_state -use std::path::PathBuf; use anyhow::Error; +use std::path::PathBuf; use proxmox_uuid::Uuid; -use pbs_api_types::{MediaStatus, MediaSetPolicy, RetentionPolicy}; +use pbs_api_types::{MediaSetPolicy, MediaStatus, RetentionPolicy}; -use crate::tape::{ - Inventory, - MediaPool, - file_formats::{ - MediaSetLabel, - }, -}; +use crate::tape::{file_formats::MediaSetLabel, Inventory, MediaPool}; fn create_testdir(name: &str) -> Result { let mut testdir: PathBuf = String::from("./target/testout").into(); @@ -30,7 +24,6 @@ fn create_testdir(name: &str) -> Result { #[test] fn test_compute_media_state() -> Result<(), Error> { - let testdir = create_testdir("test_compute_media_state")?; let ctime = 0; @@ -55,18 +48,21 @@ fn test_compute_media_state() -> Result<(), Error> { let tape4_uuid = inventory.generate_used_tape("tape4", sl4, 0); let tape5_uuid = 
inventory.generate_used_tape("tape5", sl5, 0); - let pool = MediaPool::new( + let pool = MediaPool::new( "p1", - &testdir , - MediaSetPolicy::AlwaysCreate, - RetentionPolicy::KeepForever, - None, - None, - false, + &testdir, + MediaSetPolicy::AlwaysCreate, + RetentionPolicy::KeepForever, + None, + None, + false, )?; // tape1 is free - assert_eq!(pool.lookup_media(&tape1_uuid)?.status(), &MediaStatus::Writable); + assert_eq!( + pool.lookup_media(&tape1_uuid)?.status(), + &MediaStatus::Writable + ); // intermediate tapes should be Full assert_eq!(pool.lookup_media(&tape2_uuid)?.status(), &MediaStatus::Full); @@ -74,14 +70,16 @@ fn test_compute_media_state() -> Result<(), Error> { assert_eq!(pool.lookup_media(&tape4_uuid)?.status(), &MediaStatus::Full); // last tape is writable - assert_eq!(pool.lookup_media(&tape5_uuid)?.status(), &MediaStatus::Writable); + assert_eq!( + pool.lookup_media(&tape5_uuid)?.status(), + &MediaStatus::Writable + ); Ok(()) } #[test] fn test_media_expire_time() -> Result<(), Error> { - let testdir = create_testdir("test_media_expire_time")?; let ctime = 0; @@ -97,7 +95,7 @@ fn test_media_expire_time() -> Result<(), Error> { let tape1_uuid = inventory.generate_used_tape("tape1", sl1, 0); // tape2: single tape media set - let sl2= MediaSetLabel::with_data("p1", Uuid::generate(), 0, ctime + 120, None); + let sl2 = MediaSetLabel::with_data("p1", Uuid::generate(), 0, ctime + 120, None); let tape2_uuid = inventory.generate_used_tape("tape2", sl2, 0); let event = "*:0/2".parse()?; @@ -105,7 +103,7 @@ fn test_media_expire_time() -> Result<(), Error> { let pool = MediaPool::new( "p1", - &testdir , + &testdir, MediaSetPolicy::CreateAt(event), RetentionPolicy::ProtectFor(span), None, @@ -115,19 +113,52 @@ fn test_media_expire_time() -> Result<(), Error> { assert_eq!(pool.lookup_media(&tape0_uuid)?.status(), &MediaStatus::Full); assert_eq!(pool.lookup_media(&tape1_uuid)?.status(), &MediaStatus::Full); - assert_eq!(pool.lookup_media(&tape2_uuid)?.status(), &MediaStatus::Writable); + assert_eq!( + pool.lookup_media(&tape2_uuid)?.status(), + &MediaStatus::Writable + ); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 0), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 60), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 120), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 180), true); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 0), + false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 60), + false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 120), + false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 180), + true + ); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 0), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 60), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 120), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 180), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 190), false); - assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 240), true); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 0), + false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 60), + false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 120), + 
false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 180), + false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 190), + false + ); + assert_eq!( + pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 240), + true + ); Ok(()) } diff --git a/src/tape/test/current_set_usable.rs b/src/tape/test/current_set_usable.rs index 822ef77e..9a8683a0 100644 --- a/src/tape/test/current_set_usable.rs +++ b/src/tape/test/current_set_usable.rs @@ -2,22 +2,14 @@ // // # cargo test --release tape::test::current_set_usable -use std::path::PathBuf; use anyhow::Error; +use std::path::PathBuf; use proxmox_uuid::Uuid; -use pbs_api_types::{RetentionPolicy, MediaSetPolicy}; +use pbs_api_types::{MediaSetPolicy, RetentionPolicy}; -use crate::{ - tape::{ - Inventory, - MediaPool, - file_formats::{ - MediaSetLabel, - }, - }, -}; +use crate::tape::{file_formats::MediaSetLabel, Inventory, MediaPool}; fn create_testdir(name: &str) -> Result { let mut testdir: PathBuf = String::from("./target/testout").into(); @@ -32,7 +24,6 @@ fn create_testdir(name: &str) -> Result { #[test] fn test_current_set_usable_1() -> Result<(), Error> { - let testdir = create_testdir("test_current_set_usable_1")?; // pool without any media @@ -54,7 +45,6 @@ fn test_current_set_usable_1() -> Result<(), Error> { #[test] fn test_current_set_usable_2() -> Result<(), Error> { - let testdir = create_testdir("test_current_set_usable_2")?; let ctime = 0; @@ -81,7 +71,6 @@ fn test_current_set_usable_2() -> Result<(), Error> { #[test] fn test_current_set_usable_3() -> Result<(), Error> { - let testdir = create_testdir("test_current_set_usable_3")?; let ctime = 0; @@ -110,7 +99,6 @@ fn test_current_set_usable_3() -> Result<(), Error> { #[test] fn test_current_set_usable_4() -> Result<(), Error> { - let testdir = create_testdir("test_current_set_usable_4")?; let ctime = 0; @@ -139,7 +127,6 @@ fn test_current_set_usable_4() -> Result<(), Error> { #[test] fn test_current_set_usable_5() -> Result<(), Error> { - let testdir = create_testdir("test_current_set_usable_5")?; let ctime = 0; @@ -170,7 +157,6 @@ fn test_current_set_usable_5() -> Result<(), Error> { #[test] fn test_current_set_usable_6() -> Result<(), Error> { - let testdir = create_testdir("test_current_set_usable_6")?; let ctime = 0; @@ -199,7 +185,6 @@ fn test_current_set_usable_6() -> Result<(), Error> { #[test] fn test_current_set_usable_7() -> Result<(), Error> { - let testdir = create_testdir("test_current_set_usable_7")?; let ctime = 0; @@ -215,7 +200,6 @@ fn test_current_set_usable_7() -> Result<(), Error> { inventory.generate_used_tape("tape2", sl2, ctime); - // pool with one two media in current set, one set to damaged let pool = MediaPool::new( "p1", diff --git a/src/tape/test/inventory.rs b/src/tape/test/inventory.rs index 1bc25226..2393e1a1 100644 --- a/src/tape/test/inventory.rs +++ b/src/tape/test/inventory.rs @@ -2,21 +2,14 @@ // // # cargo test --release tape::test::inventory -use std::path::PathBuf; use anyhow::{bail, Error}; +use std::path::PathBuf; use proxmox_uuid::Uuid; use pbs_api_types::{MediaLocation, MediaStatus}; -use crate::{ - tape::{ - Inventory, - file_formats::{ - MediaSetLabel, - }, - }, -}; +use crate::tape::{file_formats::MediaSetLabel, Inventory}; fn create_testdir(name: &str) -> Result { let mut testdir: PathBuf = String::from("./target/testout").into(); @@ -31,38 +24,56 @@ fn create_testdir(name: &str) -> Result { #[test] fn test_media_state_db() -> Result<(), Error> { - let testdir = 
create_testdir("test_media_state_db")?; let mut inventory = Inventory::load(&testdir)?; let uuid1: Uuid = inventory.generate_free_tape("tape1", 0); - assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Unknown, MediaLocation::Offline)); + assert_eq!( + inventory.status_and_location(&uuid1), + (MediaStatus::Unknown, MediaLocation::Offline) + ); inventory.set_media_status_full(&uuid1)?; - assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Full, MediaLocation::Offline)); + assert_eq!( + inventory.status_and_location(&uuid1), + (MediaStatus::Full, MediaLocation::Offline) + ); inventory.set_media_location_vault(&uuid1, "Office2")?; - assert_eq!(inventory.status_and_location(&uuid1), - (MediaStatus::Full, MediaLocation::Vault(String::from("Office2")))); + assert_eq!( + inventory.status_and_location(&uuid1), + ( + MediaStatus::Full, + MediaLocation::Vault(String::from("Office2")) + ) + ); inventory.set_media_location_offline(&uuid1)?; - assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Full, MediaLocation::Offline)); + assert_eq!( + inventory.status_and_location(&uuid1), + (MediaStatus::Full, MediaLocation::Offline) + ); inventory.set_media_status_damaged(&uuid1)?; - assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Damaged, MediaLocation::Offline)); + assert_eq!( + inventory.status_and_location(&uuid1), + (MediaStatus::Damaged, MediaLocation::Offline) + ); inventory.clear_media_status(&uuid1)?; - assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Unknown, MediaLocation::Offline)); + assert_eq!( + inventory.status_and_location(&uuid1), + (MediaStatus::Unknown, MediaLocation::Offline) + ); Ok(()) } #[test] fn test_list_pool_media() -> Result<(), Error> { - let testdir = create_testdir("test_list_pool_media")?; let mut inventory = Inventory::load(&testdir)?; @@ -81,10 +92,16 @@ fn test_list_pool_media() -> Result<(), Error> { let list = inventory.list_pool_media("p1"); assert_eq!(list.len(), 2); - let tape2 = list.iter().find(|media_id| &media_id.label.uuid == &tape2_uuid).unwrap(); + let tape2 = list + .iter() + .find(|media_id| &media_id.label.uuid == &tape2_uuid) + .unwrap(); assert!(tape2.media_set_label.is_none()); - let tape3 = list.iter().find(|media_id| &media_id.label.uuid == &tape3_uuid).unwrap(); + let tape3 = list + .iter() + .find(|media_id| &media_id.label.uuid == &tape3_uuid) + .unwrap(); match tape3.media_set_label { None => bail!("missing media set label"), Some(ref set) => { @@ -97,17 +114,15 @@ fn test_list_pool_media() -> Result<(), Error> { #[test] fn test_media_set_simple() -> Result<(), Error> { - let testdir = create_testdir("test_media_set_simple")?; let mut inventory = Inventory::load(&testdir)?; let ctime = 0; let sl1 = MediaSetLabel::with_data("p1", Uuid::generate(), 0, ctime + 10, None); - let sl2 = MediaSetLabel::with_data("p1", sl1.uuid.clone(), 1, ctime+ 20, None); + let sl2 = MediaSetLabel::with_data("p1", sl1.uuid.clone(), 1, ctime + 20, None); let sl3 = MediaSetLabel::with_data("p1", sl1.uuid.clone(), 2, ctime + 30, None); - let tape1_uuid = inventory.generate_used_tape("tape1", sl1.clone(), 0); let tape2_uuid = inventory.generate_used_tape("tape2", sl2, 0); let tape3_uuid = inventory.generate_used_tape("tape3", sl3, 0); @@ -141,7 +156,6 @@ fn test_media_set_simple() -> Result<(), Error> { // test media set start time assert_eq!(inventory.media_set_start_time(&sl1.uuid), Some(ctime + 10)); - // test pool p2 let media_set = inventory.compute_media_set_members(&sl4.uuid)?; assert_eq!(media_set.uuid(), 
&sl4.uuid); @@ -158,10 +172,8 @@ fn test_media_set_simple() -> Result<(), Error> { Ok(()) } - #[test] fn test_latest_media_set() -> Result<(), Error> { - let testdir = create_testdir("test_latest_media_set")?; let insert_tape = |inventory: &mut Inventory, pool, label, seq_nr, ctime| -> Uuid { @@ -176,7 +188,12 @@ fn test_latest_media_set() -> Result<(), Error> { let set = inventory.compute_media_set_members(&latest_set).unwrap(); let media_list = set.media_list(); assert_eq!(media_list.iter().filter(|s| s.is_some()).count(), 1); - let media_uuid = media_list.iter().find(|s| s.is_some()).unwrap().clone().unwrap(); + let media_uuid = media_list + .iter() + .find(|s| s.is_some()) + .unwrap() + .clone() + .unwrap(); let media = inventory.lookup_media(&media_uuid).unwrap(); assert_eq!(media.label.label_text, label); }; diff --git a/src/tape/test/mod.rs b/src/tape/test/mod.rs index 44a81a4b..b5371dd1 100644 --- a/src/tape/test/mod.rs +++ b/src/tape/test/mod.rs @@ -1,5 +1,4 @@ - -mod inventory; -mod current_set_usable; -mod compute_media_state; mod alloc_writable_media; +mod compute_media_state; +mod current_set_usable; +mod inventory;