diff --git a/src/acme/client.rs b/src/acme/client.rs index 40b8e16e..74371207 100644 --- a/src/acme/client.rs +++ b/src/acme/client.rs @@ -161,7 +161,7 @@ impl AcmeClient { let mut data = Vec::::new(); self.write_to(&mut data)?; let account_path = self.account_path.as_ref().ok_or_else(|| { - format_err!("no account path set, cannot save upated account information") + format_err!("no account path set, cannot save updated account information") })?; crate::config::acme::make_acme_account_dir()?; replace_file( diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 43a54132..7ccda5af 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -588,7 +588,7 @@ fn get_snapshots_count(store: &Arc, owner: Option<&Authid>) -> Result }; let snapshot_count = group.list_backups()?.len() as u64; - // only include groups with snapshots, counting/displaying emtpy groups can confuse + // only include groups with snapshots, counting/displaying empty groups can confuse if snapshot_count > 0 { let type_count = match group.backup_type() { BackupType::Ct => counts.ct.get_or_insert(Default::default()), @@ -647,12 +647,12 @@ pub fn status( false // allow at least counts, user can read groups anyway.. } else { match user_info.any_privs_below(&auth_id, &["datastore", &store], NS_PRIVS_OK) { - // avoid leaking existance info if users hasn't at least any priv. below + // avoid leaking existence info if users hasn't at least any priv. 
below Ok(false) | Err(_) => return Err(http_err!(FORBIDDEN, "permission check failed")), _ => false, } }; - let datastore = datastore?; // only unwrap no to avoid leaking existance info + let datastore = datastore?; // only unwrap no to avoid leaking existence info let (counts, gc_status) = if verbose { let filter_owner = if store_privs & PRIV_DATASTORE_AUDIT != 0 { diff --git a/src/api2/admin/namespace.rs b/src/api2/admin/namespace.rs index 24d65dc5..b2b08ecc 100644 --- a/src/api2/admin/namespace.rs +++ b/src/api2/admin/namespace.rs @@ -133,7 +133,7 @@ pub fn list_namespaces( }, "delete-groups": { type: bool, - description: "If set, all groups will be destroyed in the whole hierachy below and\ + description: "If set, all groups will be destroyed in the whole hierarchy below and\ including `ns`. If not set, only empty namespaces will be pruned.", optional: true, default: false, diff --git a/src/api2/config/access/tfa.rs b/src/api2/config/access/tfa.rs index 6390524b..814fc0f3 100644 --- a/src/api2/config/access/tfa.rs +++ b/src/api2/config/access/tfa.rs @@ -121,7 +121,7 @@ pub fn update_webauthn_config( } else { let rp = webauthn .rp - .ok_or_else(|| format_err!("missing proeprty: 'rp'"))?; + .ok_or_else(|| format_err!("missing property: 'rp'"))?; let origin = webauthn.origin; let id = webauthn .id diff --git a/src/api2/node/mod.rs b/src/api2/node/mod.rs index 91f85f62..5859567e 100644 --- a/src/api2/node/mod.rs +++ b/src/api2/node/mod.rs @@ -317,7 +317,7 @@ fn upgrade_to_websocket( } #[api] -/// List Nodes (only for compatiblity) +/// List Nodes (only for compatibility) fn list_nodes() -> Result { Ok(json!([ { "node": proxmox_sys::nodename().to_string() } ])) } diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index 7957097e..66f212ae 100644 --- a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -824,7 +824,7 @@ fn restore_list_worker( if !media_file_chunk_map.is_empty() { task_log!(worker, "Phase 2: restore chunks to datastores"); } else 
{ - task_log!(worker, "all chunks exist already, skipping phase 2..."); + task_log!(worker, "All chunks are already present, skipping phase 2..."); } for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() { @@ -1988,7 +1988,7 @@ pub fn fast_catalog_restore( if &media_uuid != catalog_uuid { task_log!( worker, - "catalog uuid missmatch at pos {}", + "catalog uuid mismatch at pos {}", current_file_number ); continue; @@ -1996,7 +1996,7 @@ pub fn fast_catalog_restore( if media_set_uuid != archive_header.media_set_uuid { task_log!( worker, - "catalog media_set missmatch at pos {}", + "catalog media_set mismatch at pos {}", current_file_number ); continue; diff --git a/src/backup/hierarchy.rs b/src/backup/hierarchy.rs index d229165f..0f05505a 100644 --- a/src/backup/hierarchy.rs +++ b/src/backup/hierarchy.rs @@ -90,7 +90,7 @@ pub fn can_access_any_namespace( }) } -/// A priviledge aware iterator for all backup groups in all Namespaces below an anchor namespace, +/// A privilege aware iterator for all backup groups in all Namespaces below an anchor namespace, /// most often that will be the `BackupNamespace::root()` one.
/// /// Is basically just a filter-iter for pbs_datastore::ListNamespacesRecursive including access and diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs index aadd7c93..3a46dbfd 100644 --- a/src/bin/proxmox-backup-proxy.rs +++ b/src/bin/proxmox-backup-proxy.rs @@ -1001,7 +1001,7 @@ async fn run_stat_generator() { async fn generate_host_stats() { match tokio::task::spawn_blocking(generate_host_stats_sync).await { Ok(()) => (), - Err(err) => log::error!("generate_host_stats paniced: {}", err), + Err(err) => log::error!("generate_host_stats panicked: {}", err), } } diff --git a/src/config/acme/mod.rs b/src/config/acme/mod.rs index d226223e..5f1549c2 100644 --- a/src/config/acme/mod.rs +++ b/src/config/acme/mod.rs @@ -154,7 +154,7 @@ pub fn complete_acme_plugin(_arg: &str, _param: &HashMap) -> Vec pub fn complete_acme_plugin_type(_arg: &str, _param: &HashMap) -> Vec { vec![ "dns".to_string(), - //"http".to_string(), // makes currently not realyl sense to create or the like + //"http".to_string(), // currently does not really make sense to create one or the like ] } diff --git a/src/server/gc_job.rs b/src/server/gc_job.rs index 1b859ef0..1940ce87 100644 --- a/src/server/gc_job.rs +++ b/src/server/gc_job.rs @@ -40,11 +40,7 @@ pub fn do_garbage_collection_job( let status = worker.create_state(&result); if let Err(err) = job.finish(status) { - eprintln!( - "could not finish job state for {}: {}", - job.jobtype(), - err - ); + eprintln!("could not finish job state for {}: {}", job.jobtype(), err); } if let Some(email) = email { diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs index 35352e06..a62177e4 100644 --- a/src/server/prune_job.rs +++ b/src/server/prune_job.rs @@ -60,7 +60,7 @@ pub fn prune_datastore( &datastore, ns, max_depth, - Some(PRIV_DATASTORE_MODIFY), // overides the owner check + Some(PRIV_DATASTORE_MODIFY), // overrides the owner check Some(PRIV_DATASTORE_PRUNE), // additionally required if owner Some(&auth_id), )?
{ @@ -188,11 +188,7 @@ pub fn do_prune_job( let status = worker.create_state(&result); if let Err(err) = job.finish(status) { - eprintln!( - "could not finish job state for {}: {}", - job.jobtype(), - err - ); + eprintln!("could not finish job state for {}: {}", job.jobtype(), err); } result diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs index 778a6935..8bf2a0c9 100644 --- a/src/server/verify_job.rs +++ b/src/server/verify_job.rs @@ -75,11 +75,7 @@ pub fn do_verification_job( let status = worker.create_state(&job_result); if let Err(err) = job.finish(status) { - eprintln!( - "could not finish job state for {}: {}", - job.jobtype(), - err - ); + eprintln!("could not finish job state for {}: {}", job.jobtype(), err); } if let Some(email) = email { diff --git a/src/tape/inventory.rs b/src/tape/inventory.rs index c3bd4606..c0eda035 100644 --- a/src/tape/inventory.rs +++ b/src/tape/inventory.rs @@ -18,7 +18,7 @@ //! MediaSet Locking //! //! To add/remove media from a media set, or to modify catalogs we -//! always do lock_media_set(). Also, we aquire this lock during +//! always do lock_media_set(). Also, we acquire this lock during //! restore, to make sure it is not reused for backups. //! diff --git a/src/tape/media_catalog.rs b/src/tape/media_catalog.rs index ba17f066..f66542a2 100644 --- a/src/tape/media_catalog.rs +++ b/src/tape/media_catalog.rs @@ -483,7 +483,7 @@ impl MediaCatalog { pub fn register_label( &mut self, uuid: &Uuid, // Media/MediaSet Uuid - seq_nr: u64, // onyl used for media set labels + seq_nr: u64, // only used for media set labels file_number: u64, ) -> Result<(), Error> { self.check_register_label(file_number, uuid)?; diff --git a/src/tape/media_pool.rs b/src/tape/media_pool.rs index c688693f..fa99c95c 100644 --- a/src/tape/media_pool.rs +++ b/src/tape/media_pool.rs @@ -1,6 +1,6 @@ //! Media Pool //! -//! A set of backup medias. +//! A set of backup media. //! //! This struct manages backup media state during backup.
The main //! purpose is to allocate media sets and assign new tapes to it. @@ -392,7 +392,7 @@ impl MediaPool { let uuid = media_id.label.uuid.clone(); - MediaCatalog::overwrite(&self.state_path, &media_id, false)?; // overwite catalog + MediaCatalog::overwrite(&self.state_path, &media_id, false)?; // overwrite catalog let clear_media_status = true; // remove Full status self.inventory.store(media_id, clear_media_status)?; // store persistently diff --git a/src/tape/pool_writer/new_chunks_iterator.rs b/src/tape/pool_writer/new_chunks_iterator.rs index 03b0fb37..32f5737b 100644 --- a/src/tape/pool_writer/new_chunks_iterator.rs +++ b/src/tape/pool_writer/new_chunks_iterator.rs @@ -18,7 +18,7 @@ pub struct NewChunksIterator { impl NewChunksIterator { /// Creates the iterator, spawning a new thread /// - /// Make sure to join() the returnd thread handle. + /// Make sure to join() the returned thread handle. pub fn spawn( datastore: Arc, snapshot_reader: Arc>, diff --git a/src/traffic_control_cache.rs b/src/traffic_control_cache.rs index d71972c4..b0b24788 100644 --- a/src/traffic_control_cache.rs +++ b/src/traffic_control_cache.rs @@ -34,7 +34,7 @@ struct ParsedTcRule { /// Traffic control statistics pub struct TrafficStat { - /// Total incomming traffic (bytes) + /// Total incoming traffic (bytes) pub traffic_in: u64, /// Incoming data rate (bytes/second) pub rate_in: u64,