From de638df23e842497e0668964aac72fe98c9cb9af Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Wed, 4 Mar 2026 10:09:48 +1100 Subject: [PATCH 01/47] [skip ci] Adding proto, dependencies and required changes First commit summarizing the implementation in the draft pull request contained on the dev/sftp-start branch. I am trying to keep the commit number low and focused on different parts of the implementation. - sshwire-derive/src/lib.rs: Modified enconde_enum to allow encoding of enums discriminants - Added the full proto and sftpsource definitions and Cargo.toml for the sftp crate. - sftp/src/lib.rs: Will experience many changes as the functionality is implemented, but for now it just re-exports the proto and sftpsource modules. --- Cargo.lock | 48 +- Cargo.toml | 33 +- sftp/Cargo.toml | 26 + sftp/src/lib.rs | 128 +++++ sftp/src/proto.rs | 1120 +++++++++++++++++++++++++++++++++++++ sftp/src/sftpsource.rs | 239 ++++++++ sshwire-derive/src/lib.rs | 7 +- 7 files changed, 1571 insertions(+), 30 deletions(-) create mode 100644 sftp/Cargo.toml create mode 100644 sftp/src/lib.rs create mode 100644 sftp/src/proto.rs create mode 100644 sftp/src/sftpsource.rs diff --git a/Cargo.lock b/Cargo.lock index b347b125..8874a498 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -795,9 +795,9 @@ dependencies = [ [[package]] name = "embassy-futures" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f878075b9794c1e4ac788c95b728f26aa6366d32eeb10c7051389f898f7d067" +checksum = "dc2d050bdc5c21e0862a89256ed8029ae6c290a93aecefc73084b3002cdebb01" [[package]] name = "embassy-hal-internal" @@ -924,15 +924,15 @@ dependencies = [ [[package]] name = "embassy-sync" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef1a8a1ea892f9b656de0295532ac5d8067e9830d49ec75076291fd6066b136" +checksum = "73974a3edbd0bd286759b3d483540f0ebef705919a5f56f4fc7709066f71689b" dependencies = [ 
"cfg-if", "critical-section", "embedded-io-async", + "futures-core", "futures-sink", - "futures-util", "heapless", ] @@ -1792,11 +1792,12 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.3" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" dependencies = [ - "num_enum_derive 0.7.3", + "num_enum_derive 0.7.5", + "rustversion", ] [[package]] @@ -1812,9 +1813,9 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.3" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", @@ -1991,7 +1992,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61d90fddc3d67f21bbf93683bc461b05d6a29c708caf3ffb79947d7ff7095406" dependencies = [ "arrayvec", - "num_enum 0.7.3", + "num_enum 0.7.5", "paste", ] @@ -2740,7 +2741,7 @@ name = "sunset-async" version = "0.4.0" dependencies = [ "embassy-futures", - "embassy-sync 0.7.0", + "embassy-sync 0.7.2", "embedded-io-async", "log", "portable-atomic", @@ -2757,7 +2758,7 @@ dependencies = [ "embassy-futures", "embassy-net", "embassy-net-driver", - "embassy-sync 0.7.0", + "embassy-sync 0.7.2", "embassy-time", "embedded-io-async", "heapless", @@ -2787,7 +2788,7 @@ dependencies = [ "embassy-net", "embassy-net-wiznet", "embassy-rp", - "embassy-sync 0.7.0", + "embassy-sync 0.7.2", "embassy-time", "embassy-usb", "embassy-usb-driver", @@ -2822,7 +2823,7 @@ dependencies = [ "embassy-futures", "embassy-net", "embassy-net-tuntap", - "embassy-sync 0.7.0", + "embassy-sync 0.7.2", "embassy-time", "embedded-io-async", "env_logger", @@ -2850,6 +2851,21 @@ dependencies = [ 
"sunset-sshwire-derive", ] +[[package]] +name = "sunset-sftp" +version = "0.1.2" +dependencies = [ + "embassy-futures", + "embassy-sync 0.7.2", + "embedded-io-async", + "log", + "num_enum 0.7.5", + "paste", + "sunset", + "sunset-async", + "sunset-sshwire-derive", +] + [[package]] name = "sunset-sshwire-derive" version = "0.2.1" @@ -2865,7 +2881,7 @@ dependencies = [ "argh", "critical-section", "embassy-futures", - "embassy-sync 0.7.0", + "embassy-sync 0.7.2", "embedded-io-adapters", "embedded-io-async", "futures", diff --git a/Cargo.toml b/Cargo.toml index 61ffdc26..b5635e3b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,8 +12,10 @@ rust-version = "1.87" [workspace] members = [ "demo/picow", - "demo/std", "fuzz", + "demo/std", + "fuzz", "stdasync", + "sftp", # workspace.dependencies paths are automatic ] @@ -39,7 +41,9 @@ ascii = { version = "1.0", default-features = false } arbitrary = { workspace = true, optional = true } getrandom = "0.2" -rand_core = { version = "0.6", default-features = false, features = ["getrandom"]} +rand_core = { version = "0.6", default-features = false, features = [ + "getrandom", +] } ctr = { version = "0.9", features = ["zeroize"] } aes = { version = "0.8", features = ["zeroize"] } @@ -53,14 +57,27 @@ zeroize = { version = "1", default-features = false, features = ["derive"] } cipher = { version = "0.4", features = ["zeroize"] } subtle = { version = "2.4", default-features = false } # ed25519/x25519 -ed25519-dalek = { version = "2.1", default-features = false, features = ["zeroize", "rand_core"] } -x25519-dalek = { version = "2.0", default-features = false, features = ["zeroize"] } -curve25519-dalek = { version = "4.1", default-features = false, features = ["zeroize"] } -ml-kem = { version = "0.2.1", default-features = false, features = ["zeroize"], optional = true } +ed25519-dalek = { version = "2.1", default-features = false, features = [ + "zeroize", + "rand_core", +] } +x25519-dalek = { version = "2.0", default-features = false, 
features = [ + "zeroize", +] } +curve25519-dalek = { version = "4.1", default-features = false, features = [ + "zeroize", +] } +ml-kem = { version = "0.2.1", default-features = false, features = [ + "zeroize", +], optional = true } # p521 = { version = "0.13.2", default-features = false, features = ["ecdh", "ecdsa"] } -rsa = { version = "0.9", default-features = false, optional = true, features = ["sha2"] } +rsa = { version = "0.9", default-features = false, optional = true, features = [ + "sha2", +] } # TODO: getrandom feature is a workaround for missing ssh-key dependency with rsa. fixed in pending 0.6 -ssh-key = { version = "0.6", default-features = false, optional = true, features = ["getrandom"] } +ssh-key = { version = "0.6", default-features = false, optional = true, features = [ + "getrandom", +] } embedded-io = { version = "0.6", optional = true } diff --git a/sftp/Cargo.toml b/sftp/Cargo.toml new file mode 100644 index 00000000..23e4511a --- /dev/null +++ b/sftp/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "sunset-sftp" +version = "0.1.2" +edition = "2024" + +[features] +default = [] +# long paths support, which allows paths up to 4096 bytes, by default paths are limited to 256 bytes +long-paths-4096 = [] +long-paths-1024 = [] + +# Standard library support - enables std helpers +std = [] + +[dependencies] +sunset = { path = "../" } +sunset-async = { path = "../async" } +sunset-sshwire-derive = { path = "../sshwire-derive" } + + +embedded-io-async = "0.6" +num_enum = { version = "0.7.4", default-features = false } +paste = "1.0" +log = "0.4" +embassy-sync = "0.7.2" +embassy-futures = "0.1.2" diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs new file mode 100644 index 00000000..24f7b363 --- /dev/null +++ b/sftp/src/lib.rs @@ -0,0 +1,128 @@ +//! SFTP (SSH File Transfer Protocol) implementation for [`sunset`]. +//! +//! 
(Partially) Implements SFTP v3 as defined in [draft-ietf-secsh-filexfer-02](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02). +//! +//! **Work in Progress**: Currently focuses on file upload operations. +//! Long packets for requests other than writing and additional SFTP operations +//! are not yet implemented. `no_std` compatibility is intended but not +//! yet complete. Please see the roadmap and use this crate carefully. +//! +//! This crate implements a handler that, given a [`sunset::ChanHandle`] +//! a `sunset_async::SSHServer` and some auxiliary buffers, +//! can dispatch SFTP packets to a struct implementing [`crate::sftpserver::SftpServer`] trait. +//! +//! See example usage in the `../demo/sftd/std` directory for the intended usage +//! of this library. +//! +//! # Roadmap +//! +//! The following list is an opinionated collection of the points that should be +//! completed to provide growing functionality. +//! +//! ## Basic features +//! +//! - [ ] [SFTP Protocol Initialization](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-4) (Only SFTP V3 supported) +//! - [ ] [Canonicalizing the Server-Side Path Name](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.11) support +//! - [ ] [Open, close](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3) +//! and [write](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) +//! - [ ] Directory [Browsing](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.7) +//! - [ ] File [read](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4), +//! - [ ] File [stats](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.8) +//! +//! ## Minimal features for convenient usability +//! +//! - [ ] [Removing files](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.5) +//! 
- [ ] [Renaming files](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.5) +//! - [ ] [Creating directories](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.6) +//! - [ ] [Removing directories](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.6) +//! +//! ## Extended features +//! +//! - [ ] [Append, create and truncate files](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3) +//! files +//! - [ ] [Reading](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.8) +//! files attributes +//! - [ ] [Setting](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.9) files attributes +//! - [ ] [Dealing with Symbolic links](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.10) +//! - [ ] [Vendor Specific](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-8) +//! request and responses + +#![forbid(unsafe_code)] +#![warn(missing_docs)] +#![cfg_attr(not(feature = "std"), no_std)] + +// mod opaquefilehandle; +mod proto; +// mod sftperror; +// mod sftphandler; +// mod sftpserver; +// mod sftpsink; +mod sftpsource; + +// Main calling point for the library provided that the user implements +// a [`server::SftpServer`]. 
+// +// Please see basic usage at `../demo/sftd/std` +// pub use sftphandler::SftpHandler; + +/// Source of SFTP packets +/// +/// Used to decode SFTP packets from a byte slice +pub use sftpsource::SftpSource; + +// /// Structures and types used to add the details for the target system +// /// Related to the implementation of the [`server::SftpServer`], which +// /// is meant to be instantiated by the user and passed to [`SftpHandler`] +// /// and has the task of executing client requests in the underlying system +// pub mod server { + +// pub use crate::sftpserver::DirReply; +// pub use crate::sftpserver::ReadReply; +// pub use crate::sftpserver::ReadStatus; +// pub use crate::sftpserver::SftpOpResult; +// pub use crate::sftpserver::SftpServer; +// /// Helpers to reduce error prone tasks and hide some details that +// /// add complexity when implementing an [`SftpServer`] +// pub mod helpers { +// pub use crate::sftpserver::helpers::*; + +// #[cfg(feature = "std")] +// pub use crate::sftpserver::DirEntriesCollection; +// #[cfg(feature = "std")] +// pub use crate::sftpserver::get_file_attrs; +// } +// pub use crate::sftpsink::SftpSink; +// pub use sunset::sshwire::SSHEncode; + +// pub use crate::proto::MAX_REQUEST_LEN; +// } + +/// Handles and helpers used by the [`sftpserver::SftpServer`] trait implementer +// pub mod handles { +// pub use crate::opaquefilehandle::OpaqueFileHandle; +// pub use crate::opaquefilehandle::OpaqueFileHandleManager; +// pub use crate::opaquefilehandle::PathFinder; +// } + +/// SFTP Protocol types and structures +pub mod protocol { + pub use crate::proto::Attrs; + pub use crate::proto::FileHandle; + pub use crate::proto::Filename; + pub use crate::proto::Name; + pub use crate::proto::NameEntry; + pub use crate::proto::PFlags; + pub use crate::proto::PathInfo; + pub use crate::proto::SftpPacket; + pub use crate::proto::StatusCode; + /// Constants that might be useful for SFTP developers + pub mod constants { + pub use 
crate::proto::MAX_NAME_ENTRY_SIZE; + } +} + +// /// Errors and results used in this crate +// pub mod error { +// pub use crate::sftperror::SftpError; +// pub use crate::sftperror::SftpResult; +// } diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs new file mode 100644 index 00000000..7b1857b8 --- /dev/null +++ b/sftp/src/proto.rs @@ -0,0 +1,1120 @@ +use crate::sftpsource::SftpSource; + +use sunset::sshwire::{ + BinString, SSHDecode, SSHEncode, SSHSink, SSHSource, TextString, WireError, + WireResult, +}; +use sunset_sshwire_derive::{SSHDecode, SSHEncode}; + +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; +use num_enum::FromPrimitive; +use paste::paste; + +/// SFTP Minimum packet length is 9 bytes corresponding with `SSH_FXP_INIT` +#[allow(unused)] +pub const SFTP_MINIMUM_PACKET_LEN: usize = 9; + +#[allow(unused)] +pub const SFTP_FIELD_LEN_INDEX: usize = 0; +/// SFTP packets length field us u32 +#[allow(unused)] +pub const SFTP_FIELD_LEN_LENGTH: usize = 4; +/// SFTP packets have the packet type after a u32 length field +#[allow(unused)] +pub const SFTP_FIELD_ID_INDEX: usize = 4; +/// SFTP packets ID length is 1 byte +#[allow(unused)] +pub const SFTP_FIELD_ID_LEN: usize = 1; +/// SFTP packets start with the length field + +/// SFTP packets have the packet request id after field id +#[allow(unused)] +pub const SFTP_FIELD_REQ_ID_INDEX: usize = 5; +/// SFTP packets ID length is 1 byte +#[allow(unused)] +pub const SFTP_FIELD_REQ_ID_LEN: usize = 4; +/// SFTP packets start with the length field + +// SSH_FXP_WRITE SFTP Packet definition used to decode long packets that do not fit in one buffer + +/// SFTP SSH_FXP_WRITE Packet cannot be shorter than this (len:4+pnum:1+rid:4+hand:4+0+data:4+0 bytes = 17 bytes) [draft-ietf-secsh-filexfer-02](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) +// pub const SFTP_MINIMUM_WRITE_PACKET_LEN: usize = 17; + +#[allow(unused)] +/// SFTP SSH_FXP_WRITE Packet request id field 
index [draft-ietf-secsh-filexfer-02](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) +pub const SFTP_WRITE_REQID_INDEX: usize = 5; + +/// SFTP SSH_FXP_WRITE Packet handle field index [draft-ietf-secsh-filexfer-02](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) +// pub const SFTP_WRITE_HANDLE_INDEX: usize = 9; + +/// Considering the definition in [Section 7](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) +/// for handle maximum length +pub const _SSH_FXP_HANDLE_MAX_LEN: u32 = 256; + +/// The maximum size for full paths is only limited by the u32 where ssh strings lengths are contained. This causes that different platforms use different maximum path lengths. +/// We need to make a choice in this implementation. Since it is targeting embedded devices I am going to set it short, since influence the length of the [[requestHolder]] that needs to be allocated +/// to compose fragmented requests. +#[cfg(not(any(feature = "long-paths-4096", feature = "long-paths-1024")))] +pub const MAX_PATH_LEN: usize = 256; +#[cfg(feature = "long-paths-1024")] +pub const MAX_PATH_LEN: usize = 1024; // PATH_MAX for macOS +#[cfg(feature = "long-paths-4096")] +pub const MAX_PATH_LEN: usize = 4096; // Linux glibc PATH_MAX is typically 4096 bytes + +/// Maximum request size, considering [[MAX_PATH_LEN]] but not counting the data payload. 
+/// At this moment in time, the longest request is `ssh_fxp_open` +pub const MAX_REQUEST_LEN: usize = 4 + MAX_PATH_LEN // Filename string + + 4 // PFlags (u32) + + 32; // Attrs (Max 32Bytes not counting extensions) + +/// Considering the definition in [Section 7](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) +/// for `SSH_FXP_READDIR` +/// +/// (4 + 256) bytes for filename, (4 + 0) bytes for empty long filename and 72 bytes for the attributes ( 32/4*7 + 64/4 * 1 = 72) +pub const MAX_NAME_ENTRY_SIZE: usize = 4 + MAX_PATH_LEN + 4 + 72; + +// TODO is utf8 enough, or does this need to be an opaque binstring? +/// See [SSH_FXP_NAME in Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Filename<'a>(TextString<'a>); + +impl<'a> From<&'a str> for Filename<'a> { + fn from(s: &'a str) -> Self { + Filename(TextString(s.as_bytes())) + } +} + +// TODO standardize the encoding of filenames as str +impl<'a> Filename<'a> { + /// + pub fn as_str(&self) -> Result<&'a str, WireError> { + core::str::from_utf8(self.0 .0).map_err(|_| WireError::BadString) + } +} + +/// An opaque handle that is used by the server to identify an open +/// file or folder. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, SSHEncode, SSHDecode)] +pub struct FileHandle<'a>(pub BinString<'a>); + +// ========================== Initialization =========================== + +/// The reference implementation we are working on is 3, this is, https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02 +pub const SFTP_VERSION: u32 = 3; + +/// The SFTP version of the client +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct InitVersionClient { + // No ReqId for SSH_FXP_INIT + pub version: u32, + // TODO variable number of ExtPair +} + +/// The lowers SFTP version from the client and the server +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct InitVersionLowest { + // No ReqId for SSH_FXP_VERSION + pub version: u32, + // TODO variable number of ExtPair +} + +// ============================= Requests ============================== + +/// Used for `ssh_fxp_open` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Open<'a> { + /// The relative or absolute path of the file to be open + pub filename: Filename<'a>, + /// File [permissions flags](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3) + pub pflags: PFlags, + /// Initial attributes for the file + pub attrs: Attrs, +} + +/// Flags for Open RequestFor more information see [Opening, creating and closing files](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3) +#[derive(Debug, FromPrimitive, PartialEq)] +#[repr(u32)] +#[allow(non_camel_case_types, missing_docs)] +pub enum PFlags { + //#[sshwire(variant = "ssh_fx_read")] + SSH_FXF_READ = 0x00000001, + //#[sshwire(variant = "ssh_fx_write")] + SSH_FXF_WRITE = 0x00000002, + //#[sshwire(variant = "ssh_fx_append")] + SSH_FXF_APPEND = 0x00000004, + //#[sshwire(variant = "ssh_fx_creat")] + SSH_FXF_CREAT = 0x00000008, + //#[sshwire(variant = "ssh_fx_trunk")] + SSH_FXF_TRUNC = 0x00000010, + 
//#[sshwire(variant = "ssh_fx_excl")] + SSH_FXF_EXCL = 0x00000020, + //#[sshwire(unknown)] + #[num_enum(catch_all)] + Multiple(u32), +} + +impl<'de> SSHDecode<'de> for PFlags { + fn dec(s: &mut S) -> WireResult + where + S: SSHSource<'de>, + { + Ok(PFlags::from(u32::dec(s)?)) + } +} + +// This is prone to errors if we update PFlags enum: Unlikely +impl From<&PFlags> for u32 { + fn from(value: &PFlags) -> Self { + match value { + PFlags::SSH_FXF_READ => 0x00000001, + PFlags::SSH_FXF_WRITE => 0x00000002, + PFlags::SSH_FXF_APPEND => 0x00000004, + PFlags::SSH_FXF_CREAT => 0x00000008, + PFlags::SSH_FXF_TRUNC => 0x00000010, + PFlags::SSH_FXF_EXCL => 0x00000020, + PFlags::Multiple(value) => *value, + } + } +} +// TODO: Implement an SSHEncode attribute for enums to encode them in a given numeric format +impl SSHEncode for PFlags { + fn enc(&self, s: &mut dyn SSHSink) -> WireResult<()> { + let numeric_value: u32 = self.into(); + numeric_value.enc(s) + } +} + +/// Used for `ssh_fxp_open` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.7). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct OpenDir<'a> { + /// The relative or absolute path of the directory to be open + pub dirname: Filename<'a>, +} + +/// Used for `ssh_fxp_close` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Close<'a> { + /// An opaque handle that is used by the server to identify an open + /// file or folder to be closed. + pub handle: FileHandle<'a>, +} + +/// Used for `ssh_fxp_read` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Read<'a> { + /// An opaque handle that is used by the server to identify an open + /// file or folder. 
+ pub handle: FileHandle<'a>, + /// The offset for the read operation + pub offset: u64, + /// The number of bytes to be retrieved + pub len: u32, +} + +/// Used for `ssh_fxp_readdir` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.7). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct ReadDir<'a> { + /// An opaque handle that is used by the server to identify an open + /// file or folder. + pub handle: FileHandle<'a>, +} + +/// Used for `ssh_fxp_write` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Write<'a> { + /// An opaque handle that is used by the server to identify an open + /// file or folder. + pub handle: FileHandle<'a>, + /// The offset for the read operation + pub offset: u64, + + /// The data length to be written. Given that it can be arbitrary long, the data is not decoded + /// Instead the data_len is used in [[SftpHandler.Process]] to generate SftpServer.Write calls + pub data_len: u32, +} + +/// Used for `ssh_fxp_lstat` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.8). +/// LSTAT does not follow symbolic links +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct LStat<'a> { + /// The path of the element which stats are to be retrieved + pub file_path: TextString<'a>, +} + +/// Used for `ssh_fxp_lstat` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.8). +/// STAT does follow symbolic links +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Stat<'a> { + /// The path of the element which stats are to be retrieved + pub file_path: TextString<'a>, +} + +// ============================= Responses ============================= + +/// Used for `ssh_fxp_realpath` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.11). 
+#[derive(Debug, SSHEncode, SSHDecode)] +pub struct PathInfo<'a> { + /// The path + pub path: TextString<'a>, +} + +/// Used for `ssh_fxp_status` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Status<'a> { + /// See [`StatusCode`] for possible codes + pub code: StatusCode, + /// An extra message + pub message: TextString<'a>, + /// A language tag as defined by [Tags for the Identification of Languages](https://datatracker.ietf.org/doc/html/rfc1766) + pub lang: TextString<'a>, +} + +/// Used for `ssh_fxp_handle` [response](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7). +#[derive(Debug, Clone, Copy, PartialEq, Eq, SSHEncode, SSHDecode)] +pub struct Handle<'a> { + /// An opaque handle that is used by the server to identify an open + /// file or folder. + pub handle: FileHandle<'a>, +} + +/// Used for `ssh_fxp_data` [responses](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7). +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct Data<'a> { + /// raw data + pub data: BinString<'a>, +} + +/// This is the encoded length for the [`Data`] Sftp Response. +/// +/// This considers the Packet type (1), the request ID (4), and the data string +/// length (4) +/// +/// - It excludes explicitly length field for the SftpPacket +/// - It excludes explicitly length of the data string content +/// +/// It is defined a single source of truth for what is the length for the +/// encoded [`SftpPacket::Data`] variant +/// +/// See [Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) +pub(crate) const ENCODED_SSH_FXP_DATA_MIN_LENGTH: u32 = 1 + 4 + 4; + +/// Struct to hold `SSH_FXP_NAME` response. 
+/// See [SSH_FXP_NAME in Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) +#[derive(Debug, SSHEncode, SSHDecode)] +pub struct NameEntry<'a> { + /// Is a file name being returned + pub filename: Filename<'a>, + /// longname is an undefined text line like "ls -l", + /// SHOULD NOT be used. + pub _longname: Filename<'a>, + /// Attributes for the file entry + /// + /// See [File Attributes](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#autoid-5) + /// for more information. + pub attrs: Attrs, +} + +/// This is the encoded length for the Name Sftp Response. +/// +/// This considers the Packet type (1), the Request Id (4) and +/// count of [`NameEntry`] that will follow +/// +/// It excludes the length of [`NameEntry`] explicitly +/// +/// It is defined a single source of truth for what is the length for the +/// encoded [`SftpPacket::Name`] variant +/// +/// See [Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) +pub(crate) const ENCODED_BASE_NAME_SFTP_PACKET_LENGTH: u32 = 9; + +// TODO Will a Vector be an issue for no_std? +// Maybe we should migrate this to heapless::Vec and let the user decide +// the number of elements via features flags? +/// This is the first part of the `SSH_FXP_NAME` response. It includes +/// only the count of [`NameEntry`] items that follow this Name +/// +/// After encoding or decoding [`Name`], [`NameEntry`] must be encoded or +/// decoded `count` times +/// A collection of [`NameEntry`] used for [ssh_fxp_name responses](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7). +#[derive(Debug)] +// pub struct Name<'a>(pub Vec>); +pub struct Name { + /// Number of [`NameEntry`] items that follow this Name + pub count: u32, +} + +impl<'de> SSHDecode<'de> for Name { + fn dec(s: &mut S) -> WireResult + where + S: SSHSource<'de>, + { + let count = u32::dec(s)? 
as u32; + + // let mut names = Vec::with_capacity(count); + + // for _ in 0..count { + // names.push(NameEntry::dec(s)?); + // } + + Ok(Name { count }) + } +} + +impl SSHEncode for Name { + fn enc(&self, s: &mut dyn SSHSink) -> WireResult<()> { + self.count.enc(s) + // (self.0.len() as u32).enc(s)?; + + // for element in self.0.iter() { + // element.enc(s)?; + // } + // Ok(()) + } +} + +// Requests/Responses data types + +#[derive(Debug, SSHEncode, SSHDecode, Clone, Copy, PartialEq, Eq)] +pub struct ReqId(pub u32); + +/// For more information see [Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) +#[derive(Debug, FromPrimitive)] +#[repr(u32)] +#[allow(non_camel_case_types, missing_docs)] +pub enum StatusCode { + // #[sshwire(variant = "ssh_fx_ok")] + SSH_FX_OK = 0, + // #[sshwire(variant = "ssh_fx_eof")] + SSH_FX_EOF = 1, + // #[sshwire(variant = "ssh_fx_no_such_file")] + SSH_FX_NO_SUCH_FILE = 2, + // #[sshwire(variant = "ssh_fx_permission_denied")] + SSH_FX_PERMISSION_DENIED = 3, + // #[sshwire(variant = "ssh_fx_failure")] + SSH_FX_FAILURE = 4, + // #[sshwire(variant = "ssh_fx_bad_message")] + SSH_FX_BAD_MESSAGE = 5, + // #[sshwire(variant = "ssh_fx_no_connection")] + SSH_FX_NO_CONNECTION = 6, + // #[sshwire(variant = "ssh_fx_connection_lost")] + SSH_FX_CONNECTION_LOST = 7, + // #[sshwire(variant = "ssh_fx_unsupported")] + SSH_FX_OP_UNSUPPORTED = 8, + // #[sshwire(unknown)] + #[num_enum(catch_all)] + Other(u32), +} + +impl<'de> SSHDecode<'de> for StatusCode { + fn dec(s: &mut S) -> WireResult + where + S: SSHSource<'de>, + { + Ok(StatusCode::from(u32::dec(s)?)) + } +} + +// This is prone to errors if we update StatusCode enum: Unlikely to change +impl From<&StatusCode> for u32 { + fn from(value: &StatusCode) -> Self { + match value { + StatusCode::SSH_FX_OK => 0, + StatusCode::SSH_FX_EOF => 1, + StatusCode::SSH_FX_NO_SUCH_FILE => 2, + StatusCode::SSH_FX_PERMISSION_DENIED => 3, + 
StatusCode::SSH_FX_FAILURE => 4, + StatusCode::SSH_FX_BAD_MESSAGE => 5, + StatusCode::SSH_FX_NO_CONNECTION => 6, + StatusCode::SSH_FX_CONNECTION_LOST => 7, + StatusCode::SSH_FX_OP_UNSUPPORTED => 8, + StatusCode::Other(value) => *value, + } + } +} +// TODO: Implement an SSHEncode attribute for enums to encode them in a given numeric format +impl SSHEncode for StatusCode { + fn enc(&self, s: &mut dyn SSHSink) -> WireResult<()> { + let numeric_value: u32 = self.into(); + numeric_value.enc(s) + } +} + +// TODO: Implement extensions. Low in priority +/// Provided to provide a mechanism to implement extensions +// #[derive(Debug, SSHEncode, SSHDecode)] +// pub struct ExtPair<'a> { +// pub name: &'a str, +// pub data: BinString<'a>, +// } + +/// Files attributes to describe Files as SFTP v3 specification +/// +/// See [File Attributes](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#autoid-5) +/// for more information. +#[allow(missing_docs)] +#[derive(Debug, Default, PartialEq)] +pub struct Attrs { + pub size: Option, + pub uid: Option, + pub gid: Option, + pub permissions: Option, + pub atime: Option, + pub mtime: Option, + pub ext_count: Option, + // TODO extensions +} + +/// For more information see [File Attributes](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#autoid-5) +#[repr(u32)] +#[allow(non_camel_case_types)] +pub enum AttrsFlags { + SSH_FILEXFER_ATTR_SIZE = 0x01, + SSH_FILEXFER_ATTR_UIDGID = 0x02, + SSH_FILEXFER_ATTR_PERMISSIONS = 0x04, + SSH_FILEXFER_ATTR_ACMODTIME = 0x08, + SSH_FILEXFER_ATTR_EXTENDED = 0x80000000, +} +impl core::ops::AddAssign for u32 { + fn add_assign(&mut self, other: AttrsFlags) { + *self |= other as u32; + } +} + +impl core::ops::BitAnd for u32 { + type Output = u32; + + fn bitand(self, rhs: AttrsFlags) -> Self::Output { + self & rhs as u32 + } +} + +impl Attrs { + /// Obtains the flags for the values stored in the [`Attrs`] struct. 
+ /// + /// See [File Attributes](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#autoid-5) + /// for more information. + pub fn flags(&self) -> u32 { + let mut flags: u32 = 0; + if self.size.is_some() { + flags += AttrsFlags::SSH_FILEXFER_ATTR_SIZE + } + if self.uid.is_some() || self.gid.is_some() { + flags += AttrsFlags::SSH_FILEXFER_ATTR_UIDGID + } + if self.permissions.is_some() { + flags += AttrsFlags::SSH_FILEXFER_ATTR_PERMISSIONS + } + if self.atime.is_some() || self.mtime.is_some() { + flags += AttrsFlags::SSH_FILEXFER_ATTR_ACMODTIME + } + // TODO Implement extensions + // if self.ext_count.is_some() { + // flags += AttrsFlags::SSH_FILEXFER_ATTR_EXTENDED + // } + + flags + } +} + +impl SSHEncode for Attrs { + fn enc(&self, s: &mut dyn SSHSink) -> WireResult<()> { + self.flags().enc(s)?; + + // IMPORTANT: Order matters in the encoding/decoding since it will be interpreted together with the flags + if let Some(value) = self.size.as_ref() { + value.enc(s)? + } + if let Some(value) = self.uid.as_ref() { + value.enc(s)? + } + if let Some(value) = self.gid.as_ref() { + value.enc(s)? + } + if let Some(value) = self.permissions.as_ref() { + value.enc(s)? + } + if let Some(value) = self.atime.as_ref() { + value.enc(s)? + } + if let Some(value) = self.mtime.as_ref() { + value.enc(s)? + } + // TODO Implement extensions + // if let Some(value) = self.ext_count.as_ref() { value.enc(s)? } + + Ok(()) + } +} + +impl<'de> SSHDecode<'de> for Attrs { + fn dec(s: &mut S) -> WireResult + where + S: SSHSource<'de>, + { + let mut attrs = Attrs::default(); + let flags = u32::dec(s)? 
as u32; + if flags & AttrsFlags::SSH_FILEXFER_ATTR_SIZE != 0 { + attrs.size = Some(u64::dec(s)?); + } + if flags & AttrsFlags::SSH_FILEXFER_ATTR_UIDGID != 0 { + attrs.uid = Some(u32::dec(s)?); + attrs.gid = Some(u32::dec(s)?); + } + if flags & AttrsFlags::SSH_FILEXFER_ATTR_PERMISSIONS != 0 { + attrs.permissions = Some(u32::dec(s)?); + } + if flags & AttrsFlags::SSH_FILEXFER_ATTR_ACMODTIME != 0 { + attrs.atime = Some(u32::dec(s)?); + attrs.mtime = Some(u32::dec(s)?); + } + // TODO Implement extensions + // if flags & AttrsFlags::SSH_FILEXFER_ATTR_EXTENDED != 0{ + + Ok(attrs) + } +} + +macro_rules! sftpmessages { + ( + init: { + $( ( $init_message_num:tt, + $init_packet_variant:ident, + $init_packet_type:ty, + $init_ssh_fxp_name:literal + ), + )* + }, + request: { + $( ( $request_message_num:tt, + $request_packet_variant:ident, + $request_packet_type:ty, + $request_ssh_fxp_name:literal + ), + )* + }, + response: { + $( ( $response_message_num:tt, + $response_packet_variant:ident, + $response_packet_type:ty, + $response_ssh_fxp_name:literal + ), + )* + }, + ) => { + paste! 
{ + /// Represent a subset of the SFTP packet types defined by draft-ietf-secsh-filexfer-02 + #[derive(Debug, Copy, Clone, PartialEq, Eq, FromPrimitive, SSHEncode)] + #[repr(u8)] + #[allow(non_camel_case_types)] + pub enum SftpNum { + $( + #[sshwire(variant = $init_ssh_fxp_name)] + [<$init_ssh_fxp_name:upper>] = $init_message_num, + )* + + $( + #[sshwire(variant = $request_ssh_fxp_name)] + [<$request_ssh_fxp_name:upper>] = $request_message_num, + )* + + $( + #[sshwire(variant = $response_ssh_fxp_name)] + [<$response_ssh_fxp_name:upper>] = $response_message_num, + )* + + #[sshwire(unknown)] + #[num_enum(catch_all)] + Other(u8), + } + } // paste + + impl<'de> SSHDecode<'de> for SftpNum { + fn dec(s: &mut S) -> WireResult + where + S: SSHSource<'de>, + { + Ok(SftpNum::from(u8::dec(s)?)) + } + } + paste!{ + impl From for u8{ + fn from(sftp_num: SftpNum) -> u8 { + match sftp_num { + $( + SftpNum::[<$init_ssh_fxp_name:upper>] => $init_message_num, + )* + $( + SftpNum::[<$request_ssh_fxp_name:upper>] => $request_message_num, + )* + $( + SftpNum::[<$response_ssh_fxp_name:upper>] => $response_message_num, + )* + + SftpNum::Other(number) => number // Other, not in the enum definition + + } + } + + } + + } //paste + + impl SftpNum { + fn is_init(&self) -> bool { + (1..=1).contains(&(u8::from(self.clone()))) + } + + pub(crate) fn is_request(&self) -> bool { + // TODO SSH_FXP_EXTENDED + (3..=20).contains(&(u8::from(self.clone()))) + } + + fn is_response(&self) -> bool { + // TODO SSH_FXP_EXTENDED_REPLY + (100..=105).contains(&(u8::from(self.clone()))) + ||(2..=2).contains(&(u8::from(self.clone()))) + } + } + + + /// Top level SSH packet enum + /// + /// It helps identifying the SFTP Packet type and handling it accordingly + /// This is done using the SFTP field type + #[derive(Debug)] + pub enum SftpPacket<'a> { + $( + #[doc = concat!("Initialization packet: ", $init_ssh_fxp_name)] + $init_packet_variant($init_packet_type), + )* + $( + #[doc = concat!("Request packet: ", 
$request_ssh_fxp_name)] + $request_packet_variant(ReqId, $request_packet_type), + )* + $( + #[doc = concat!("Response packet: ", $response_ssh_fxp_name)] + $response_packet_variant(ReqId, $response_packet_type), + )* + + } + + + impl SSHEncode for SftpPacket<'_> { + fn enc(&self, s: &mut dyn SSHSink) -> WireResult<()> { + let t = u8::from(self.sftp_num()); + t.enc(s)?; + match self { + // eg + // SftpPacket::KexInit(p) => { + // ... + $( + SftpPacket::$init_packet_variant(p) => { + p.enc(s)? + } + )* + $( + SftpPacket::$request_packet_variant(id, p) => { + id.enc(s)?; + p.enc(s)? + } + )* + $( + SftpPacket::$response_packet_variant(id, p) => { + id.enc(s)?; + p.enc(s)? + } + )* + }; + Ok(()) + } + } + + paste!{ + + + impl<'a: 'de, 'de> SSHDecode<'de> for SftpPacket<'a> + where 'de: 'a // This implies that both lifetimes are equal + { + fn dec(s: &mut S) -> WireResult + where S: SSHSource<'de> { + let packet_type_number = u8::dec(s)?; + + let packet_type = SftpNum::from(packet_type_number); + + let decoded_packet = match packet_type { + $( + SftpNum::[<$init_ssh_fxp_name:upper>] => { + + let inner_type = <$init_packet_type>::dec(s)?; + SftpPacket::$init_packet_variant(inner_type) + + }, + )* + $( + SftpNum::[<$request_ssh_fxp_name:upper>] => { + let req_id = ::dec(s)?; + let inner_type = <$request_packet_type>::dec(s)?; + SftpPacket::$request_packet_variant(req_id,inner_type) + + }, + )* + $( + SftpNum::[<$response_ssh_fxp_name:upper>] => { + let req_id = ::dec(s)?; + let inner_type = <$response_packet_type>::dec(s)?; + SftpPacket::$response_packet_variant(req_id,inner_type) + + }, + )* + _ => return Err(WireError::UnknownPacket { number: packet_type_number }) + }; + Ok(decoded_packet) + } + } + } // paste + + impl<'a> SftpPacket<'a> { + /// Maps `SpecificPacketVariant` to `message_num` + pub fn sftp_num(&self) -> SftpNum { + match self { + // eg + // SftpPacket::Open(_) => { + // .. 
+ $( + SftpPacket::$init_packet_variant(_) => { + + SftpNum::from($init_message_num as u8) + } + )* + $( + SftpPacket::$request_packet_variant(_,_) => { + + SftpNum::from($request_message_num as u8) + } + )* + $( + SftpPacket::$response_packet_variant(_,_) => { + + SftpNum::from($response_message_num as u8) + } + )* + } + } + + // TODO Maybe change WireResult -> SftpResult and SSHSink to SftpSink? + // This way I have more internal details and can return a Error::bug() if required + /// Encode a request. + /// + /// Used by a SFTP client. Does not include the length field. + pub fn encode_request(&self, id: ReqId, s: &mut dyn SSHSink) -> WireResult<()> { + if !self.sftp_num().is_request() { + return Err(WireError::PacketWrong) + // return Err(Error::bug()) + // I understand that it would be a bad call of encode_response and + // therefore a bug, bug Error::bug() is not compatible with WireResult + } + + // packet type + self.sftp_num().enc(s)?; + // request ID + id.0.enc(s)?; + // contents + self.enc(s) + } + + // TODO Maybe change WireResult -> SftpResult and SSHSource to SftpSource? + // This way I have more internal details and can return a more appropriate error if required + /// Decode a response. + /// + /// Used by a SFTP client. Does not include the length field. + pub fn decode_response<'de>(s: &mut SftpSource<'de>) -> WireResult + where + // S: SftpSource<'de>, + 'a: 'de, // 'a must outlive 'de and 'de must outlive 'a so they have matching lifetimes + 'de: 'a + { + let packet_length = u32::dec(s)?; + trace!("Packet field len = {:?}, buffer len = {:?}", packet_length, s.remaining()); + match Self::dec(s) { + Ok(sftp_packet)=> { + if !sftp_packet.sftp_num().is_response() + { + Err(WireError::PacketWrong) + }else{ + Ok(sftp_packet) + + } + }, + Err(e) => { + Err(e) + } + } + } + + + /// Decode a request or initialization packets + /// + /// Used by a SFTP server. Does not include the length field. 
+ /// + /// It will fail if the received packet is a response, no valid or incomplete packet + pub fn decode_request<'de>(s: &mut SftpSource<'de>) -> WireResult + where + // S: SftpSource<'de>, + 'a: 'de, // 'a must outlive 'de and 'de must outlive 'a so they have matching lifetimes + 'de: 'a + { + let packet_length = u32::dec(s)?; + trace!("Packet field len = {:?}, buffer len = {:?}", packet_length, s.remaining()); + + match Self::dec(s) { + Ok(sftp_packet)=> { + if (!sftp_packet.sftp_num().is_request() + && !sftp_packet.sftp_num().is_init()) + { + Err(WireError::PacketWrong) + }else{ + Ok(sftp_packet) + + } + }, + Err(e) => { + match e { + WireError::UnknownPacket{..} if !s.packet_fits() => Err(WireError::RanOut), + _ => Err(e) + } + + } + } + } + + /// Decode a a packet without checking if it is request or response + /// + /// Used by a SFTP server. Does not include the length field. + /// + /// It will fail if the received packet is a response, no valid or incomplete packet + pub fn decode<'de>(s: &mut SftpSource<'de>) -> WireResult + where + // S: SftpSource<'de>, + 'a: 'de, // 'a must outlive 'de and 'de must outlive 'a so they have matching lifetimes + 'de: 'a + { + let packet_length = u32::dec(s)?; + trace!("Packet field len = {:?}, buffer remaining = {:?}", packet_length, s.remaining()); + Self::dec(s) + } + + // TODO Maybe change WireResult -> SftpResult and SSHSink to SftpSink? + // This way I have more internal details and can return a Error::bug() if required + /// Encode a response. + /// + /// Used by a SFTP server. Does not include the length field. 
+ /// + /// Fails if the encoded SFTP Packet is not a response + pub fn encode_response(&self, s: &mut dyn SSHSink) -> WireResult<()> { + + if !self.sftp_num().is_response() { + return Err(WireError::PacketWrong) + // return Err(Error::bug()) + // I understand that it would be a bad call of encode_response and + // therefore a bug, bug Error::bug() is not compatible with WireResult + } + + self.enc(s) + } + + } + + $( + impl<'a> From<$init_packet_type> for SftpPacket<'a> { + fn from(s: $init_packet_type) -> SftpPacket<'a> { + SftpPacket::$init_packet_variant(s) //find me + } + } + )* + $( + /// **Warning**: No Sequence Id can be infered from a Packet Type + impl<'a> From<$request_packet_type> for SftpPacket<'a> { + fn from(s: $request_packet_type) -> SftpPacket<'a> { + warn!("Casting from {:?} to SftpPacket cannot set Request Id",$request_ssh_fxp_name); + SftpPacket::$request_packet_variant(ReqId(0), s) + } + } + )* + $( + /// **Warning**: No Sequence Id can be infered from a Packet Type + impl<'a> From<$response_packet_type> for SftpPacket<'a> { + fn from(s: $response_packet_type) -> SftpPacket<'a> { + warn!("Casting from {:?} to SftpPacket cannot set Request Id",$response_ssh_fxp_name); + SftpPacket::$response_packet_variant(ReqId(0), s) + } + } + )* + + }; // main macro + +} // sftpmessages macro + +sftpmessages! 
[ + + init:{ + (1, Init, InitVersionClient, "ssh_fxp_init"), + (2, Version, InitVersionLowest, "ssh_fxp_version"), + }, + + request: { + (3, Open, Open<'a>, "ssh_fxp_open"), + (4, Close, Close<'a>, "ssh_fxp_close"), + (5, Read, Read<'a>, "ssh_fxp_read"), + (6, Write, Write<'a>, "ssh_fxp_write"), + (7, LStat, LStat<'a>, "ssh_fxp_lstat"), + (11, OpenDir, OpenDir<'a>, "ssh_fxp_opendir"), + (12, ReadDir, ReadDir<'a>, "ssh_fxp_readdir"), + (16, PathInfo, PathInfo<'a>, "ssh_fxp_realpath"), + (17, Stat, Stat<'a>, "ssh_fxp_stat"), + // When adding requests, review MAX_REQUEST_LEN in order to adjust its value + }, + + response: { + (101, Status, Status<'a>, "ssh_fxp_status"), + (102, Handle, Handle<'a>, "ssh_fxp_handle"), + (103, Data, Data<'a>, "ssh_fxp_data"), + (104, Name, Name, "ssh_fxp_name"), + (105, Attrs, Attrs, "ssh_fxp_attrs"), + }, +]; + +#[cfg(test)] +mod proto_tests { + use super::*; + use crate::server::SftpSink; + + // TODO: There are always more test that can be done + + #[cfg(test)] + extern crate std; + #[cfg(test)] + use std::println; + + #[test] + fn test_data_roundtrip() { + let data_slice = b"Hello, world!".as_slice(); + let mut buff = [0u8; 512]; + let data_packet = + SftpPacket::Data(ReqId(10), Data { data: BinString(data_slice) }); + + let mut sink = SftpSink::new(&mut buff); + data_packet.encode_response(&mut sink).expect("Failed to encode response"); + println!( + "data_packet encoded_len = {:?}, encoded = {:?}", + sink.payload_len(), + sink.payload_slice() + ); + let mut source = SftpSource::new(sink.used_slice()); + println!("source = {:?}", source); + + match SftpPacket::decode_response(&mut source) { + Ok(SftpPacket::Data(req_id, data)) => { + assert_eq!(req_id, ReqId(10)); + assert_eq!(data.data, BinString(data_slice)); + } + Ok(other) => panic!("Expected Data packet, got: {:?}", other), + Err(e) => panic!("Failed to decode packet: {:?}", e), + } + } + + #[test] + fn test_status_encoding() { + let mut buf = [0u8; 256]; + let mut sink = 
SftpSink::new(&mut buf); + let status_packet = SftpPacket::Status( + ReqId(16), + Status { + code: StatusCode::SSH_FX_EOF, + message: "A".into(), + lang: "en-US".into(), + }, + ); + + let expected_status_packet_slice: [u8; 27] = [ + 0, 0, 0, 23, // Packet len + 101, // Packet type + 0, 0, 0, 16, // ReqId + 0, 0, 0, 1, // Status code: SSH_FX_EOF + 0, 0, 0, 1, // string message length + 65, // string message content + 0, 0, 0, 5, // string lang length + 101, 110, 45, 85, 83, // string lang content + ]; + + let _ = status_packet.encode_response(&mut sink); + + assert_eq!(&expected_status_packet_slice, sink.used_slice()); + } + + #[test] + fn test_attributes_roundtrip() { + let mut buff = [0u8; MAX_NAME_ENTRY_SIZE]; + let attr_read_only = Attrs { + size: Some(1), + uid: Some(2), + gid: Some(3), + permissions: Some(222), + atime: Some(4), + mtime: Some(5), + ext_count: None, + }; + + let mut sink = SftpSink::new(&mut buff); + attr_read_only.enc(&mut sink).unwrap(); + println!( + "attr_read_only encoded_len = {:?}, encoded = {:?}", + sink.payload_len(), + sink.payload_slice() + ); + let mut source = SftpSource::new(sink.payload_slice()); + println!("source = {:?}", source); + + let a_r = Attrs::dec(&mut source); + match a_r { + Ok(attrs) => { + println!("source = {:?}", attrs); + assert_eq!(attr_read_only, attrs); + } + Err(e) => panic!("The attributes could not be decoded: {:?}", e), + } + } + + #[test] + fn test_packet_open_reading() { + let buff_open_read = [ + 0u8, 0, 0, + 58, // Len + 3, // SftpPacket + 0, 0, 0, + 4, // ReqId + 0, 0, 0, + 41, // Text String len + 46, 47, 100, 101, 109, 111, 47, 115, 102, + 116, // file Path + 112, 47, 115, 116, 100, 47, 116, 101, 115, 116, 105, 110, 103, 47, 111, + 117, 116, 47, 46, 47, 53, 49, 50, 66, 95, 114, 97, 110, 100, 111, + 109, // and 41 + 0, 0, 0, + 1, // PFlags: 1u32 == SSSH_FXF_READ + 0, 0, 0, + 0, // Attrib flags == 0 No flags, no attributes + ]; + + let mut source = SftpSource::new(&buff_open_read); + println!("source 
= {:?}", source); + + match SftpPacket::decode_request(&mut source) { + Ok(SftpPacket::Open(_req_id, open)) => { + assert_eq!(PFlags::SSH_FXF_READ, open.pflags); + } + Ok(other) => panic!("Expected Open packet, got: {:?}", other), + Err(e) => panic!("Failed to decode packet: {:?}", e), + } + } +} diff --git a/sftp/src/sftpsource.rs b/sftp/src/sftpsource.rs new file mode 100644 index 00000000..837ee0f1 --- /dev/null +++ b/sftp/src/sftpsource.rs @@ -0,0 +1,239 @@ +use crate::proto::{ + SftpNum, SFTP_FIELD_ID_INDEX, SFTP_FIELD_LEN_INDEX, SFTP_FIELD_LEN_LENGTH, + SFTP_FIELD_REQ_ID_INDEX, SFTP_FIELD_REQ_ID_LEN, +}; + +use sunset::sshwire::{SSHSource, WireError, WireResult}; + +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; + +/// SftpSource implements [`SSHSource`] and also extra functions to handle +/// some challenges related to long SFTP packets in constrained environments +#[derive(Default, Debug)] +pub struct SftpSource<'de> { + buffer: &'de [u8], + index: usize, +} + +impl<'de> SSHSource<'de> for SftpSource<'de> { + fn take(&mut self, len: usize) -> sunset::sshwire::WireResult<&'de [u8]> { + if len + self.index > self.buffer.len() { + return Err(WireError::RanOut); + } + let original_index = self.index; + let slice = &self.buffer[self.index..self.index + len]; + self.index += len; + trace!( + "slice returned: {:?}. original index {:?}, new index: {:?}", + slice, + original_index, + self.index + ); + Ok(slice) + } + + fn remaining(&self) -> usize { + self.buffer.len() - self.index + } + + fn ctx(&mut self) -> &mut sunset::packets::ParseContext { + todo!("Which context for sftp?"); + } +} + +impl<'de> SftpSource<'de> { + /// Creates a new [`SftpSource`] referencing a buffer + pub fn new(buffer: &'de [u8]) -> Self { + debug!("New source with content: : {:?}", buffer); + SftpSource { buffer: buffer, index: 0 } + } + /// Peaks the buffer for packet type [`SftpNum`]. 
This does not advance + /// the reading index + /// + /// Useful to observe the packet fields in special conditions where a + /// `dec(s)` would fail + /// + /// **Warning**: will only work in well formed packets, in other case + /// the result will contains garbage + pub(crate) fn peak_packet_type(&self) -> WireResult { + if self.buffer.len() <= SFTP_FIELD_ID_INDEX { + debug!( + "Peak packet type failed: buffer len <= SFTP_FIELD_ID_INDEX ( {:?} <= {:?})", + self.buffer.len(), + SFTP_FIELD_ID_INDEX + ); + Err(WireError::RanOut) + } else { + Ok(SftpNum::from(self.buffer[SFTP_FIELD_ID_INDEX])) + } + } + + /// Peaks the buffer for packet length field. This does not advance the reading index + /// + /// Useful to observe the packet fields in special conditions where a `dec(s)` + /// would fail + /// + /// Use `peak_total_packet_len` instead if you want to also consider the the + /// length field + /// + /// **Warning**: will only work in well formed packets, in other case the result + /// will contains garbage + pub(crate) fn peak_packet_len(&self) -> WireResult { + if self.buffer.len() < SFTP_FIELD_LEN_INDEX + SFTP_FIELD_LEN_LENGTH { + Err(WireError::RanOut) + } else { + let bytes: [u8; 4] = self.buffer + [SFTP_FIELD_LEN_INDEX..SFTP_FIELD_LEN_INDEX + SFTP_FIELD_LEN_LENGTH] + .try_into() + .expect("slice length mismatch"); + + Ok(u32::from_be_bytes(bytes)) + } + } + + /// Peaks the packet in the source to obtain a total packet length, which + /// considers the length of the length field itself. For the packet length field + /// use [`peak_packet_len()`] + /// + /// This does not advance the reading index + /// + /// + /// **Warning**: will only work in well formed packets, in other case the result + /// will contains garbage + pub(crate) fn peak_total_packet_len(&self) -> WireResult { + Ok(self.peak_packet_len()? 
+ SFTP_FIELD_LEN_LENGTH as u32) + } + + /// Compares the total source capacity and the peaked packet length + /// plus the length field length itself to find out if the packet fit + /// in the source + /// **Warning**: will only work in well formed packets, in other case + /// the result will contains garbage + pub fn packet_fits(&self) -> bool { + match self.peak_total_packet_len() { + Ok(len) => self.buffer.len() >= len as usize, + Err(_) => false, + } + } + + /// Peaks the buffer for packet request id [`u32`]. This does not advance + /// the reading index + /// + /// Useful to observe the packet fields in special conditions where a + /// `dec(s)` would fail + /// + /// **Warning**: will only work in well formed packets, in other case + /// the result will contains garbage + pub fn peak_packet_req_id(&self) -> WireResult { + if self.buffer.len() < SFTP_FIELD_REQ_ID_INDEX + SFTP_FIELD_REQ_ID_LEN { + Err(WireError::RanOut) + } else { + let bytes: [u8; 4] = self.buffer[SFTP_FIELD_REQ_ID_INDEX + ..SFTP_FIELD_REQ_ID_INDEX + SFTP_FIELD_LEN_LENGTH] + .try_into() + .expect("slice length mismatch"); + + Ok(u32::from_be_bytes(bytes)) + } + } + /// Returns a slice on the used portion of the held buffer. + /// + /// This does not modify the internal index + pub fn buffer_used(&self) -> &[u8] { + &self.buffer[..self.index] + } + + /// returns a slice on the held buffer and makes it unavailable for further + /// decodes. 
+ pub fn consume_all(&mut self) -> &[u8] { + self.index = self.buffer.len(); + self.buffer + } +} + +#[cfg(test)] +mod local_tests { + use super::*; + + fn status_buffer() -> [u8; 27] { + let expected_status_packet_slice: [u8; 27] = [ + 0, 0, 0, 23, // Packet len + 101, // Packet type + 0, 0, 0, 16, // ReqId + 0, 0, 0, 1, // Status code: SSH_FX_EOF + 0, 0, 0, 1, // string message length + 65, // string message content + 0, 0, 0, 5, // string lang length + 101, 110, 45, 85, 83, // string lang content + ]; + expected_status_packet_slice + } + + #[test] + fn peaking_len() { + let buffer_status = status_buffer(); + let source = SftpSource::new(&buffer_status); + + let read_packet_len = source.peak_packet_len().unwrap(); + let original_packet_len = 23u32; + assert_eq!(original_packet_len, read_packet_len); + } + #[test] + fn peaking_total_len() { + let buffer_status = status_buffer(); + let source = SftpSource::new(&buffer_status); + + let read_total_packet_len = source.peak_total_packet_len().unwrap(); + let original_total_packet_len = 23u32 + 4u32; + assert_eq!(original_total_packet_len, read_total_packet_len); + } + + #[test] + fn peaking_type() { + let buffer_status = status_buffer(); + let source = SftpSource::new(&buffer_status); + let read_packet_type = source.peak_packet_type().unwrap(); + let original_packet_type = SftpNum::from(101u8); + assert_eq!(original_packet_type, read_packet_type); + } + #[test] + fn peaking_req_id() { + let buffer_status = status_buffer(); + let source = SftpSource::new(&buffer_status); + let read_req_id = source.peak_packet_req_id().unwrap(); + let original_req_id = 16u32; + assert_eq!(original_req_id, read_req_id); + } + + #[test] + fn packet_does_fit() { + let buffer_status = status_buffer(); + let source = SftpSource::new(&buffer_status); + assert_eq!(true, source.packet_fits()); + } + + #[test] + fn packet_does_not_fit() { + let buffer_status = status_buffer(); + let no_room_buffer = &buffer_status[..buffer_status.len() - 2]; + 
let source = SftpSource::new(no_room_buffer); + assert_eq!(false, source.packet_fits()); + } + + #[test] + fn consume_all_remaining() { + let inc_array: [u8; 512] = core::array::from_fn(|i| (i % 255) as u8); + let mut source = SftpSource::new(&inc_array); + let _consumed = source.consume_all(); + assert_eq!(0usize, source.remaining()); + } + + #[test] + fn consume_all_consumed() { + let inc_array: [u8; 512] = core::array::from_fn(|i| (i % 255) as u8); + let mut source = SftpSource::new(&inc_array); + let consumed = source.consume_all(); + assert_eq!(inc_array.len(), consumed.len()); + } +} diff --git a/sshwire-derive/src/lib.rs b/sshwire-derive/src/lib.rs index fed01188..462ca224 100644 --- a/sshwire-derive/src/lib.rs +++ b/sshwire-derive/src/lib.rs @@ -283,11 +283,6 @@ fn encode_enum( let atts = take_field_atts(&var.attributes)?; let mut rhs = StreamBuilder::new(); - if let Some(val) = &var.value { - // Avoid users expecting enum values to be encoded. - // Could be implemented if needed. - return Err(Error::Custom { error: "sunset_sshwire_derive::SSHEncode currently does not encode enum discriminants.".into(), span: Some(val.span())}) - } match var.fields { None => { // Unit enum @@ -305,7 +300,7 @@ fn encode_enum( } } - _ => return Err(Error::Custom { error: "sunset_sshwire_derive::SSHEncode currently only implements Unit or single value enum variants.".into(), span: None}) + _ => return Err(Error::Custom { error: "SSHEncode currently only implements Unit or single value enum variants.".into(), span: None}) } match_arm.puncts("=>"); From fd01170021148ef13dfdc0cc4f1e2d2d04b97838 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Wed, 4 Mar 2026 12:48:44 +1100 Subject: [PATCH 02/47] [skip ci] Adding sunset-sftp crate with basic SFTP server implementation - lib.rs: Now it contains the main library code for the sunset-sftp crate, including module declarations and public exports. 
Updated documentation to reflect the current state of the library and its features including issue #40. Main additions include: - sftphandler module: Implementation of the main entrypoint for the SFTP server, which will handle incoming SFTP requests and manage the server's state. - sftpserver.rs: Contains the trait definition for the SFTP server that is to be implemented by the user of the library, defining the required methods for handling SFTP operations. - sftperror.rs: Defines error types and handling for the SFTP server operations. Additional files: - sftpsink.rs: An implementation of SSHSink with extra functionality for handling SFTP packets - opaquefilehandle.rs: Collection of traits that a filehandle is expected to implement. About SftpHandler: Main entry point for the SFTP server. It requires to take ownership of an async_channel.rs::ChanInOut in order to write long responses to the client. This makes it not exactly sans-io and not completely observable, but this compromise facilitates the implementation of the SftpServer trait thanks to an internal embassy pipe (See sftpoutputchannelhandler.rs). 
--- sftp/src/lib.rs | 91 +-- sftp/src/opaquefilehandle.rs | 70 ++ sftp/src/sftperror.rs | 98 +++ sftp/src/sftphandler/mod.rs | 6 + sftp/src/sftphandler/requestholder.rs | 366 +++++++++ sftp/src/sftphandler/sftphandler.rs | 757 ++++++++++++++++++ .../sftphandler/sftpoutputchannelhandler.rs | 195 +++++ sftp/src/sftpserver.rs | 663 +++++++++++++++ sftp/src/sftpsink.rs | 99 +++ 9 files changed, 2300 insertions(+), 45 deletions(-) create mode 100644 sftp/src/opaquefilehandle.rs create mode 100644 sftp/src/sftperror.rs create mode 100644 sftp/src/sftphandler/mod.rs create mode 100644 sftp/src/sftphandler/requestholder.rs create mode 100644 sftp/src/sftphandler/sftphandler.rs create mode 100644 sftp/src/sftphandler/sftpoutputchannelhandler.rs create mode 100644 sftp/src/sftpserver.rs create mode 100644 sftp/src/sftpsink.rs diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs index 24f7b363..f6cd7a3d 100644 --- a/sftp/src/lib.rs +++ b/sftp/src/lib.rs @@ -21,13 +21,14 @@ //! //! ## Basic features //! -//! - [ ] [SFTP Protocol Initialization](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-4) (Only SFTP V3 supported) -//! - [ ] [Canonicalizing the Server-Side Path Name](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.11) support -//! - [ ] [Open, close](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3) +//! - [x] [SFTP Protocol Initialization](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-4) (Only SFTP V3 supported) +//! - [x] [Canonicalizing the Server-Side Path Name](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.11) support +//! - [x] [Open, close](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3) //! and [write](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) -//! - [ ] Directory [Browsing](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.7) -//! 
- [ ] File [read](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4), -//! - [ ] File [stats](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.8) +//! - [x] Directory [Browsing](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.7) +//! - [x] File [read](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4), +//! - [] File [write](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) with conditions. See [Server Channel Window length is reduced to zero when long data is sent from server to client](https://github.com/mkj/sunset/issues/40), +//! - [x] File [stats](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.8) //! //! ## Minimal features for convenient usability //! @@ -51,58 +52,58 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -// mod opaquefilehandle; +mod opaquefilehandle; mod proto; -// mod sftperror; -// mod sftphandler; -// mod sftpserver; -// mod sftpsink; +mod sftperror; +mod sftphandler; +mod sftpserver; +mod sftpsink; mod sftpsource; // Main calling point for the library provided that the user implements // a [`server::SftpServer`]. 
// // Please see basic usage at `../demo/sftd/std` -// pub use sftphandler::SftpHandler; +pub use sftphandler::SftpHandler; /// Source of SFTP packets /// /// Used to decode SFTP packets from a byte slice pub use sftpsource::SftpSource; -// /// Structures and types used to add the details for the target system -// /// Related to the implementation of the [`server::SftpServer`], which -// /// is meant to be instantiated by the user and passed to [`SftpHandler`] -// /// and has the task of executing client requests in the underlying system -// pub mod server { +/// Structures and types used to add the details for the target system +/// Related to the implementation of the [`server::SftpServer`], which +/// is meant to be instantiated by the user and passed to [`SftpHandler`] +/// and has the task of executing client requests in the underlying system +pub mod server { -// pub use crate::sftpserver::DirReply; -// pub use crate::sftpserver::ReadReply; -// pub use crate::sftpserver::ReadStatus; -// pub use crate::sftpserver::SftpOpResult; -// pub use crate::sftpserver::SftpServer; -// /// Helpers to reduce error prone tasks and hide some details that -// /// add complexity when implementing an [`SftpServer`] -// pub mod helpers { -// pub use crate::sftpserver::helpers::*; + pub use crate::sftpserver::DirReply; + pub use crate::sftpserver::ReadReply; + pub use crate::sftpserver::ReadStatus; + pub use crate::sftpserver::SftpOpResult; + pub use crate::sftpserver::SftpServer; + /// Helpers to reduce error prone tasks and hide some details that + /// add complexity when implementing an [`SftpServer`] + pub mod helpers { + pub use crate::sftpserver::helpers::*; -// #[cfg(feature = "std")] -// pub use crate::sftpserver::DirEntriesCollection; -// #[cfg(feature = "std")] -// pub use crate::sftpserver::get_file_attrs; -// } -// pub use crate::sftpsink::SftpSink; -// pub use sunset::sshwire::SSHEncode; + #[cfg(feature = "std")] + pub use crate::sftpserver::DirEntriesCollection; + 
#[cfg(feature = "std")] + pub use crate::sftpserver::get_file_attrs; + } + pub use crate::sftpsink::SftpSink; + pub use sunset::sshwire::SSHEncode; -// pub use crate::proto::MAX_REQUEST_LEN; -// } + pub use crate::proto::MAX_REQUEST_LEN; +} /// Handles and helpers used by the [`sftpserver::SftpServer`] trait implementer -// pub mod handles { -// pub use crate::opaquefilehandle::OpaqueFileHandle; -// pub use crate::opaquefilehandle::OpaqueFileHandleManager; -// pub use crate::opaquefilehandle::PathFinder; -// } +pub mod handles { + pub use crate::opaquefilehandle::OpaqueFileHandle; + pub use crate::opaquefilehandle::OpaqueFileHandleManager; + pub use crate::opaquefilehandle::PathFinder; +} /// SFTP Protocol types and structures pub mod protocol { @@ -121,8 +122,8 @@ pub mod protocol { } } -// /// Errors and results used in this crate -// pub mod error { -// pub use crate::sftperror::SftpError; -// pub use crate::sftperror::SftpResult; -// } +/// Errors and results used in this crate +pub mod error { + pub use crate::sftperror::SftpError; + pub use crate::sftperror::SftpResult; +} diff --git a/sftp/src/opaquefilehandle.rs b/sftp/src/opaquefilehandle.rs new file mode 100644 index 00000000..19450ef1 --- /dev/null +++ b/sftp/src/opaquefilehandle.rs @@ -0,0 +1,70 @@ +use crate::protocol::FileHandle; + +use sunset::sshwire::WireResult; + +/// This is the trait with the required methods for interoperability between different opaque file handles +/// used in SFTP transactions +pub trait OpaqueFileHandle: + Sized + Clone + core::hash::Hash + PartialEq + Eq + core::fmt::Debug +{ + /// Creates a new instance using a given string slice as `seed` which + /// content should not clearly related to the seed + fn new(seed: &str) -> Self; + + /// Creates a new `OpaqueFileHandleTrait` copying the content of the `FileHandle` + fn try_from(file_handle: &FileHandle<'_>) -> WireResult; + + /// Returns a FileHandle pointing to the data in the `OpaqueFileHandleTrait` Implementation + fn 
into_file_handle(&self) -> FileHandle<'_>; +} + +/// Used to standardize finding a path within the HandleManager +/// +/// Must be implemented by the private handle structure to allow the `OpaqueHandleManager` to look for the path of the file itself +pub trait PathFinder { + /// Helper function to find elements stored in the HandleManager that matches the give path + fn matches(&self, path: &Self) -> bool; + + /// gets the path as a reference + fn get_path_ref(&self) -> &str; +} + +/// This trait is used to manage the OpaqueFile +/// +/// The SFTP module user is not required to use it but instead is a suggestion for an exchangeable +/// trait that facilitates structuring the store and retrieve of 'OpaqueFileHandleTrait' (K), +/// together with a private handle type or structure (V) that will contains all the details internally stored for the given file. +/// +/// The only requisite for v is that implements PathFinder, which in fact is another suggested helper to allow the `OpaqueHandleManager` +/// to look for the file path. 
+pub trait OpaqueFileHandleManager +where + K: OpaqueFileHandle, + V: PathFinder, +{ + /// The error used for all the trait members returning an error + type Error; + + // Excluded since it is too restrictive + // /// Performs any HandleManager Initialization + // fn new() -> Self; + + /// Given the private_handle, stores it and return an opaque file handle + /// + /// Returns an error if the private_handle has a matching path as obtained from `PathFinder` + /// + /// Salt has been added to allow the user to add a factor that will mask how the opaque handle is generated + fn insert(&mut self, private_handle: V, salt: &str) -> Result; + + /// + fn remove(&mut self, opaque_handle: &K) -> Option; + + /// Returns true if the opaque handle exist + fn opaque_handle_exist(&self, opaque_handle: &K) -> bool; + + /// given the opaque_handle returns a reference to the associated private handle + fn get_private_as_mut_ref(&mut self, opaque_handle: &K) -> Option<&mut V>; + + /// given the opaque_handle returns a reference to the associated private handle + fn get_private_as_ref(&self, opaque_handle: &K) -> Option<&V>; +} diff --git a/sftp/src/sftperror.rs b/sftp/src/sftperror.rs new file mode 100644 index 00000000..e19c253f --- /dev/null +++ b/sftp/src/sftperror.rs @@ -0,0 +1,98 @@ +use crate::protocol::StatusCode; + +use crate::sftphandler::requestholder::RequestHolderError; +use sunset::Error as SunsetError; +use sunset::sshwire::WireError; + +use core::convert::From; +use log::warn; + +/// Errors that are specific to this SFTP lib +#[derive(Debug)] +pub enum SftpError { + /// The SFTP server has not been initialised. No SFTP version has been + /// establish + NotInitialized, + /// An `SSH_FXP_INIT` packet was received after the server was already + /// initialized + AlreadyInitialized, + /// A packet could not be decoded as it was malformed + MalformedPacket, + /// The server does not have an implementation for the current request. 
+ /// Some possible causes are: + /// + /// - The request has not been handled by an [`crate::sftpserver::SftpServer`] + /// - Long request which its handling was not implemented + NotSupported, + /// The connection has been closed by the client + ClientDisconnected, + /// The [`crate::sftpserver::SftpServer`] failed doing an IO operation + FileServerError(StatusCode), + // A RequestHolder instance throw an error. See [`crate::requestholder::RequestHolderError`] + /// A RequestHolder instance threw an error. See `RequestHolderError` + RequestHolderError(RequestHolderError), + /// A variant containing a [`WireError`] + WireError(WireError), + /// A variant containing a [`SunsetError`] + SunsetError(SunsetError), +} + +impl From for SftpError { + fn from(value: WireError) -> Self { + SftpError::WireError(value) + } +} + +impl From for SftpError { + fn from(value: SunsetError) -> Self { + SftpError::SunsetError(value) + } +} + +impl From for SftpError { + fn from(value: StatusCode) -> Self { + SftpError::FileServerError(value) + } +} + +impl From for SftpError { + fn from(value: RequestHolderError) -> Self { + SftpError::RequestHolderError(value) + } +} +// impl From for SftpError { +// fn from(value: FileServerError) -> Self { +// SftpError::FileServerError(value) +// } +// } + +impl From for WireError { + fn from(value: SftpError) -> Self { + match value { + SftpError::WireError(wire_error) => wire_error, + _ => WireError::PacketWrong, + } + } +} + +impl From for SunsetError { + fn from(value: SftpError) -> Self { + match value { + SftpError::SunsetError(error) => error, + SftpError::WireError(wire_error) => wire_error.into(), + SftpError::NotInitialized + | SftpError::NotSupported + | SftpError::AlreadyInitialized + | SftpError::MalformedPacket + | SftpError::RequestHolderError(_) + | SftpError::FileServerError(_) => { + warn!("Casting error loosing information: {:?}", value); + sunset::error::PacketWrong.build() + } + SftpError::ClientDisconnected => 
SunsetError::ChannelEOF, + } + } +} + +/// result specific to this SFTP lib +pub type SftpResult = Result; diff --git a/sftp/src/sftphandler/mod.rs b/sftp/src/sftphandler/mod.rs new file mode 100644 index 00000000..988cc09f --- /dev/null +++ b/sftp/src/sftphandler/mod.rs @@ -0,0 +1,6 @@ +pub mod requestholder; +mod sftphandler; +mod sftpoutputchannelhandler; + +pub use sftphandler::SftpHandler; +pub use sftpoutputchannelhandler::SftpOutputProducer; diff --git a/sftp/src/sftphandler/requestholder.rs b/sftp/src/sftphandler/requestholder.rs new file mode 100644 index 00000000..e962118f --- /dev/null +++ b/sftp/src/sftphandler/requestholder.rs @@ -0,0 +1,366 @@ +use crate::{ + proto::{MAX_REQUEST_LEN, SftpNum, SftpPacket}, + sftpsource::SftpSource, +}; + +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; +use sunset::sshwire::WireError; + +#[derive(Debug)] +pub enum RequestHolderError { + /// The slice to hold is too long + NoRoom, + /// The slice holder is keeping a slice already. Consider cleaning + Busy, + /// The slice holder is empty + Empty, + /// There is not enough data in the slice we are trying to add. we need more data + RanOut, + /// The Packet held is not a request + NotRequest, + /// WireError + WireError(WireError), +} + +impl From for RequestHolderError { + fn from(value: WireError) -> Self { + RequestHolderError::WireError(value) + } +} + +pub(crate) type RequestHolderResult = Result; + +/// Helper struct to manage short fragmented requests that have been +/// received in consecutive read operations +/// +/// For requests exceeding the length of buffers other techniques, such +/// as composing them into multiple request, might help reducing the +/// required buffer sizes. This is recommended for restricted environments. 
+/// +/// The intended use for this RequestHolder is (in order): +/// - `new`: Initialize the struct with a slice that will keep the +/// request in memory +/// +/// - `try_hold`: load the data for an incomplete request +/// +/// - `try_append_for_valid_request`: append more data from another +/// slice to complete the request +/// +/// - `try_get_ref`: returns a reference to the portion of the slice +/// containing a request +/// +/// - `reset`: reset counters and flags to allow `try_hold` a new request +/// +#[derive(Debug, PartialEq, Eq)] +pub(crate) struct RequestHolder<'a> { + /// The buffer used to contain the data for the request + buffer: &'a mut [u8; MAX_REQUEST_LEN], + /// The index of the last byte in the buffer containing usable data + buffer_fill_index: usize, + /// Number of bytes appended in a previous `try_hold` or `try_append_for_valid_request` slice + appended: usize, + /// Used to mark when the structure is holding data + busy: bool, +} + +impl<'a> RequestHolder<'a> { + /// The buffer will be used to hold a full request. Choose a + /// reasonable size for this buffer. 
+ pub(crate) fn new(buffer: &'a mut [u8; MAX_REQUEST_LEN]) -> Self { + RequestHolder { + buffer: buffer, + buffer_fill_index: 0, + busy: false, + appended: 0, + } + } + + /// Uses the internal buffer to store a copy of the provided slice + /// + /// The definition of `try_hold` and `try_append_slice` separately + /// is deliberated to follow an order in composing the held request + /// + /// Increases the `appended()` counter + /// + /// returns: + /// + /// - Ok(usize): the number of bytes read from the slice + /// + /// - `Err(Busy)`: If there has been a call to `try_hold` without a call to `reset` + pub(crate) fn try_hold(&mut self, slice: &[u8]) -> RequestHolderResult { + if self.busy { + return Err(RequestHolderError::Busy); + } + + self.busy = true; + self.try_append_slice(slice)?; + let read_in = self.appended(); + self.appended = 0; + Ok(read_in) + } + + /// Resets the structure allowing it to hold a new request. + /// + /// Resets the `appended()` counter. + /// + /// Will **clear** the previous data from the buffer. + pub(crate) fn reset(&mut self) -> () { + self.busy = false; + self.buffer_fill_index = 0; + self.appended = 0; + self.buffer.fill(0); + } + + /// Appends a byte at a time to the internal buffer and tries to + /// decode a request + /// + /// Reset and increase the `appended()` counter. 
+ /// + /// **Returns**: + /// + /// - `Ok(())`: A valid request is held now + /// + /// - `Err(NotRequest)`: The decoded packet is not a request + /// + /// - `Err(RanOut)`: Not enough bytes in the slice to add a single byte + /// + /// - `Err(NoRoom)`: The internal buffer is full + /// + /// - `Err(Empty)`: If the structure has not been loaded with `try_hold` + /// + pub(crate) fn try_appending_for_valid_request( + &mut self, + slice_in: &[u8], + ) -> RequestHolderResult { + debug!( + "try_appending_for_valid_request: self = {:?}\n\ + Space left = {:?}\n\ + Length of slice to append from = {:?}", + self, + self.remaining_len(), + slice_in.len() + ); + + if !self.busy { + error!("Request Holder is not busy"); + return Err(RequestHolderError::Empty); + } + + self.appended = 0; // reset appended bytes counter. Try_append_slice will increase it + + if self.is_full() { + error!("Request Holder is full"); + return Err(RequestHolderError::NoRoom); + } + + if let Some(request) = self.valid_request() { + debug!("The request holder already contained a valid request"); + return Ok(request.sftp_num()); + } + + let mut slice = slice_in; + loop { + debug!( + "try_appending_for_valid_request: Slice length {:?}", + slice.len() + ); + if slice.len() > 0 { + self.try_append_slice(&[slice[0]])?; + slice = &slice[1..]; + let mut source = SftpSource::new(self.try_get_ref()?); + if let Ok(pt) = source.peak_packet_type() { + if !pt.is_request() { + error!("The request candidate is not a request: {pt:?}"); + return Err(RequestHolderError::NotRequest); + } + } else { + continue; + }; + match SftpPacket::decode_request(&mut source) { + Ok(request) => { + debug!("Request is {:?}", request); + return Ok(request.sftp_num()); + } + Err(WireError::RanOut) => { + if slice.len() == 0 { + return Err(RequestHolderError::RanOut); + } + } + Err(WireError::NoRoom) => { + return Err(RequestHolderError::NoRoom); + } + Err(WireError::PacketWrong) => { + return Err(RequestHolderError::NotRequest); + } + 
Err(e) => return Err(RequestHolderError::WireError(e)), + } + } else { + return Err(RequestHolderError::RanOut); + } + } + } + + pub(crate) fn valid_request(&self) -> Option> { + if !self.busy { + return None; + } + let mut source = SftpSource::new(self.try_get_ref().unwrap_or(&[0])); + match SftpPacket::decode_request(&mut source) { + Ok(request) => { + return Some(request); + } + Err(..) => return None, + } + } + + /// Gets a reference to the slice that it is holding + pub(crate) fn try_get_ref(&self) -> RequestHolderResult<&[u8]> { + if self.busy { + debug!( + "Returning reference to: {:?}", + &self.buffer[..self.buffer_fill_index] + ); + Ok(&self.buffer[..self.buffer_fill_index]) + } else { + Err(RequestHolderError::Empty) + } + } + + pub(crate) fn is_full(&mut self) -> bool { + self.buffer_fill_index == self.buffer.len() + } + + #[allow(unused)] + /// Returns true if it has a slice in its buffer + pub(crate) fn is_busy(&self) -> bool { + self.busy + } + + /// Returns the bytes appened in the last call to + /// [`RequestHolder::try_append_for_valid_request`] or + /// [`RequestHolder::try_append_for_valid_header`] or + /// [`RequestHolder::try_append_slice`] or + /// [`RequestHolder::try_appending_single_byte`] + pub(crate) fn appended(&self) -> usize { + self.appended + } + + /// Appends a slice to the internal buffer. 
Requires the buffer to + /// be busy by using `try_hold` first + /// + /// Increases the `appended` counter but does not reset it + /// + /// Returns: + /// + /// - `Ok(())`: the slice was appended + /// + /// - `Err(Empty)`: If the structure has not been loaded with `try_hold` + /// + /// - `Err(NoRoom)`: The internal buffer is full but there is not a full valid request in the buffer + fn try_append_slice(&mut self, slice: &[u8]) -> RequestHolderResult<()> { + if slice.len() == 0 { + warn!("try appending a zero length slice"); + return Ok(()); + } + if !self.busy { + return Err(RequestHolderError::Empty); + } + + let in_len = slice.len(); + if in_len > self.remaining_len() { + return Err(RequestHolderError::NoRoom); + } + debug!("Adding: {:?}", slice); + + self.buffer[self.buffer_fill_index..self.buffer_fill_index + in_len] + .copy_from_slice(slice); + + self.buffer_fill_index += in_len; + debug!( + "RequestHolder: index = {:?}, slice = {:?}", + self.buffer_fill_index, + self.try_get_ref()? 
+ ); + self.appended += in_len; + Ok(()) + } + + /// Returns the number of bytes unused at the end of the buffer, + /// this is, the remaining length + fn remaining_len(&self) -> usize { + self.buffer.len() - self.buffer_fill_index + } +} + +#[cfg(test)] +mod local_test { + use super::*; + + #[cfg(test)] + extern crate std; + #[cfg(test)] + use std::println; + + fn get_buffer_with_valid_request() -> [u8; 85] { + [ + 0, 0, 128, 25, 6, 0, 0, 0, 23, 0, 0, 0, 4, 249, 67, 81, 122, 0, 0, 0, 0, + 0, 9, 128, 0, 0, 0, 128, 0, 116, 101, 115, 116, 105, 110, 103, 47, 111, + 117, 116, 47, 49, 48, 48, 77, 66, 95, 114, 97, 110, 100, 111, 109, 0, 0, + 0, 26, 0, 0, 0, 4, 0, 0, 1, 164, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, + ] + } + #[test] + fn valid_request_uses_filled_data() { + let mut clean_buffer = [0u8; MAX_REQUEST_LEN]; + let buff_data = get_buffer_with_valid_request(); + + let mut rh = RequestHolder::new(&mut clean_buffer); + rh.try_hold(&buff_data).unwrap(); + assert!(rh.valid_request().is_some()); + + rh.reset(); + assert!(rh.valid_request().is_none()); + + rh.try_hold(&buff_data[..5]).unwrap(); + assert!(rh.valid_request().is_none()); + } + + #[test] + fn try_appending_for_valid_request_uses_filled_data() { + let mut clean_buffer = [0u8; MAX_REQUEST_LEN]; + let buff_data = get_buffer_with_valid_request(); + + let mut rh = RequestHolder::new(&mut clean_buffer); + rh.try_hold(&buff_data).unwrap(); + assert!(rh.valid_request().is_some()); + + rh.reset(); + assert!(rh.valid_request().is_none()); + + rh.try_hold(&buff_data[..5]).unwrap(); + assert!(rh.try_appending_for_valid_request(&buff_data[5..10]).is_err()); + } + + #[test] + fn try_appending_for_valid_request_works() { + let mut clean_buffer = [0u8; MAX_REQUEST_LEN]; + let buff_data = get_buffer_with_valid_request(); + println!("{buff_data:?}"); + + let mut rh = RequestHolder::new(&mut clean_buffer); + rh.try_hold(&buff_data).unwrap(); + assert!(rh.valid_request().is_some()); + + rh.reset(); + 
assert!(rh.valid_request().is_none()); + + rh.try_hold(&buff_data[..5]).unwrap(); + println!("before appending{rh:?}"); + let appending = rh.try_appending_for_valid_request(&buff_data[5..]); + // println!("{appending:?}",); + println!("after appending {rh:?}"); + assert!(appending.is_ok()); + } +} diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs new file mode 100644 index 00000000..50500a50 --- /dev/null +++ b/sftp/src/sftphandler/sftphandler.rs @@ -0,0 +1,757 @@ +use crate::error::SftpError; +use crate::handles::OpaqueFileHandle; +use crate::proto::{ + self, InitVersionClient, InitVersionLowest, LStat, MAX_REQUEST_LEN, ReqId, + SFTP_VERSION, SftpNum, SftpPacket, Stat, StatusCode, +}; +use crate::server::{DirReply, ReadReply}; +use crate::sftperror::SftpResult; +use crate::sftphandler::requestholder::{RequestHolder, RequestHolderError}; +use crate::sftphandler::sftpoutputchannelhandler::{ + SftpOutputPipe, SftpOutputProducer, +}; +use crate::sftpserver::SftpServer; +use crate::sftpsource::SftpSource; + +use embassy_futures::select::select; +use sunset::Error as SunsetError; +use sunset::sshwire::{SSHSource, WireError}; +use sunset_async::ChanInOut; + +use core::u32; +use embedded_io_async::Read; +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; + +/// FSM for handling sftp requests during [`SftpHandler::process`] +#[derive(Default, Debug, PartialEq, Eq)] +enum HandlerState { + /// The handle is not been initialized. + /// if the client receivs an Init packet it will process it. + #[default] + Uninitialized, + /// The handle is ready to process requests. No request pending + /// A new packet will be evaluated to be process as: + /// - a regular request + /// - fragment (More data is needed) + /// - long request (It does not fit in the buffers and segmenting + /// strategies are used) + Idle, + /// The client has received a request and will decide how to process it. 
+ /// Use the self.incomplete_request_holder + ProcessRequest { sftp_num: SftpNum }, + /// There is a fragmented request and more bytes are needed + /// Use the self.incomplete_request_holder + ProcessFragment, + /// A request, with a length over the incoming buffer capacity is being + /// processed. + /// + /// E.g. a write request with size exceeding the + /// buffer size: Processing this request will require to be split + /// into multiple write actions + ProcessWriteRequest { offset: u64, remaining_data: u32 }, + + /// Used to clear an invalid buffer in cases where there is still + /// data to be process but no longer required + ClearBuffer { data: usize }, +} + +/// Process the raw buffers in and out from a subsystem channel decoding +/// request and encoding responses +/// +/// It will delegate request to an [`crate::sftpserver::SftpServer`] +/// implemented by the library +/// user taking into account the local system details. +/// +/// The compiler time constant `BUFFER_OUT_SIZE` is used to define the +/// size of the output buffer for the subsystem [`Embassy-sync::pipe`] used +/// to send responses safely across the instantiated structure. 
+/// +pub struct SftpHandler<'a, T, S, const BUFFER_OUT_SIZE: usize> +where + T: OpaqueFileHandle, + S: SftpServer<'a, T>, +{ + /// Holds the internal state if the SFTP handle + state: HandlerState, + + /// The local SFTP File server implementing the basic SFTP requests + /// defined by [`crate::sftpserver::SftpServer`] + file_server: &'a mut S, + + // /// Use to process SFTP Write packets that have been received + // /// partially and the remaining is expected in successive buffers + // partial_write_request_tracker: Option>, + /// Used to handle received buffers that do not hold a complete request [`SftpPacket`] + request_holder: RequestHolder<'a>, + + /// Marker to keep track of the OpaqueFileHandle type + _marker: core::marker::PhantomData, +} + +impl<'a, T, S, const BUFFER_OUT_SIZE: usize> SftpHandler<'a, T, S, BUFFER_OUT_SIZE> +where + T: OpaqueFileHandle, + S: SftpServer<'a, T>, +{ + /// Creates a new instance of the structure. + /// + /// Requires: + /// + /// - `file_server` (implementing [`crate::sftpserver::SftpServer`] ): to execute + /// the request in the local system + /// - `request_buffer`: used to deal with fragmented + /// packets during [`SftpHandler::process_loop`] + pub fn new( + file_server: &'a mut S, + request_buffer: &'a mut [u8; MAX_REQUEST_LEN], + ) -> Self { + SftpHandler { + file_server, + state: HandlerState::default(), + request_holder: RequestHolder::new(request_buffer), + _marker: core::marker::PhantomData, + } + } + + /// Take the [`ChanInOut`] and locks, Processing all the request from stdio until + /// an EOF is received + pub async fn process_loop( + &mut self, + stdio: ChanInOut<'a>, + buffer_in: &mut [u8], + ) -> SftpResult<()> { + let (mut chan_in, chan_out) = stdio.split(); + + let mut sftp_output_pipe = SftpOutputPipe::::new(); + + let (mut output_consumer, output_producer) = + sftp_output_pipe.split(chan_out)?; + + let output_consumer_loop = output_consumer.receive_task(); + + let processing_loop = async { + loop { + 
trace!("SFTP: About to read bytes from SSH Channel"); + let lr: usize = match chan_in.read(buffer_in).await { + Ok(lr) => lr, + Err(e) => match e { + SunsetError::NoRoom {} => { + error!("SSH channel is full"); + continue; + } + _ => return Err(e.into()), + }, + }; + + debug!("SFTP <---- received: {:?} bytes", lr); + trace!("SFTP <---- received: {:?}", &buffer_in[0..lr]); + if lr == 0 { + debug!("client disconnected"); + return Err(SftpError::ClientDisconnected); + } + + self.process(&buffer_in[0..lr], &output_producer).await?; + } + #[allow(unreachable_code)] + SftpResult::Ok(()) + }; + match select(processing_loop, output_consumer_loop).await { + embassy_futures::select::Either::First(r) => { + error!("Processing returned: {:?}", r); + r + } + embassy_futures::select::Either::Second(r) => { + error!("Output consumer returned: {:?}", r); + r + } + } + } + + /// - Decodes the buffer_in request + /// - Process the request delegating + /// operations to a [`SftpServer`] implementation + /// - Serializes an answer in `output_producer` + /// + async fn process( + &mut self, + buffer_in: &[u8], + output_producer: &SftpOutputProducer<'_, BUFFER_OUT_SIZE>, + ) -> SftpResult<()> { + /* + Possible scenarios: + - Init: The init handshake has to be performed. Only Init packet is accepted. NAV(Idle) + - handshake?: The client has received an Init packet and is processing it. NAV( Init, Idle) + - Idle: Ready to process request. No request pending. In this point. NAV(ProcessRequest, Fragment) + - Fragment: There is a fragmented request and more data is needed. NAV(ProcessRequest, ProcessLongRequest) + - ProcessRequest: The client has received a request and is processing it. NAV(Idle) + - ProcessLongRequest: The client has received a request that cannot fit in the buffer. Special treatment is required. 
NAV(Idle) + */ + let mut buf = buffer_in; + + trace!("Received {:} bytes to process", buf.len()); + + // We used `run_another_loop` to bypass the buf len check in + // cases where we need to process data held + // TODO: Fix this pattern + let mut skip_checking_buffer = false; + trace!("Entering loop to process the full received buffer"); + while skip_checking_buffer || buf.len() > 0 { + debug!( + "<=======================[ SFTP Process State: {:?} ]=======================> Buffer remaining: {}", + self.state, + buf.len() + ); + skip_checking_buffer = false; + match &self.state { + HandlerState::ProcessWriteRequest { + offset, + remaining_data: data_len, + } => { + if let Some(request) = self.request_holder.valid_request() { + if let SftpPacket::Write(req_id, write) = request { + let used = (*data_len as usize).min(buf.len()); + let remaining_data = *data_len - used as u32; + + let data = &buf[..used]; + buf = &buf[used..]; + match self + .file_server + .write(&T::try_from(&write.handle)?, *offset, data) + .await + { + Ok(_) => { + if remaining_data == 0 { + output_producer + .send_status( + req_id, + StatusCode::SSH_FX_OK, + "", + ) + .await?; + trace!("Still in buffer: {buf:?}"); + self.state = HandlerState::Idle; + } else { + self.state = + HandlerState::ProcessWriteRequest { + offset: *offset + (used as u64), + remaining_data, + }; + } + } + Err(e) => { + error!("SFTP write thrown: {:?}", e); + output_producer + .send_status( + req_id, + StatusCode::SSH_FX_FAILURE, + "error writing", + ) + .await?; + self.state = HandlerState::ClearBuffer { + data: remaining_data as usize, + }; + } + }; + } else { + todo!("Wrong transition? Uncontrolled for now"); + } + } else { + todo!("Wrong transition? 
Uncontrolled for now"); + } + } + HandlerState::Uninitialized => { + debug!("Creating a source: buf_len = {:?}", buf.len()); + let mut source = SftpSource::new(&buf); + + match SftpPacket::decode_request(&mut source) { + Ok(request) => match request { + SftpPacket::Init(InitVersionClient { + version: SFTP_VERSION, + }) => { + debug!( + "Accepted initialization request: {:?}", + request + ); + output_producer + .send_packet(&SftpPacket::Version( + InitVersionLowest { version: SFTP_VERSION }, + )) + .await?; + buf = &buf[buf.len() - source.remaining()..]; + self.state = HandlerState::Idle; + } + SftpPacket::Init(init_version_client) => { + error!( + "Incompatible SFTP Version: {:?} is not {SFTP_VERSION:?}", + &init_version_client + ); + return Err(SftpError::NotSupported); + } + _ => { + error!( + "Wrong SFTP Packet before Init or incompatible version: {request:?}" + ); + return Err(SftpError::NotInitialized); + } + }, + Err(e) => { + error!("Malformed SFTP Packet before Init: {e:?}"); + return Err(SftpError::MalformedPacket); + } // Err(e) => { + // error!("Malformed SFTP Packet before Init: {e:?}"); + // return Err(SftpError::MalformedPacket); + // } + } + } + HandlerState::Idle => { + self.request_holder.reset(); + debug!("Creating a source: buf_len = {:?}", buf.len()); + let mut source = SftpSource::new(&buf); + trace!("source: {source:?}"); + + match SftpPacket::decode_request(&mut source) { + Ok(request) => { + debug!("Got a valid request {:?}", request.sftp_num()); + self.request_holder.try_hold(&source.buffer_used())?; + + // We got the request. Moving on to process it before deserializing more + // data + skip_checking_buffer = true; + self.state = HandlerState::ProcessRequest { + sftp_num: request.sftp_num(), + }; + // TODO Wasteful. Will have to decode the request again. Maybe hold it? 
+ buf = &buf[buf.len() - source.remaining()..]; + } + Err(WireError::RanOut) => { + debug!("source: {source:?}"); + let rl = self + .request_holder + .try_hold(&source.consume_all())?; + + buf = &buf[buf.len() - source.remaining()..]; + debug!( + "Incomplete packet. request holder initialized with {rl:?} bytes" + ); + self.state = HandlerState::ProcessFragment; + } + Err(WireError::UnknownPacket { number }) => { + error!("Unknown packet: {number}"); + output_producer + .send_status( + ReqId( + source + .peak_packet_req_id() + .unwrap_or(u32::MAX), + ), + StatusCode::SSH_FX_OP_UNSUPPORTED, + "", + ) + .await?; + buf = &buf[buf.len() - source.remaining()..]; + debug!( + "Unknown Packet. clearing the buffer in place since it filts" + ); + } + Err(WireError::PacketWrong) => { + error!("Not a request: "); + output_producer + .send_status( + ReqId( + source + .peak_packet_req_id() + .unwrap_or(u32::MAX), + ), + StatusCode::SSH_FX_BAD_MESSAGE, + "Not a request", + ) + .await?; + } + Err(e) => { + error!("Unexpected error: Bug!"); + return Err(SftpError::WireError(e)); + } + }; + } + HandlerState::ProcessFragment => { + match self.request_holder.try_appending_for_valid_request(&buf) { + Ok(sftp_num) => { + let used = self.request_holder.appended(); + debug!( + "{used:?} bytes added. We got a complete request: {sftp_num:?}:: {:?}", + self.request_holder + ); + debug!( + "Request: {:?}", + self.request_holder.valid_request() + ); + buf = &buf[used..]; + self.state = HandlerState::ProcessRequest { sftp_num } + } + Err(RequestHolderError::RanOut) => { + let used = self.request_holder.appended(); + buf = &buf[used..]; + debug!( + "{used:?} bytes added. Will keep adding \ + until we hold a valid request" + ); + } + Err(RequestHolderError::NoRoom) => { + error!( + "Could not complete the request. holding buffer is full" + ); + return Err(SunsetError::Bug.into()); + } + Err(e) => { + error!("{e:?}"); + return Err(e.into()); + } + } + } + HandlerState::ProcessRequest { .. 
} => { + // At this point the assumption is that the request holder will contain + // a full valid request (Lets call this an invariant) + + if let Some(request) = self.request_holder.valid_request() { + if !request.sftp_num().is_request() { + error!( + "Unexpected SftpPacket: {:?}", + request.sftp_num() + ); + return Err(SunsetError::BadUsage {}.into()); + } + match request { + // SftpPacket::Init(init_version_client) => todo!(), + // SftpPacket::Version(init_version_lowest) => todo!(), + SftpPacket::Read(req_id, ref read) => { + debug!("Read request: {:?}", request); + + let mut reply = + ReadReply::new(req_id, output_producer); + if let Err(error) = self + .file_server + .read( + &T::try_from(&read.handle)?, + read.offset, + read.len, + &mut reply, + ) + .await + { + error!("Error reading data: {:?}", error); + if let SftpError::FileServerError(status) = error + { + output_producer + .send_status( + req_id, + status, + "Could not list attributes", + ) + .await?; + } else { + output_producer + .send_status( + req_id, + StatusCode::SSH_FX_FAILURE, + "Could not list attributes", + ) + .await?; + } + }; + + match reply.read_diff() { + diff if diff > 0 => { + debug!( + "ReadReply not completed after read operation. Still need to send {} bytes", + diff + ); + return Err(SunsetError::Bug.into()); + } + diff if diff < 0 => { + error!( + "ReadReply has sent more data than announced: {} bytes extra", + -diff + ); + return Err(SunsetError::Bug.into()); + } + _ => {} + } + + self.state = HandlerState::Idle; + } + SftpPacket::LStat(req_id, LStat { file_path: path }) => { + match self + .file_server + .stats(false, path.as_str()?) 
+ .await + { + Ok(attrs) => { + debug!( + "List stats for {} is {:?}", + path, attrs + ); + + output_producer + .send_packet(&SftpPacket::Attrs( + req_id, attrs, + )) + .await?; + } + Err(status) => { + error!( + "Error listing stats for {}: {:?}", + path, status + ); + output_producer + .send_status( + req_id, + status, + "Could not list attributes", + ) + .await?; + } + }; + self.state = HandlerState::Idle; + } + SftpPacket::Stat(req_id, Stat { file_path: path }) => { + match self + .file_server + .stats(true, path.as_str()?) + .await + { + Ok(attrs) => { + debug!( + "List stats for {} is {:?}", + path, attrs + ); + + output_producer + .send_packet(&SftpPacket::Attrs( + req_id, attrs, + )) + .await?; + } + Err(status) => { + error!( + "Error listing stats for {}: {:?}", + path, status + ); + output_producer + .send_status( + req_id, + status, + "Could not list attributes", + ) + .await?; + } + }; + self.state = HandlerState::Idle; + } + SftpPacket::ReadDir(req_id, read_dir) => { + let mut reply = + DirReply::new(req_id, output_producer); + if let Err(status) = self + .file_server + .readdir( + &T::try_from(&read_dir.handle)?, + &mut reply, + ) + .await + { + error!("Open failed: {:?}", status); + + output_producer + .send_status( + req_id, + status, + "Error Reading Directory", + ) + .await?; + }; + match reply.read_diff() { + diff if diff > 0 => { + debug!( + "DirReply not completed after read operation. Still need to send {} bytes", + diff + ); + return Err(SunsetError::Bug.into()); + } + diff if diff < 0 => { + error!( + "DirReply has sent more data than announced: {} bytes extra", + -diff + ); + return Err(SunsetError::Bug.into()); + } + _ => {} + } + self.state = HandlerState::Idle; + } + SftpPacket::OpenDir(req_id, open_dir) => { + match self + .file_server + .opendir(open_dir.dirname.as_str()?) 
+ .await + { + Ok(opaque_file_handle) => { + let response = SftpPacket::Handle( + req_id, + proto::Handle { + handle: opaque_file_handle + .into_file_handle(), + }, + ); + output_producer + .send_packet(&response) + .await?; + } + Err(status_code) => { + error!("Open failed: {:?}", status_code); + output_producer + .send_status( + req_id, + StatusCode::SSH_FX_FAILURE, + "", + ) + .await?; + } + }; + self.state = HandlerState::Idle; + } + SftpPacket::Close(req_id, close) => { + match self + .file_server + .close(&T::try_from(&close.handle)?) + .await + { + Ok(_) => { + output_producer + .send_status( + req_id, + StatusCode::SSH_FX_OK, + "", + ) + .await?; + } + Err(e) => { + error!("SFTP Close thrown: {:?}", e); + output_producer + .send_status( + req_id, + StatusCode::SSH_FX_FAILURE, + "Could not Close the handle", + ) + .await?; + } + } + self.state = HandlerState::Idle; + } + SftpPacket::Write(_, write) => { + debug!("Got write: {:?}", write); + self.state = HandlerState::ProcessWriteRequest { + offset: write.offset, + remaining_data: write.data_len, + }; + } + SftpPacket::Open(req_id, open) => { + match self + .file_server + .open(open.filename.as_str()?, &open.pflags) + .await + { + Ok(opaque_file_handle) => { + let response = SftpPacket::Handle( + req_id, + proto::Handle { + handle: opaque_file_handle + .into_file_handle(), + }, + ); + output_producer + .send_packet(&response) + .await?; + } + Err(status_code) => { + error!("Open failed: {:?}", status_code); + output_producer + .send_status( + req_id, + StatusCode::SSH_FX_FAILURE, + "", + ) + .await?; + } + }; + self.state = HandlerState::Idle; + } + SftpPacket::PathInfo(req_id, path_info) => { + match self + .file_server + .realpath(path_info.path.as_str()?) 
+ .await + { + Ok(name_entry) => { + let mut dir_reply = + DirReply::new(req_id, output_producer); + let encoded_len = + crate::sftpserver::helpers::get_name_entry_len(&name_entry)?; + debug!( + "PathInfo encoded length: {:?}", + encoded_len + ); + trace!( + "PathInfo Response content: {:?}", + encoded_len + ); + dir_reply + .send_header(1, encoded_len) + .await?; + dir_reply.send_item(&name_entry).await?; + if dir_reply.read_diff() != 0 { + error!( + "PathInfo reply not completed after sending the only item" + ); + return Err(SunsetError::Bug.into()); + } + } + Err(code) => { + output_producer + .send_status(req_id, code, "") + .await?; + } + } + self.state = HandlerState::Idle; + } + SftpPacket::Init(..) + | SftpPacket::Version(..) + | SftpPacket::Status(..) + | SftpPacket::Handle(..) + | SftpPacket::Data(..) + | SftpPacket::Name(..) + | SftpPacket::Attrs(..) => { + error!( + "Unexpected SftpPacket in ProcessRequest state: {:?}", + request.sftp_num() + ); + return Err(SunsetError::BadUsage {}.into()); + } + } + } else { + return Err(SunsetError::bug().into()); + } + } + HandlerState::ClearBuffer { data } => { + if *data == 0 { + self.state = HandlerState::Idle; + } else { + buf = &buf[(*data).min(buf.len())..] + } + } + } + trace!("Process will check buf len {:?}", buf.len()); + } + debug!("Whole buffer processed. 
Getting more data"); + Ok(()) + } +} diff --git a/sftp/src/sftphandler/sftpoutputchannelhandler.rs b/sftp/src/sftphandler/sftpoutputchannelhandler.rs new file mode 100644 index 00000000..3843e8e8 --- /dev/null +++ b/sftp/src/sftphandler/sftpoutputchannelhandler.rs @@ -0,0 +1,195 @@ +use crate::error::{SftpError, SftpResult}; +use crate::proto::{ReqId, SftpPacket, Status, StatusCode}; +use crate::server::SftpSink; + +use embassy_sync::mutex::Mutex; +use sunset_async::ChanOut; + +use embassy_sync::pipe::{Pipe, Reader as PipeReader, Writer as PipeWriter}; +use embedded_io_async::Write; +use sunset_async::SunsetRawMutex; + +use log::{debug, error, trace}; + +type CounterMutex = Mutex; + +pub struct SftpOutputPipe { + pipe: Pipe, + counter_send: CounterMutex, + counter_recv: CounterMutex, + splitted: bool, +} + +/// M: SunsetSunsetRawMutex +impl SftpOutputPipe { + /// Creates an empty SftpOutputPipe. + /// The output channel will be consumed during the split call + /// + /// Usage: + /// + /// let output_pipe = SftpOutputPipe::::new(); + /// + pub fn new() -> Self { + SftpOutputPipe { + pipe: Pipe::new(), + counter_send: Mutex::::new(0), + counter_recv: Mutex::::new(0), + splitted: false, + } + } + + /// Get a Consumer and Producer pair so the producer can send data to the + /// output channel without mutable borrows. + /// + /// The [`SftpOutputConsumer`] needs to be running to write data to the + /// [`ChanOut`] + /// + /// ## Lifetimes + /// The lifetime indicates that the lifetime of self, ChanOut and the + /// consumer and producer are the same. I chose this because if the ChanOut + /// is closed, there is no point on having a pipe outliving it. 
+ pub fn split<'a>( + &'a mut self, + ssh_chan_out: ChanOut<'a>, + ) -> SftpResult<(SftpOutputConsumer<'a, N>, SftpOutputProducer<'a, N>)> { + if self.splitted { + return Err(SftpError::AlreadyInitialized); + } + self.splitted = true; + let (reader, writer) = self.pipe.split(); + Ok(( + SftpOutputConsumer { reader, ssh_chan_out, counter: &self.counter_recv }, + SftpOutputProducer { writer, counter: &self.counter_send }, + )) + } +} + +/// Consumer that takes ownership of [`ChanOut`]. It pipes the data received +/// from a [`PipeReader`] into the channel +pub(crate) struct SftpOutputConsumer<'a, const N: usize> { + reader: PipeReader<'a, SunsetRawMutex, N>, + ssh_chan_out: ChanOut<'a>, + counter: &'a CounterMutex, +} + +impl<'a, const N: usize> SftpOutputConsumer<'a, N> { + /// Run it to start the piping + pub async fn receive_task(&mut self) -> SftpResult<()> { + debug!("Running SftpOutout Consumer Reader task"); + let mut buf = [0u8; N]; + loop { + let rl = self.reader.read(&mut buf).await; + let mut _total = 0; + { + let mut lock = self.counter.lock().await; + *lock += rl; + _total = *lock; + } + + debug!("Output Consumer: ---> Reads {rl} bytes. 
Total {_total}"); + let mut scanning_buffer = &buf[..rl]; + if rl > 0 { + // Replaced write_all with loop to handle partial writes to discard issues in write_all + while scanning_buffer.len() > 0 { + trace!( + "Output Consumer: Tries to write {:?} bytes to ChanOut", + scanning_buffer.len() + ); + let wl = self.ssh_chan_out.write(scanning_buffer).await?; + debug!("Output Consumer: Written {:?} bytes ", wl); + if wl < scanning_buffer.len() { + debug!( + "Output Consumer: ChanOut accepted only part of the buffer" + ); + } + trace!( + "Output Consumer: Bytes written {:?}", + &scanning_buffer[..wl] + ); + scanning_buffer = &scanning_buffer[wl..]; + } + debug!("Output Consumer: Finished writing all bytes in read buffer"); + } else { + error!("Output Consumer: Empty array received"); + } + } + } +} + +/// Producer used to send data to a [`ChanOut`] without the restrictions +/// of mutable borrows +#[derive(Clone)] +pub struct SftpOutputProducer<'a, const N: usize> { + writer: PipeWriter<'a, SunsetRawMutex, N>, + counter: &'a CounterMutex, +} +impl<'a, const N: usize> SftpOutputProducer<'a, N> { + /// Sends the data encoded in the provided [`SftpSink`] without including + /// the size. + /// + /// Use this when you are sending chunks of data after a valid header + pub async fn send_data(&self, buf: &[u8]) -> SftpResult<()> { + Self::send_buffer(&self.writer, &buf, &self.counter).await; + Ok(()) + } + + /// Simplifies the task of sending a status response to the client. 
+ pub async fn send_status( + &self, + req_id: ReqId, + status: StatusCode, + msg: &'static str, + ) -> SftpResult<()> { + let response = SftpPacket::Status( + req_id, + Status { code: status, message: msg.into(), lang: "en-US".into() }, + ); + trace!("Output Producer: Pushing a status message: {:?}", response); + self.send_packet(&response).await?; + Ok(()) + } + + /// Sends a SFTP Packet into the channel out, including the length field + pub async fn send_packet(&self, packet: &SftpPacket<'_>) -> SftpResult<()> { + let mut buf = [0u8; N]; + let mut sink = SftpSink::new(&mut buf); + packet.encode_response(&mut sink)?; + debug!("Output Producer: Sending packet {:?}", packet); + Self::send_buffer(&self.writer, &sink.used_slice(), &self.counter).await; + Ok(()) + } + + /// Internal associated method to log the writes to the pipe + async fn send_buffer( + writer: &PipeWriter<'a, SunsetRawMutex, N>, + buf: &[u8], + counter: &CounterMutex, + ) { + let mut _total = 0; + { + let mut lock = counter.lock().await; + *lock += buf.len(); + _total = *lock; + } + + debug!("Output Producer: <--- Sends {:?} bytes. Total {_total}", buf.len()); + trace!("Output Producer: Sending buffer {:?}", buf); + + // writer.write_all(buf); // ??? error[E0596]: cannot borrow `*writer` as mutable, as it is behind a `&` reference + + let mut buf = buf; + loop { + if buf.len() == 0 { + break; + } + + trace!("Output Producer: Tries to send {:?} bytes", buf.len()); + let bytes_sent = writer.write(&buf).await; + buf = &buf[bytes_sent..]; + trace!( + "Output Producer: sent {bytes_sent:?}. 
{:?} bytes remain ", + buf.len() + ); + } + } +} diff --git a/sftp/src/sftpserver.rs b/sftp/src/sftpserver.rs new file mode 100644 index 00000000..35019f55 --- /dev/null +++ b/sftp/src/sftpserver.rs @@ -0,0 +1,663 @@ +use crate::error::{SftpError, SftpResult}; +use crate::proto::{ + ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, ENCODED_SSH_FXP_DATA_MIN_LENGTH, + MAX_NAME_ENTRY_SIZE, NameEntry, PFlags, SftpNum, +}; +use crate::server::SftpSink; +use crate::sftphandler::SftpOutputProducer; +use crate::{ + handles::OpaqueFileHandle, + proto::{Attrs, ReqId, StatusCode}, +}; + +use sunset::sshwire::SSHEncode; + +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; + +/// Result used to store the result of an Sftp Operation +pub type SftpOpResult = core::result::Result; + +/// To finish read requests the server needs to answer to +/// **subsequent READ requests** after all the data has been sent already +/// with a [`SftpPacket`] including a status code [`StatusCode::SSH_FX_EOF`]. +/// +/// [`ReadStatus`] enum has been implemented to keep record of these exhausted +/// read operations. +/// +/// See: +/// +/// - [Reading and Writing](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) +/// - [Scanning Directories](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.7) +#[derive(PartialEq, Debug, Default)] +pub enum ReadStatus { + /// There is more data to be read therefore the [`SftpServer`] will + /// send more data in the next read request. + #[default] + PendingData, + /// The server has provided all the data requested therefore the [`SftpServer`] + /// will send a [`SftpPacket`] including a status code [`StatusCode::SSH_FX_EOF`] + /// in the next read request. + EndOfFile, +} + +/// All trait functions are optional in the SFTP protocol. +/// Some less core operations have a Provided implementation returning +/// returns `SSH_FX_OP_UNSUPPORTED`. 
Common operations must be implemented, +/// but may return `Err(StatusCode::SSH_FX_OP_UNSUPPORTED)`. +pub trait SftpServer<'a, T> +where + T: OpaqueFileHandle, +{ + /// Opens a file for reading/writing + fn open( + &'_ mut self, + path: &str, + mode: &PFlags, + ) -> impl core::future::Future> { + async move { + log::error!( + "SftpServer Open operation not defined: path = {:?}, attrs = {:?}", + path, + mode + ); + Err(StatusCode::SSH_FX_OP_UNSUPPORTED) + } + } + + /// Close either a file or directory handle + fn close( + &mut self, + handle: &T, + ) -> impl core::future::Future> { + async move { + log::error!( + "SftpServer Close operation not defined: handle = {:?}", + handle + ); + + Err(StatusCode::SSH_FX_OP_UNSUPPORTED) + } + } + /// Reads from a file that has previously being opened for reading + /// + /// ## Notes to the implementer: + /// + /// The implementer is expected to use the parameter `reply` [`DirReply`] to: + /// + /// - In case of no more data is to be sent, call `reply.send_eof()` + /// - There is more data to be sent from an open file: + /// 1. Call `reply.send_header()` with the length of data to be sent + /// 2. Call `reply.send_data()` once or multiple times to send all the data announced + /// 3. Do not call `reply.send_eof()` during this [`readdir`] method call + /// + + /// If the length communicated in the header does not match the total length of the data + /// sent using `reply.send_data()`, the SFTP session will be broken. 
+ /// + #[allow(unused)] + fn read( + &mut self, + opaque_file_handle: &T, + offset: u64, + len: u32, + reply: &mut ReadReply<'_, N>, + ) -> impl core::future::Future> { + async move { + log::error!( + "SftpServer Read operation not defined: handle = {:?}, offset = {:?}, len = {:?}", + opaque_file_handle, + offset, + len + ); + Err(SftpError::FileServerError(StatusCode::SSH_FX_OP_UNSUPPORTED)) + } + } + /// Writes to a file that has previously being opened for writing + fn write( + &mut self, + opaque_file_handle: &T, + offset: u64, + buf: &[u8], + ) -> impl core::future::Future> { + async move { + log::error!( + "SftpServer Write operation not defined: handle = {:?}, offset = {:?}, buf = {:?}", + opaque_file_handle, + offset, + buf + ); + Ok(()) + } + } + + /// Opens a directory and returns a handle + fn opendir( + &mut self, + dir: &str, + ) -> impl core::future::Future> { + async move { + log::error!("SftpServer OpenDir operation not defined: dir = {:?}", dir); + Err(StatusCode::SSH_FX_OP_UNSUPPORTED) + } + } + + /// Reads the list of items in a directory and returns them using the [`DirReply`] + /// parameter. + /// + /// ## Notes to the implementer: + /// + /// The implementer is expected to use the parameter `reply` [`DirReply`] to: + /// + /// - In case of no more items in the directory to send, call `reply.send_eof()` + /// - There are more items in the directory: + /// 1. Call `reply.send_header()` with the number of items and the [`SSHEncode`] + /// length of all the items to be sent + /// 2. Call `reply.send_item()` for each of the items announced to be sent + /// 3. Do not call `reply.send_eof()` during this [`readdir`] method call + /// + /// If the length communicated in the header does not match the total length of all + /// the items sent using `reply.send_item()`, the SFTP session will be + /// broken. 
+ /// + /// The server is expected to keep track of the number of items that remain to be sent + /// to the client since the client will only stop asking for more elements in the + /// directory when a read dir request is answer with an reply.send_eof() + /// + #[allow(unused_variables)] + fn readdir( + &mut self, + opaque_dir_handle: &T, + reply: &mut DirReply<'_, N>, + ) -> impl core::future::Future> { + async move { + log::error!( + "SftpServer ReadDir operation not defined: handle = {:?}", + opaque_dir_handle + ); + Err(StatusCode::SSH_FX_OP_UNSUPPORTED) + } + } + + /// Provides the real path of the directory specified + fn realpath( + &mut self, + dir: &str, + ) -> impl core::future::Future>> { + async move { + log::error!( + "SftpServer RealPath operation not defined: dir = {:?}", + dir + ); + Err(StatusCode::SSH_FX_OP_UNSUPPORTED) + } + } + + /// Provides the stats of the given file path + fn stats( + &mut self, + follow_links: bool, + file_path: &str, + ) -> impl core::future::Future> { + async move { + log::error!( + "SftpServer Stats operation not defined: follow_link = {:?}, \ + file_path = {:?}", + follow_links, + file_path + ); + Err(StatusCode::SSH_FX_OP_UNSUPPORTED) + } + } +} + +/// A reference structure passed to the [`SftpServer::read()`] method to +/// allow replying with the read data. +/// Uses for [`ReadReply`] to: +/// +/// - In case of no more data avaliable to be sent, call `reply.send_eof()` +/// - There is data to be sent from an open file: +/// 1. Call `reply.send_header()` with the length of data to be sent +/// 2. Call `reply.send_data()` as many times as needed to complete a +/// sent of data of the announced length +/// 3. Do not call `reply.send_eof()` during this [`read`] method call +/// +/// It handles immutable sending data via the underlying sftp-channel +/// [`sunset_async::async_channel::ChanOut`] used in the context of an +/// SFTP Session. 
+/// +pub struct ReadReply<'g, const N: usize> { + /// The request Id that will be use`d in the response + req_id: ReqId, + + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, + /// Length of data to be sent as announced in [`ReadReply::send_header`] + data_len: u32, + /// Length of data sent so far using [`ReadReply::send_data`] + data_sent_len: u32, +} + +impl<'g, const N: usize> ReadReply<'g, N> { + /// New instances can only be created within the crate. Users can only + /// use other public methods to use it. + pub(crate) fn new( + req_id: ReqId, + chan_out: &'g SftpOutputProducer<'g, N>, + ) -> Self { + ReadReply { req_id, chan_out, data_len: 0, data_sent_len: 0 } + } + + // TODO Make this enforceable + // TODO Automate encoding the SftpPacket + /// Sends a header for `SSH_FXP_DATA` response. This includes the total + /// response length, the packet type, request id and data length + /// + /// The packet data content, excluding the length must be sent using + /// [`ReadReply::send_data`] + pub async fn send_header(&mut self, data_len: u32) -> SftpResult<()> { + debug!( + "ReadReply: Sending header for request id {:?}: data length = {:?}", + self.req_id, data_len + ); + let mut s = [0u8; N]; + let mut sink = SftpSink::new(&mut s); + + let payload = + ReadReply::::encode_data_header(&mut sink, self.req_id, data_len)?; + + debug!( + "Sending header: len = {:?}, content = {:?}", + payload.len(), + payload + ); + // Sending payload_slice since we are not making use of the sink sftpPacket length calculation + self.chan_out.send_data(payload).await?; + self.data_len = data_len; + Ok(()) + } + + /// Sends a buffer with data. 
Call it as many times as needed to send + /// the announced data length + /// + /// **Important**: Call this after you have called `send_header` + pub async fn send_data(&mut self, buff: &[u8]) -> SftpResult<()> { + self.chan_out.send_data(buff).await?; + self.data_sent_len += buff.len() as u32; + Ok(()) + } + + /// Sends EOF meaning that there is no more data to be sent + /// + pub async fn send_eof(&self) -> SftpResult<()> { + self.chan_out.send_status(self.req_id, StatusCode::SSH_FX_EOF, "").await + } + + /// Indicates whether all the data announced in the header has been sent + /// + /// returns 0 when all data has been sent + /// returns >0 when there is still data to be sent + /// returns <0 when too much data has been sent + pub fn read_diff(&self) -> i32 { + (self.data_len as i32) - (self.data_sent_len as i32) + } + + fn encode_data_header( + sink: &'g mut SftpSink<'g>, + req_id: ReqId, + data_len: u32, + ) -> Result<&'g [u8], SftpError> { + // length field + (data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH).enc(sink)?; + // packet type (1) + u8::from(SftpNum::SSH_FXP_DATA).enc(sink)?; + // request id (4) + req_id.enc(sink)?; + // data length (4) + data_len.enc(sink)?; + Ok(sink.payload_slice()) + } +} + +#[cfg(test)] +mod read_reply_tests { + use super::*; + + #[cfg(test)] + extern crate std; + // #[cfg(test)] + // use std::println; + + #[test] + fn compose_header() { + const N: usize = 512; + + let req_id = ReqId(42); + let data_len = 128; + let mut buffer = [0u8; N]; + let mut sink = SftpSink::new(&mut buffer); + + let payload = + ReadReply::::encode_data_header(&mut sink, req_id, data_len).unwrap(); + + assert_eq!( + data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH, + u32::from_be_bytes(payload[..4].try_into().unwrap()) + ); + } +} + +/// Uses for [`DirReply`] to: +/// +/// - In case of no more items in the directory to be sent, call `reply.send_eof()` +/// - There are more items in the directory to be sent: +/// 1. 
Call `reply.send_header()` with the number of items and the [`SSHEncode`] +/// length of all the items to be sent +/// 2. Call `reply.send_item()` for each of the items announced to be sent +/// 3. Do not call `reply.send_eof()` during this [`readdir`] method call +/// +/// It handles immutable sending data via the underlying sftp-channel +/// [`sunset_async::async_channel::ChanOut`] used in the context of an +/// SFTP Session. +/// +pub struct DirReply<'g, const N: usize> { + /// The request Id that will be use`d in the response + req_id: ReqId, + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, + /// Length of data to be sent as announced in [`ReadReply::send_header`] + data_len: u32, + /// Length of data sent so far using [`ReadReply::send_data`] + data_sent_len: u32, +} + +impl<'g, const N: usize> DirReply<'g, N> { + // const ENCODED_NAME_SFTP_PACKET_LENGTH: u32 = 9; + + /// New instances can only be created within the crate. Users can only + /// use other public methods to use it. 
+ pub(crate) fn new( + req_id: ReqId, + chan_out: &'g SftpOutputProducer<'g, N>, + ) -> Self { + // DirReply { chan_out: chan_out_wrapper, req_id } + DirReply { req_id, chan_out, data_len: 0, data_sent_len: 0 } + } + + // TODO Make this enforceable + // TODO Automate encoding the SftpPacket + /// Sends the header to the client with the number of files as [`NameEntry`] and the [`SSHEncode`] + /// length of all these [`NameEntry`] items + pub async fn send_header( + &mut self, + count: u32, + items_encoded_len: u32, + ) -> SftpResult<()> { + debug!( + "I will send the header here for request id {:?}: count = {:?}, length = {:?}", + self.req_id, count, items_encoded_len + ); + let mut s = [0u8; N]; + let mut sink = SftpSink::new(&mut s); + + let payload = DirReply::::encode_data_header( + &mut sink, + self.req_id, + items_encoded_len, + count, + )?; + + debug!( + "Sending header: len = {:?}, content = {:?}", + payload.len(), + payload + ); + self.chan_out.send_data(payload).await?; + self.data_len = items_encoded_len; + Ok(()) + } + + /// Sends a directory item to the client as a [`NameEntry`] + /// + /// Call this + pub async fn send_item(&mut self, name_entry: &NameEntry<'_>) -> SftpResult<()> { + let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; + let mut sftp_sink = SftpSink::new(&mut buffer); + name_entry.enc(&mut sftp_sink).map_err(|err| { + error!("WireError: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + + self.chan_out.send_data(sftp_sink.payload_slice()).await?; + self.data_sent_len += sftp_sink.payload_len() as u32; + Ok(()) + } + + /// Sends EOF meaning that there is no more files in the directory + pub async fn send_eof(&self) -> SftpResult<()> { + self.chan_out.send_status(self.req_id, StatusCode::SSH_FX_EOF, "").await + } + + /// Indicates whether all the data announced in the header has been sent + /// + /// returns 0 when all data has been sent + /// returns >0 when there is still data to be sent + /// returns <0 when too much data has been sent + pub 
fn read_diff(&self) -> i32 { + (self.data_len as i32) - (self.data_sent_len as i32) + } + + fn encode_data_header( + sink: &'g mut SftpSink<'g>, + req_id: ReqId, + items_encoded_len: u32, + count: u32, + ) -> Result<&'g [u8], SftpError> { + // We need to consider the packet type, Id and count fields + // This way I collect data required for the header and collect + // valid entries into a vector (only std) + (items_encoded_len + ENCODED_BASE_NAME_SFTP_PACKET_LENGTH).enc(sink)?; + u8::from(SftpNum::SSH_FXP_NAME).enc(sink)?; + req_id.enc(sink)?; + count.enc(sink)?; + + Ok(sink.payload_slice()) + } +} + +#[cfg(test)] +mod dir_reply_tests { + use super::*; + + #[cfg(test)] + extern crate std; + // #[cfg(test)] + // use std::println; + + #[test] + fn compose_header() { + const N: usize = 512; + + let req_id = ReqId(42); + let data_len = 128; + let count = 128; + let mut buffer = [0u8; N]; + let mut sink = SftpSink::new(&mut buffer); + + let payload = + DirReply::::encode_data_header(&mut sink, req_id, data_len, count) + .unwrap(); + + // println!("{payload:?}"); + + // println!("{:?}", &u32::from_be_bytes(payload[..4].try_into().unwrap())); + assert_eq!( + data_len + ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, + u32::from_be_bytes(payload[..4].try_into().unwrap()) + ); + } +} + +pub mod helpers { + use crate::{ + error::SftpResult, + proto::{MAX_NAME_ENTRY_SIZE, NameEntry}, + server::SftpSink, + }; + + use sunset::sshwire::SSHEncode; + + /// Helper function to get the length of a given [`NameEntry`] + /// as it would be serialized to the wire. 
+ /// + /// Use this function to calculate the total length of a collection + /// of `NameEntry`s in order to send a correct response Name header + pub fn get_name_entry_len(name_entry: &NameEntry<'_>) -> SftpResult { + let mut buf = [0u8; MAX_NAME_ENTRY_SIZE]; + let mut temp_sink = SftpSink::new(&mut buf); + name_entry.enc(&mut temp_sink)?; + Ok(temp_sink.payload_len() as u32) + } +} + +#[cfg(feature = "std")] +use crate::proto::Filename; +#[cfg(feature = "std")] +use std::{ + fs::{DirEntry, Metadata, ReadDir}, + os::{linux::fs::MetadataExt, unix::fs::PermissionsExt}, + time::SystemTime, +}; + +#[cfg(feature = "std")] +/// This is a helper structure to make ReadDir into something manageable for +/// [`DirReply`] +#[derive(Debug)] +pub struct DirEntriesCollection { + /// Number of elements + count: u32, + /// Computed length of all the encoded elements + encoded_length: u32, + /// The actual entries. As you can see these are DirEntry. This is a std choice + entries: Vec, +} + +#[cfg(feature = "std")] +impl DirEntriesCollection { + /// Creates this DirEntriesCollection so linux std users do not need to + /// translate `std` directory elements into Sftp structures before sending a response + /// back to the client + pub fn new(dir_iterator: ReadDir) -> SftpOpResult { + use log::info; + + let mut encoded_length = 0; + + let entries: Vec = dir_iterator + .filter_map(|entry_result| { + let entry = entry_result.ok()?; + let filename = entry.file_name().to_string_lossy().into_owned(); + let name_entry = NameEntry { + filename: Filename::from(filename.as_str()), + _longname: Filename::from(""), + attrs: Self::get_attrs_or_empty(entry.metadata()), + }; + + let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; + let mut sftp_sink = SftpSink::new(&mut buffer); + name_entry.enc(&mut sftp_sink).ok()?; + encoded_length += u32::try_from(sftp_sink.payload_len()) + .map_err(|_| StatusCode::SSH_FX_FAILURE) + .ok()?; + Some(entry) + }) + .collect(); + + let count = + 
u32::try_from(entries.len()).map_err(|_| StatusCode::SSH_FX_FAILURE)?; + + info!( + "Processed {} entries, estimated serialized length: {}", + count, encoded_length + ); + + Ok(Self { count, encoded_length, entries }) + } + + /// Using the provided [`DirReply`] sends a response taking care of + /// composing a SFTP Entry header and sending everything in the right order + /// + /// Returns a [`ReadStatus`] + pub async fn send_response( + &self, + reply: &mut DirReply<'_, N>, + ) -> SftpOpResult { + self.send_entries_header(reply).await?; + self.send_entries(reply).await?; + Ok(ReadStatus::EndOfFile) + } + /// Sends a header for all the elements in the ReadDir iterator + /// + /// It will take care of counting them and finding the serialized length of each + /// element + async fn send_entries_header( + &self, + reply: &mut DirReply<'_, N>, + ) -> SftpOpResult<()> { + reply.send_header(self.count, self.encoded_length).await.map_err(|e| { + debug!("Could not send header {e:?}"); + StatusCode::SSH_FX_FAILURE + }) + } + + /// Sends the entries in the ReadDir iterator back to the client + async fn send_entries( + &self, + reply: &mut DirReply<'_, N>, + ) -> SftpOpResult<()> { + for entry in &self.entries { + let filename = entry.file_name().to_string_lossy().into_owned(); + let attrs = Self::get_attrs_or_empty(entry.metadata()); + let name_entry = NameEntry { + filename: Filename::from(filename.as_str()), + _longname: Filename::from(""), + attrs, + }; + debug!("Sending new item: {:?}", name_entry); + reply.send_item(&name_entry).await.map_err(|err| { + error!("SftpError: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + } + Ok(()) + } + + fn get_attrs_or_empty( + maybe_metadata: Result, + ) -> Attrs { + maybe_metadata.map(get_file_attrs).unwrap_or_default() + } +} + +#[cfg(feature = "std")] +/// [`std`] helper function to get [`Attrs`] from a [`Metadata`]. 
+pub fn get_file_attrs(metadata: Metadata) -> Attrs { + let time_to_u32 = |time_result: std::io::Result| { + time_result + .ok()? + .duration_since(SystemTime::UNIX_EPOCH) + .ok()? + .as_secs() + .try_into() + .ok() + }; + + Attrs { + size: Some(metadata.len()), + uid: Some(metadata.st_uid()), + gid: Some(metadata.st_gid()), + permissions: Some(metadata.permissions().mode()), + atime: time_to_u32(metadata.accessed()), + mtime: time_to_u32(metadata.modified()), + ext_count: None, + } +} diff --git a/sftp/src/sftpsink.rs b/sftp/src/sftpsink.rs new file mode 100644 index 00000000..31fb0c76 --- /dev/null +++ b/sftp/src/sftpsink.rs @@ -0,0 +1,99 @@ +use crate::proto::SFTP_FIELD_LEN_LENGTH; + +use sunset::sshwire::{SSHSink, WireError}; + +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; + +/// A implementation fo [`SSHSink`] that observes some constraints for +/// SFTP packets +/// +/// **Important**: It needs to be [`SftpSink::finalize`] to add the packet +/// len +#[derive(Default)] +pub struct SftpSink<'g> { + buffer: &'g mut [u8], + index: usize, +} + +impl<'g> SftpSink<'g> { + /// Initializes the Sink, with the particularity that it will leave + /// [`crate::proto::SFTP_FIELD_LEN_LENGTH`] bytes empty at the + /// start of the buffer that will contain the total packet length + /// once the [`SftpSink::finalize`] method is called + pub fn new(s: &'g mut [u8]) -> Self { + SftpSink { buffer: s, index: SFTP_FIELD_LEN_LENGTH } + } + + /// Finalise the buffer by prepending the packet length field, + /// excluding the field itself. 
+ /// + /// **Returns** the final index in the buffer as a reference of the + /// space used + fn finalize(&mut self) -> usize { + if self.index <= SFTP_FIELD_LEN_LENGTH { + warn!("SftpSink trying to terminate it before pushing data"); + return 0; + } // size is 0 + let used_size = self.payload_len() as u32; + + used_size + .to_be_bytes() + .iter() + .enumerate() + .for_each(|(i, v)| self.buffer[i] = *v); + + self.index + } + + /// Auxiliary method to allow seen the len used by the encoded payload + pub fn payload_len(&self) -> usize { + self.index - SFTP_FIELD_LEN_LENGTH + } + + /// Auxiliary method to allow an immutable reference to the encoded payload + /// excluding the `u32` length field prepended to it + pub fn payload_slice(&self) -> &[u8] { + &self.buffer + [SFTP_FIELD_LEN_LENGTH..SFTP_FIELD_LEN_LENGTH + self.payload_len()] + } + + /// Auxiliary method to allow an immutable reference to the full used + /// data (includes the prepended length field) + /// + /// **Important:** Call this after [`SftpSink::finalize()`] + pub fn used_slice(&self) -> &[u8] { + debug!( + "SftpSink used_slice called, total len: {}. 
Index: {}", + SFTP_FIELD_LEN_LENGTH + self.payload_len(), + self.index + ); + &self.buffer[..SFTP_FIELD_LEN_LENGTH + self.payload_len()] + } + + /// Reset the index and cleans the length field + pub fn reset(&mut self) -> () { + debug!("SftpSink reset called when index was {:?}", self.index); + self.index = SFTP_FIELD_LEN_LENGTH; + for i in 0..SFTP_FIELD_LEN_LENGTH { + self.buffer[i] = 0; + } + } +} + +impl<'g> SSHSink for SftpSink<'g> { + fn push(&mut self, v: &[u8]) -> sunset::sshwire::WireResult<()> { + if v.len() + self.index > self.buffer.len() { + return Err(WireError::NoRoom); + } + trace!("Sink index: {:}", self.index); + v.iter().for_each(|val| { + trace!("Writing val {:} at index {:}", *val, self.index); + self.buffer[self.index] = *val; + self.index += 1; + }); + trace!("Sink new index: {:}", self.index); + self.finalize(); + Ok(()) + } +} From 636c25edf9d4dfe516c927112783db541dfb1765 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Wed, 3 Dec 2025 15:56:51 +1100 Subject: [PATCH 03/47] Merge branch 'matt/sftptesting' into dev/sftp-start My apologies, I thought that it had been merged to the main branch and merged when the env_var (pull 35) feature was included --- src/channel.rs | 18 ++++++++++++++---- src/encrypt.rs | 4 +++- src/error.rs | 2 ++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/channel.rs b/src/channel.rs index 10c66142..83284429 100644 --- a/src/channel.rs +++ b/src/channel.rs @@ -211,7 +211,16 @@ impl Channels { let ch = self.get_mut(num)?; ch.finished_input(len); if let Some(w) = ch.check_window_adjust()? { - s.send(w)?; + // The send buffer may be full. Ignore the failure and hope another adjustment is + // sent later. TODO improve this. + match s.send(w) { + Ok(_) => ch.pending_adjust = 0, + Err(Error::NoRoom { ..
}) => { + // TODO better retry rather than hoping a retry occurs + debug!("noroom for adjustment") + } + error => return error, + } } Ok(()) } @@ -1028,11 +1037,12 @@ impl Channel { } /// Returns a window adjustment packet if required - fn check_window_adjust(&mut self) -> Result>> { - let num = self.send.as_mut().trap()?.num; + /// + /// Does not reset the adjustment to 0, should be done by caller on successful send. + fn check_window_adjust(&self) -> Result>> { + let num = self.send.as_ref().trap()?.num; if self.pending_adjust > self.full_window / 2 { let adjust = self.pending_adjust as u32; - self.pending_adjust = 0; let p = packets::ChannelWindowAdjust { num, adjust }.into(); Ok(Some(p)) } else { diff --git a/src/encrypt.rs b/src/encrypt.rs index 34c71ca7..bc520c87 100644 --- a/src/encrypt.rs +++ b/src/encrypt.rs @@ -130,7 +130,9 @@ impl KeyState { buf: &mut [u8], ) -> Result { let e = self.enc.encrypt(payload_len, buf, self.seq_encrypt.0); - self.seq_encrypt += 1; + if !matches!(e, Err(Error::NoRoom { .. })) { + self.seq_encrypt += 1; + } e } diff --git a/src/error.rs b/src/error.rs index 42333e8b..0ac33552 100644 --- a/src/error.rs +++ b/src/error.rs @@ -6,6 +6,8 @@ use core::fmt::Arguments; use snafu::prelude::*; +use heapless::String; + use crate::channel::ChanNum; #[allow(unused_imports)] From e73f958333ea60c686b4966a57decd9702ea0052 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 22 Aug 2025 15:40:28 +1000 Subject: [PATCH 04/47] Modifying packets.rs ParseContext for allowing proto.rs StatusCode to use #[sshwire(unknown)] Without changing some visibilities, the code generated by SSHDecode (SSHDecodeEnum) for proto.rs would try accessing s.ctx().seen_unknown and Unknown::new() throwing errors. Am I using SSHDecode wrong?
--- src/packets.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/packets.rs b/src/packets.rs index 6ffe97d2..a96c9a93 100644 --- a/src/packets.rs +++ b/src/packets.rs @@ -841,7 +841,7 @@ pub struct DirectTcpip<'a> { pub struct Unknown<'a>(pub &'a [u8]); impl<'a> Unknown<'a> { - fn new(u: &'a [u8]) -> Self { + pub fn new(u: &'a [u8]) -> Self { let u = Unknown(u); trace!("saw unknown variant \"{u}\""); u @@ -882,7 +882,7 @@ pub struct ParseContext { // Set to true if an unknown variant is encountered. // Packet length checks should be omitted in that case. - pub(crate) seen_unknown: bool, + pub seen_unknown: bool, } impl ParseContext { From 1dc4e454bbf71c824918b906bc28006d1e6fa7d9 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Wed, 4 Mar 2026 14:03:12 +1100 Subject: [PATCH 05/47] [skip ci] WIP: Adding demo sftp std example and testing TODO: Improve the tests. Running them is a bit hairy. You need to run the demo server from the root of the repo, and then run the test scripts from the testing folder. This is an implementation of the sunset-sftp basic functionality. 
It also has a testing folder with scripts to test different operations --- Cargo.lock | 26 + Cargo.toml | 1 + demo/sftp/std/Cargo.toml | 40 ++ demo/sftp/std/README.md | 64 +++ demo/sftp/std/debug_sftp_client.sh | 5 + demo/sftp/std/rust-toolchain.toml | 3 + demo/sftp/std/src/demofilehandlemanager.rs | 63 +++ demo/sftp/std/src/demoopaquefilehandle.rs | 37 ++ demo/sftp/std/src/demosftpserver.rs | 444 ++++++++++++++++++ demo/sftp/std/src/main.rs | 231 +++++++++ demo/sftp/std/tap.sh | 7 + demo/sftp/std/testing/extract_txrx.sh | 43 ++ .../std/testing/log_demo_sftp_with_test.sh | 106 +++++ demo/sftp/std/testing/log_get_single_long.sh | 1 + demo/sftp/std/testing/log_get_single_short.sh | 1 + demo/sftp/std/testing/merge_logs.sh | 9 + demo/sftp/std/testing/test_get.sh | 53 +++ demo/sftp/std/testing/test_get_long.sh | 45 ++ demo/sftp/std/testing/test_get_short.sh | 45 ++ .../std/testing/test_long_write_requests.sh | 38 ++ demo/sftp/std/testing/test_read_dir.sh | 33 ++ demo/sftp/std/testing/test_stats.sh | 27 ++ demo/sftp/std/testing/test_write_requests.sh | 44 ++ 23 files changed, 1366 insertions(+) create mode 100644 demo/sftp/std/Cargo.toml create mode 100644 demo/sftp/std/README.md create mode 100755 demo/sftp/std/debug_sftp_client.sh create mode 100644 demo/sftp/std/rust-toolchain.toml create mode 100644 demo/sftp/std/src/demofilehandlemanager.rs create mode 100644 demo/sftp/std/src/demoopaquefilehandle.rs create mode 100644 demo/sftp/std/src/demosftpserver.rs create mode 100644 demo/sftp/std/src/main.rs create mode 100755 demo/sftp/std/tap.sh create mode 100755 demo/sftp/std/testing/extract_txrx.sh create mode 100755 demo/sftp/std/testing/log_demo_sftp_with_test.sh create mode 100755 demo/sftp/std/testing/log_get_single_long.sh create mode 100755 demo/sftp/std/testing/log_get_single_short.sh create mode 100755 demo/sftp/std/testing/merge_logs.sh create mode 100755 demo/sftp/std/testing/test_get.sh create mode 100755 demo/sftp/std/testing/test_get_long.sh create mode 
100755 demo/sftp/std/testing/test_get_short.sh create mode 100755 demo/sftp/std/testing/test_long_write_requests.sh create mode 100755 demo/sftp/std/testing/test_read_dir.sh create mode 100755 demo/sftp/std/testing/test_stats.sh create mode 100755 demo/sftp/std/testing/test_write_requests.sh diff --git a/Cargo.lock b/Cargo.lock index 8874a498..534121f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2813,6 +2813,32 @@ dependencies = [ "sunset-sshwire-derive", ] +[[package]] +name = "sunset-demo-sftp-std" +version = "0.1.2" +dependencies = [ + "async-io", + "critical-section", + "embassy-executor", + "embassy-futures", + "embassy-net", + "embassy-net-tuntap", + "embassy-sync 0.7.2", + "embassy-time", + "embedded-io-async", + "env_logger", + "fnv", + "heapless", + "libc", + "log", + "rand", + "sha2", + "sunset", + "sunset-async", + "sunset-demo-common", + "sunset-sftp", +] + [[package]] name = "sunset-demo-std" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index b5635e3b..391503f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,6 +13,7 @@ rust-version = "1.87" members = [ "demo/picow", "demo/std", + "demo/sftp/std", "fuzz", "stdasync", "sftp", diff --git a/demo/sftp/std/Cargo.toml b/demo/sftp/std/Cargo.toml new file mode 100644 index 00000000..a2f08d6f --- /dev/null +++ b/demo/sftp/std/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "sunset-demo-sftp-std" +version = "0.1.2" +edition = "2021" + +[dependencies] +sunset = { workspace = true, features = ["rsa", "std"] } +sunset-async.workspace = true +sunset-demo-common.workspace = true +sunset-sftp = { version = "0.1.0", path = "../../../sftp", features = ["std"] } + +# 131072 was determined empirically +embassy-executor = { version = "0.7", features = [ + "executor-thread", "arch-std", "log", "task-arena-size-131072"] } +embassy-net = { version = "0.7", features = ["tcp", "dhcpv4", "medium-ethernet"] } +embassy-net-tuntap = { version = "0.1" } +embassy-sync = { version = "0.7" } +embassy-futures = { version = 
"0.1" } +# embassy-time dep required to link a time driver +embassy-time = { version = "0.4", default-features=false, features = ["log", "std"] } + +log = { version = "0.4" } +# default regex feature is huge +env_logger = { version = "0.11", default-features=false, features = ["auto-color", "humantime"] } + +embedded-io-async = "0.6" +heapless = "0.8" + +# for tuntap +libc = "0.2.101" +async-io = "1.6.0" + +# using local fork +# menu = "0.3" + + +critical-section = "1.1" +rand = { version = "0.8", default-features = false, features = ["getrandom"] } +sha2 = { version = "0.10", default-features = false } +fnv = "1.0.7" diff --git a/demo/sftp/std/README.md b/demo/sftp/std/README.md new file mode 100644 index 00000000..98394a10 --- /dev/null +++ b/demo/sftp/std/README.md @@ -0,0 +1,64 @@ +# sunset-demo-sftp-std + +`demo/sftp/std` contains a host-side (`std`) demo that runs an SSH server with SFTP support using the `sunset` and `sunset-sftp` crates. It runs on linux distributions. + +It is intended as a **reference implementation** for building your own SFTP server with `sunset-sftp`. It is not a complete implementation and you should make your own choices for your sftp server. + +In particular, this demo shows how to: + +- implement an `SftpServer` for request handling +- add a `FileHandleManager` to track/open/close active handles +- define an `OpaqueFileHandle` format to safely encode/decode handle IDs across requests + +Use `src/demosftpserver.rs`, `src/demofilehandlemanager.rs`, and `src/demoopaquefilehandle.rs` together with `main.rs` and common demo files as a reference for custom server development. + +## What this folder contains + +- `src/main.rs` + Demo entry point. Sets up logging, runtime/executor, network stack, and starts the SSH/SFTP demo server. +- `src/demosftpserver.rs` + Demo SFTP server wiring and request handling glue. +- `src/demofilehandlemanager.rs` + Tracks and manages open file handles used by the SFTP session. 
+- `src/demoopaquefilehandle.rs` + Defines/encodes opaque file handle values used by the demo protocol layer. +- `tap.sh` + Helper script to create/configure a TAP interface for local testing. +- `debug_sftp_client.sh` + Convenience script for running an SFTP client in a debug-friendly way. +- `testing/` + Test and log scripts (read/write/stat/readdir scenarios, log helpers, and parsing utilities). + +## Setup + +This demo uses a tap interface to run the server and accept connections. The tap.sh sets this up in a linux environment. I have not found a way to run this on MacOS. On windows I recommend using WSL2. + +Run: + +```bash +sudo ./tap.sh +``` + +## Build / run + +From base project folder `sunset`: + +```bash +cargo run -p sunset-demo-sftp-std +``` + +Then connect with an SFTP client using the configured demo host/user settings. The first info log will display the server ipv4 address. + +## Testing + +`testing/` contains runnable scripts and utilities to validate SFTP behavior end-to-end. It includes scenarios for: + +- file reads/writes +- `stat`/metadata checks +- directory listing (`readdir`) +- log capture and parsing helpers (Requires a tshark installation with the current user in wireshark group) + +These scripts are useful both for regression checks and as examples of expected server behavior during development. + +These scripts have been used throughout the development of `sunset-sftp` and may not suit general use, but rather some particular troubleshooting. I hope that they are useful as a reference for your exploration. + diff --git a/demo/sftp/std/debug_sftp_client.sh b/demo/sftp/std/debug_sftp_client.sh new file mode 100644 index 00000000..f67b1df6 --- /dev/null +++ b/demo/sftp/std/debug_sftp_client.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# This sftp options are meant to help debugging and do not store any host key or known hosts information.
+# That is not a good practice in real life, as it can lead to security issues, but it is useful for debugging purposes. + +sftp -vvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR any@192.168.69.2 \ No newline at end of file diff --git a/demo/sftp/std/rust-toolchain.toml b/demo/sftp/std/rust-toolchain.toml new file mode 100644 index 00000000..9993e936 --- /dev/null +++ b/demo/sftp/std/rust-toolchain.toml @@ -0,0 +1,3 @@ +[toolchain] +channel = "stable" +components = [ "rustfmt" ] diff --git a/demo/sftp/std/src/demofilehandlemanager.rs b/demo/sftp/std/src/demofilehandlemanager.rs new file mode 100644 index 00000000..6b1cb278 --- /dev/null +++ b/demo/sftp/std/src/demofilehandlemanager.rs @@ -0,0 +1,63 @@ +use sunset_sftp::handles::{OpaqueFileHandle, OpaqueFileHandleManager, PathFinder}; +use sunset_sftp::protocol::StatusCode; + +use std::collections::HashMap; // Not enforced. Only for std. For no_std environments other solutions can be used to store Key, Value + +pub struct DemoFileHandleManager +where + K: OpaqueFileHandle, + V: PathFinder, +{ + handle_map: HashMap, +} + +impl DemoFileHandleManager +where + K: OpaqueFileHandle, + V: PathFinder, +{ + pub fn new() -> Self { + Self { handle_map: HashMap::new() } + } +} + +impl OpaqueFileHandleManager for DemoFileHandleManager +where + K: OpaqueFileHandle, + V: PathFinder, +{ + type Error = StatusCode; + + fn insert(&mut self, private_handle: V, salt: &str) -> Result { + if self + .handle_map + .iter() + .any(|(_, private_handle)| private_handle.matches(&private_handle)) + { + return Err(StatusCode::SSH_FX_PERMISSION_DENIED); + } + + let handle = K::new( + format!("{:}-{:}", &private_handle.get_path_ref(), salt).as_str(), + ); + + self.handle_map.insert(handle.clone(), private_handle); + Ok(handle) + } + + fn remove(&mut self, opaque_handle: &K) -> Option { + self.handle_map.remove(opaque_handle) + } + + fn opaque_handle_exist(&self, opaque_handle: &K) -> bool { + 
self.handle_map.contains_key(opaque_handle) + } + + fn get_private_as_ref(&self, opaque_handle: &K) -> Option<&V> { + self.handle_map.get(opaque_handle) + } + + fn get_private_as_mut_ref(&mut self, opaque_handle: &K) -> Option<&mut V> { + self.handle_map.get_mut(opaque_handle) + } +} diff --git a/demo/sftp/std/src/demoopaquefilehandle.rs b/demo/sftp/std/src/demoopaquefilehandle.rs new file mode 100644 index 00000000..67c2fc6b --- /dev/null +++ b/demo/sftp/std/src/demoopaquefilehandle.rs @@ -0,0 +1,37 @@ +use sunset_sftp::handles::OpaqueFileHandle; +use sunset_sftp::protocol::FileHandle; + +use sunset::sshwire::{BinString, WireError}; + +use core::hash::Hasher; + +use fnv::FnvHasher; + +const HASH_LEN: usize = 4; +#[derive(Debug, Hash, PartialEq, Eq, Clone)] +pub(crate) struct DemoOpaqueFileHandle { + tiny_hash: [u8; HASH_LEN], +} + +impl OpaqueFileHandle for DemoOpaqueFileHandle { + fn new(seed: &str) -> Self { + let mut hasher = FnvHasher::default(); + hasher.write(seed.as_bytes()); + DemoOpaqueFileHandle { tiny_hash: (hasher.finish() as u32).to_be_bytes() } + } + + fn try_from(file_handle: &FileHandle<'_>) -> sunset::sshwire::WireResult { + if !file_handle.0 .0.len().eq(&core::mem::size_of::()) + { + return Err(WireError::BadString); + } + + let mut tiny_hash = [0u8; HASH_LEN]; + tiny_hash.copy_from_slice(file_handle.0 .0); + Ok(DemoOpaqueFileHandle { tiny_hash }) + } + + fn into_file_handle(&self) -> FileHandle<'_> { + FileHandle(BinString(&self.tiny_hash)) + } +} diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs new file mode 100644 index 00000000..2a703bce --- /dev/null +++ b/demo/sftp/std/src/demosftpserver.rs @@ -0,0 +1,444 @@ +use crate::demofilehandlemanager::DemoFileHandleManager; + +use sunset_sftp::error::SftpResult; +use sunset_sftp::handles::{OpaqueFileHandle, OpaqueFileHandleManager, PathFinder}; +use sunset_sftp::protocol::{Attrs, Filename, NameEntry, PFlags, StatusCode}; +use 
sunset_sftp::server::helpers::DirEntriesCollection; +use sunset_sftp::server::{ + DirReply, ReadReply, ReadStatus, SftpOpResult, SftpServer, +}; + +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; +use std::fs; +use std::os::unix::fs::PermissionsExt; +use std::{fs::File, os::unix::fs::FileExt, path::Path}; + +// Used during read operations +const ARBITRARY_READ_BUFFER_LENGTH: usize = 1024; + +#[derive(Debug)] +pub(crate) enum PrivatePathHandle { + File(PrivateFileHandle), + Directory(PrivateDirHandle), +} + +#[derive(Debug)] +pub(crate) struct PrivateFileHandle { + path: String, + permissions: Option, + file: File, +} + +#[derive(Debug)] +pub(crate) struct PrivateDirHandle { + path: String, + read_status: ReadStatus, +} + +/// It is a better practice generating it on creation. Used to generate the opaque handles instead of using a constant +static OPAQUE_SALT: &'static str = "12d%32"; + +impl PathFinder for PrivatePathHandle { + fn matches(&self, path: &Self) -> bool { + match self { + PrivatePathHandle::File(self_private_path_handler) => { + if let PrivatePathHandle::File(private_file_handle) = path { + return self_private_path_handler.matches(private_file_handle); + } else { + false + } + } + PrivatePathHandle::Directory(self_private_dir_handle) => { + if let PrivatePathHandle::Directory(private_dir_handle) = path { + self_private_dir_handle.matches(private_dir_handle) + } else { + false + } + } + } + } + + fn get_path_ref(&self) -> &str { + match self { + PrivatePathHandle::File(private_file_handler) => { + private_file_handler.get_path_ref() + } + PrivatePathHandle::Directory(private_dir_handle) => { + private_dir_handle.get_path_ref() + } + } + } +} + +impl PathFinder for PrivateFileHandle { + fn matches(&self, path: &PrivateFileHandle) -> bool { + self.path.as_str().eq_ignore_ascii_case(path.get_path_ref()) + } + + fn get_path_ref(&self) -> &str { + self.path.as_str() + } +} + +impl PathFinder for PrivateDirHandle { + fn 
matches(&self, path: &PrivateDirHandle) -> bool { + self.path.as_str().eq_ignore_ascii_case(path.get_path_ref()) + } + + fn get_path_ref(&self) -> &str { + self.path.as_str() + } +} + +/// A basic demo server. Used as a demo and to test SFTP functionality +pub struct DemoSftpServer { + base_path: String, + handles_manager: DemoFileHandleManager, +} + +impl DemoSftpServer { + pub fn new(base_path: String) -> Self { + if !Path::new(&base_path).exists() { + debug!("Base path {:?} does not exist. Creating it", base_path); + if let Err(err) = fs::create_dir_all(&base_path) { + error!("Could not create the base path {:?}: {:?}", base_path, err); + panic!(); + } + } else { + debug!("Base path {:?} already exists", base_path); + } + DemoSftpServer { base_path, handles_manager: DemoFileHandleManager::new() } + } +} + +impl SftpServer<'_, OFH> for DemoSftpServer { + async fn open(&mut self, filename: &str, mode: &PFlags) -> SftpOpResult { + debug!("Open file: filename = {:?}, mode = {:?}", filename, mode); + + let can_write = u32::from(mode) & u32::from(&PFlags::SSH_FXF_WRITE) > 0; + let can_read = u32::from(mode) & u32::from(&PFlags::SSH_FXF_READ) > 0; + + info!( + "File open for read/write access: can_read={:?}, can_write={:?}", + can_read, can_write + ); + + let file = File::options() + .read(can_read) + .write(can_write) + .create(can_write) + .open(filename) + .map_err(|_| StatusCode::SSH_FX_FAILURE)?; + + let permissions = file + .metadata() + .map_err(|_| StatusCode::SSH_FX_FAILURE)? 
+ .permissions() + .mode() + & 0o777; + + let fh = self.handles_manager.insert( + PrivatePathHandle::File(PrivateFileHandle { + path: filename.into(), + permissions: Some(permissions), + file, + }), + OPAQUE_SALT, + ); + + debug!( + "Filename \"{:?}\" will have the obscured file handle: {:?}", + filename, fh + ); + + fh + } + + async fn opendir(&mut self, dir: &str) -> SftpOpResult { + info!("Open Directory = {:?}", dir); + + let dir_handle = self.handles_manager.insert( + PrivatePathHandle::Directory(PrivateDirHandle { + path: dir.into(), + read_status: ReadStatus::default(), + }), + OPAQUE_SALT, + ); + + debug!( + "Directory \"{:?}\" will have the obscured file handle: {:?}", + dir, dir_handle + ); + + dir_handle + } + + async fn realpath(&mut self, dir: &str) -> SftpOpResult> { + info!("finding path for: {:?}", dir); + let name_entry = NameEntry { + filename: Filename::from(self.base_path.as_str()), + _longname: Filename::from(""), + attrs: Attrs { + size: None, + uid: None, + gid: None, + permissions: None, + atime: None, + mtime: None, + ext_count: None, + }, + }; + debug!("Will return: {:?}", name_entry); + Ok(name_entry) + } + + async fn close(&mut self, opaque_file_handle: &OFH) -> SftpOpResult<()> { + if let Some(handle) = self.handles_manager.remove(opaque_file_handle) { + match handle { + PrivatePathHandle::File(private_file_handle) => { + info!( + "SftpServer Close operation on file {:?} was successful", + private_file_handle.path + ); + drop(private_file_handle.file); // Not really required but illustrative + Ok(()) + } + PrivatePathHandle::Directory(private_dir_handle) => { + info!( + "SftpServer Close operation on dir {:?} was successful", + private_dir_handle.path + ); + + Ok(()) + } + } + } else { + error!( + "SftpServer Close operation on handle {:?} failed", + opaque_file_handle + ); + Err(StatusCode::SSH_FX_FAILURE) + } + } + + async fn read( + &mut self, + opaque_file_handle: &OFH, + offset: u64, + len: u32, + reply: &mut ReadReply<'_, N>, + ) 
-> SftpResult<()> { + if let PrivatePathHandle::File(private_file_handle) = self + .handles_manager + .get_private_as_mut_ref(opaque_file_handle) + .ok_or(StatusCode::SSH_FX_FAILURE)? + { + log::debug!( + "SftpServer Read operation: handle = {:?}, filepath = {:?}, offset = {:?}, len = {:?}", + opaque_file_handle, + private_file_handle.path, + offset, + len + ); + let permissions_poxit = private_file_handle.permissions.unwrap_or(0o000); + if (permissions_poxit & 0o444) == 0 { + error!( + "No read permissions for file {:?}", + private_file_handle.path + ); + return Err(StatusCode::SSH_FX_PERMISSION_DENIED.into()); + }; + + let file_len = private_file_handle + .file + .metadata() + .map_err(|err| { + error!("Could not read the file length: {:?}", err); + StatusCode::SSH_FX_FAILURE + })? + .len(); + + if offset >= file_len { + info!( + "offset is larger than file length, sending EOF for {:?}", + private_file_handle.path + ); + reply.send_eof().await.map_err(|err| { + error!("Could not sent EOF: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + return Ok(()); + } + + let read_len = if file_len >= len as u64 + offset { + len + } else { + debug!("Read operation: length + offset > file length. 
Clipping ( {:?} + {:?} > {:?})", + len, offset, file_len); + (file_len - offset).try_into().unwrap_or(u32::MAX) + }; + + reply.send_header(read_len).await?; + + let mut read_buff = [0u8; ARBITRARY_READ_BUFFER_LENGTH]; + + let mut running_offset = offset; + let mut remaining = read_len as usize; + + debug!("Starting reading loop: remaining = {}", remaining); + while remaining > 0 { + let next_read_len: usize = remaining.min(read_buff.len()); + trace!("next_read_len = {}", next_read_len); + let br = private_file_handle + .file + .read_at(&mut read_buff[..next_read_len], running_offset) + .map_err(|err| { + error!("read error: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + trace!("{} bytes readed", br); + reply.send_data(&read_buff[..br.min(remaining)]).await?; + trace!("Read sent {} bytes", br.min(remaining)); + trace!("remaining {} bytes. {} byte read", remaining, br); + + remaining = + remaining.checked_sub(br).ok_or(StatusCode::SSH_FX_FAILURE)?; + trace!( + "after subtracting {} bytes, there are {} bytes remaining", + br, + remaining + ); + running_offset = running_offset + .checked_add(br as u64) + .ok_or(StatusCode::SSH_FX_FAILURE)?; + } + debug!("Finished sending data"); + return Ok(()); + } + Err(StatusCode::SSH_FX_PERMISSION_DENIED.into()) + } + + async fn write( + &mut self, + opaque_file_handle: &OFH, + offset: u64, + buf: &[u8], + ) -> SftpOpResult<()> { + if let PrivatePathHandle::File(private_file_handle) = self + .handles_manager + .get_private_as_ref(opaque_file_handle) + .ok_or(StatusCode::SSH_FX_FAILURE)? 
+ { + let permissions_poxit = (private_file_handle + .permissions + .ok_or(StatusCode::SSH_FX_PERMISSION_DENIED))?; + + if (permissions_poxit & 0o222) == 0 { + return Err(StatusCode::SSH_FX_PERMISSION_DENIED); + }; + + log::trace!( + "SftpServer Write operation: handle = {:?}, filepath = {:?}, offset = {:?}, buf = {:?}", + opaque_file_handle, + private_file_handle.path, + offset, + String::from_utf8(buf.to_vec()) + ); + let bytes_written = private_file_handle + .file + .write_at(buf, offset) + .map_err(|_| StatusCode::SSH_FX_FAILURE)?; + + log::debug!( + "SftpServer Write operation: handle = {:?}, filepath = {:?}, offset = {:?}, buffer length = {:?}, bytes written = {:?}", + opaque_file_handle, + private_file_handle.path, + offset, + buf.len(), + bytes_written + ); + + Ok(()) + } else { + Err(StatusCode::SSH_FX_PERMISSION_DENIED) + } + } + + async fn readdir( + &mut self, + opaque_dir_handle: &OFH, + reply: &mut DirReply<'_, N>, + ) -> SftpOpResult<()> { + info!("read dir for {:?}", opaque_dir_handle); + + if let PrivatePathHandle::Directory(dir) = self + .handles_manager + .get_private_as_mut_ref(opaque_dir_handle) + .ok_or(StatusCode::SSH_FX_NO_SUCH_FILE)? 
+ { + if dir.read_status == ReadStatus::EndOfFile { + reply.send_eof().await.map_err(|error| { + error!("{:?}", error); + StatusCode::SSH_FX_FAILURE + })?; + return Ok(()); + } + + let path_str = dir.path.clone(); + debug!("opaque handle found in handles manager: {:?}", path_str); + let dir_path = Path::new(&path_str); + debug!("path: {:?}", dir_path); + + if dir_path.is_dir() { + info!("SftpServer ReadDir operation path = {:?}", dir_path); + + let dir_iterator = fs::read_dir(dir_path).map_err(|err| { + error!("could not get the directory {:?}: {:?}", path_str, err); + StatusCode::SSH_FX_PERMISSION_DENIED + })?; + + let name_entry_collection = DirEntriesCollection::new(dir_iterator)?; + + let response_read_status = + name_entry_collection.send_response(reply).await?; + + dir.read_status = response_read_status; + return Ok(()); + } else { + error!("the path is not a directory = {:?}", dir_path); + return Err(StatusCode::SSH_FX_NO_SUCH_FILE); + } + } else { + error!("Could not find the directory for {:?}", opaque_dir_handle); + return Err(StatusCode::SSH_FX_NO_SUCH_FILE); + } + } + + async fn stats( + &mut self, + follow_links: bool, + file_path: &str, + ) -> SftpOpResult { + log::debug!("SftpServer ListStats: file_path = {:?}", file_path); + let file_path = Path::new(file_path); + + let metadata = if follow_links { + file_path.metadata() // follows symlinks + } else { + file_path.symlink_metadata() // doesn't follow symlinks + } + .map_err(|err| { + error!("Problem listing stats: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + + if file_path.is_file() { + return Ok(sunset_sftp::server::helpers::get_file_attrs(metadata)); + } else if file_path.is_symlink() { + return Ok(sunset_sftp::server::helpers::get_file_attrs(metadata)); + } else { + return Err(StatusCode::SSH_FX_NO_SUCH_FILE); + } + } +} diff --git a/demo/sftp/std/src/main.rs b/demo/sftp/std/src/main.rs new file mode 100644 index 00000000..efc7835b --- /dev/null +++ b/demo/sftp/std/src/main.rs @@ -0,0 +1,231 
@@ +use sunset::*; +use sunset_async::{ProgressHolder, SSHServer, SunsetMutex, SunsetRawMutex}; +use sunset_sftp::{server::MAX_REQUEST_LEN, SftpHandler}; + +pub(crate) use sunset_demo_common as demo_common; + +use demo_common::{DemoCommon, DemoServer, SSHConfig}; + +use crate::{ + demoopaquefilehandle::DemoOpaqueFileHandle, demosftpserver::DemoSftpServer, +}; + +use embassy_executor::Spawner; +use embassy_net::{Stack, StackResources, StaticConfigV4}; + +use rand::rngs::OsRng; +use rand::RngCore; + +use embassy_futures::select::select; +use embassy_net_tuntap::TunTapDevice; +use embassy_sync::channel::Channel; + +#[allow(unused_imports)] +use log::{debug, error, info, log, trace, warn}; + +mod demofilehandlemanager; +mod demoopaquefilehandle; +mod demosftpserver; + +const NUM_LISTENERS: usize = 4; +// +1 for dhcp +const NUM_SOCKETS: usize = NUM_LISTENERS + 1; + +#[embassy_executor::task] +async fn net_task(mut runner: embassy_net::Runner<'static, TunTapDevice>) -> ! { + runner.run().await +} + +#[embassy_executor::task] +async fn main_task(spawner: Spawner) { + let opt_tap0 = "tap0"; + let ip4 = "192.168.69.2"; + let cir = 24; + + let config = Box::leak(Box::new({ + let mut config = SSHConfig::new().unwrap(); + config.set_admin_pw(Some("pw")).unwrap(); + config.console_noauth = true; + config.ip4_static = if let Ok(ip) = ip4.parse() { + Some(StaticConfigV4 { + address: embassy_net::Ipv4Cidr::new(ip, cir), + gateway: None, + dns_servers: { heapless::Vec::new() }, + }) + } else { + None + }; + SunsetMutex::new(config) + })); + + let net_cf = if let Some(ref s) = config.lock().await.ip4_static { + embassy_net::Config::ipv4_static(s.clone()) + } else { + embassy_net::Config::dhcpv4(Default::default()) + }; + info!("Net config: {net_cf:?}"); + + // Init network device + let net_device = TunTapDevice::new(opt_tap0).unwrap(); + + let seed = OsRng.next_u64(); + + // Init network stack + let res = Box::leak(Box::new(StackResources::::new())); + let (stack, runner) = 
embassy_net::new(net_device, net_cf, res, seed); + + // Launch network task + spawner.spawn(net_task(runner)).unwrap(); + + for _ in 0..NUM_LISTENERS { + spawner.spawn(listen(stack, config)).unwrap(); + } +} + +#[derive(Default)] +struct StdDemo; + +impl DemoServer for StdDemo { + async fn run(&self, serv: &SSHServer<'_>, mut common: DemoCommon) -> Result<()> { + let chan_pipe = Channel::::new(); + + let ssh_loop_inner = async { + loop { + let mut ph = ProgressHolder::new(); + let ev = match serv.progress(&mut ph).await { + Ok(event) => event, + Err(e) => { + match e { + Error::NoRoom {} => { + warn!("NoRoom triggered. Trying again"); + continue; + } + _ => { + error!("server progress failed: {:?}", e); // NoRoom: 2048 Bytes Output buffer + return Err(e); + } + } + } + }; + + trace!("ev {ev:?}"); + match ev { + ServEvent::SessionShell(a) => { + a.fail()?; // Not allowed in this example, kept here for compatibility + } + ServEvent::SessionExec(a) => { + a.fail()?; // Not allowed in this example, kept here for compatibility + } + ServEvent::SessionSubsystem(a) => { + match a.command()?.to_lowercase().as_str() { + "sftp" => { + info!("Starting '{}' subsystem", a.command()?); + + if let Some(ch) = common.sess.take() { + debug_assert!(ch.num() == a.channel()); + a.succeed()?; + let _ = chan_pipe.try_send(ch); + } else { + a.fail()?; + } + } + _ => { + warn!( + "request for subsystem '{}' not implemented: fail", + a.command()? 
+ ); + a.fail()?; + } + } + } + other => common.handle_event(other)?, + }; + } + #[allow(unreachable_code)] + Ok::<_, Error>(()) + }; + + let ssh_loop = async { + info!("prog_loop started"); + if let Err(e) = ssh_loop_inner.await { + warn!("Prog Loop Exited: {e:?}"); + return Err(e); + } + Ok(()) + }; + + #[allow(unreachable_code)] + let sftp_loop = async { + loop { + let ch = chan_pipe.receive().await; + + info!("SFTP loop has received a channel handle {:?}", ch.num()); + + // TODO Do some research to find reasonable default buffer lengths + let mut buffer_in = [0u8; 512]; + let mut request_buffer = [0u8; MAX_REQUEST_LEN]; + + match { + let stdio = serv.stdio(ch).await?; + let mut file_server = + DemoSftpServer::::new( + "./demo/sftp/std/testing/out/".to_string(), + ); + + SftpHandler::< + DemoOpaqueFileHandle, + DemoSftpServer, + 512, + >::new(&mut file_server, &mut request_buffer) + .process_loop(stdio, &mut buffer_in) + .await?; + + Ok::<_, Error>(()) + } { + Ok(_) => { + warn!("sftp server loop finished gracefully"); + return Ok(()); + } + Err(e) => { + error!("sftp server loop finished with an error: {}", e); + return Err(e); + } + }; + } + Ok::<_, Error>(()) + }; + + let selected = select(ssh_loop, sftp_loop).await; + match selected { + embassy_futures::select::Either::First(res) => { + warn!("prog_loop finished: {:?}", res); + res + } + embassy_futures::select::Either::Second(res) => { + warn!("sftp_loop finished: {:?}", res); + res + } + } + } +} + +// TODO pool_size should be NUM_LISTENERS but needs a literal +#[embassy_executor::task(pool_size = 4)] +async fn listen( + stack: Stack<'static>, + config: &'static SunsetMutex, +) -> ! 
{ + let demo = StdDemo::default(); + demo_common::listen(stack, config, &demo).await +} + +#[embassy_executor::main] +async fn main(spawner: Spawner) { + env_logger::Builder::new() + .filter_level(log::LevelFilter::Info) + .format_timestamp_nanos() + .target(env_logger::Target::Stdout) + .init(); + + spawner.spawn(main_task(spawner)).unwrap(); +} diff --git a/demo/sftp/std/tap.sh b/demo/sftp/std/tap.sh new file mode 100755 index 00000000..8732a0cd --- /dev/null +++ b/demo/sftp/std/tap.sh @@ -0,0 +1,7 @@ +#!/bin/sh +# This script generates the tap device that the demo will bind the network stack +# usage `sudo ./tap.sh` + +ip tuntap add name tap0 mode tap user $SUDO_USER group $SUDO_USER +ip addr add 192.168.69.100/24 dev tap0 +ip link set tap0 up diff --git a/demo/sftp/std/testing/extract_txrx.sh b/demo/sftp/std/testing/extract_txrx.sh new file mode 100755 index 00000000..f2c7ab82 --- /dev/null +++ b/demo/sftp/std/testing/extract_txrx.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Find all lines containing SFTP... OR Output Consumer... OR Output Producer... +# and reformat them into a simpler form for further processing. + + +cat < ${1}.txrx +Extracting communications from sunset-demo-sftp-std log file: $1 +Extract of RX (c: Client), TX (s: server), And internal TX (p: pipe producer) +------------------------------------------------ +EOF + +cat $1 | \ +grep -E 'SFTP <---- received: \[|Output Consumer: Bytes written \[|Output Producer: Sending buffer \[' | \ +sed 's/.*received: /c / ; s/.*written /s / ; s/.*Output Producer: Sending buffer /p /' >> ${1}.txrx + + +# Extract received lines. Remove brackets, spaces, +# and split by comma into new lines. Finally remove empty lines. 
+ +# RX +cat $1 | \ +grep -E 'SFTP <---- received: \[' | \ +sed 's/.*received: //' | \ +sed 's/\[//; s/\]/,/' | \ +tr -d ' ' |tr ',' '\n'| \ +grep -v '^$' > ${1}.rx + +# Producer +cat $1 | \ +grep -E 'Output Producer: Sending buffer \[' | \ +sed 's/.*buffer //' | \ +sed 's/\[//; s/\]/,/' | \ +tr -d ' ' |tr ',' '\n'| \ +grep -v '^$' > ${1}.txp + +# TX +cat $1 | \ +grep -E 'Output Consumer: Bytes written \[' | \ +sed 's/.*written //' | \ +sed 's/\[//; s/\]/,/' | \ +tr -d ' ' |tr ',' '\n'| \ +grep -v '^$' > ${1}.tx \ No newline at end of file diff --git a/demo/sftp/std/testing/log_demo_sftp_with_test.sh b/demo/sftp/std/testing/log_demo_sftp_with_test.sh new file mode 100755 index 00000000..58768bc9 --- /dev/null +++ b/demo/sftp/std/testing/log_demo_sftp_with_test.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +TIME_STAMP=$(date +%Y%m%d_%H%M%S) +TEST_FILE=$1 +# Used for log files naming +BASE_NAME=$(basename "$TEST_FILE" | cut -d. -f1) +START_PWD=$PWD +PROYECT_ROOT=$(dirname "$PWD")/../../.. + +# Check if file exist and can be executed + +if [ ! -f "${TEST_FILE}" ]; then + echo "File ${TEST_FILE} not found" + exit 1 +fi +if [ ! -x "${TEST_FILE}" ]; then + echo "File ${TEST_FILE} is not executable" + exit 2 +fi + +echo "debuging file: $TEST_FILE with logging and pcap" + +cargo build -p sunset-demo-sftp-std +if [ $? -ne 0 ]; then + echo "Failed to build sunset-demo-sftp-std. Aborting" + return 1 +fi + +sleep 3; +clear; + +# Create logs directory if it doesn't exist +LOG_DIR="$PWD/logs" +mkdir -p "$LOG_DIR" + + +# Starts an Tshark session to capture packets in tap0 +WIRESHARK_LOG=${LOG_DIR}/${TIME_STAMP}_${BASE_NAME}.pcap +tshark -i tap0 -w ${WIRESHARK_LOG} & +TSHARK_PID=$! + +# waits while tshark started writting to the file +echo "Waiting for tshark to start..." + +while [ ! -s "${WIRESHARK_LOG}" ]; do + sleep 1 +done +echo "Tshark has started." 
+ +# ################################################################ +# Start the sunset-demo-sftp-std with strace +# ################################################################ +echo "Starting sunset-demo-sftp-std" +echo "Changing directory to Project root: ${PROYECT_ROOT}" +cd ${PROYECT_ROOT} +echo "Project root directory is: ${PWD}" +RUST_LOG_FILE="${LOG_DIR}/${TIME_STAMP}_${BASE_NAME}.log" +STRACE_LOG=${LOG_DIR}/${TIME_STAMP}_${BASE_NAME}_strace.log +STRACE_OPTIONS="-fintttCDTYyy -v" +STRACE_CMD="strace ${STRACE_OPTIONS} -o ${STRACE_LOG} -P /dev/net/tun ./target/debug/sunset-demo-sftp-std" + +echo "Running strace for sunset-demo-sftp-std:" +echo "TZ=UTC ${STRACE_CMD}" +TZ=UTC ${STRACE_CMD} 2>&1 > $RUST_LOG_FILE & +STRACE_PID=$! + +echo "Sleeping for 2 seconds to let the server start..." +sleep 2 + +echo "Changing back to the starting directory: $START_PWD" +cd $START_PWD + +echo "Cleaning up previous run files" +rm -f -r ./*_random ./out/*_random + +echo "Running ${TEST_FILE}. Logging all data to ${LOG_DIR} with prefix ${TIME_STAMP}." +${TEST_FILE} | awk '{ cmd = "date -u +\"[%Y-%m-%dT%H:%M:%S.%NZ]\""; cmd | getline timestamp; print timestamp, $0; close(cmd) }' > $LOG_DIR/${TIME_STAMP}_${BASE_NAME}_client.log 2>&1 & +TEST_FILE_PID=$! + +kill_test(){ + echo "traped signal, killing test file process ${TEST_FILE_PID}" + kill -SIGTERM $TEST_FILE_PID +} +cleanup() { + echo "Cleaning up..." + if kill -0 $TSHARK_PID 2>/dev/null; then + echo "Killing tshark process ${TSHARK_PID}" + kill -SIGTERM $TSHARK_PID + fi + if kill -0 $STRACE_PID 2>/dev/null; then + echo "Killing strace process ${STRACE_PID}" + kill -SIGTERM $STRACE_PID + fi + echo "Cleanup done." +} + +trap kill_test SIGINT SIGTERM + +echo "If stuck use Ctrl+C to stop the script and cleanup." +wait "$TEST_FILE_PID" +echo "Finished executing ${TEST_FILE}" + +echo "extracting TX/RX data from log file..." 
+./extract_txrx.sh $RUST_LOG_FILE + +cleanup diff --git a/demo/sftp/std/testing/log_get_single_long.sh b/demo/sftp/std/testing/log_get_single_long.sh new file mode 100755 index 00000000..f01b186d --- /dev/null +++ b/demo/sftp/std/testing/log_get_single_long.sh @@ -0,0 +1 @@ +./log_demo_sftp_with_test.sh ./test_get_long.sh \ No newline at end of file diff --git a/demo/sftp/std/testing/log_get_single_short.sh b/demo/sftp/std/testing/log_get_single_short.sh new file mode 100755 index 00000000..58342c33 --- /dev/null +++ b/demo/sftp/std/testing/log_get_single_short.sh @@ -0,0 +1 @@ +./log_demo_sftp_with_test.sh ./test_get_short.sh \ No newline at end of file diff --git a/demo/sftp/std/testing/merge_logs.sh b/demo/sftp/std/testing/merge_logs.sh new file mode 100755 index 00000000..050c05d9 --- /dev/null +++ b/demo/sftp/std/testing/merge_logs.sh @@ -0,0 +1,9 @@ +# merge-logs.sh +# Useful to get events from both client and server logs in chronological order. +# +# usage: ./merge-logs.sh client.log server.log > merged.log + +{ + awk 'match($0,/^\[([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:.]+Z)\]/,m){print m[1] "\tC:\t" $0}' "$1" + awk 'match($0,/^\[([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:.]+Z)/,m){print m[1] "\tS:\t" $0}' "$2" +} | sort -t $'\t' -k1,1 | cut -f2- \ No newline at end of file diff --git a/demo/sftp/std/testing/test_get.sh b/demo/sftp/std/testing/test_get.sh new file mode 100755 index 00000000..913e1741 --- /dev/null +++ b/demo/sftp/std/testing/test_get.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +echo "Testing Multiple GETs..." + +echo "Cleaning up previous run files" +rm -f -r ./*_random ./out/*_random + + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("512B_random" "16kB_random" "64kB_random" "65kB_random" "2048kB_random") + + +echo "Generating random data files..." 
+dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null +dd if=/dev/random bs=1024 count=16 of=./16kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=64 of=./64kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=65 of=./65kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=256 of=./256kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=1024 of=./1024kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=2048 of=./2048kB_random 2>/dev/null +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +echo "Moving to the server folder..." +for file in "${FILES[@]}"; do + mv "./${file}" "./out/${file}" +done + +echo "Output folder content:" + +ls ./out -l + +echo "Downloading files..." +sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF +$(printf 'get ./%s\n' "${FILES[@]}") + +bye +EOF + +echo "DOWNLOAD Test Results:" +echo "=============" +# Test each file +for file in "${FILES[@]}"; do + if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then + echo "Download PASS: ${file}. Cleaning it" + rm -f -r ./${file} ./out/${file} + else + echo "Download FAIL: ${file}". Keeping for inspection + fi +done diff --git a/demo/sftp/std/testing/test_get_long.sh b/demo/sftp/std/testing/test_get_long.sh new file mode 100755 index 00000000..69c60a04 --- /dev/null +++ b/demo/sftp/std/testing/test_get_long.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +echo "Testing Single long GETs..." + +echo "Cleaning up previous run files" +rm -f -r ./*_random ./out/*_random + + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + + + +# Generate random data files +echo "Generating random data files..." +# Define test files +FILES=("100MB_random") + +echo "Generating random data files..." +dd if=/dev/random bs=1048576 count=100 of=./100MB_random 2>/dev/null +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +echo "Moving to the server folder..." 
+for file in "${FILES[@]}"; do + mv "./${file}" "./out/${file}" +done + +echo "Downloading files..." +sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} 2>&1 << EOF +$(printf 'get ./%s\n' "${FILES[@]}") +bye +EOF + +echo "DOWNLOAD Test Results:" +echo "=============" +# Test each file +for file in "${FILES[@]}"; do + if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then + echo "Download PASS: ${file}. Cleaning it" + rm -f -r ./${file} ./out/${file} + else + echo "Download FAIL: ${file}". Keeping for inspection + fi +done diff --git a/demo/sftp/std/testing/test_get_short.sh b/demo/sftp/std/testing/test_get_short.sh new file mode 100755 index 00000000..6d5b5799 --- /dev/null +++ b/demo/sftp/std/testing/test_get_short.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +echo "Testing Single long GETs..." + +echo "Cleaning up previous run files" +rm -f -r ./*_random ./out/*_random + + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + + + +# Generate random data files +echo "Generating random data files..." +# Define test files +FILES=("1MB_random") + +echo "Generating random data files..." +dd if=/dev/random bs=1048576 count=1 of=./1MB_random 2>/dev/null +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +echo "Moving to the server folder..." +for file in "${FILES[@]}"; do + mv "./${file}" "./out/${file}" +done + +echo "Downloading files..." +sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} 2>&1 << EOF +$(printf 'get %s\n' "${FILES[@]}") +bye +EOF + +echo "DOWNLOAD Test Results:" +echo "=============" +# Test each file +for file in "${FILES[@]}"; do + if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then + echo "Download PASS: ${file}. Cleaning it" + rm -f -r ./${file} ./out/${file} + else + echo "Download FAIL: ${file}". 
Keeping for inspection + fi +done diff --git a/demo/sftp/std/testing/test_long_write_requests.sh b/demo/sftp/std/testing/test_long_write_requests.sh new file mode 100755 index 00000000..71813ede --- /dev/null +++ b/demo/sftp/std/testing/test_long_write_requests.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("100MB_random") + +# Generate random data files +echo "Generating random data files..." +dd if=/dev/random bs=1048576 count=100 of=./100MB_random 2>/dev/null +# dd if=/dev/random bs=1048576 count=1024 of=./1024MB_random 2>/dev/null + +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +# Upload all files +sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} -vvv << EOF +$(printf 'put ./%s\n' "${FILES[@]}") +bye +EOF + +echo "Test Results:" +echo "=============" + +# Test each file +for file in "${FILES[@]}"; do + if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then + echo "PASS: ${file}" + else + echo "FAIL: ${file}" + fi +done + +echo "Cleaning up local files..." +rm -f -r ./*_random ./out/*_random + +echo "Upload test completed." \ No newline at end of file diff --git a/demo/sftp/std/testing/test_read_dir.sh b/demo/sftp/std/testing/test_read_dir.sh new file mode 100755 index 00000000..ec2f18d3 --- /dev/null +++ b/demo/sftp/std/testing/test_read_dir.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("A_random" "B_random" "D_random" "E_random" "F_random" "G_random") + +# Generate random data files +echo "Generating random data files..." +dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null + +# Generating copies of the test file +echo "Creating copies for each test file..." 
+for file in "${FILES[@]}"; do + cp ./512B_random "./${file}" + echo "Created: ${file}" +done +ls + +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +# Upload all files +sftp -vvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF +$(printf 'put ./%s\n' "${FILES[@]}") +ls -lh +bye +EOF + +echo "Cleaning up local files..." +rm -f -r ./*_random ./out/*_random + diff --git a/demo/sftp/std/testing/test_stats.sh b/demo/sftp/std/testing/test_stats.sh new file mode 100755 index 00000000..a5c2ceb5 --- /dev/null +++ b/demo/sftp/std/testing/test_stats.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +echo "Testing Stats..." + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("512B_random") + +# Generate random data files +echo "Generating random data files..." +dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null + +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +# Upload all files +sftp -vvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF +$(printf 'put ./%s\n' "${FILES[@]}") +$(printf 'ls -lh ./%s\n' "${FILES[@]}") + +bye +EOF + +echo "Cleaning up local files..." +rm -f -r ./*_random ./out/*_random diff --git a/demo/sftp/std/testing/test_write_requests.sh b/demo/sftp/std/testing/test_write_requests.sh new file mode 100755 index 00000000..cabab6b2 --- /dev/null +++ b/demo/sftp/std/testing/test_write_requests.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("512B_random" "16kB_random" "64kB_random" "65kB_random" "256kB_random" "1024kB_random" "2048kB_random") + +# Generate random data files +echo "Generating random data files..." 
+dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null +dd if=/dev/random bs=1024 count=16 of=./16kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=64 of=./64kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=65 of=./65kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=256 of=./256kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=1024 of=./1024kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=2048 of=./2048kB_random 2>/dev/null + + +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +# Upload all files +sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF +$(printf 'put ./%s\n' "${FILES[@]}") +bye +EOF + +echo "Test Results:" +echo "=============" + +# Test each file +for file in "${FILES[@]}"; do + if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then + echo "PASS: ${file}" + else + echo "FAIL: ${file}" + fi +done + +echo "Cleaning up local files..." +rm -f ./*_random ./out/*_random + +echo "Upload test completed." \ No newline at end of file From d80df75a6f47ba405a04f3194138cdbf24ff6015 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 5 Mar 2026 12:28:27 +1100 Subject: [PATCH 06/47] [skip ci] Improved demo sftp std testing scripts - Can be run from the repo base folder. 
Other pwd will fail - All of them will return a value coherent with the test result so they can be used for CI - Improved, but complicating dir listing and stat listing test: require expect and use expect script for the test --- .../std/testing/log_demo_sftp_with_test.sh | 3 + demo/sftp/std/testing/log_get_file_long.sh | 3 + demo/sftp/std/testing/log_get_file_short.sh | 3 + demo/sftp/std/testing/log_get_single_long.sh | 1 - demo/sftp/std/testing/log_get_single_short.sh | 1 - demo/sftp/std/testing/merge_logs.sh | 7 +- demo/sftp/std/testing/out/512B_random | Bin 0 -> 512 bytes demo/sftp/std/testing/test_get.sh | 53 ------ demo/sftp/std/testing/test_get_file_long.sh | 55 ++++++ demo/sftp/std/testing/test_get_file_short.sh | 55 ++++++ demo/sftp/std/testing/test_get_long.sh | 45 ----- demo/sftp/std/testing/test_get_short.sh | 45 ----- .../std/testing/test_long_write_requests.sh | 38 ----- demo/sftp/std/testing/test_ls_dir.sh | 121 ++++++++++++++ demo/sftp/std/testing/test_put_file_long.sh | 62 +++++++ demo/sftp/std/testing/test_put_files.sh | 69 ++++++++ demo/sftp/std/testing/test_read_dir.sh | 33 ---- demo/sftp/std/testing/test_stats.sh | 27 --- demo/sftp/std/testing/test_stats_file.sh | 156 ++++++++++++++++++ demo/sftp/std/testing/test_write_requests.sh | 44 ----- 20 files changed, 531 insertions(+), 290 deletions(-) create mode 100755 demo/sftp/std/testing/log_get_file_long.sh create mode 100755 demo/sftp/std/testing/log_get_file_short.sh delete mode 100755 demo/sftp/std/testing/log_get_single_long.sh delete mode 100755 demo/sftp/std/testing/log_get_single_short.sh create mode 100644 demo/sftp/std/testing/out/512B_random delete mode 100755 demo/sftp/std/testing/test_get.sh create mode 100755 demo/sftp/std/testing/test_get_file_long.sh create mode 100755 demo/sftp/std/testing/test_get_file_short.sh delete mode 100755 demo/sftp/std/testing/test_get_long.sh delete mode 100755 demo/sftp/std/testing/test_get_short.sh delete mode 100755 
demo/sftp/std/testing/test_long_write_requests.sh create mode 100755 demo/sftp/std/testing/test_ls_dir.sh create mode 100755 demo/sftp/std/testing/test_put_file_long.sh create mode 100755 demo/sftp/std/testing/test_put_files.sh delete mode 100755 demo/sftp/std/testing/test_read_dir.sh delete mode 100755 demo/sftp/std/testing/test_stats.sh create mode 100755 demo/sftp/std/testing/test_stats_file.sh delete mode 100755 demo/sftp/std/testing/test_write_requests.sh diff --git a/demo/sftp/std/testing/log_demo_sftp_with_test.sh b/demo/sftp/std/testing/log_demo_sftp_with_test.sh index 58768bc9..8e44f352 100755 --- a/demo/sftp/std/testing/log_demo_sftp_with_test.sh +++ b/demo/sftp/std/testing/log_demo_sftp_with_test.sh @@ -1,4 +1,7 @@ #!/bin/bash +# Used to run sftp demo while logging all the interactions with strace and tshark. +# The passed argument is the test file to run, for example: ./test_get_long.sh or ./test_get_short.sh +# This script will be run once the sftp demo is running. TIME_STAMP=$(date +%Y%m%d_%H%M%S) TEST_FILE=$1 diff --git a/demo/sftp/std/testing/log_get_file_long.sh b/demo/sftp/std/testing/log_get_file_long.sh new file mode 100755 index 00000000..a955cb5b --- /dev/null +++ b/demo/sftp/std/testing/log_get_file_long.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./log_demo_sftp_with_test.sh ./test_get_file_long.sh \ No newline at end of file diff --git a/demo/sftp/std/testing/log_get_file_short.sh b/demo/sftp/std/testing/log_get_file_short.sh new file mode 100755 index 00000000..627280c4 --- /dev/null +++ b/demo/sftp/std/testing/log_get_file_short.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./log_demo_sftp_with_test.sh ./test_get_file_short.sh \ No newline at end of file diff --git a/demo/sftp/std/testing/log_get_single_long.sh b/demo/sftp/std/testing/log_get_single_long.sh deleted file mode 100755 index f01b186d..00000000 --- a/demo/sftp/std/testing/log_get_single_long.sh +++ /dev/null @@ -1 +0,0 @@ -./log_demo_sftp_with_test.sh ./test_get_long.sh \ No newline at end of 
file diff --git a/demo/sftp/std/testing/log_get_single_short.sh b/demo/sftp/std/testing/log_get_single_short.sh deleted file mode 100755 index 58342c33..00000000 --- a/demo/sftp/std/testing/log_get_single_short.sh +++ /dev/null @@ -1 +0,0 @@ -./log_demo_sftp_with_test.sh ./test_get_short.sh \ No newline at end of file diff --git a/demo/sftp/std/testing/merge_logs.sh b/demo/sftp/std/testing/merge_logs.sh index 050c05d9..07b03b5b 100755 --- a/demo/sftp/std/testing/merge_logs.sh +++ b/demo/sftp/std/testing/merge_logs.sh @@ -2,8 +2,9 @@ # Useful to get events from both client and server logs in chronological order. # # usage: ./merge-logs.sh client.log server.log > merged.log - +CLIENT_LOG=$1 +SERVER_LOG=$2 { - awk 'match($0,/^\[([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:.]+Z)\]/,m){print m[1] "\tC:\t" $0}' "$1" - awk 'match($0,/^\[([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:.]+Z)/,m){print m[1] "\tS:\t" $0}' "$2" + awk 'match($0,/^\[([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:.]+Z)\]/,m){print m[1] "\tC:\t" $0}' "$CLIENT_LOG" + awk 'match($0,/^\[([0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9:.]+Z)/,m){print m[1] "\tS:\t" $0}' "$SERVER_LOG" } | sort -t $'\t' -k1,1 | cut -f2- \ No newline at end of file diff --git a/demo/sftp/std/testing/out/512B_random b/demo/sftp/std/testing/out/512B_random new file mode 100644 index 0000000000000000000000000000000000000000..6c6c281ce6b40a2064ff021dd28c607e97e0a3af GIT binary patch literal 512 zcmV+b0{{K1=A$-%XY#g1if0qpp9VZjuCo`ba{h>q^`+l$rxxu_`GM^@0cjdkS!_N) z$&y1YSospsa4&ONl`hUjB;qBg@>>t!+wu!8JVl%EXa>FSOjE}>*qlU$q7V?8wnh-Y zz#Vbb)f{({xUz<#+H4Jh*GpkX<-n~>7Cs<4x#u)=?5l7%Bek)~cZb_z4>R^1u5cst zWrJd6W$bYV{1u#;1u>}{qNmuR_oxX~7nZ1p`>rAnDLo&0j^r^79Y`ef+*$vPs*Hqa zAwVx3g=<2|76DA9cJ2K^>*bQ>$?k%jA<6AJ&W!yjIVMEyUAVe3B>oKaX60Fw!-?@L z*$0sv`B9oiq(enA@{v+3ZjtLj%_DQUrv+^|B4XEgoxlREZdTGKZrU~JFjndp3>)g2 zIxUN*odzkuuBcetwNjI!GT>zyz`X2uJu3Jh%j-ES7ew-3X>&Eq8xDh7k1^YDm^;Xn z83@*j2czi_&SAckS8&nff-ppyDYI`|j!aAdUPt#P&B9q4OYHw&)*;#_$x+Yv+Vcsp 
zIH1TeYFT4g!HPTRSC((VrtUr$Fv@(Ve32nzp|~r*%vsfJd)`VkWd@UJNoh|$D2b-T zS}VBr*zFB)!F9A1!i7?FN=cHB3JUitW*cMuK#6qL`}Iw5w@N%V C-1#5? literal 0 HcmV?d00001 diff --git a/demo/sftp/std/testing/test_get.sh b/demo/sftp/std/testing/test_get.sh deleted file mode 100755 index 913e1741..00000000 --- a/demo/sftp/std/testing/test_get.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -echo "Testing Multiple GETs..." - -echo "Cleaning up previous run files" -rm -f -r ./*_random ./out/*_random - - -# Set remote server details -REMOTE_HOST="192.168.69.2" -REMOTE_USER="any" - -# Define test files -FILES=("512B_random" "16kB_random" "64kB_random" "65kB_random" "2048kB_random") - - -echo "Generating random data files..." -dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null -dd if=/dev/random bs=1024 count=16 of=./16kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=64 of=./64kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=65 of=./65kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=256 of=./256kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=1024 of=./1024kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=2048 of=./2048kB_random 2>/dev/null -echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." - -echo "Moving to the server folder..." -for file in "${FILES[@]}"; do - mv "./${file}" "./out/${file}" -done - -echo "Output folder content:" - -ls ./out -l - -echo "Downloading files..." -sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF -$(printf 'get ./%s\n' "${FILES[@]}") - -bye -EOF - -echo "DOWNLOAD Test Results:" -echo "=============" -# Test each file -for file in "${FILES[@]}"; do - if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then - echo "Download PASS: ${file}. Cleaning it" - rm -f -r ./${file} ./out/${file} - else - echo "Download FAIL: ${file}". 
Keeping for inspection - fi -done diff --git a/demo/sftp/std/testing/test_get_file_long.sh b/demo/sftp/std/testing/test_get_file_long.sh new file mode 100755 index 00000000..aeb51fb5 --- /dev/null +++ b/demo/sftp/std/testing/test_get_file_long.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Tests the GET command with a single file of 100MB +# It tests if the downloaded file is the same as the original one (diff) +# Run it from the project root directory or testing folder + +BASE_DIR=$(pwd) + +if [ -f "Cargo.toml" ]; then + REMOTE_DIR=$BASE_DIR"/demo/sftp/std/testing/out" +elif [[ "$BASE_DIR" == *"/testing"* ]]; then + REMOTE_DIR=$BASE_DIR"/out" +else + echo "Please run this script from the project root or from the testing folder" + exit 1 +fi + +echo "Testing Single long GETs..." + +echo "Cleaning up previous run files" +rm -f -r $BASE_DIR/*_random $REMOTE_DIR/*_random + + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + + + +# Generate random data files +echo "Generating random data files..." +# Define test files +FILES=("100MB_random") + +echo "Generating random data files..." +dd if=/dev/random bs=1048576 count=100 of=$REMOTE_DIR/100MB_random 2>/dev/null + + +echo "Downloading files..." +sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} 2>&1 << EOF +$(printf 'get ./%s\n' "${FILES[@]}") +bye +EOF + +echo "DOWNLOAD Test Results:" +echo "=============" +# Test each file +for file in "${FILES[@]}"; do + if diff "$BASE_DIR/${file}" "$REMOTE_DIR/${file}" >/dev/null 2>&1; then + echo "Download PASS: ${file}. 
Cleaning it" + rm -f -r "$BASE_DIR/${file}" "$REMOTE_DIR/${file}" + else + echo "Download FAIL: Keeping downloaded and remote files for inspection + ${BASE_DIR}/${file} and ${REMOTE_DIR}/${file}" + fi +done diff --git a/demo/sftp/std/testing/test_get_file_short.sh b/demo/sftp/std/testing/test_get_file_short.sh new file mode 100755 index 00000000..e1cc6b64 --- /dev/null +++ b/demo/sftp/std/testing/test_get_file_short.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Tests the GET command with a single file of 1MB +# It tests if the downloaded file is the same as the original one (diff) +# Run it from the project root directory or testing folder + +BASE_DIR=$(pwd) + +if [ -f "Cargo.toml" ]; then + REMOTE_DIR=$BASE_DIR"/demo/sftp/std/testing/out" +elif [[ "$BASE_DIR" == *"/testing"* ]]; then + REMOTE_DIR=$BASE_DIR"/out" +else + echo "Please run this script from the project root or from the testing folder" + exit 1 +fi + +echo "Testing Single short GETs..." + +echo "Cleaning up previous run files" +rm -f -r $BASE_DIR/*_random $REMOTE_DIR/*_random + + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + + + +# Generate random data files +echo "Generating random data files..." +# Define test files +FILES=("1MB_random") + +echo "Generating random data files..." +dd if=/dev/random bs=1048576 count=1 of=$REMOTE_DIR/1MB_random 2>/dev/null + + +echo "Downloading files..." +sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} 2>&1 << EOF +$(printf 'get ./%s\n' "${FILES[@]}") +bye +EOF + +echo "DOWNLOAD Test Results:" +echo "=============" +# Test each file +for file in "${FILES[@]}"; do + if diff "$BASE_DIR/${file}" "$REMOTE_DIR/${file}" >/dev/null 2>&1; then + echo "Download PASS: ${file}. 
Cleaning it" + rm -f -r "$BASE_DIR/${file}" "$REMOTE_DIR/${file}" + else + echo "Download FAIL: Keeping downloaded and remote files for inspection + ${BASE_DIR}/${file} and ${REMOTE_DIR}/${file}" + fi +done diff --git a/demo/sftp/std/testing/test_get_long.sh b/demo/sftp/std/testing/test_get_long.sh deleted file mode 100755 index 69c60a04..00000000 --- a/demo/sftp/std/testing/test_get_long.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -echo "Testing Single long GETs..." - -echo "Cleaning up previous run files" -rm -f -r ./*_random ./out/*_random - - -# Set remote server details -REMOTE_HOST="192.168.69.2" -REMOTE_USER="any" - - - -# Generate random data files -echo "Generating random data files..." -# Define test files -FILES=("100MB_random") - -echo "Generating random data files..." -dd if=/dev/random bs=1048576 count=100 of=./100MB_random 2>/dev/null -echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." - -echo "Moving to the server folder..." -for file in "${FILES[@]}"; do - mv "./${file}" "./out/${file}" -done - -echo "Downloading files..." -sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} 2>&1 << EOF -$(printf 'get ./%s\n' "${FILES[@]}") -bye -EOF - -echo "DOWNLOAD Test Results:" -echo "=============" -# Test each file -for file in "${FILES[@]}"; do - if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then - echo "Download PASS: ${file}. Cleaning it" - rm -f -r ./${file} ./out/${file} - else - echo "Download FAIL: ${file}". Keeping for inspection - fi -done diff --git a/demo/sftp/std/testing/test_get_short.sh b/demo/sftp/std/testing/test_get_short.sh deleted file mode 100755 index 6d5b5799..00000000 --- a/demo/sftp/std/testing/test_get_short.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -echo "Testing Single long GETs..." 
- -echo "Cleaning up previous run files" -rm -f -r ./*_random ./out/*_random - - -# Set remote server details -REMOTE_HOST="192.168.69.2" -REMOTE_USER="any" - - - -# Generate random data files -echo "Generating random data files..." -# Define test files -FILES=("1MB_random") - -echo "Generating random data files..." -dd if=/dev/random bs=1048576 count=1 of=./1MB_random 2>/dev/null -echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." - -echo "Moving to the server folder..." -for file in "${FILES[@]}"; do - mv "./${file}" "./out/${file}" -done - -echo "Downloading files..." -sftp -vvvvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} 2>&1 << EOF -$(printf 'get %s\n' "${FILES[@]}") -bye -EOF - -echo "DOWNLOAD Test Results:" -echo "=============" -# Test each file -for file in "${FILES[@]}"; do - if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then - echo "Download PASS: ${file}. Cleaning it" - rm -f -r ./${file} ./out/${file} - else - echo "Download FAIL: ${file}". Keeping for inspection - fi -done diff --git a/demo/sftp/std/testing/test_long_write_requests.sh b/demo/sftp/std/testing/test_long_write_requests.sh deleted file mode 100755 index 71813ede..00000000 --- a/demo/sftp/std/testing/test_long_write_requests.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# Set remote server details -REMOTE_HOST="192.168.69.2" -REMOTE_USER="any" - -# Define test files -FILES=("100MB_random") - -# Generate random data files -echo "Generating random data files..." -dd if=/dev/random bs=1048576 count=100 of=./100MB_random 2>/dev/null -# dd if=/dev/random bs=1048576 count=1024 of=./1024MB_random 2>/dev/null - -echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." 
- -# Upload all files -sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} -vvv << EOF -$(printf 'put ./%s\n' "${FILES[@]}") -bye -EOF - -echo "Test Results:" -echo "=============" - -# Test each file -for file in "${FILES[@]}"; do - if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then - echo "PASS: ${file}" - else - echo "FAIL: ${file}" - fi -done - -echo "Cleaning up local files..." -rm -f -r ./*_random ./out/*_random - -echo "Upload test completed." \ No newline at end of file diff --git a/demo/sftp/std/testing/test_ls_dir.sh b/demo/sftp/std/testing/test_ls_dir.sh new file mode 100755 index 00000000..a022a2e7 --- /dev/null +++ b/demo/sftp/std/testing/test_ls_dir.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# Writes some files in the remote server folder and list them with the LS command +# Run it from the project root directory or testing folder +# This script requires expect tool + +if ! command -v expect >/dev/null 2>&1; then + echo "Error: 'expect' is not installed or not in PATH." + echo "Please install it and run this test again." + exit 1 +fi + +BASE_DIR=$(pwd) + +if [ -f "Cargo.toml" ]; then + REMOTE_DIR=$BASE_DIR"/demo/sftp/std/testing/out" +elif [[ "$BASE_DIR" == *"/testing"* ]]; then + REMOTE_DIR=$BASE_DIR"/out" +else + echo "Please run this script from the project root or from the testing folder" + exit 1 +fi + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("A_random" "B_random" "D_random" "E_random" "F_random" "G_random") + +# Generate random data files +echo "Generating random data files..." +dd if=/dev/random bs=512 count=1 of=$REMOTE_DIR/512B_random 2>/dev/null + +# Generating copies of the test file +echo "Creating copies for each test file..." 
+for file in "${FILES[@]}"; do + cp $REMOTE_DIR/512B_random "$REMOTE_DIR/${file}" +done + +rm $REMOTE_DIR/512B_random + +echo "Files created in remote folder ($REMOTE_DIR):" +echo "=============" +ls -l $REMOTE_DIR +echo "" + +# Using expect to automate the sftp session and list the files in the remote folder +# Comparing them to the expected files list + + +echo "Checking that the filenames are present" +echo "==============" + + +FILES_STR="${FILES[*]}" +export FILES_STR REMOTE_HOST REMOTE_USER +expect << 'EOF' +set timeout 20 + +spawn sftp -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $env(REMOTE_USER)@$env(REMOTE_HOST) + +# Wait for sftp> prompt +expect { + -re {(?m)^sftp> ?$} {} + -re {(?i)password:} { + puts "ERROR: password prompt received" + exit 1 + } + -re {.+\n} { exp_continue } + timeout { + puts "ERROR: did not receive sftp prompt" + exit 1 + } + eof { + puts "ERROR: sftp terminated before showing prompt" + exit 1 + } +} + +send -- "ls -1\r" +expect { + -re {(?ms)(.*)\r?\nsftp> ?$} { + set ls_output $expect_out(1,string) + } + timeout { + puts "ERROR: did not receive prompt after ls" + exit 1 + } + eof { + puts "ERROR: sftp terminated after ls" + exit 1 + } +} +# Normalize CRLF -> LF for reliable matching +regsub -all {\r} $ls_output "" ls_output + +set expected_files [split $env(FILES_STR) " "] +foreach f $expected_files { + if {![regexp -line -- "^$f$" $ls_output]} { + puts "ERROR: missing file: $f" + exit 1 + } +} +send -- "bye\r" +expect eof +EOF +EXPECT_RESULT=$? + +echo "Cleaning up local files..." 
+rm -f -r $REMOTE_DIR/*_random + +if [ "$EXPECT_RESULT" -ne 0 ]; then + echo "SFTP connection test failed" + exit 1 +else + echo "SFTP connection test passed: all expected files are present" + + exit 0 +fi + + diff --git a/demo/sftp/std/testing/test_put_file_long.sh b/demo/sftp/std/testing/test_put_file_long.sh new file mode 100755 index 00000000..ac2879f0 --- /dev/null +++ b/demo/sftp/std/testing/test_put_file_long.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Test PUT a long file of 100MB +# It tests if the uploaded file is the same as the original one (diff) +# Run it from the project root directory or testing folder + +BASE_DIR=$(pwd) + +if [ -f "Cargo.toml" ]; then + REMOTE_DIR=$BASE_DIR"/demo/sftp/std/testing/out" +elif [[ "$BASE_DIR" == *"/testing"* ]]; then + REMOTE_DIR=$BASE_DIR"/out" +else + echo "Please run this script from the project root or from the testing folder" + exit 1 +fi + +# Cleaning the remote directory +rm -f -r $REMOTE_DIR/* + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("100MB_random") + +# Generate random data files +echo "Generating random data files..." +dd if=/dev/random bs=1048576 count=100 of=$BASE_DIR/100MB_random 2>/dev/null + + +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." + +# Upload all files +sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${REMOTE_USER}@${REMOTE_HOST} << EOF +$(printf "put $BASE_DIR/%s\n" "${FILES[@]}") +bye +EOF + +echo "Test Results:" +echo "=============" + +# Test each file +DIFF_RESULT=0 +for file in "${FILES[@]}"; do + if diff "$BASE_DIR/${file}" "$REMOTE_DIR/${file}" >/dev/null 2>&1; then + echo "PASS: ${file}" + rm -f -r "$BASE_DIR"/${file} "$REMOTE_DIR"/${file} + else + ((DIFF_RESULT++)) + echo "FAIL: ${file}" + fi +done + +if [ "$DIFF_RESULT" -ne 0 ]; then + echo "$DIFF_RESULT files failed: Keeping file(s) for inspection" + exit "$DIFF_RESULT" +else + echo "Upload test Passed." 
+ exit 0 +fi + diff --git a/demo/sftp/std/testing/test_put_files.sh b/demo/sftp/std/testing/test_put_files.sh new file mode 100755 index 00000000..a7074e2d --- /dev/null +++ b/demo/sftp/std/testing/test_put_files.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# Test PUT a small files onto the server +# It tests if the uploaded file is the same as the original one (diff) +# Run it from the project root directory or testing folder + +BASE_DIR=$(pwd) + +if [ -f "Cargo.toml" ]; then + REMOTE_DIR=$BASE_DIR"/demo/sftp/std/testing/out" +elif [[ "$BASE_DIR" == *"/testing"* ]]; then + REMOTE_DIR=$BASE_DIR"/out" +else + echo "Please run this script from the project root or from the testing folder" + exit 1 +fi + +# Cleaning the remote directory +rm -f -r $REMOTE_DIR/* + + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("512B_random" "16kB_random" "64kB_random" "65kB_random" "256kB_random" "1024kB_random" "2048kB_random") + +# Generate random data files +echo "Generating random data files..." +dd if=/dev/random bs=512 count=1 of=$BASE_DIR/512B_random 2>/dev/null +dd if=/dev/random bs=1024 count=16 of=$BASE_DIR/16kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=64 of=$BASE_DIR/64kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=65 of=$BASE_DIR/65kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=256 of=$BASE_DIR/256kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=1024 of=$BASE_DIR/1024kB_random 2>/dev/null +dd if=/dev/random bs=1024 count=2048 of=$BASE_DIR/2048kB_random 2>/dev/null + + +echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." 
+ +# Upload all files +sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${REMOTE_USER}@${REMOTE_HOST} -vvv << EOF +$(printf "put $BASE_DIR/%s\n" "${FILES[@]}") +bye +EOF + +echo "Test Results:" +echo "=============" + +# Test each file +DIFF_RESULT=0 +for file in "${FILES[@]}"; do + if diff "$BASE_DIR/${file}" "$REMOTE_DIR/${file}" >/dev/null 2>&1; then + echo "PASS: ${file}" + rm -f -r "$BASE_DIR"/${file} "$REMOTE_DIR"/${file} + else + ((DIFF_RESULT++)) + echo "FAIL: ${file}" + fi +done + +if [ "$DIFF_RESULT" -ne 0 ]; then + echo "$DIFF_RESULT files failed: Keeping file(s) for inspection" + exit "$DIFF_RESULT" +else + echo "Upload test Passed." + exit 0 +fi + diff --git a/demo/sftp/std/testing/test_read_dir.sh b/demo/sftp/std/testing/test_read_dir.sh deleted file mode 100755 index ec2f18d3..00000000 --- a/demo/sftp/std/testing/test_read_dir.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -# Set remote server details -REMOTE_HOST="192.168.69.2" -REMOTE_USER="any" - -# Define test files -FILES=("A_random" "B_random" "D_random" "E_random" "F_random" "G_random") - -# Generate random data files -echo "Generating random data files..." -dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null - -# Generating copies of the test file -echo "Creating copies for each test file..." -for file in "${FILES[@]}"; do - cp ./512B_random "./${file}" - echo "Created: ${file}" -done -ls - -echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." - -# Upload all files -sftp -vvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF -$(printf 'put ./%s\n' "${FILES[@]}") -ls -lh -bye -EOF - -echo "Cleaning up local files..." 
-rm -f -r ./*_random ./out/*_random - diff --git a/demo/sftp/std/testing/test_stats.sh b/demo/sftp/std/testing/test_stats.sh deleted file mode 100755 index a5c2ceb5..00000000 --- a/demo/sftp/std/testing/test_stats.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -echo "Testing Stats..." - -# Set remote server details -REMOTE_HOST="192.168.69.2" -REMOTE_USER="any" - -# Define test files -FILES=("512B_random") - -# Generate random data files -echo "Generating random data files..." -dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null - -echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." - -# Upload all files -sftp -vvv -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF -$(printf 'put ./%s\n' "${FILES[@]}") -$(printf 'ls -lh ./%s\n' "${FILES[@]}") - -bye -EOF - -echo "Cleaning up local files..." -rm -f -r ./*_random ./out/*_random diff --git a/demo/sftp/std/testing/test_stats_file.sh b/demo/sftp/std/testing/test_stats_file.sh new file mode 100755 index 00000000..6673b019 --- /dev/null +++ b/demo/sftp/std/testing/test_stats_file.sh @@ -0,0 +1,156 @@ +#!/bin/bash +# This test checks the stats of a single file +# Run it from the project root directory or testing folder +# This script requires expect tool + +if ! command -v expect >/dev/null 2>&1; then + echo "Error: 'expect' is not installed or not in PATH." + echo "Please install it and run this test again." + exit 1 +fi + +BASE_DIR=$(pwd) + +if [ -f "Cargo.toml" ]; then + REMOTE_DIR=$BASE_DIR"/demo/sftp/std/testing/out" +elif [[ "$BASE_DIR" == *"/testing"* ]]; then + REMOTE_DIR=$BASE_DIR"/out" +else + echo "Please run this script from the project root or from the testing folder" + exit 1 +fi + +echo "Testing Stats..." + +# Set remote server details +REMOTE_HOST="192.168.69.2" +REMOTE_USER="any" + +# Define test files +FILES=("512B_random") + +# Generate random data files +echo "Generating random data files..." 
+dd if=/dev/random bs=512 count=1 of=$REMOTE_DIR/512B_random 2>/dev/null + +# # List files +# sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${REMOTE_USER}@${REMOTE_HOST} << EOF +# $(printf 'ls -l %s\n' "${FILES[@]} | awk '{print $1, $9}'") +# bye +# EOF + +FILES_STR="${FILES[*]}" + +export REMOTE_HOST REMOTE_USER FILES_STR +expect << 'EOF' +set timeout 20 + +spawn sftp -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null $env(REMOTE_USER)@$env(REMOTE_HOST) + +# Wait for sftp> prompt +expect { + -re {(?m)^sftp> ?$} {} + -re {(?i)password:} { + puts "ERROR: password prompt received" + exit 1 + } + -re {.+\n} { exp_continue } + timeout { + puts "ERROR: did not receive sftp prompt" + exit 1 + } + eof { + puts "ERROR: sftp terminated before showing prompt" + exit 1 + } +} + +send -- "ls -ln\r" +expect { + -re {(?ms)(.*)\r?\nsftp> ?$} { + set ls_output $expect_out(0,string) + } + timeout { + puts "ERROR: did not receive prompt after ls" + exit 1 + } + eof { + puts "ERROR: sftp terminated after ls" + exit 1 + } +} + +# Normalize CRLF -> LF +regsub -all {\r} $ls_output "" ls_output + +# Hardcoded expected values. 
If +set expected_name "512B_random" +set expected_perm "-rw-rw-r--" +set expected_uid "1000" +set expected_gid "1000" +set expected_size "512" + +set found 0 +foreach line [split $ls_output "\n"] { + set line [string trim $line] + if {$line eq ""} { continue } + if {[string match "ls -ln*" $line]} { continue } ;# echoed command + if {[string match "sftp>*" $line]} { continue } ;# prompt + if {[string match "total *" $line]} { continue } ;# ls header + + puts "Good candidate: <$line>" + + # Split into non-space fields: + # perms links uid gid size month day time-or-year name + set fields [regexp -all -inline {\S+} $line] + if {[llength $fields] < 9} { + puts "Skip: not enough fields: <$line>" + continue + } + + set perm [lindex $fields 0] + set uid [lindex $fields 2] + set gid [lindex $fields 3] + set size [lindex $fields 4] + set name [lindex $fields end] + + puts "Parsed: perm=$perm uid=$uid gid=$gid size=$size name=$name" + + if {$name ne $expected_name} { + puts "Skip: different filename: <$line>" + continue + } + + set found 1 + + if {$perm ne $expected_perm || $uid ne $expected_uid || $gid ne $expected_gid || $size ne $expected_size} { + puts "ERROR: stat mismatch for $expected_name" + puts " expected: perm=$expected_perm uid=$expected_uid gid=$expected_gid size=$expected_size" + puts " actual: perm=$perm uid=$uid gid=$gid size=$size" + exit 1 + } +} + +if {!$found} { + puts "ERROR: file $expected_name not found in ls output" + exit 1 +} else { + puts "Stats test passed: file $expected_name has expected permissions, ownership and size" + exit 0 +} + +send -- "bye\r" +expect eof +EOF +EXPECT_RESULT=$? + +if [ "$EXPECT_RESULT" -ne 0 ]; then + echo "SFTP stats test failed" + exit 1 +else + echo "SFTP stats test passed" + exit 0 +fi + +echo "Cleaning up local files..." 
+rm -f -r $REMOTE_DIR/*_random \ No newline at end of file diff --git a/demo/sftp/std/testing/test_write_requests.sh b/demo/sftp/std/testing/test_write_requests.sh deleted file mode 100755 index cabab6b2..00000000 --- a/demo/sftp/std/testing/test_write_requests.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Set remote server details -REMOTE_HOST="192.168.69.2" -REMOTE_USER="any" - -# Define test files -FILES=("512B_random" "16kB_random" "64kB_random" "65kB_random" "256kB_random" "1024kB_random" "2048kB_random") - -# Generate random data files -echo "Generating random data files..." -dd if=/dev/random bs=512 count=1 of=./512B_random 2>/dev/null -dd if=/dev/random bs=1024 count=16 of=./16kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=64 of=./64kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=65 of=./65kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=256 of=./256kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=1024 of=./1024kB_random 2>/dev/null -dd if=/dev/random bs=1024 count=2048 of=./2048kB_random 2>/dev/null - - -echo "Uploading files to ${REMOTE_USER}@${REMOTE_HOST}..." - -# Upload all files -sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=DEBUG ${REMOTE_USER}@${REMOTE_HOST} << EOF -$(printf 'put ./%s\n' "${FILES[@]}") -bye -EOF - -echo "Test Results:" -echo "=============" - -# Test each file -for file in "${FILES[@]}"; do - if diff "./${file}" "./out/${file}" >/dev/null 2>&1; then - echo "PASS: ${file}" - else - echo "FAIL: ${file}" - fi -done - -echo "Cleaning up local files..." -rm -f ./*_random ./out/*_random - -echo "Upload test completed." 
\ No newline at end of file From 4b487ebb2c5f262d600988a09cce78b4ea7deff8 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 5 Mar 2026 12:29:12 +1100 Subject: [PATCH 07/47] CI updated --- testing/ci.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/testing/ci.sh b/testing/ci.sh index 162df28a..49b56766 100755 --- a/testing/ci.sh +++ b/testing/ci.sh @@ -3,7 +3,7 @@ set -v set -e -export CARGO_TARGET_DIR=target/ci +export CARGO_TARGET_DIR=testing/target # Set OFFLINE=1 to avoid rustup. cargo might still run offline. @@ -74,6 +74,15 @@ cargo build --release --no-default-features --features w5500,romfw ) size target/thumbv6m-none-eabi/release/sunset-demo-picow | tee "$OUT/picow-size.txt" +( +cd demo/sftp/std +cargo build --release +cargo test --release +cargo bloat --release -n 100 | tee "$OUT/sftp-std-bloat.txt" +cargo bloat --release --crates | tee "$OUT/sftp-std-bloat-crates.txt" +) +size ./target/release/sunset-demo-sftp-std | tee "$OUT/sftp-std-size.txt" + ( cd fuzz cargo check --features nofuzz --profile fuzz From 8b8db45fd1a9738f897e32316fbe3c7064acc7d3 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 5 Mar 2026 15:17:33 +1100 Subject: [PATCH 08/47] CI fix: cargo fmt Should have run testing/ci.sh beforehand --- sftp/src/proto.rs | 2 +- sftp/src/sftpsource.rs | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index 7b1857b8..ee61edb4 100644 --- a/sftp/src/proto.rs +++ b/sftp/src/proto.rs @@ -89,7 +89,7 @@ impl<'a> From<&'a str> for Filename<'a> { impl<'a> Filename<'a> { /// pub fn as_str(&self) -> Result<&'a str, WireError> { - core::str::from_utf8(self.0 .0).map_err(|_| WireError::BadString) + core::str::from_utf8(self.0.0).map_err(|_| WireError::BadString) } } diff --git a/sftp/src/sftpsource.rs b/sftp/src/sftpsource.rs index 837ee0f1..26d28081 100644 --- a/sftp/src/sftpsource.rs +++ b/sftp/src/sftpsource.rs @@ -1,6 +1,6 @@ use crate::proto::{ - SftpNum, 
SFTP_FIELD_ID_INDEX, SFTP_FIELD_LEN_INDEX, SFTP_FIELD_LEN_LENGTH, - SFTP_FIELD_REQ_ID_INDEX, SFTP_FIELD_REQ_ID_LEN, + SFTP_FIELD_ID_INDEX, SFTP_FIELD_LEN_INDEX, SFTP_FIELD_LEN_LENGTH, + SFTP_FIELD_REQ_ID_INDEX, SFTP_FIELD_REQ_ID_LEN, SftpNum, }; use sunset::sshwire::{SSHSource, WireError, WireResult}; @@ -26,9 +26,7 @@ impl<'de> SSHSource<'de> for SftpSource<'de> { self.index += len; trace!( "slice returned: {:?}. original index {:?}, new index: {:?}", - slice, - original_index, - self.index + slice, original_index, self.index ); Ok(slice) } From 8d90991ed224d646ff854b71c121accf325a8925 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Tue, 10 Mar 2026 15:28:53 +1100 Subject: [PATCH 09/47] Addressing easier some points in the review Thanks for the review, you have risen some good points. I am going to continue addressing your review, for now these are my changes: - removed default = [] as it is unnecessary - warn->debug for From request_packet_type for SftpPacket - requestholder.rs::RequestHolder.valid_request() : explicit None on Err() try_get_ref() - SftpServer.rs::SftpServe.stats()->attrs() and uses replaced - sftpsource.rs::SftpSource.peak_packet_type()->peek_packet_type() - ci.sh undo revert from [342a515](https://github.com/mkj/sunset/commit/342a5156475c9934f7f157382af16eec369c1c47) and now builds `demo/sftp/std` without release or bloat --- sftp/Cargo.toml | 1 - sftp/src/proto.rs | 4 ++-- sftp/src/sftphandler/requestholder.rs | 17 ++++++++++------- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/sftp/Cargo.toml b/sftp/Cargo.toml index 23e4511a..4822179b 100644 --- a/sftp/Cargo.toml +++ b/sftp/Cargo.toml @@ -4,7 +4,6 @@ version = "0.1.2" edition = "2024" [features] -default = [] # long paths support, which allows paths up to 4096 bytes, by default paths are limited to 256 bytes long-paths-4096 = [] long-paths-1024 = [] diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index ee61edb4..6ea55e0e 100644 --- a/sftp/src/proto.rs +++ 
b/sftp/src/proto.rs @@ -936,7 +936,7 @@ macro_rules! sftpmessages { /// **Warning**: No Sequence Id can be infered from a Packet Type impl<'a> From<$request_packet_type> for SftpPacket<'a> { fn from(s: $request_packet_type) -> SftpPacket<'a> { - warn!("Casting from {:?} to SftpPacket cannot set Request Id",$request_ssh_fxp_name); + debug!("Casting from {:?} to SftpPacket cannot set Request Id",$request_ssh_fxp_name); SftpPacket::$request_packet_variant(ReqId(0), s) } } @@ -945,7 +945,7 @@ macro_rules! sftpmessages { /// **Warning**: No Sequence Id can be infered from a Packet Type impl<'a> From<$response_packet_type> for SftpPacket<'a> { fn from(s: $response_packet_type) -> SftpPacket<'a> { - warn!("Casting from {:?} to SftpPacket cannot set Request Id",$response_ssh_fxp_name); + debug!("Casting from {:?} to SftpPacket cannot set Request Id",$response_ssh_fxp_name); SftpPacket::$response_packet_variant(ReqId(0), s) } } diff --git a/sftp/src/sftphandler/requestholder.rs b/sftp/src/sftphandler/requestholder.rs index e962118f..e4a80172 100644 --- a/sftp/src/sftphandler/requestholder.rs +++ b/sftp/src/sftphandler/requestholder.rs @@ -205,13 +205,16 @@ impl<'a> RequestHolder<'a> { if !self.busy { return None; } - let mut source = SftpSource::new(self.try_get_ref().unwrap_or(&[0])); - match SftpPacket::decode_request(&mut source) { - Ok(request) => { - return Some(request); - } - Err(..) 
=> return None, - } + let Ok(buffer_ref) = self.try_get_ref() else { + return None; + }; + + let mut source = SftpSource::new(buffer_ref); + + let Ok(request) = SftpPacket::decode_request(&mut source) else { + return None; + }; + return Some(request); } /// Gets a reference to the slice that it is holding From 96617eb78930ed89f48c07efe38c813f8e0dd4bc Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Tue, 24 Mar 2026 15:42:14 +1100 Subject: [PATCH 10/47] Fixing missing points in previous commit These should have been added to the previous commit but I did not track the changes: - ci.sh: Changing back teh target folder - sftpserver.rs: Renaming stats to attrs trait and changing all related implementations and calls - sftpsource.rs: Fixing typo peak-> peek in file and related calls --- demo/sftp/std/src/demosftpserver.rs | 2 +- sftp/src/sftphandler/requestholder.rs | 2 +- sftp/src/sftphandler/sftphandler.rs | 4 ++-- sftp/src/sftpserver.rs | 4 ++-- sftp/src/sftpsource.rs | 6 +++--- testing/ci.sh | 8 +++----- 6 files changed, 12 insertions(+), 14 deletions(-) diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index 2a703bce..7bc7dbce 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -415,7 +415,7 @@ impl SftpServer<'_, OFH> for DemoSftpServer { } } - async fn stats( + async fn attrs( &mut self, follow_links: bool, file_path: &str, diff --git a/sftp/src/sftphandler/requestholder.rs b/sftp/src/sftphandler/requestholder.rs index e4a80172..fe4ac47f 100644 --- a/sftp/src/sftphandler/requestholder.rs +++ b/sftp/src/sftphandler/requestholder.rs @@ -169,7 +169,7 @@ impl<'a> RequestHolder<'a> { self.try_append_slice(&[slice[0]])?; slice = &slice[1..]; let mut source = SftpSource::new(self.try_get_ref()?); - if let Ok(pt) = source.peak_packet_type() { + if let Ok(pt) = source.peek_packet_type() { if !pt.is_request() { error!("The request candidate is not a request: {pt:?}"); return 
Err(RequestHolderError::NotRequest); diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs index 50500a50..339e61a2 100644 --- a/sftp/src/sftphandler/sftphandler.rs +++ b/sftp/src/sftphandler/sftphandler.rs @@ -483,7 +483,7 @@ where SftpPacket::LStat(req_id, LStat { file_path: path }) => { match self .file_server - .stats(false, path.as_str()?) + .attrs(false, path.as_str()?) .await { Ok(attrs) => { @@ -517,7 +517,7 @@ where SftpPacket::Stat(req_id, Stat { file_path: path }) => { match self .file_server - .stats(true, path.as_str()?) + .attrs(true, path.as_str()?) .await { Ok(attrs) => { diff --git a/sftp/src/sftpserver.rs b/sftp/src/sftpserver.rs index 35019f55..5942fa80 100644 --- a/sftp/src/sftpserver.rs +++ b/sftp/src/sftpserver.rs @@ -193,8 +193,8 @@ where } } - /// Provides the stats of the given file path - fn stats( + /// Provides the attributes of the given file path + fn attrs( &mut self, follow_links: bool, file_path: &str, diff --git a/sftp/src/sftpsource.rs b/sftp/src/sftpsource.rs index 26d28081..69c9699e 100644 --- a/sftp/src/sftpsource.rs +++ b/sftp/src/sftpsource.rs @@ -46,7 +46,7 @@ impl<'de> SftpSource<'de> { debug!("New source with content: : {:?}", buffer); SftpSource { buffer: buffer, index: 0 } } - /// Peaks the buffer for packet type [`SftpNum`]. This does not advance + /// Peeks the buffer for packet type [`SftpNum`]. 
This does not advance /// the reading index /// /// Useful to observe the packet fields in special conditions where a @@ -54,7 +54,7 @@ impl<'de> SftpSource<'de> { /// /// **Warning**: will only work in well formed packets, in other case /// the result will contains garbage - pub(crate) fn peak_packet_type(&self) -> WireResult { + pub(crate) fn peek_packet_type(&self) -> WireResult { if self.buffer.len() <= SFTP_FIELD_ID_INDEX { debug!( "Peak packet type failed: buffer len <= SFTP_FIELD_ID_INDEX ( {:?} <= {:?})", @@ -191,7 +191,7 @@ mod local_tests { fn peaking_type() { let buffer_status = status_buffer(); let source = SftpSource::new(&buffer_status); - let read_packet_type = source.peak_packet_type().unwrap(); + let read_packet_type = source.peek_packet_type().unwrap(); let original_packet_type = SftpNum::from(101u8); assert_eq!(original_packet_type, read_packet_type); } diff --git a/testing/ci.sh b/testing/ci.sh index 49b56766..0c17df96 100755 --- a/testing/ci.sh +++ b/testing/ci.sh @@ -3,7 +3,7 @@ set -v set -e -export CARGO_TARGET_DIR=testing/target +export CARGO_TARGET_DIR=target/ci # Set OFFLINE=1 to avoid rustup. cargo might still run offline. @@ -76,10 +76,8 @@ size target/thumbv6m-none-eabi/release/sunset-demo-picow | tee "$OUT/picow-size. 
( cd demo/sftp/std -cargo build --release -cargo test --release -cargo bloat --release -n 100 | tee "$OUT/sftp-std-bloat.txt" -cargo bloat --release --crates | tee "$OUT/sftp-std-bloat-crates.txt" +cargo build +cargo test ) size ./target/release/sunset-demo-sftp-std | tee "$OUT/sftp-std-size.txt" From 0dd3c5160702810f5c76437eea841a98182bbe68 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Tue, 24 Mar 2026 15:44:08 +1100 Subject: [PATCH 11/47] removing size from demo/sftp/std build outputs --- testing/ci.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/testing/ci.sh b/testing/ci.sh index 0c17df96..4b01fe79 100755 --- a/testing/ci.sh +++ b/testing/ci.sh @@ -79,7 +79,6 @@ cd demo/sftp/std cargo build cargo test ) -size ./target/release/sunset-demo-sftp-std | tee "$OUT/sftp-std-size.txt" ( cd fuzz From 410fb76a7b11f82e2085c2ff1e196ce5903130c6 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Tue, 24 Mar 2026 16:00:11 +1100 Subject: [PATCH 12/47] Fixing unnecessary duplicated lifetimes and tidying up - [x] All tests in testing passing - [ ] Deleting forgotten 512B_random file from the testing output directory --- demo/sftp/std/testing/out/512B_random | Bin 512 -> 0 bytes sftp/src/proto.rs | 5 ++--- 2 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 demo/sftp/std/testing/out/512B_random diff --git a/demo/sftp/std/testing/out/512B_random b/demo/sftp/std/testing/out/512B_random deleted file mode 100644 index 6c6c281ce6b40a2064ff021dd28c607e97e0a3af..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 512 zcmV+b0{{K1=A$-%XY#g1if0qpp9VZjuCo`ba{h>q^`+l$rxxu_`GM^@0cjdkS!_N) z$&y1YSospsa4&ONl`hUjB;qBg@>>t!+wu!8JVl%EXa>FSOjE}>*qlU$q7V?8wnh-Y zz#Vbb)f{({xUz<#+H4Jh*GpkX<-n~>7Cs<4x#u)=?5l7%Bek)~cZb_z4>R^1u5cst zWrJd6W$bYV{1u#;1u>}{qNmuR_oxX~7nZ1p`>rAnDLo&0j^r^79Y`ef+*$vPs*Hqa zAwVx3g=<2|76DA9cJ2K^>*bQ>$?k%jA<6AJ&W!yjIVMEyUAVe3B>oKaX60Fw!-?@L z*$0sv`B9oiq(enA@{v+3ZjtLj%_DQUrv+^|B4XEgoxlREZdTGKZrU~JFjndp3>)g2 
zIxUN*odzkuuBcetwNjI!GT>zyz`X2uJu3Jh%j-ES7ew-3X>&Eq8xDh7k1^YDm^;Xn z83@*j2czi_&SAckS8&nff-ppyDYI`|j!aAdUPt#P&B9q4OYHw&)*;#_$x+Yv+Vcsp zIH1TeYFT4g!HPTRSC((VrtUr$Fv@(Ve32nzp|~r*%vsfJd)`VkWd@UJNoh|$D2b-T zS}VBr*zFB)!F9A1!i7?FN=cHB3JUitW*cMuK#6qL`}Iw5w@N%V C-1#5? diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index 6ea55e0e..404b179f 100644 --- a/sftp/src/proto.rs +++ b/sftp/src/proto.rs @@ -733,11 +733,10 @@ macro_rules! sftpmessages { paste!{ - impl<'a: 'de, 'de> SSHDecode<'de> for SftpPacket<'a> - where 'de: 'a // This implies that both lifetimes are equal + impl<'a> SSHDecode<'a> for SftpPacket<'a> { fn dec(s: &mut S) -> WireResult - where S: SSHSource<'de> { + where S: SSHSource<'a> { let packet_type_number = u8::dec(s)?; let packet_type = SftpNum::from(packet_type_number); From 43f7cdf69d42478c2daf56b049ff321190367758 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 26 Mar 2026 09:29:35 +1100 Subject: [PATCH 13/47] Reverting changes to sshwire-derive/src/lib.rs Addressing needed changes in proto.rs. I looked at the code generated by the macro before reverting lib.rs (cargo-expand expand) and applied equivalent code. --- sftp/src/proto.rs | 37 ++++++++++++++++++++++++++++++------- sshwire-derive/src/lib.rs | 7 ++++++- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index 404b179f..e53eb0af 100644 --- a/sftp/src/proto.rs +++ b/sftp/src/proto.rs @@ -1,8 +1,8 @@ use crate::sftpsource::SftpSource; use sunset::sshwire::{ - BinString, SSHDecode, SSHEncode, SSHSink, SSHSource, TextString, WireError, - WireResult, + BinString, SSHDecode, SSHEncode, SSHEncodeEnum, SSHSink, SSHSource, TextString, + WireError, WireResult, }; use sunset_sshwire_derive::{SSHDecode, SSHEncode}; @@ -604,31 +604,54 @@ macro_rules! sftpmessages { ) => { paste! 
{ /// Represent a subset of the SFTP packet types defined by draft-ietf-secsh-filexfer-02 - #[derive(Debug, Copy, Clone, PartialEq, Eq, FromPrimitive, SSHEncode)] + #[derive(Debug, Copy, Clone, PartialEq, Eq, FromPrimitive)] #[repr(u8)] #[allow(non_camel_case_types)] pub enum SftpNum { $( - #[sshwire(variant = $init_ssh_fxp_name)] [<$init_ssh_fxp_name:upper>] = $init_message_num, )* $( - #[sshwire(variant = $request_ssh_fxp_name)] [<$request_ssh_fxp_name:upper>] = $request_message_num, )* $( - #[sshwire(variant = $response_ssh_fxp_name)] [<$response_ssh_fxp_name:upper>] = $response_message_num, )* - #[sshwire(unknown)] #[num_enum(catch_all)] Other(u8), } + impl SSHEncode for SftpNum { + fn enc(&self, _: &mut dyn SSHSink) -> WireResult<()> { + Ok(()) + } + } + + impl SSHEncodeEnum for SftpNum { + fn variant_name(&self) -> WireResult<&'static str> { + let r = match self { + $( + Self::[<$init_ssh_fxp_name:upper>] => $init_ssh_fxp_name, + )* + $( + Self::[<$request_ssh_fxp_name:upper>] => $request_ssh_fxp_name, + )* + $( + Self::[<$response_ssh_fxp_name:upper>] => $response_ssh_fxp_name, + )* + Self::Other(_) => { + return Err(WireError::UnknownVariant); + } + }; + #[allow(unreachable_code)] Ok(r) + } + } + } // paste + impl<'de> SSHDecode<'de> for SftpNum { fn dec(s: &mut S) -> WireResult where diff --git a/sshwire-derive/src/lib.rs b/sshwire-derive/src/lib.rs index 462ca224..fed01188 100644 --- a/sshwire-derive/src/lib.rs +++ b/sshwire-derive/src/lib.rs @@ -283,6 +283,11 @@ fn encode_enum( let atts = take_field_atts(&var.attributes)?; let mut rhs = StreamBuilder::new(); + if let Some(val) = &var.value { + // Avoid users expecting enum values to be encoded. + // Could be implemented if needed. 
+ return Err(Error::Custom { error: "sunset_sshwire_derive::SSHEncode currently does not encode enum discriminants.".into(), span: Some(val.span())}) + } match var.fields { None => { // Unit enum @@ -300,7 +305,7 @@ fn encode_enum( } } - _ => return Err(Error::Custom { error: "SSHEncode currently only implements Unit or single value enum variants.".into(), span: None}) + _ => return Err(Error::Custom { error: "sunset_sshwire_derive::SSHEncode currently only implements Unit or single value enum variants.".into(), span: None}) } match_arm.puncts("=>"); From 2744e48758aae9d0ec35fc0d5bd34f2f4137a24a Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 27 Mar 2026 10:00:55 +1100 Subject: [PATCH 14/47] Removing new(&str) from `OpaqueFileHandle` As pointed out by @mkj, the new(& str) method in `OpaqueFileHandle` is brittle. I added it mindlessly by the DemoFileHandleManager implementation. Now I replaced the `new(&str)`by adding the condition to whoever decides to use `FileHandleManager` trait to implement `InitWithSeed` + `OpaqueFileHandle` for the key. --- demo/sftp/std/src/demofilehandlemanager.rs | 19 +++++++++------- demo/sftp/std/src/demoopaquefilehandle.rs | 22 ++++++++++++------ demo/sftp/std/src/demosftpserver.rs | 13 +++++++---- sftp/src/lib.rs | 1 + sftp/src/opaquefilehandle.rs | 26 ++++++++++++---------- 5 files changed, 50 insertions(+), 31 deletions(-) diff --git a/demo/sftp/std/src/demofilehandlemanager.rs b/demo/sftp/std/src/demofilehandlemanager.rs index 6b1cb278..b1c649ba 100644 --- a/demo/sftp/std/src/demofilehandlemanager.rs +++ b/demo/sftp/std/src/demofilehandlemanager.rs @@ -1,11 +1,13 @@ -use sunset_sftp::handles::{OpaqueFileHandle, OpaqueFileHandleManager, PathFinder}; +use sunset_sftp::handles::{ + InitWithSeed, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, +}; use sunset_sftp::protocol::StatusCode; use std::collections::HashMap; // Not enforced. Only for std. 
For no_std environments other solutions can be used to store Key, Value pub struct DemoFileHandleManager where - K: OpaqueFileHandle, + K: OpaqueFileHandle + InitWithSeed, V: PathFinder, { handle_map: HashMap, @@ -13,7 +15,7 @@ where impl DemoFileHandleManager where - K: OpaqueFileHandle, + K: OpaqueFileHandle + InitWithSeed, V: PathFinder, { pub fn new() -> Self { @@ -23,12 +25,12 @@ where impl OpaqueFileHandleManager for DemoFileHandleManager where - K: OpaqueFileHandle, + K: OpaqueFileHandle + InitWithSeed, V: PathFinder, { - type Error = StatusCode; + type Err = StatusCode; - fn insert(&mut self, private_handle: V, salt: &str) -> Result { + fn insert(&mut self, private_handle: V, salt: &str) -> Result { if self .handle_map .iter() @@ -37,9 +39,10 @@ where return Err(StatusCode::SSH_FX_PERMISSION_DENIED); } - let handle = K::new( + let handle = K::init_with_seed( format!("{:}-{:}", &private_handle.get_path_ref(), salt).as_str(), - ); + ) + .map_err(|_| StatusCode::SSH_FX_FAILURE)?; self.handle_map.insert(handle.clone(), private_handle); Ok(handle) diff --git a/demo/sftp/std/src/demoopaquefilehandle.rs b/demo/sftp/std/src/demoopaquefilehandle.rs index 67c2fc6b..e5bad21b 100644 --- a/demo/sftp/std/src/demoopaquefilehandle.rs +++ b/demo/sftp/std/src/demoopaquefilehandle.rs @@ -1,4 +1,4 @@ -use sunset_sftp::handles::OpaqueFileHandle; +use sunset_sftp::handles::{InitWithSeed, OpaqueFileHandle}; use sunset_sftp::protocol::FileHandle; use sunset::sshwire::{BinString, WireError}; @@ -14,12 +14,6 @@ pub(crate) struct DemoOpaqueFileHandle { } impl OpaqueFileHandle for DemoOpaqueFileHandle { - fn new(seed: &str) -> Self { - let mut hasher = FnvHasher::default(); - hasher.write(seed.as_bytes()); - DemoOpaqueFileHandle { tiny_hash: (hasher.finish() as u32).to_be_bytes() } - } - fn try_from(file_handle: &FileHandle<'_>) -> sunset::sshwire::WireResult { if !file_handle.0 .0.len().eq(&core::mem::size_of::()) { @@ -35,3 +29,17 @@ impl OpaqueFileHandle for DemoOpaqueFileHandle { 
FileHandle(BinString(&self.tiny_hash)) } } + +/// Implemented to allow the use of `DemoOpaqueFileHandle` as a key in the `OpaqueHandleManager` +impl InitWithSeed for DemoOpaqueFileHandle { + type Err = WireError; + + fn init_with_seed(seed: &str) -> Result { + let mut hasher = FnvHasher::default(); + hasher.write(seed.as_bytes()); + + Ok(DemoOpaqueFileHandle { + tiny_hash: (hasher.finish() as u32).to_be_bytes(), + }) + } +} diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index 7bc7dbce..60cad253 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -1,7 +1,9 @@ use crate::demofilehandlemanager::DemoFileHandleManager; use sunset_sftp::error::SftpResult; -use sunset_sftp::handles::{OpaqueFileHandle, OpaqueFileHandleManager, PathFinder}; +use sunset_sftp::handles::{ + InitWithSeed, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, +}; use sunset_sftp::protocol::{Attrs, Filename, NameEntry, PFlags, StatusCode}; use sunset_sftp::server::helpers::DirEntriesCollection; use sunset_sftp::server::{ @@ -10,6 +12,7 @@ use sunset_sftp::server::{ #[allow(unused_imports)] use log::{debug, error, info, log, trace, warn}; + use std::fs; use std::os::unix::fs::PermissionsExt; use std::{fs::File, os::unix::fs::FileExt, path::Path}; @@ -92,12 +95,12 @@ impl PathFinder for PrivateDirHandle { } /// A basic demo server. Used as a demo and to test SFTP functionality -pub struct DemoSftpServer { +pub struct DemoSftpServer { base_path: String, handles_manager: DemoFileHandleManager, } -impl DemoSftpServer { +impl DemoSftpServer { pub fn new(base_path: String) -> Self { if !Path::new(&base_path).exists() { debug!("Base path {:?} does not exist. 
Creating it", base_path); @@ -112,7 +115,9 @@ impl DemoSftpServer { } } -impl SftpServer<'_, OFH> for DemoSftpServer { +impl SftpServer<'_, OFH> + for DemoSftpServer +{ async fn open(&mut self, filename: &str, mode: &PFlags) -> SftpOpResult { debug!("Open file: filename = {:?}, mode = {:?}", filename, mode); diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs index f6cd7a3d..e7158172 100644 --- a/sftp/src/lib.rs +++ b/sftp/src/lib.rs @@ -100,6 +100,7 @@ pub mod server { /// Handles and helpers used by the [`sftpserver::SftpServer`] trait implementer pub mod handles { + pub use crate::opaquefilehandle::InitWithSeed; pub use crate::opaquefilehandle::OpaqueFileHandle; pub use crate::opaquefilehandle::OpaqueFileHandleManager; pub use crate::opaquefilehandle::PathFinder; diff --git a/sftp/src/opaquefilehandle.rs b/sftp/src/opaquefilehandle.rs index 19450ef1..cde4aa7d 100644 --- a/sftp/src/opaquefilehandle.rs +++ b/sftp/src/opaquefilehandle.rs @@ -7,10 +7,6 @@ use sunset::sshwire::WireResult; pub trait OpaqueFileHandle: Sized + Clone + core::hash::Hash + PartialEq + Eq + core::fmt::Debug { - /// Creates a new instance using a given string slice as `seed` which - /// content should not clearly related to the seed - fn new(seed: &str) -> Self; - /// Creates a new `OpaqueFileHandleTrait` copying the content of the `FileHandle` fn try_from(file_handle: &FileHandle<'_>) -> WireResult; @@ -29,7 +25,17 @@ pub trait PathFinder { fn get_path_ref(&self) -> &str; } -/// This trait is used to manage the OpaqueFile +/// Used in the `OpaqueFileHandleManager` to generate a Key (OpaqueFileHandle) from a seed +pub trait InitWithSeed: Sized { + /// The error type used for the implementation of `init_with_seed` useful to harmonize the error handling of the `OpaqueFileHandleManager` implementation + type Err; + + /// Creates a new instance using a given string slice as `seed` which + /// content should not clearly related to the seed + fn init_with_seed(s: &str) -> Result; +} + +/// This trait 
is used to manage the OpaqueFileHandles (K) together with the private handle (V) that contains the details of the file internally stored in the system /// /// The SFTP module user is not required to use it but instead is a suggestion for an exchangeable /// trait that facilitates structuring the store and retrieve of 'OpaqueFileHandleTrait' (K), @@ -39,22 +45,18 @@ pub trait PathFinder { /// to look for the file path. pub trait OpaqueFileHandleManager where - K: OpaqueFileHandle, + K: OpaqueFileHandle + InitWithSeed, V: PathFinder, { /// The error used for all the trait members returning an error - type Error; - - // Excluded since it is too restrictive - // /// Performs any HandleManager Initialization - // fn new() -> Self; + type Err; /// Given the private_handle, stores it and return an opaque file handle /// /// Returns an error if the private_handle has a matching path as obtained from `PathFinder` /// /// Salt has been added to allow the user to add a factor that will mask how the opaque handle is generated - fn insert(&mut self, private_handle: V, salt: &str) -> Result; + fn insert(&mut self, private_handle: V, salt: &str) -> Result; /// fn remove(&mut self, opaque_handle: &K) -> Option; From 68036862ebffe4dff5ce4cf38d16a08d2336202b Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Sat, 4 Apr 2026 12:34:16 +1100 Subject: [PATCH 15/47] RUSTSEC-2024-0436: Fixing Paste to version 1.0.25 --- sftp/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sftp/Cargo.toml b/sftp/Cargo.toml index 4822179b..a829b6e0 100644 --- a/sftp/Cargo.toml +++ b/sftp/Cargo.toml @@ -19,7 +19,7 @@ sunset-sshwire-derive = { path = "../sshwire-derive" } embedded-io-async = "0.6" num_enum = { version = "0.7.4", default-features = false } -paste = "1.0" +paste = "1.25.0" # Paste is no longer maintained (RUSTSEC-2024-0436). Fixing the used version to latest one. 
Not making the move to Pastey at the moment log = "0.4" embassy-sync = "0.7.2" embassy-futures = "0.1.2" From 3c90e35ecc1873ddfb675ca6e4a6af24b968c858 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Sat, 4 Apr 2026 12:38:27 +1100 Subject: [PATCH 16/47] Fixing typo in previous commit --- sftp/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sftp/Cargo.toml b/sftp/Cargo.toml index a829b6e0..dc9c66d6 100644 --- a/sftp/Cargo.toml +++ b/sftp/Cargo.toml @@ -19,7 +19,7 @@ sunset-sshwire-derive = { path = "../sshwire-derive" } embedded-io-async = "0.6" num_enum = { version = "0.7.4", default-features = false } -paste = "1.25.0" # Paste is no longer maintained (RUSTSEC-2024-0436). Fixing the used version to latest one. Not making the move to Pastey at the moment +paste = "1.0.25" # Paste is no longer maintained (RUSTSEC-2024-0436). Fixing the used version to latest one. Not making the move to Pastey at the moment log = "0.4" embassy-sync = "0.7.2" embassy-futures = "0.1.2" From 2cfa4d225dca91edf0d2cdc6b43dbf2de7e8faf3 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Sat, 4 Apr 2026 12:46:14 +1100 Subject: [PATCH 17/47] Extra typo. 
Running CI now to make sure that all is good Sorry about the noise --- Cargo.lock | 4 ++-- sftp/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 534121f7..6629141a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1925,9 +1925,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pem-rfc7468" diff --git a/sftp/Cargo.toml b/sftp/Cargo.toml index dc9c66d6..fe4e834e 100644 --- a/sftp/Cargo.toml +++ b/sftp/Cargo.toml @@ -19,7 +19,7 @@ sunset-sshwire-derive = { path = "../sshwire-derive" } embedded-io-async = "0.6" num_enum = { version = "0.7.4", default-features = false } -paste = "1.0.25" # Paste is no longer maintained (RUSTSEC-2024-0436). Fixing the used version to latest one. Not making the move to Pastey at the moment +paste = "1.0.15" # Paste is no longer maintained (RUSTSEC-2024-0436). Fixing the used version to latest one. 
Not making the move to Pastey at the moment log = "0.4" embassy-sync = "0.7.2" embassy-futures = "0.1.2" From 8206f86a0a56c6d370f98570b4cae66ab427248a Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Mon, 13 Apr 2026 00:19:25 +0800 Subject: [PATCH 18/47] sftp: simplify some lifetimes --- demo/sftp/std/src/demosftpserver.rs | 2 +- sftp/src/proto.rs | 18 +++--------------- sftp/src/sftphandler/sftphandler.rs | 6 +++--- sftp/src/sftpserver.rs | 4 ++-- 4 files changed, 9 insertions(+), 21 deletions(-) diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index 60cad253..24984dad 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -115,7 +115,7 @@ impl DemoSftpServer { } } -impl SftpServer<'_, OFH> +impl SftpServer for DemoSftpServer { async fn open(&mut self, filename: &str, mode: &PFlags) -> SftpOpResult { diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index e53eb0af..885f5668 100644 --- a/sftp/src/proto.rs +++ b/sftp/src/proto.rs @@ -850,11 +850,7 @@ macro_rules! sftpmessages { /// Decode a response. /// /// Used by a SFTP client. Does not include the length field. - pub fn decode_response<'de>(s: &mut SftpSource<'de>) -> WireResult - where - // S: SftpSource<'de>, - 'a: 'de, // 'a must outlive 'de and 'de must outlive 'a so they have matching lifetimes - 'de: 'a + pub fn decode_response(s: &mut SftpSource<'a>) -> WireResult { let packet_length = u32::dec(s)?; trace!("Packet field len = {:?}, buffer len = {:?}", packet_length, s.remaining()); @@ -880,11 +876,7 @@ macro_rules! sftpmessages { /// Used by a SFTP server. Does not include the length field. 
/// /// It will fail if the received packet is a response, no valid or incomplete packet - pub fn decode_request<'de>(s: &mut SftpSource<'de>) -> WireResult - where - // S: SftpSource<'de>, - 'a: 'de, // 'a must outlive 'de and 'de must outlive 'a so they have matching lifetimes - 'de: 'a + pub fn decode_request(s: &mut SftpSource<'a>) -> WireResult { let packet_length = u32::dec(s)?; trace!("Packet field len = {:?}, buffer len = {:?}", packet_length, s.remaining()); @@ -915,11 +907,7 @@ macro_rules! sftpmessages { /// Used by a SFTP server. Does not include the length field. /// /// It will fail if the received packet is a response, no valid or incomplete packet - pub fn decode<'de>(s: &mut SftpSource<'de>) -> WireResult - where - // S: SftpSource<'de>, - 'a: 'de, // 'a must outlive 'de and 'de must outlive 'a so they have matching lifetimes - 'de: 'a + pub fn decode(s: &mut SftpSource<'a>) -> WireResult { let packet_length = u32::dec(s)?; trace!("Packet field len = {:?}, buffer remaining = {:?}", packet_length, s.remaining()); diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs index 339e61a2..2e0ec68a 100644 --- a/sftp/src/sftphandler/sftphandler.rs +++ b/sftp/src/sftphandler/sftphandler.rs @@ -70,7 +70,7 @@ enum HandlerState { pub struct SftpHandler<'a, T, S, const BUFFER_OUT_SIZE: usize> where T: OpaqueFileHandle, - S: SftpServer<'a, T>, + S: SftpServer, { /// Holds the internal state if the SFTP handle state: HandlerState, @@ -92,7 +92,7 @@ where impl<'a, T, S, const BUFFER_OUT_SIZE: usize> SftpHandler<'a, T, S, BUFFER_OUT_SIZE> where T: OpaqueFileHandle, - S: SftpServer<'a, T>, + S: SftpServer, { /// Creates a new instance of the structure. 
/// @@ -118,7 +118,7 @@ where /// an EOF is received pub async fn process_loop( &mut self, - stdio: ChanInOut<'a>, + stdio: ChanInOut<'_>, buffer_in: &mut [u8], ) -> SftpResult<()> { let (mut chan_in, chan_out) = stdio.split(); diff --git a/sftp/src/sftpserver.rs b/sftp/src/sftpserver.rs index 5942fa80..d00596a0 100644 --- a/sftp/src/sftpserver.rs +++ b/sftp/src/sftpserver.rs @@ -45,13 +45,13 @@ pub enum ReadStatus { /// Some less core operations have a Provided implementation returning /// returns `SSH_FX_OP_UNSUPPORTED`. Common operations must be implemented, /// but may return `Err(StatusCode::SSH_FX_OP_UNSUPPORTED)`. -pub trait SftpServer<'a, T> +pub trait SftpServer where T: OpaqueFileHandle, { /// Opens a file for reading/writing fn open( - &'_ mut self, + &mut self, path: &str, mode: &PFlags, ) -> impl core::future::Future> { From bca856976d4f26ae4ff64761472cd148b9254930 Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Mon, 13 Apr 2026 00:37:35 +0800 Subject: [PATCH 19/47] Fix formatting for "sftp: simplify lifetimes" --- demo/sftp/std/src/demosftpserver.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index 24984dad..d5fa1cf4 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -115,9 +115,7 @@ impl DemoSftpServer { } } -impl SftpServer - for DemoSftpServer -{ +impl SftpServer for DemoSftpServer { async fn open(&mut self, filename: &str, mode: &PFlags) -> SftpOpResult { debug!("Open file: filename = {:?}, mode = {:?}", filename, mode); From b303a67e706a1403d38d9c2ae383d80cfc06eadf Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Tue, 17 Mar 2026 18:14:59 +0800 Subject: [PATCH 20/47] Fix read refcount for ChanIn and ChanInOut clone() The read refcount wasn't incremented on ChanIn or ChanInOut clone. This could result in input being discarded if the read refcount hit zero. 
It isn't clear whether this could result in missed EOF (which has been reported). --- async/src/async_channel.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/async/src/async_channel.rs b/async/src/async_channel.rs index 6306d599..4db80865 100644 --- a/async/src/async_channel.rs +++ b/async/src/async_channel.rs @@ -113,7 +113,7 @@ impl Write for ChanIO<'_> { /// Otherwise ordering will be arbitrary, and if competing readers or writers /// are in different tasks, there will be churn as they continually wake /// each other up. Simultaneous single-reader and single-writer is fine. -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ChanIn<'g>(ChanIO<'g>); impl<'g> ChanIn<'g> { @@ -134,6 +134,12 @@ impl Drop for ChanIn<'_> { } } +impl Clone for ChanIn<'_> { + fn clone(&self) -> Self { + Self::new(self.0.clone()) + } +} + /// An output-only SSH channel. /// /// This is used as stderr for a server, or can also be obtained using @@ -187,7 +193,7 @@ impl<'g> ChanOut<'g> { /// Otherwise ordering will be arbitrary, and if competing readers or writers /// are in different tasks, there will be churn as they continually wake /// each other up. Simultaneous single-reader and single-writer is fine. 
-#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ChanInOut<'g>(ChanIO<'g>); impl<'g> ChanInOut<'g> { @@ -225,6 +231,12 @@ impl Drop for ChanInOut<'_> { } } +impl Clone for ChanInOut<'_> { + fn clone(&self) -> Self { + Self::new(self.0.clone()) + } +} + impl ErrorType for ChanInOut<'_> { type Error = sunset::Error; } From 8d9370166b5ecb9c414f828f5bd2846896b4fec8 Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Wed, 1 Apr 2026 21:51:25 +0800 Subject: [PATCH 21/47] Rust 1.88 min version This is required by time 0.3.47 --- .github/workflows/ci.yml | 4 ++-- Cargo.toml | 2 +- README.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b5be8aeb..30da13c3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,8 +13,8 @@ jobs: all: strategy: matrix: - # 1.87 is an arbitrary minimum, tested to notice when it bumps - rust_version: [stable, nightly, 1.87] + # 1.88 is an arbitrary minimum, tested to notice when it bumps + rust_version: [stable, nightly, 1.88] runs-on: ubuntu-latest env: RUSTUP_TOOLCHAIN: ${{ matrix.rust_version }} diff --git a/Cargo.toml b/Cargo.toml index 391503f3..fefaa7f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/mkj/sunset" categories = ["network-programming", "embedded", "no-std"] license = "0BSD" keywords = ["ssh"] -rust-version = "1.87" +rust-version = "1.88" [workspace] members = [ diff --git a/README.md b/README.md index 9549ac76..2b8fd274 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ Desirable: ## Rust versions -At the time of writing Sunset will build with Rust 1.87. +At the time of writing Sunset will build with Rust 1.88. The requirement may increase whenever useful, targetting stable. 
## Checks From 0f81359971b8396bd71b7602b0b430a36c949f4c Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Wed, 1 Apr 2026 21:44:41 +0800 Subject: [PATCH 22/47] Update some outdated dependencies These were reported by cargo audit keccak bytes time instant --- Cargo.lock | 94 ++++++++++++++++++++++++++++++------------------------ 1 file changed, 52 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6629141a..824395de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,7 +146,7 @@ dependencies = [ "argh_shared", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -360,9 +360,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" [[package]] name = "caprand" @@ -564,7 +564,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -618,7 +618,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -629,7 +629,7 @@ checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" dependencies = [ "darling_core", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -650,9 +650,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", ] @@ -665,7 +665,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -790,7 +790,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -1280,7 +1280,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -1481,9 +1481,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", ] @@ -1532,9 +1532,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] @@ -1746,9 +1746,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" [[package]] name = "num-integer" @@ -1819,7 +1819,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -2020,7 +2020,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -2132,14 +2132,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] name = "proc-macro2" -version = "1.0.76" +version = "1.0.106" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fc56cda0b5c3325f5fbbd7ff9fda9e02bb00bb3dac51252d2f1bfa1cb8cc8c" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -2424,22 +2424,32 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.195" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -2570,7 +2580,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -2941,9 +2951,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -2971,9 +2981,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -2981,22 +2991,22 @@ dependencies = [ "num-conv", "num_threads", "powerfmt", - "serde", + "serde_core", "time-core", "time-macros", ] [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", @@ -3027,7 +3037,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -3205,7 +3215,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", "wasm-bindgen-shared", ] @@ -3227,7 +3237,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -3564,7 +3574,7 @@ checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] [[package]] @@ -3584,5 +3594,5 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.117", ] From c6f6736d7812b94a2d71caac20711f32134d1f82 Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Mon, 13 Apr 2026 00:31:45 +0800 Subject: [PATCH 
23/47] Fix some clippy warnings --- async/src/async_sunset.rs | 4 ++-- src/kex.rs | 2 +- stdasync/src/knownhosts.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/async/src/async_sunset.rs b/async/src/async_sunset.rs index 9e7bd6a4..3758ba76 100644 --- a/async/src/async_sunset.rs +++ b/async/src/async_sunset.rs @@ -661,8 +661,8 @@ where return sunset::error::ChannelEOF.fail(); } let b = &b[..n]; - if let Err(_) = w.write_all(b).await { - info!("write error"); + if let Err(e) = w.write_all(b).await { + info!("write error {e:?}"); } } #[allow(unreachable_code)] diff --git a/src/kex.rs b/src/kex.rs index 65b9149b..dde7e287 100644 --- a/src/kex.rs +++ b/src/kex.rs @@ -996,7 +996,7 @@ impl KexMlkemX25519 { let enc = ek .encapsulate(&mut rand_core::OsRng) .map_err(|_| error::BadKex.build())?; - let (ct, pq_secret) = enc.into(); + let (ct, pq_secret) = enc; // TODO: check if this is another stack copy. *pq = ct.into(); ec.copy_from_slice(self.ecdh.pubkey()); diff --git a/stdasync/src/knownhosts.rs b/stdasync/src/knownhosts.rs index 01d30490..1a9eae59 100644 --- a/stdasync/src/knownhosts.rs +++ b/stdasync/src/knownhosts.rs @@ -16,7 +16,7 @@ pub enum KnownHostsError { Mismatch { path: PathBuf, line: usize, - existing: OpenSSHKey, + existing: Box, }, /// User didn't accept new key @@ -127,7 +127,7 @@ pub fn check_known_hosts_file( return Err(KnownHostsError::Mismatch { path: p.to_path_buf(), line, - existing: known_key, + existing: Box::new(known_key), }); } } From c15e10bfec2ebce37086e62896cdee2f642d27cf Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Wed, 15 Apr 2026 21:56:30 +0800 Subject: [PATCH 24/47] pretty-hex isn't needed, plain hex format instead --- Cargo.lock | 10 ---------- Cargo.toml | 4 ---- demo/common/Cargo.toml | 1 - demo/picow/Cargo.toml | 2 -- demo/picow/src/flashconfig.rs | 12 ------------ demo/picow/src/picowmenu.rs | 2 -- src/conn.rs | 4 +--- src/encrypt.rs | 1 - src/packets.rs | 6 +----- src/sshwire.rs | 5 +---- src/test.rs | 
4 ---- stdasync/Cargo.toml | 4 ---- stdasync/src/agent.rs | 3 +-- 13 files changed, 4 insertions(+), 54 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 824395de..d632c183 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2098,12 +2098,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" -[[package]] -name = "pretty-hex" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbc83ee4a840062f368f9096d80077a9841ec117e17e7f700df81958f1451254" - [[package]] name = "primeorder" version = "0.13.6" @@ -2732,7 +2726,6 @@ dependencies = [ "log", "ml-kem", "poly1305", - "pretty-hex", "rand_core", "rsa", "sha2", @@ -2774,7 +2767,6 @@ dependencies = [ "heapless", "hmac", "log", - "pretty-hex", "sha2", "subtle", "sunset", @@ -2811,7 +2803,6 @@ dependencies = [ "heapless", "log", "portable-atomic", - "pretty-hex", "rand", "rtt-target", "sha2", @@ -2925,7 +2916,6 @@ dependencies = [ "libc", "log", "nix", - "pretty-hex", "rpassword", "simplelog", "ssh-key", diff --git a/Cargo.toml b/Cargo.toml index fefaa7f8..549efa8c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -82,9 +82,6 @@ ssh-key = { version = "0.6", default-features = false, optional = true, features embedded-io = { version = "0.6", optional = true } -# for debug printing -pretty-hex = { version = "0.4", default-features = false } - [features] default = [] std = ["snafu/std", "ssh-key/alloc", "larger", "mlkem"] @@ -106,7 +103,6 @@ larger = [] # examples want std::error snafu = { version = "0.8", default-features = true } anyhow = { version = "1.0" } -pretty-hex = "0.4" simplelog = { version = "0.12", features = ["test"] } # [patch.crates-io] diff --git a/demo/common/Cargo.toml b/demo/common/Cargo.toml index 1ab459d0..b832d7db 100644 --- a/demo/common/Cargo.toml +++ b/demo/common/Cargo.toml @@ -29,7 +29,6 @@ ed25519-dalek = { version = "2.0.0-rc.2", default-features = 
false } subtle = { version = "2.4", default-features = false } log = "0.4" -pretty-hex = { version = "0.4", default-features = false } [dev-dependencies] anyhow = { version = "1.0" } diff --git a/demo/picow/Cargo.toml b/demo/picow/Cargo.toml index e4b51362..709a094d 100644 --- a/demo/picow/Cargo.toml +++ b/demo/picow/Cargo.toml @@ -32,8 +32,6 @@ portable-atomic = { workspace = true, features = ["critical-section"] } log = { version = "0.4" } rtt-target = { version = "0.6", features = ["log"] } -pretty-hex = { version = "0.4", default-features = false } - snafu = { version = "0.8", default-features = false, features = ["rust_1_65"] } cortex-m = { version = "0.7.6", features = ["critical-section-single-core"]} diff --git a/demo/picow/src/flashconfig.rs b/demo/picow/src/flashconfig.rs index ebf93b31..b1a2a914 100644 --- a/demo/picow/src/flashconfig.rs +++ b/demo/picow/src/flashconfig.rs @@ -87,12 +87,6 @@ pub async fn load(fl: &mut Fl<'_>) -> Result { Error::msg("flash error") })?; - // use pretty_hex::PrettyHex; - // use core::fmt::Write; - // let mut b = demo_common::BufOutput::default(); - // writeln!(b, "load {:?}", buf.hex_dump()); - // info!("{}", &b.s); - let s: FlashConfig = sshwire::read_ssh(&fl.buf, None)?; if s.version != SSHConfig::CURRENT_VERSION { @@ -121,12 +115,6 @@ pub async fn save(fl: &mut Fl<'_>, config: &SSHConfig) -> Result<()> { let l = sshwire::write_ssh(&mut fl.buf, &sc)?; let buf = &fl.buf[..l]; - // use pretty_hex::PrettyHex; - // use core::fmt::Write; - // let mut b = demo_common::BufOutput::default(); - // writeln!(b, "save {:?}", buf.hex_dump()); - // info!("{}", &b.s); - trace!("flash erase"); fl.flash .erase(CONFIG_OFFSET, CONFIG_OFFSET + ERASE_SIZE as u32) diff --git a/demo/picow/src/picowmenu.rs b/demo/picow/src/picowmenu.rs index 7d2b5ad0..dca6dba8 100644 --- a/demo/picow/src/picowmenu.rs +++ b/demo/picow/src/picowmenu.rs @@ -18,8 +18,6 @@ use embassy_time::Duration; use heapless::{String, Vec}; -use pretty_hex::PrettyHex; - use 
crate::demo_common; use crate::flashconfig; use crate::PicoDemo; diff --git a/src/conn.rs b/src/conn.rs index f7c6108a..e7c99cae 100644 --- a/src/conn.rs +++ b/src/conn.rs @@ -12,8 +12,6 @@ use { log::{debug, error, info, log, trace, warn}, }; -use pretty_hex::PrettyHex; - use crate::*; use channel::{Channels, CliSessionExit}; use client::Client; @@ -350,7 +348,7 @@ impl Conn { } Err(e) => { debug!("Error decoding packet: {e}"); - trace!("Input:\n{:#?}", payload.hex_dump()); + trace!("Input:\n{:02x?}", payload); Err(e) } } diff --git a/src/encrypt.rs b/src/encrypt.rs index bc520c87..fa6fae6d 100644 --- a/src/encrypt.rs +++ b/src/encrypt.rs @@ -712,7 +712,6 @@ mod tests { use crate::sshnames::SSH_NAME_CURVE25519; use crate::sunsetlog::*; #[allow(unused_imports)] - use pretty_hex::PrettyHex; use sha2::Sha256; // setting `corrupt` tests that incorrect mac is detected diff --git a/src/packets.rs b/src/packets.rs index a96c9a93..c771d2c8 100644 --- a/src/packets.rs +++ b/src/packets.rs @@ -15,7 +15,6 @@ use core::fmt::{Debug, Display}; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; -use pretty_hex::PrettyHex; use sunset_sshwire_derive::*; @@ -853,7 +852,7 @@ impl Display for Unknown<'_> { if let Ok(s) = sshwire::try_as_ascii_str(self.0) { f.write_str(s) } else { - write!(f, "non-ascii {:?}", self.0.hex_dump()) + write!(f, "non-ascii {:02x?}", self.0) } } } @@ -1093,7 +1092,6 @@ mod tests { use crate::sshwire::tests::test_roundtrip; use crate::sshwire::{packet_from_bytes, write_ssh}; use crate::sunsetlog::init_test_log; - use pretty_hex::PrettyHex; #[test] /// check round trip of packet enums is right @@ -1177,7 +1175,6 @@ mod tests { buf1.truncate(l); // change a byte buf1[8] = 'X' as u8; - trace!("broken: {:?}", buf1.hex_dump()); let ctx = ParseContext::default(); let p2 = packet_from_bytes(&buf1, &ctx).unwrap(); trace!("broken: {p2:#?}"); @@ -1212,7 +1209,6 @@ mod tests { buf1.truncate(l); // change a byte in the "ssh-ed25519" variant string buf1[60] = 'F' as 
u8; - trace!("broken: {:?}", buf1.hex_dump()); let ctx = ParseContext::default(); let p2 = packet_from_bytes(&buf1, &ctx).unwrap(); trace!("broken: {p2:#?}"); diff --git a/src/sshwire.rs b/src/sshwire.rs index f79a9dc1..c0d2f71d 100644 --- a/src/sshwire.rs +++ b/src/sshwire.rs @@ -14,7 +14,6 @@ use { use core::convert::AsRef; use core::fmt::{Debug, Display}; use core::str::FromStr; -use pretty_hex::PrettyHex; use ascii::{AsAsciiStr, AsciiChar, AsciiStr}; @@ -356,7 +355,7 @@ impl Debug for TextString<'_> { if let Ok(s) = s { write!(f, "TextString(\"{}\")", s.escape_default()) } else { - write!(f, "TextString(not utf8!, {:#?})", self.0.hex_dump()) + write!(f, "TextString(not utf8!, {:02x?})", self.0) } } } @@ -753,7 +752,6 @@ pub(crate) mod tests { use crate::*; use error::Error; use packets::*; - use pretty_hex::PrettyHex; use sshwire::*; use sunsetlog::init_test_log; @@ -802,7 +800,6 @@ pub(crate) mod tests { let mut buf = vec![99; 200]; let l = write_ssh(&mut buf, p).unwrap(); buf.truncate(l); - trace!("wrote packet {:?}", buf.hex_dump()); let p2 = packet_from_bytes(&buf, &ctx).unwrap(); trace!("returned packet {:#?}", p2); diff --git a/src/test.rs b/src/test.rs index be359595..7d5dacea 100644 --- a/src/test.rs +++ b/src/test.rs @@ -22,10 +22,6 @@ mod tests { let mut buf2 = vec![99; 500]; let _w2 = sshwire::write_ssh(&mut buf2, &p2)?; - // println!("{p:?}"); - // println!("{p2:?}"); - // println!("{:?}", buf1.hex_dump()); - // println!("{:?}", buf2.hex_dump()); assert_eq!(buf1, buf2); Ok(()) diff --git a/stdasync/Cargo.toml b/stdasync/Cargo.toml index 8b23440c..5908a08f 100644 --- a/stdasync/Cargo.toml +++ b/stdasync/Cargo.toml @@ -31,15 +31,11 @@ nix = "0.26" heapless = "0.8" -# TODO -pretty-hex = "0.4" - [features] rsa = ["sunset/rsa"] [dev-dependencies] anyhow = { version = "1.0" } -pretty-hex = "0.4" whoami = "1.3" zeroize = "1.5" diff --git a/stdasync/src/agent.rs b/stdasync/src/agent.rs index 62f9b44a..2c755389 100644 --- a/stdasync/src/agent.rs +++ 
b/stdasync/src/agent.rs @@ -6,7 +6,6 @@ use { use std::path::Path; -use pretty_hex::PrettyHex; use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::UnixStream; @@ -127,7 +126,7 @@ impl AgentClient { let mut b = vec![]; sshwire::ssh_push_vec(&mut b, &Blob(r))?; - trace!("agent request {:?}", b.hex_dump()); + trace!("agent request {b:?}"); self.conn.write_all(&b).await?; self.response().await From 07154bb537946b7a65b50813de56629b24545952 Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Wed, 15 Apr 2026 21:57:52 +0800 Subject: [PATCH 25/47] Fix url typo in readme --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 2b8fd274..fc9162f6 100644 --- a/README.md +++ b/README.md @@ -11,9 +11,9 @@ suggest something! - [`sunset-async`](async) - async SSH client and server library, also `no_std` no-alloc. This is async-executor agnostic (using Embassy for mutexes, but works on std too). -- [`demos`](demos) has demos with Embassy executor for wifi on a Raspberry Pi - [Pico W](demos/picow) or a - [Linux tap device on `std`](demos/std) running locally. +- [`demo`](demo) has demos with Embassy executor for wifi on a Raspberry Pi + [Pico W](demo/picow) or a + [Linux tap device on `std`](demo/std) running locally. 
At present the Pico W build is around 150kB binary size (plus ~200KB [cyw43](https://github.com/embassy-rs/embassy/tree/main/cyw43) wifi firmware), From 23b0a5012ee96fd837757c0d77f4fb50d0b0d2ae Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Wed, 15 Apr 2026 21:58:51 +0800 Subject: [PATCH 26/47] Remove rust-toolchain files Aren't needed now embassy doesn't need rust nightly --- async/rust-toolchain.toml | 3 --- demo/picow/rust-toolchain.toml | 3 --- demo/std/rust-toolchain.toml | 3 --- rust-toolchain.toml | 3 --- stdasync/rust-toolchain.toml | 3 --- testing/ci.sh | 7 ------- 6 files changed, 22 deletions(-) delete mode 100644 async/rust-toolchain.toml delete mode 100644 demo/picow/rust-toolchain.toml delete mode 100644 demo/std/rust-toolchain.toml delete mode 100644 rust-toolchain.toml delete mode 100644 stdasync/rust-toolchain.toml diff --git a/async/rust-toolchain.toml b/async/rust-toolchain.toml deleted file mode 100644 index 9993e936..00000000 --- a/async/rust-toolchain.toml +++ /dev/null @@ -1,3 +0,0 @@ -[toolchain] -channel = "stable" -components = [ "rustfmt" ] diff --git a/demo/picow/rust-toolchain.toml b/demo/picow/rust-toolchain.toml deleted file mode 100644 index 9993e936..00000000 --- a/demo/picow/rust-toolchain.toml +++ /dev/null @@ -1,3 +0,0 @@ -[toolchain] -channel = "stable" -components = [ "rustfmt" ] diff --git a/demo/std/rust-toolchain.toml b/demo/std/rust-toolchain.toml deleted file mode 100644 index 9993e936..00000000 --- a/demo/std/rust-toolchain.toml +++ /dev/null @@ -1,3 +0,0 @@ -[toolchain] -channel = "stable" -components = [ "rustfmt" ] diff --git a/rust-toolchain.toml b/rust-toolchain.toml deleted file mode 100644 index 9993e936..00000000 --- a/rust-toolchain.toml +++ /dev/null @@ -1,3 +0,0 @@ -[toolchain] -channel = "stable" -components = [ "rustfmt" ] diff --git a/stdasync/rust-toolchain.toml b/stdasync/rust-toolchain.toml deleted file mode 100644 index 9993e936..00000000 --- a/stdasync/rust-toolchain.toml +++ /dev/null @@ -1,3 +0,0 
@@ -[toolchain] -channel = "stable" -components = [ "rustfmt" ] diff --git a/testing/ci.sh b/testing/ci.sh index 4b01fe79..4af02757 100755 --- a/testing/ci.sh +++ b/testing/ci.sh @@ -86,11 +86,4 @@ cargo check --features nofuzz --profile fuzz ) # other checks - -if [ $(find async -name rust-toolchain.toml -print0 | xargs -0 grep -h ^channel | uniq | wc -l) -ne 1 ]; then - echo "rust-toolchain.toml has varying toolchains" - find async -name rust-toolchain.toml -print0 | xargs -0 grep . - exit 1 -fi - echo success From 637db9b0c9c12dc39a13981cd214e0e646f6a5aa Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Wed, 15 Apr 2026 23:25:58 +0800 Subject: [PATCH 27/47] Delete update-toolchain.sh --- testing/update-toolchain.sh | 6 ------ 1 file changed, 6 deletions(-) delete mode 100755 testing/update-toolchain.sh diff --git a/testing/update-toolchain.sh b/testing/update-toolchain.sh deleted file mode 100755 index df163223..00000000 --- a/testing/update-toolchain.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -cp ./rust-toolchain.toml ./embassy/demos/std/rust-toolchain.toml -cp ./rust-toolchain.toml ./embassy/demos/picow/rust-toolchain.toml -cp ./rust-toolchain.toml ./embassy/rust-toolchain.toml -cp ./rust-toolchain.toml ./async/rust-toolchain.toml From b2793b090647771e4d1382028aba975693493bdc Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 16 Apr 2026 15:05:25 +1000 Subject: [PATCH 28/47] [skip ci] Deleted empty default feature --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 549efa8c..ddc1df63 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,6 @@ ssh-key = { version = "0.6", default-features = false, optional = true, features embedded-io = { version = "0.6", optional = true } [features] -default = [] std = ["snafu/std", "ssh-key/alloc", "larger", "mlkem"] backtrace = ["snafu/backtrace"] rsa = ["dep:rsa", "ssh-key/rsa"] From 4516fc27a1afad0d7f24e2efc7634e5b016b409a Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 16 
Apr 2026 15:10:44 +1000 Subject: [PATCH 29/47] [skip ci] Correcting typo "peak" --- sftp/src/sftphandler/sftphandler.rs | 4 +-- sftp/src/sftpsource.rs | 38 ++++++++++++++--------------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs index 2e0ec68a..cd8189d5 100644 --- a/sftp/src/sftphandler/sftphandler.rs +++ b/sftp/src/sftphandler/sftphandler.rs @@ -342,7 +342,7 @@ where .send_status( ReqId( source - .peak_packet_req_id() + .peek_packet_req_id() .unwrap_or(u32::MAX), ), StatusCode::SSH_FX_OP_UNSUPPORTED, @@ -360,7 +360,7 @@ where .send_status( ReqId( source - .peak_packet_req_id() + .peek_packet_req_id() .unwrap_or(u32::MAX), ), StatusCode::SSH_FX_BAD_MESSAGE, diff --git a/sftp/src/sftpsource.rs b/sftp/src/sftpsource.rs index 69c9699e..9eccf132 100644 --- a/sftp/src/sftpsource.rs +++ b/sftp/src/sftpsource.rs @@ -57,7 +57,7 @@ impl<'de> SftpSource<'de> { pub(crate) fn peek_packet_type(&self) -> WireResult { if self.buffer.len() <= SFTP_FIELD_ID_INDEX { debug!( - "Peak packet type failed: buffer len <= SFTP_FIELD_ID_INDEX ( {:?} <= {:?})", + "Peek packet type failed: buffer len <= SFTP_FIELD_ID_INDEX ( {:?} <= {:?})", self.buffer.len(), SFTP_FIELD_ID_INDEX ); @@ -67,17 +67,17 @@ impl<'de> SftpSource<'de> { } } - /// Peaks the buffer for packet length field. This does not advance the reading index + /// Peeks the buffer for packet length field. 
This does not advance the reading index /// /// Useful to observe the packet fields in special conditions where a `dec(s)` /// would fail /// - /// Use `peak_total_packet_len` instead if you want to also consider the the + /// Use `peek_total_packet_len` instead if you want to also consider the the /// length field /// /// **Warning**: will only work in well formed packets, in other case the result /// will contains garbage - pub(crate) fn peak_packet_len(&self) -> WireResult { + pub(crate) fn peek_packet_len(&self) -> WireResult { if self.buffer.len() < SFTP_FIELD_LEN_INDEX + SFTP_FIELD_LEN_LENGTH { Err(WireError::RanOut) } else { @@ -90,32 +90,32 @@ impl<'de> SftpSource<'de> { } } - /// Peaks the packet in the source to obtain a total packet length, which + /// Peeks the packet in the source to obtain a total packet length, which /// considers the length of the length field itself. For the packet length field - /// use [`peak_packet_len()`] + /// use [`peek_packet_len()`] /// /// This does not advance the reading index /// /// /// **Warning**: will only work in well formed packets, in other case the result /// will contains garbage - pub(crate) fn peak_total_packet_len(&self) -> WireResult { - Ok(self.peak_packet_len()? + SFTP_FIELD_LEN_LENGTH as u32) + pub(crate) fn peek_total_packet_len(&self) -> WireResult { + Ok(self.peek_packet_len()? + SFTP_FIELD_LEN_LENGTH as u32) } - /// Compares the total source capacity and the peaked packet length + /// Compares the total source capacity and the peeked packet length /// plus the length field length itself to find out if the packet fit /// in the source /// **Warning**: will only work in well formed packets, in other case /// the result will contains garbage pub fn packet_fits(&self) -> bool { - match self.peak_total_packet_len() { + match self.peek_total_packet_len() { Ok(len) => self.buffer.len() >= len as usize, Err(_) => false, } } - /// Peaks the buffer for packet request id [`u32`]. 
This does not advance + /// Peeks the buffer for packet request id [`u32`]. This does not advance /// the reading index /// /// Useful to observe the packet fields in special conditions where a @@ -123,7 +123,7 @@ impl<'de> SftpSource<'de> { /// /// **Warning**: will only work in well formed packets, in other case /// the result will contains garbage - pub fn peak_packet_req_id(&self) -> WireResult { + pub fn peek_packet_req_id(&self) -> WireResult { if self.buffer.len() < SFTP_FIELD_REQ_ID_INDEX + SFTP_FIELD_REQ_ID_LEN { Err(WireError::RanOut) } else { @@ -169,26 +169,26 @@ mod local_tests { } #[test] - fn peaking_len() { + fn peeking_len() { let buffer_status = status_buffer(); let source = SftpSource::new(&buffer_status); - let read_packet_len = source.peak_packet_len().unwrap(); + let read_packet_len = source.peek_packet_len().unwrap(); let original_packet_len = 23u32; assert_eq!(original_packet_len, read_packet_len); } #[test] - fn peaking_total_len() { + fn peeking_total_len() { let buffer_status = status_buffer(); let source = SftpSource::new(&buffer_status); - let read_total_packet_len = source.peak_total_packet_len().unwrap(); + let read_total_packet_len = source.peek_total_packet_len().unwrap(); let original_total_packet_len = 23u32 + 4u32; assert_eq!(original_total_packet_len, read_total_packet_len); } #[test] - fn peaking_type() { + fn peeking_type() { let buffer_status = status_buffer(); let source = SftpSource::new(&buffer_status); let read_packet_type = source.peek_packet_type().unwrap(); @@ -196,10 +196,10 @@ mod local_tests { assert_eq!(original_packet_type, read_packet_type); } #[test] - fn peaking_req_id() { + fn peeking_req_id() { let buffer_status = status_buffer(); let source = SftpSource::new(&buffer_status); - let read_req_id = source.peak_packet_req_id().unwrap(); + let read_req_id = source.peek_packet_req_id().unwrap(); let original_req_id = 16u32; assert_eq!(original_req_id, read_req_id); } From c3f9ab2fa51b535a374b03a24fcdabd0ba341f8e 
Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 16 Apr 2026 15:22:33 +1000 Subject: [PATCH 30/47] [skip ci] Naively adding bug to WireError This is used to catch bad usages of encode_request. @mkj Please let me know if you want this reverted --- sftp/src/proto.rs | 2 +- src/sshwire.rs | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index 885f5668..90397962 100644 --- a/sftp/src/proto.rs +++ b/sftp/src/proto.rs @@ -831,7 +831,7 @@ macro_rules! sftpmessages { /// Used by a SFTP client. Does not include the length field. pub fn encode_request(&self, id: ReqId, s: &mut dyn SSHSink) -> WireResult<()> { if !self.sftp_num().is_request() { - return Err(WireError::PacketWrong) + return Err(WireError::Bug) // return Err(Error::bug()) // I understand that it would be a bad call of encode_response and // therefore a bug, bug Error::bug() is not compatible with WireResult diff --git a/src/sshwire.rs b/src/sshwire.rs index c0d2f71d..078ab384 100644 --- a/src/sshwire.rs +++ b/src/sshwire.rs @@ -94,6 +94,8 @@ pub enum WireError { BadKeyFormat, + Bug, + UnknownPacket { number: u8 }, } @@ -108,6 +110,7 @@ impl From for Error { WireError::PacketWrong => error::PacketWrong.build(), WireError::BadKeyFormat => Error::BadKeyFormat, WireError::UnknownVariant => Error::bug_err_msg("Can't encode Unknown"), + WireError::Bug => error::Bug.build(), WireError::UnknownPacket { number } => Error::UnknownPacket { number }, } } From aebcc41b64b28973ca9dca9f7d2301ed09f34218 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 17 Apr 2026 11:43:46 +1000 Subject: [PATCH 31/47] [skip ci] Documenting generic parameters for sftp structures Looking for structures with redundant lifetimes resulted in no more of them. 
The methodology has been analyzing the results of a reg-exp search ( <'[^>]+, ) within sftp containing paths --- sftp/src/sftphandler/sftphandler.rs | 7 ++++--- sftp/src/sftphandler/sftpoutputchannelhandler.rs | 10 +++++++++- sftp/src/sftpserver.rs | 4 ++++ 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs index cd8189d5..97f7c55a 100644 --- a/sftp/src/sftphandler/sftphandler.rs +++ b/sftp/src/sftphandler/sftphandler.rs @@ -59,9 +59,10 @@ enum HandlerState { /// Process the raw buffers in and out from a subsystem channel decoding /// request and encoding responses /// -/// It will delegate request to an [`crate::sftpserver::SftpServer`] -/// implemented by the library -/// user taking into account the local system details. +/// Parameter (S): It will delegate request to an [`crate::sftpserver::SftpServer`] +/// implemented by the library user taking into account the local system details. +/// +/// Parameter (T): Is a type that implements [`crate::handles::OpaqueFileHandle`] that **must** match the type used in the [`crate::sftpserver::SftpServer`] provided in (S) /// /// The compiler time constant `BUFFER_OUT_SIZE` is used to define the /// size of the output buffer for the subsystem [`Embassy-sync::pipe`] used diff --git a/sftp/src/sftphandler/sftpoutputchannelhandler.rs b/sftp/src/sftphandler/sftpoutputchannelhandler.rs index 3843e8e8..b0cc054d 100644 --- a/sftp/src/sftphandler/sftpoutputchannelhandler.rs +++ b/sftp/src/sftphandler/sftpoutputchannelhandler.rs @@ -65,7 +65,11 @@ impl SftpOutputPipe { } /// Consumer that takes ownership of [`ChanOut`]. It pipes the data received -/// from a [`PipeReader`] into the channel +/// from a [`PipeReader`] into the channel. +/// +/// N is the length of the +/// [PipeReader](https://docs.embassy.dev/embassy-sync/git/default/pipe/struct.Reader.html) +/// buffer used to receive the data. 
pub(crate) struct SftpOutputConsumer<'a, const N: usize> { reader: PipeReader<'a, SunsetRawMutex, N>, ssh_chan_out: ChanOut<'a>, @@ -118,6 +122,10 @@ impl<'a, const N: usize> SftpOutputConsumer<'a, N> { /// Producer used to send data to a [`ChanOut`] without the restrictions /// of mutable borrows +/// +/// Under the hood it uses an +/// [embassy_sync Pipe](https://docs.embassy.dev/embassy-sync/git/default/pipe/struct.Pipe.html) +/// where N is the pipe buffer length in bytes #[derive(Clone)] pub struct SftpOutputProducer<'a, const N: usize> { writer: PipeWriter<'a, SunsetRawMutex, N>, diff --git a/sftp/src/sftpserver.rs b/sftp/src/sftpserver.rs index d00596a0..c0912ff2 100644 --- a/sftp/src/sftpserver.rs +++ b/sftp/src/sftpserver.rs @@ -226,6 +226,8 @@ where /// [`sunset_async::async_channel::ChanOut`] used in the context of an /// SFTP Session. /// +/// Parameter N is the length for the SftpOutputProducer buffer +/// pub struct ReadReply<'g, const N: usize> { /// The request Id that will be use`d in the response req_id: ReqId, @@ -360,6 +362,8 @@ mod read_reply_tests { /// [`sunset_async::async_channel::ChanOut`] used in the context of an /// SFTP Session. 
/// +/// Parameter N is the length for the SftpOutputProducer buffer +/// pub struct DirReply<'g, const N: usize> { /// The request Id that will be use`d in the response req_id: ReqId, From 8ff236b0a8aa69a67109e462e31a89f9ccfccd0d Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 17 Apr 2026 15:11:46 +1000 Subject: [PATCH 32/47] Using AtomicUsize instead of Mutex in sftpoutputchannelhandler.rs Also modifying the receiver counter to use a an usize and fencing the counters in a debug_assertions configuration as they are of no use during release --- .../sftphandler/sftpoutputchannelhandler.rs | 95 ++++++++++++------- 1 file changed, 63 insertions(+), 32 deletions(-) diff --git a/sftp/src/sftphandler/sftpoutputchannelhandler.rs b/sftp/src/sftphandler/sftpoutputchannelhandler.rs index b0cc054d..e5ae2f58 100644 --- a/sftp/src/sftphandler/sftpoutputchannelhandler.rs +++ b/sftp/src/sftphandler/sftpoutputchannelhandler.rs @@ -2,22 +2,27 @@ use crate::error::{SftpError, SftpResult}; use crate::proto::{ReqId, SftpPacket, Status, StatusCode}; use crate::server::SftpSink; -use embassy_sync::mutex::Mutex; use sunset_async::ChanOut; use embassy_sync::pipe::{Pipe, Reader as PipeReader, Writer as PipeWriter}; use embedded_io_async::Write; use sunset_async::SunsetRawMutex; +#[cfg(debug_assertions)] +use core::sync::atomic::AtomicUsize; +#[cfg(debug_assertions)] +use core::sync::atomic::Ordering; + use log::{debug, error, trace}; -type CounterMutex = Mutex; +#[cfg(debug_assertions)] +type Counter = AtomicUsize; pub struct SftpOutputPipe { pipe: Pipe, - counter_send: CounterMutex, - counter_recv: CounterMutex, - splitted: bool, + split: bool, + #[cfg(debug_assertions)] + counter_send: Counter, } /// M: SunsetSunsetRawMutex @@ -32,9 +37,9 @@ impl SftpOutputPipe { pub fn new() -> Self { SftpOutputPipe { pipe: Pipe::new(), - counter_send: Mutex::::new(0), - counter_recv: Mutex::::new(0), - splitted: false, + #[cfg(debug_assertions)] + counter_send: Counter::new(0), + split: false, } } 
@@ -52,14 +57,23 @@ impl SftpOutputPipe { &'a mut self, ssh_chan_out: ChanOut<'a>, ) -> SftpResult<(SftpOutputConsumer<'a, N>, SftpOutputProducer<'a, N>)> { - if self.splitted { + if self.split { return Err(SftpError::AlreadyInitialized); } - self.splitted = true; + self.split = true; let (reader, writer) = self.pipe.split(); Ok(( - SftpOutputConsumer { reader, ssh_chan_out, counter: &self.counter_recv }, - SftpOutputProducer { writer, counter: &self.counter_send }, + SftpOutputConsumer { + reader, + ssh_chan_out, + #[cfg(debug_assertions)] + counter: 0, + }, + SftpOutputProducer { + writer, + #[cfg(debug_assertions)] + counter: &self.counter_send, + }, )) } } @@ -72,8 +86,11 @@ impl SftpOutputPipe { /// buffer used to receive the data. pub(crate) struct SftpOutputConsumer<'a, const N: usize> { reader: PipeReader<'a, SunsetRawMutex, N>, + /// The [sunset_async::ChanOut] where the channel data is written to ssh_chan_out: ChanOut<'a>, - counter: &'a CounterMutex, + /// Only used for debug purposes + #[cfg(debug_assertions)] + counter: usize, } impl<'a, const N: usize> SftpOutputConsumer<'a, N> { @@ -83,14 +100,15 @@ impl<'a, const N: usize> SftpOutputConsumer<'a, N> { let mut buf = [0u8; N]; loop { let rl = self.reader.read(&mut buf).await; - let mut _total = 0; + #[cfg(debug_assertions)] { - let mut lock = self.counter.lock().await; - *lock += rl; - _total = *lock; - } + self.counter = self.counter.wrapping_add(buf.len()); - debug!("Output Consumer: ---> Reads {rl} bytes. Total {_total}"); + debug!( + "Output Consumer: ---> Reads {rl} bytes. 
Total {}", + self.counter + ); + } let mut scanning_buffer = &buf[..rl]; if rl > 0 { // Replaced write_all with loop to handle partial writes to discard issues in write_all @@ -129,7 +147,8 @@ impl<'a, const N: usize> SftpOutputConsumer<'a, N> { #[derive(Clone)] pub struct SftpOutputProducer<'a, const N: usize> { writer: PipeWriter<'a, SunsetRawMutex, N>, - counter: &'a CounterMutex, + #[cfg(debug_assertions)] + counter: &'a Counter, } impl<'a, const N: usize> SftpOutputProducer<'a, N> { /// Sends the data encoded in the provided [`SftpSink`] without including @@ -137,7 +156,13 @@ impl<'a, const N: usize> SftpOutputProducer<'a, N> { /// /// Use this when you are sending chunks of data after a valid header pub async fn send_data(&self, buf: &[u8]) -> SftpResult<()> { - Self::send_buffer(&self.writer, &buf, &self.counter).await; + Self::send_buffer( + &self.writer, + &buf, + #[cfg(debug_assertions)] + &self.counter, + ) + .await; Ok(()) } @@ -163,7 +188,13 @@ impl<'a, const N: usize> SftpOutputProducer<'a, N> { let mut sink = SftpSink::new(&mut buf); packet.encode_response(&mut sink)?; debug!("Output Producer: Sending packet {:?}", packet); - Self::send_buffer(&self.writer, &sink.used_slice(), &self.counter).await; + Self::send_buffer( + &self.writer, + &sink.used_slice(), + #[cfg(debug_assertions)] + &self.counter, + ) + .await; Ok(()) } @@ -171,19 +202,19 @@ impl<'a, const N: usize> SftpOutputProducer<'a, N> { async fn send_buffer( writer: &PipeWriter<'a, SunsetRawMutex, N>, buf: &[u8], - counter: &CounterMutex, + #[cfg(debug_assertions)] counter: &Counter, ) { - let mut _total = 0; + #[cfg(debug_assertions)] { - let mut lock = counter.lock().await; - *lock += buf.len(); - _total = *lock; - } + let total = counter.load(Ordering::Relaxed).wrapping_add(buf.len()); + counter.store(total, Ordering::Relaxed); - debug!("Output Producer: <--- Sends {:?} bytes. 
Total {_total}", buf.len()); - trace!("Output Producer: Sending buffer {:?}", buf); - - // writer.write_all(buf); // ??? error[E0596]: cannot borrow `*writer` as mutable, as it is behind a `&` reference + debug!( + "Output Producer: <--- Sends {:?} bytes. Total {total}", + buf.len() + ); + trace!("Output Producer: Sending buffer {:?}", buf); + } let mut buf = buf; loop { From 6df6cc66bb1f8d20bf4faec2c5b4674f503cd33c Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 17 Apr 2026 15:32:09 +1000 Subject: [PATCH 33/47] SftpOutputConsumer.receive_task exits on 0 byte reads To eliminate the risk in the select for the SftpOutputConsumer to persist on select, receiving 0 bytes on the PipeReader.read will end receive_task function. --- .../sftphandler/sftpoutputchannelhandler.rs | 51 ++++++++++--------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/sftp/src/sftphandler/sftpoutputchannelhandler.rs b/sftp/src/sftphandler/sftpoutputchannelhandler.rs index e5ae2f58..bf1f28d2 100644 --- a/sftp/src/sftphandler/sftpoutputchannelhandler.rs +++ b/sftp/src/sftphandler/sftpoutputchannelhandler.rs @@ -13,7 +13,7 @@ use core::sync::atomic::AtomicUsize; #[cfg(debug_assertions)] use core::sync::atomic::Ordering; -use log::{debug, error, trace}; +use log::{debug, trace}; #[cfg(debug_assertions)] type Counter = AtomicUsize; @@ -64,7 +64,7 @@ impl SftpOutputPipe { let (reader, writer) = self.pipe.split(); Ok(( SftpOutputConsumer { - reader, + pipe_reader: reader, ssh_chan_out, #[cfg(debug_assertions)] counter: 0, @@ -85,7 +85,7 @@ impl SftpOutputPipe { /// [PipeReader](https://docs.embassy.dev/embassy-sync/git/default/pipe/struct.Reader.html) /// buffer used to receive the data. 
pub(crate) struct SftpOutputConsumer<'a, const N: usize> { - reader: PipeReader<'a, SunsetRawMutex, N>, + pipe_reader: PipeReader<'a, SunsetRawMutex, N>, /// The [sunset_async::ChanOut] where the channel data is written to ssh_chan_out: ChanOut<'a>, /// Only used for debug purposes @@ -99,7 +99,11 @@ impl<'a, const N: usize> SftpOutputConsumer<'a, N> { debug!("Running SftpOutout Consumer Reader task"); let mut buf = [0u8; N]; loop { - let rl = self.reader.read(&mut buf).await; + let rl = self.pipe_reader.read(&mut buf).await; + if rl == 0 { + debug!("Output Consumer: Pipe closed, stopping receiving task"); + return Ok(()); + } #[cfg(debug_assertions)] { self.counter = self.counter.wrapping_add(buf.len()); @@ -110,30 +114,27 @@ impl<'a, const N: usize> SftpOutputConsumer<'a, N> { ); } let mut scanning_buffer = &buf[..rl]; - if rl > 0 { - // Replaced write_all with loop to handle partial writes to discard issues in write_all - while scanning_buffer.len() > 0 { - trace!( - "Output Consumer: Tries to write {:?} bytes to ChanOut", - scanning_buffer.len() - ); - let wl = self.ssh_chan_out.write(scanning_buffer).await?; - debug!("Output Consumer: Written {:?} bytes ", wl); - if wl < scanning_buffer.len() { - debug!( - "Output Consumer: ChanOut accepted only part of the buffer" - ); - } - trace!( - "Output Consumer: Bytes written {:?}", - &scanning_buffer[..wl] + + // Replaced write_all with loop to handle partial writes to discard issues in write_all + while scanning_buffer.len() > 0 { + trace!( + "Output Consumer: Tries to write {:?} bytes to ChanOut", + scanning_buffer.len() + ); + let wl = self.ssh_chan_out.write(scanning_buffer).await?; + debug!("Output Consumer: Written {:?} bytes ", wl); + if wl < scanning_buffer.len() { + debug!( + "Output Consumer: ChanOut accepted only part of the buffer" ); - scanning_buffer = &scanning_buffer[wl..]; } - debug!("Output Consumer: Finished writing all bytes in read buffer"); - } else { - error!("Output Consumer: Empty array 
received"); + trace!( + "Output Consumer: Bytes written {:?}", + &scanning_buffer[..wl] + ); + scanning_buffer = &scanning_buffer[wl..]; } + debug!("Output Consumer: Finished writing all bytes in read buffer"); } } } From 39e51c86aa8e5d29a9a3af2e8f14cea7e6fc02d0 Mon Sep 17 00:00:00 2001 From: Matt Johnston Date: Mon, 13 Apr 2026 00:28:42 +0800 Subject: [PATCH 34/47] sftp: Add a ParseContext to sftpsource This replaces the todo!() for ctx(). Blob::dec() will call ctx() if the inner length doesn't match the blob length. --- sftp/src/sftpsource.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sftp/src/sftpsource.rs b/sftp/src/sftpsource.rs index 9eccf132..c3e16f46 100644 --- a/sftp/src/sftpsource.rs +++ b/sftp/src/sftpsource.rs @@ -4,6 +4,7 @@ use crate::proto::{ }; use sunset::sshwire::{SSHSource, WireError, WireResult}; +use sunset::packets::ParseContext; #[allow(unused_imports)] use log::{debug, error, info, log, trace, warn}; @@ -14,6 +15,7 @@ use log::{debug, error, info, log, trace, warn}; pub struct SftpSource<'de> { buffer: &'de [u8], index: usize, + ctx: ParseContext, } impl<'de> SSHSource<'de> for SftpSource<'de> { @@ -35,8 +37,8 @@ impl<'de> SSHSource<'de> for SftpSource<'de> { self.buffer.len() - self.index } - fn ctx(&mut self) -> &mut sunset::packets::ParseContext { - todo!("Which context for sftp?"); + fn ctx(&mut self) -> &mut ParseContext { + &mut self.ctx } } @@ -44,7 +46,8 @@ impl<'de> SftpSource<'de> { /// Creates a new [`SftpSource`] referencing a buffer pub fn new(buffer: &'de [u8]) -> Self { debug!("New source with content: : {:?}", buffer); - SftpSource { buffer: buffer, index: 0 } + let ctx = ParseContext::new(); + SftpSource { buffer: buffer, index: 0, ctx } } /// Peeks the buffer for packet type [`SftpNum`]. 
This does not advance /// the reading index From 68e6abca09b43a7bf7040866904ec720c7d896bb Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 17 Apr 2026 15:40:07 +1000 Subject: [PATCH 35/47] Fixing fmt to pass CI After cherry picking https://github.com/mkj/sunset/commit/a8a27eb9306a314ab47f4c1d025cebd7b7959096 --- sftp/src/sftpsource.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sftp/src/sftpsource.rs b/sftp/src/sftpsource.rs index c3e16f46..f0c025d7 100644 --- a/sftp/src/sftpsource.rs +++ b/sftp/src/sftpsource.rs @@ -3,8 +3,8 @@ use crate::proto::{ SFTP_FIELD_REQ_ID_INDEX, SFTP_FIELD_REQ_ID_LEN, SftpNum, }; -use sunset::sshwire::{SSHSource, WireError, WireResult}; use sunset::packets::ParseContext; +use sunset::sshwire::{SSHSource, WireError, WireResult}; #[allow(unused_imports)] use log::{debug, error, info, log, trace, warn}; From ded1946377706f9b8fc7b1a716d41fa928107689 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Mon, 20 Apr 2026 15:50:24 +1000 Subject: [PATCH 36/47] Adding strict-path to demo/sftp/std In order to provide a safe example for future SftpServer implementers. 
--- Cargo.lock | 25 +++++++++ demo/sftp/std/Cargo.toml | 24 +++++++-- demo/sftp/std/src/demosftpserver.rs | 82 ++++++++++++++++++++++++----- 3 files changed, 114 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d632c183..70e22ba6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2107,6 +2107,12 @@ dependencies = [ "elliptic-curve", ] +[[package]] +name = "proc-canonicalize" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6127461ef34c8119591fc98e4046230f4f953bec84aaebfe326e3d82e8f383da" + [[package]] name = "proc-macro-error-attr2" version = "2.0.0" @@ -2597,6 +2603,15 @@ dependencies = [ "windows-sys 0.60.2", ] +[[package]] +name = "soft-canonicalize" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaf99d1bbb279dfc59a8642f42fa4b6137935e506c97d1e2e57d282f417f163d" +dependencies = [ + "proc-canonicalize", +] + [[package]] name = "spin" version = "0.9.8" @@ -2680,6 +2695,15 @@ dependencies = [ "portable-atomic", ] +[[package]] +name = "strict-path" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9faaab0c30fb8dd6368d4b3a6f9126b85a599be66160ebf666b79e9b6e25c557" +dependencies = [ + "soft-canonicalize", +] + [[package]] name = "string_cache" version = "0.8.7" @@ -2834,6 +2858,7 @@ dependencies = [ "log", "rand", "sha2", + "strict-path", "sunset", "sunset-async", "sunset-demo-common", diff --git a/demo/sftp/std/Cargo.toml b/demo/sftp/std/Cargo.toml index a2f08d6f..78cfb946 100644 --- a/demo/sftp/std/Cargo.toml +++ b/demo/sftp/std/Cargo.toml @@ -11,17 +11,31 @@ sunset-sftp = { version = "0.1.0", path = "../../../sftp", features = ["std"] } # 131072 was determined empirically embassy-executor = { version = "0.7", features = [ - "executor-thread", "arch-std", "log", "task-arena-size-131072"] } -embassy-net = { version = "0.7", features = ["tcp", "dhcpv4", "medium-ethernet"] } + "executor-thread", + 
"arch-std", + "log", + "task-arena-size-131072", +] } +embassy-net = { version = "0.7", features = [ + "tcp", + "dhcpv4", + "medium-ethernet", +] } embassy-net-tuntap = { version = "0.1" } embassy-sync = { version = "0.7" } embassy-futures = { version = "0.1" } # embassy-time dep required to link a time driver -embassy-time = { version = "0.4", default-features=false, features = ["log", "std"] } +embassy-time = { version = "0.4", default-features = false, features = [ + "log", + "std", +] } log = { version = "0.4" } # default regex feature is huge -env_logger = { version = "0.11", default-features=false, features = ["auto-color", "humantime"] } +env_logger = { version = "0.11", default-features = false, features = [ + "auto-color", + "humantime", +] } embedded-io-async = "0.6" heapless = "0.8" @@ -38,3 +52,5 @@ critical-section = "1.1" rand = { version = "0.8", default-features = false, features = ["getrandom"] } sha2 = { version = "0.10", default-features = false } fnv = "1.0.7" + +strict-path = "0.2" diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index d5fa1cf4..a89f6908 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -13,6 +13,14 @@ use sunset_sftp::server::{ #[allow(unused_imports)] use log::{debug, error, info, log, trace, warn}; +use strict_path::StrictPath; +/// This is a marker for the SftpDir transactions. +/// See [the mix up problem](https://dk26.github.io/strict-path-rs/tutorial/chapter2_mixup_problem.html) +/// and [markers to the rescue](https://dk26.github.io/strict-path-rs/tutorial/chapter3_markers.html) +/// if you are not familiar with marker types + +struct SftpDir; + use std::fs; use std::os::unix::fs::PermissionsExt; use std::{fs::File, os::unix::fs::FileExt, path::Path}; @@ -96,7 +104,8 @@ impl PathFinder for PrivateDirHandle { /// A basic demo server. 
Used as a demo and to test SFTP functionality pub struct DemoSftpServer { - base_path: String, + base_path: StrictPath, + last_real_path: String, handles_manager: DemoFileHandleManager, } @@ -104,20 +113,44 @@ impl DemoSftpServer { pub fn new(base_path: String) -> Self { if !Path::new(&base_path).exists() { debug!("Base path {:?} does not exist. Creating it", base_path); - if let Err(err) = fs::create_dir_all(&base_path) { - error!("Could not create the base path {:?}: {:?}", base_path, err); - panic!(); - } + fs::create_dir_all(&base_path).unwrap_or_else(|e| { + panic!("Could not create the base path {:?}: {:?}", base_path, e); + }); } else { debug!("Base path {:?} already exists", base_path); } - DemoSftpServer { base_path, handles_manager: DemoFileHandleManager::new() } + + let base_path = StrictPath::::with_boundary(base_path) + .unwrap_or_else(|e| { + panic!("Could not create the base path {:?}", e); + }); + + let real_path = base_path.strictpath_display().to_string(); + + DemoSftpServer { + base_path, + last_real_path: real_path, + handles_manager: DemoFileHandleManager::new(), + } } } impl SftpServer for DemoSftpServer { async fn open(&mut self, filename: &str, mode: &PFlags) -> SftpOpResult { - debug!("Open file: filename = {:?}, mode = {:?}", filename, mode); + // Untrusted input: user upload, API param, config value, AI agent output, archive entry... 
+ let Ok(validated_filename_path) = self.base_path.strict_join(filename) + else { + error!( + "Could not validate the filename {:?} with the protected path boundary {:?}", + filename, self.base_path + ); + return Err(StatusCode::SSH_FX_PERMISSION_DENIED); + }; + + debug!( + "Open file: filename = {:?}, mode = {:?}", + validated_filename_path, mode + ); let can_write = u32::from(mode) & u32::from(&PFlags::SSH_FXF_WRITE) > 0; let can_read = u32::from(mode) & u32::from(&PFlags::SSH_FXF_READ) > 0; @@ -127,12 +160,19 @@ impl SftpServer for DemoSftpServer SftpServer for DemoSftpServer SftpServer for DemoSftpServer SftpOpResult { info!("Open Directory = {:?}", dir); + // Untrusted input: user upload, API param, config value, AI agent output, archive entry... + let Ok(validated_dir_path) = self.base_path.strict_join(dir) else { + error!( + "Could not validate the directory {:?} with the protected path boundary {:?}", + dir, self.base_path + ); + return Err(StatusCode::SSH_FX_PERMISSION_DENIED); + }; let dir_handle = self.handles_manager.insert( PrivatePathHandle::Directory(PrivateDirHandle { - path: dir.into(), + path: validated_dir_path.strictpath_display().to_string(), read_status: ReadStatus::default(), }), OPAQUE_SALT, @@ -179,8 +227,16 @@ impl SftpServer for DemoSftpServer SftpOpResult> { info!("finding path for: {:?}", dir); + self.last_real_path = self.base_path.strict_join(dir) + .map_err(|err| { + error!("Could not validate the directory {:?} with the protected path boundary {:?}: {:?}", dir, self.base_path, err); + StatusCode::SSH_FX_PERMISSION_DENIED + })? 
+ .strictpath_display() + .to_string(); + let name_entry = NameEntry { - filename: Filename::from(self.base_path.as_str()), + filename: Filename::from(self.last_real_path.as_str()), _longname: Filename::from(""), attrs: Attrs { size: None, From 283d4b4a0bf2a915997ba5aa0a463a157cf68b3d Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Wed, 22 Apr 2026 16:18:53 +1000 Subject: [PATCH 37/47] Simplifying sftp std example. No seeds + 32 bytes handle_id To provide better usage practices as recommended by Radically open Security, I have removed the "tiny_hash" and replaced it by an unpredictable 32 bytes array. All salt and seed mention removed. This should make --- Cargo.lock | 3 +-- demo/sftp/std/Cargo.toml | 5 ++-- demo/sftp/std/src/demofilehandlemanager.rs | 15 +++++------ demo/sftp/std/src/demoopaquefilehandle.rs | 30 ++++++++------------- demo/sftp/std/src/demosftpserver.rs | 31 ++++++++++------------ demo/sftp/std/src/main.rs | 16 +++++------ sftp/src/lib.rs | 2 +- sftp/src/opaquefilehandle.rs | 16 +++++------ 8 files changed, 48 insertions(+), 70 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70e22ba6..248e9bed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2159,6 +2159,7 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ + "libc", "rand_chacha", "rand_core", ] @@ -2852,12 +2853,10 @@ dependencies = [ "embassy-time", "embedded-io-async", "env_logger", - "fnv", "heapless", "libc", "log", "rand", - "sha2", "strict-path", "sunset", "sunset-async", diff --git a/demo/sftp/std/Cargo.toml b/demo/sftp/std/Cargo.toml index 78cfb946..5bfadc5e 100644 --- a/demo/sftp/std/Cargo.toml +++ b/demo/sftp/std/Cargo.toml @@ -49,8 +49,7 @@ async-io = "1.6.0" critical-section = "1.1" -rand = { version = "0.8", default-features = false, features = ["getrandom"] } -sha2 = { version = "0.10", default-features = false } -fnv = "1.0.7" + +rand = { version = 
"0.8", features = ["getrandom"] } strict-path = "0.2" diff --git a/demo/sftp/std/src/demofilehandlemanager.rs b/demo/sftp/std/src/demofilehandlemanager.rs index b1c649ba..adc7e6ff 100644 --- a/demo/sftp/std/src/demofilehandlemanager.rs +++ b/demo/sftp/std/src/demofilehandlemanager.rs @@ -1,5 +1,5 @@ use sunset_sftp::handles::{ - InitWithSeed, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, + InitFileHandler, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, }; use sunset_sftp::protocol::StatusCode; @@ -7,7 +7,7 @@ use std::collections::HashMap; // Not enforced. Only for std. For no_std environ pub struct DemoFileHandleManager where - K: OpaqueFileHandle + InitWithSeed, + K: OpaqueFileHandle + InitFileHandler, V: PathFinder, { handle_map: HashMap, @@ -15,7 +15,7 @@ where impl DemoFileHandleManager where - K: OpaqueFileHandle + InitWithSeed, + K: OpaqueFileHandle + InitFileHandler, V: PathFinder, { pub fn new() -> Self { @@ -25,12 +25,12 @@ where impl OpaqueFileHandleManager for DemoFileHandleManager where - K: OpaqueFileHandle + InitWithSeed, + K: OpaqueFileHandle + InitFileHandler, V: PathFinder, { type Err = StatusCode; - fn insert(&mut self, private_handle: V, salt: &str) -> Result { + fn insert(&mut self, private_handle: V) -> Result { if self .handle_map .iter() @@ -39,10 +39,7 @@ where return Err(StatusCode::SSH_FX_PERMISSION_DENIED); } - let handle = K::init_with_seed( - format!("{:}-{:}", &private_handle.get_path_ref(), salt).as_str(), - ) - .map_err(|_| StatusCode::SSH_FX_FAILURE)?; + let handle = K::init(); self.handle_map.insert(handle.clone(), private_handle); Ok(handle) diff --git a/demo/sftp/std/src/demoopaquefilehandle.rs b/demo/sftp/std/src/demoopaquefilehandle.rs index e5bad21b..124f8531 100644 --- a/demo/sftp/std/src/demoopaquefilehandle.rs +++ b/demo/sftp/std/src/demoopaquefilehandle.rs @@ -1,16 +1,14 @@ -use sunset_sftp::handles::{InitWithSeed, OpaqueFileHandle}; +use sunset_sftp::handles::{InitFileHandler, OpaqueFileHandle}; use 
sunset_sftp::protocol::FileHandle; use sunset::sshwire::{BinString, WireError}; -use core::hash::Hasher; +use rand::prelude::*; -use fnv::FnvHasher; - -const HASH_LEN: usize = 4; +const ID_LEN: usize = 32; #[derive(Debug, Hash, PartialEq, Eq, Clone)] pub(crate) struct DemoOpaqueFileHandle { - tiny_hash: [u8; HASH_LEN], + handle_id: [u8; ID_LEN], } impl OpaqueFileHandle for DemoOpaqueFileHandle { @@ -20,26 +18,20 @@ impl OpaqueFileHandle for DemoOpaqueFileHandle { return Err(WireError::BadString); } - let mut tiny_hash = [0u8; HASH_LEN]; + let mut tiny_hash = [0u8; ID_LEN]; tiny_hash.copy_from_slice(file_handle.0 .0); - Ok(DemoOpaqueFileHandle { tiny_hash }) + Ok(DemoOpaqueFileHandle { handle_id: tiny_hash }) } fn into_file_handle(&self) -> FileHandle<'_> { - FileHandle(BinString(&self.tiny_hash)) + FileHandle(BinString(&self.handle_id)) } } /// Implemented to allow the use of `DemoOpaqueFileHandle` as a key in the `OpaqueHandleManager` -impl InitWithSeed for DemoOpaqueFileHandle { - type Err = WireError; - - fn init_with_seed(seed: &str) -> Result { - let mut hasher = FnvHasher::default(); - hasher.write(seed.as_bytes()); - - Ok(DemoOpaqueFileHandle { - tiny_hash: (hasher.finish() as u32).to_be_bytes(), - }) +impl InitFileHandler for DemoOpaqueFileHandle { + fn init() -> Self { + let handle_id: [u8; ID_LEN] = rand::thread_rng().gen(); + DemoOpaqueFileHandle { handle_id } } } diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index a89f6908..b3da7d3f 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -2,7 +2,7 @@ use crate::demofilehandlemanager::DemoFileHandleManager; use sunset_sftp::error::SftpResult; use sunset_sftp::handles::{ - InitWithSeed, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, + InitFileHandler, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, }; use sunset_sftp::protocol::{Attrs, Filename, NameEntry, PFlags, StatusCode}; use 
sunset_sftp::server::helpers::DirEntriesCollection; @@ -47,9 +47,6 @@ pub(crate) struct PrivateDirHandle { read_status: ReadStatus, } -/// It is a better practice generating it on creation. Used to generate the opaque handles instead of using a constant -static OPAQUE_SALT: &'static str = "12d%32"; - impl PathFinder for PrivatePathHandle { fn matches(&self, path: &Self) -> bool { match self { @@ -103,13 +100,13 @@ impl PathFinder for PrivateDirHandle { } /// A basic demo server. Used as a demo and to test SFTP functionality -pub struct DemoSftpServer { +pub struct DemoSftpServer { base_path: StrictPath, last_real_path: String, handles_manager: DemoFileHandleManager, } -impl DemoSftpServer { +impl DemoSftpServer { pub fn new(base_path: String) -> Self { if !Path::new(&base_path).exists() { debug!("Base path {:?} does not exist. Creating it", base_path); @@ -135,7 +132,9 @@ impl DemoSftpServer { } } -impl SftpServer for DemoSftpServer { +impl SftpServer + for DemoSftpServer +{ async fn open(&mut self, filename: &str, mode: &PFlags) -> SftpOpResult { // Untrusted input: user upload, API param, config value, AI agent output, archive entry... let Ok(validated_filename_path) = self.base_path.strict_join(filename) @@ -181,14 +180,13 @@ impl SftpServer for DemoSftpServer SftpServer for DemoSftpServer, - 512, - >::new(&mut file_server, &mut request_buffer) + SftpHandler::<_, _, 512>::new( + &mut file_server, + &mut request_buffer, + ) .process_loop(stdio, &mut buffer_in) .await?; @@ -216,7 +214,7 @@ async fn listen( config: &'static SunsetMutex, ) -> ! 
{ let demo = StdDemo::default(); - demo_common::listen(stack, config, &demo).await + sunset_demo_common::listen(stack, config, &demo).await } #[embassy_executor::main] diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs index e7158172..0be32f13 100644 --- a/sftp/src/lib.rs +++ b/sftp/src/lib.rs @@ -100,7 +100,7 @@ pub mod server { /// Handles and helpers used by the [`sftpserver::SftpServer`] trait implementer pub mod handles { - pub use crate::opaquefilehandle::InitWithSeed; + pub use crate::opaquefilehandle::InitFileHandler; pub use crate::opaquefilehandle::OpaqueFileHandle; pub use crate::opaquefilehandle::OpaqueFileHandleManager; pub use crate::opaquefilehandle::PathFinder; diff --git a/sftp/src/opaquefilehandle.rs b/sftp/src/opaquefilehandle.rs index cde4aa7d..6603b831 100644 --- a/sftp/src/opaquefilehandle.rs +++ b/sftp/src/opaquefilehandle.rs @@ -25,14 +25,11 @@ pub trait PathFinder { fn get_path_ref(&self) -> &str; } -/// Used in the `OpaqueFileHandleManager` to generate a Key (OpaqueFileHandle) from a seed -pub trait InitWithSeed: Sized { - /// The error type used for the implementation of `init_with_seed` useful to harmonize the error handling of the `OpaqueFileHandleManager` implementation - type Err; - +/// Used in the `OpaqueFileHandleManager` to generate a Key (OpaqueFileHandle) +pub trait InitFileHandler: Sized { /// Creates a new instance using a given string slice as `seed` which /// content should not clearly related to the seed - fn init_with_seed(s: &str) -> Result; + fn init() -> Self; } /// This trait is used to manage the OpaqueFileHandles (K) together with the private handle (V) that contains the details of the file internally stored in the system @@ -45,7 +42,7 @@ pub trait InitWithSeed: Sized { /// to look for the file path. 
pub trait OpaqueFileHandleManager where - K: OpaqueFileHandle + InitWithSeed, + K: OpaqueFileHandle + InitFileHandler, V: PathFinder, { /// The error used for all the trait members returning an error @@ -54,9 +51,8 @@ where /// Given the private_handle, stores it and return an opaque file handle /// /// Returns an error if the private_handle has a matching path as obtained from `PathFinder` - /// - /// Salt has been added to allow the user to add a factor that will mask how the opaque handle is generated - fn insert(&mut self, private_handle: V, salt: &str) -> Result; + + fn insert(&mut self, private_handle: V) -> Result; /// fn remove(&mut self, opaque_handle: &K) -> Option; From 54573c8ce833fc9a469943a264318bbe99e505f5 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 23 Apr 2026 11:11:24 +1000 Subject: [PATCH 38/47] Fixing unclear naming in OpaqueFileHandle implementation @mkj thanks for catching this one. I believe that with this name adjustment there is no more references to hash or tiny in variables whiting the demo/sftp/std --- demo/sftp/std/src/demoopaquefilehandle.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/demo/sftp/std/src/demoopaquefilehandle.rs b/demo/sftp/std/src/demoopaquefilehandle.rs index 124f8531..f1ec44a5 100644 --- a/demo/sftp/std/src/demoopaquefilehandle.rs +++ b/demo/sftp/std/src/demoopaquefilehandle.rs @@ -18,9 +18,9 @@ impl OpaqueFileHandle for DemoOpaqueFileHandle { return Err(WireError::BadString); } - let mut tiny_hash = [0u8; ID_LEN]; - tiny_hash.copy_from_slice(file_handle.0 .0); - Ok(DemoOpaqueFileHandle { handle_id: tiny_hash }) + let mut handle_id = [0u8; ID_LEN]; + handle_id.copy_from_slice(file_handle.0 .0); + Ok(DemoOpaqueFileHandle { handle_id }) } fn into_file_handle(&self) -> FileHandle<'_> { From a10022b7edc22513dcd8fcd98af6ba2ac0c267e6 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 23 Apr 2026 11:26:40 +1000 Subject: [PATCH 39/47] Addressing potential integer overflow Thanks to @mkj for 
spotting and proposing a fix Co-authored-by: Copilot --- sftp/src/sftpsource.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sftp/src/sftpsource.rs b/sftp/src/sftpsource.rs index f0c025d7..640bf90d 100644 --- a/sftp/src/sftpsource.rs +++ b/sftp/src/sftpsource.rs @@ -20,7 +20,8 @@ pub struct SftpSource<'de> { impl<'de> SSHSource<'de> for SftpSource<'de> { fn take(&mut self, len: usize) -> sunset::sshwire::WireResult<&'de [u8]> { - if len + self.index > self.buffer.len() { + if len.checked_add(self.index).ok_or(WireError::RanOut)? > self.buffer.len() + { return Err(WireError::RanOut); } let original_index = self.index; From 903666c64fc10770ae65a0d357d6ba206df59e1d Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Wed, 29 Apr 2026 13:03:41 +1000 Subject: [PATCH 40/47] Refactoring sftphandler to extract ReadReply Creating a new module for the read replies and refactoring the SFTP read operations to use the new types. This is a step towards better separation of concerns and clearer code structure, especially around the handling of read operations in the SFTP server. This decluttering allows us to isolate the logic related to read replies, improving readability and maintainability. --- sftp/src/lib.rs | 6 +- sftp/src/sftphandler/mod.rs | 6 + .../sftphandler/sftpoutputchannelhandler.rs | 78 +++++-- sftp/src/sftpserver/mod.rs | 18 ++ sftp/src/sftpserver/readreplies.rs | 204 ++++++++++++++++++ sftp/src/{ => sftpserver}/sftpserver.rs | 2 - 6 files changed, 292 insertions(+), 22 deletions(-) create mode 100644 sftp/src/sftpserver/mod.rs create mode 100644 sftp/src/sftpserver/readreplies.rs rename sftp/src/{ => sftpserver}/sftpserver.rs (99%) diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs index 0be32f13..4d61e324 100644 --- a/sftp/src/lib.rs +++ b/sftp/src/lib.rs @@ -26,8 +26,8 @@ //! - [x] [Open, close](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.3) //! 
and [write](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) //! - [x] Directory [Browsing](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.7) -//! - [x] File [read](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4), -//! - [] File [write](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) with conditions. See [Server Channel Window length is reduced to zero when long data is sent from server to client](https://github.com/mkj/sunset/issues/40), +//! - [ ] File [read](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4), with conditions. See [Server Channel Window length is reduced to zero when long data is sent from server to client](https://github.com/mkj/sunset/issues/40), +//! - [x] File [write](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) //! - [x] File [stats](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.8) //! //! ## Minimal features for convenient usability @@ -63,7 +63,7 @@ mod sftpsource; // Main calling point for the library provided that the user implements // a [`server::SftpServer`]. 
// -// Please see basic usage at `../demo/sftd/std` +// Please see basic usage at `../demo/sftp/std` pub use sftphandler::SftpHandler; /// Source of SFTP packets diff --git a/sftp/src/sftphandler/mod.rs b/sftp/src/sftphandler/mod.rs index 988cc09f..7e9d1797 100644 --- a/sftp/src/sftphandler/mod.rs +++ b/sftp/src/sftphandler/mod.rs @@ -4,3 +4,9 @@ mod sftpoutputchannelhandler; pub use sftphandler::SftpHandler; pub use sftpoutputchannelhandler::SftpOutputProducer; + +#[cfg(test)] +pub use sftpoutputchannelhandler::SftpOutputPipe; + +#[cfg(test)] +pub use sftpoutputchannelhandler::mock::MockWriter; diff --git a/sftp/src/sftphandler/sftpoutputchannelhandler.rs b/sftp/src/sftphandler/sftpoutputchannelhandler.rs index bf1f28d2..a4803965 100644 --- a/sftp/src/sftphandler/sftpoutputchannelhandler.rs +++ b/sftp/src/sftphandler/sftpoutputchannelhandler.rs @@ -2,10 +2,9 @@ use crate::error::{SftpError, SftpResult}; use crate::proto::{ReqId, SftpPacket, Status, StatusCode}; use crate::server::SftpSink; -use sunset_async::ChanOut; - use embassy_sync::pipe::{Pipe, Reader as PipeReader, Writer as PipeWriter}; -use embedded_io_async::Write; +use embedded_io_async::{ErrorType, Write}; +use sunset::Error as SunsetError; use sunset_async::SunsetRawMutex; #[cfg(debug_assertions)] @@ -47,16 +46,15 @@ impl SftpOutputPipe { /// output channel without mutable borrows. /// /// The [`SftpOutputConsumer`] needs to be running to write data to the - /// [`ChanOut`] + /// provided writer. /// /// ## Lifetimes - /// The lifetime indicates that the lifetime of self, ChanOut and the - /// consumer and producer are the same. I chose this because if the ChanOut - /// is closed, there is no point on having a pipe outliving it. - pub fn split<'a>( - &'a mut self, - ssh_chan_out: ChanOut<'a>, - ) -> SftpResult<(SftpOutputConsumer<'a, N>, SftpOutputProducer<'a, N>)> { + /// The lifetime `'a` ties the pipe reader (from `self`) to the consumer. + /// The writer `W` carries its own lifetime if needed (e.g. 
`ChanOut<'a>`). + pub fn split<'a, W>(&'a mut self, ssh_chan_out: W) -> SftpResult<(SftpOutputConsumer<'a, W, N>, SftpOutputProducer<'a, N>)> + where + W: Write + ErrorType, + { if self.split { return Err(SftpError::AlreadyInitialized); } @@ -78,22 +76,25 @@ impl SftpOutputPipe { } } -/// Consumer that takes ownership of [`ChanOut`]. It pipes the data received -/// from a [`PipeReader`] into the channel. +/// Consumer that pipes data received from a [`PipeReader`] into a writer +/// implementing [`Write`]. /// /// N is the length of the /// [PipeReader](https://docs.embassy.dev/embassy-sync/git/default/pipe/struct.Reader.html) /// buffer used to receive the data. -pub(crate) struct SftpOutputConsumer<'a, const N: usize> { +pub(crate) struct SftpOutputConsumer<'a, W, const N: usize> { pipe_reader: PipeReader<'a, SunsetRawMutex, N>, - /// The [sunset_async::ChanOut] where the channel data is written to - ssh_chan_out: ChanOut<'a>, + /// The writer where the channel data is written to + ssh_chan_out: W, /// Only used for debug purposes #[cfg(debug_assertions)] counter: usize, } -impl<'a, const N: usize> SftpOutputConsumer<'a, N> { +impl<'a, W, const N: usize> SftpOutputConsumer<'a, W, N> +where + W: Write + ErrorType, +{ /// Run it to start the piping pub async fn receive_task(&mut self) -> SftpResult<()> { debug!("Running SftpOutout Consumer Reader task"); @@ -233,3 +234,46 @@ impl<'a, const N: usize> SftpOutputProducer<'a, N> { } } } + +#[cfg(test)] +pub mod mock { + extern crate std; + use std::vec::Vec; + + use embedded_io_async::{ErrorType, Write}; + use sunset::Error as SunsetError; + + /// A mock writer that buffers all written bytes. + /// + /// Optionally injects a one-shot error on the next `write` call, + /// after which writes succeed again. 
+ pub struct MockWriter { + pub buffer: Vec, + error: Option, + } + + impl MockWriter { + pub fn new() -> Self { + Self { buffer: Vec::new(), error: None } + } + + /// Pre-load an error that will be returned on the next `write` call. + pub fn inject_error(&mut self, e: SunsetError) { + self.error = Some(e); + } + } + + impl ErrorType for MockWriter { + type Error = SunsetError; + } + + impl Write for MockWriter { + async fn write(&mut self, buf: &[u8]) -> Result { + if let Some(e) = self.error.take() { + return Err(e); + } + self.buffer.extend_from_slice(buf); + Ok(buf.len()) + } + } +} diff --git a/sftp/src/sftpserver/mod.rs b/sftp/src/sftpserver/mod.rs new file mode 100644 index 00000000..a48e519e --- /dev/null +++ b/sftp/src/sftpserver/mod.rs @@ -0,0 +1,18 @@ +mod sftpserver; + +mod readreplies; + +pub use sftpserver::SftpServer; + +pub use sftpserver::ReadStatus; + +pub use sftpserver::DirReply; +pub use sftpserver::ReadReply; + +pub use sftpserver::SftpOpResult; +pub use sftpserver::helpers; + +#[cfg(feature = "std")] +pub use sftpserver::DirEntriesCollection; +#[cfg(feature = "std")] +pub use sftpserver::get_file_attrs; diff --git a/sftp/src/sftpserver/readreplies.rs b/sftp/src/sftpserver/readreplies.rs new file mode 100644 index 00000000..566a92c4 --- /dev/null +++ b/sftp/src/sftpserver/readreplies.rs @@ -0,0 +1,204 @@ +use crate::{ + error::{SftpError, SftpResult}, + proto::{ENCODED_SSH_FXP_DATA_MIN_LENGTH, ReqId, SftpNum}, + protocol::StatusCode, + server::{ReadReply, SftpSink}, + sftphandler::SftpOutputProducer, +}; + +use sunset::sshwire::SSHEncode; + +use log::{debug, error}; + +pub struct ReadReplyHeader<'g, const N: usize> { + /// The request Id that will be use`d in the response + req_id: ReqId, + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, + /// Length of data to be sent as announced in [`ReadReply::send_header`] + data_len: u32, + /// Length of data sent so far using [`ReadReply::send_data`] + data_sent_len: u32, +} + +impl<'g, 
const N: usize> ReadReplyHeader<'g, N> { + pub(crate) fn new( + req_id: ReqId, + chan_out: &'g SftpOutputProducer<'g, N>, + ) -> Self { + Self { req_id, chan_out, data_len: 0, data_sent_len: 0 } + } + + pub async fn send_header( + &mut self, + data_len: u32, + ) -> SftpResult> { + debug!( + "ReadReply: Sending header for request id {:?}: data length = {:?}", + self.req_id, data_len + ); + let mut s = [0u8; N]; + let mut sink = SftpSink::new(&mut s); + + let payload = ReadReplyHeader::::encode_data_header( + &mut sink, + self.req_id, + data_len, + )?; + + debug!( + "Sending header: len = {:?}, content = {:?}", + payload.len(), + payload + ); + // Sending payload_slice since we are not making use of the sink sftpPacket length calculation + self.chan_out.send_data(payload).await?; + self.data_len = data_len; + Ok(ReadReplyData::new(self.req_id, self.chan_out)) + } + + pub async fn send_eof(&self) -> SftpResult> { + self.chan_out.send_status(self.req_id, StatusCode::SSH_FX_EOF, "").await?; + Ok(ReadReplyFinished::new(self.req_id, self.chan_out)) + } + + fn encode_data_header( + sink: &'g mut SftpSink<'g>, + req_id: ReqId, + data_len: u32, + ) -> Result<&'g [u8], SftpError> { + // length field + (data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH).enc(sink)?; + // packet type (1) + u8::from(SftpNum::SSH_FXP_DATA).enc(sink)?; + // request id (4) + req_id.enc(sink)?; + // data length (4) + data_len.enc(sink)?; + Ok(sink.payload_slice()) + } +} + +pub struct ReadReplyData<'g, const N: usize> { + /// The request Id that will be use`d in the response + req_id: ReqId, + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, + /// Length of data to be sent as announced in [`ReadReply::send_header`] + data_len: u32, + /// Length of data sent so far using [`ReadReply::send_data`] + data_sent_len: u32, +} + +impl<'g, const N: usize> ReadReplyData<'g, N> { + pub(crate) fn new( + req_id: ReqId, + chan_out: &'g SftpOutputProducer<'g, N>, + ) -> Self { + Self { req_id, chan_out, 
data_len: 0, data_sent_len: 0 } + } + + pub async fn send_data( + &mut self, + buff: &[u8], + ) -> SftpResult> { + if buff.len() as u32 > (self.data_len - self.data_sent_len) { + error!( + "Trying to send more data than announced in the header: \ + data_len = {:?}, data_sent_len = {:?}, buff_len = {:?}", + self.data_len, + self.data_sent_len, + buff.len() + ); + return Err(SftpError::FileServerError(StatusCode::SSH_FX_FAILURE)); + } + + self.chan_out.send_data(buff).await?; + self.data_sent_len += buff.len() as u32; + + if self.data_len == self.data_sent_len { + Ok(ReadReplyDataResult::Finished(ReadReplyFinished::new( + self.req_id, + self.chan_out, + ))) + } else { + Ok(ReadReplyDataResult::MoreData) + } + } +} + +pub enum ReadReplyDataResult<'g, const N: usize> { + MoreData, + Finished(ReadReplyFinished<'g, N>), +} +pub struct ReadReplyFinished<'g, const N: usize> { + /// The request Id that will be use`d in the response + req_id: ReqId, + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, +} + +impl<'g, const N: usize> ReadReplyFinished<'g, N> { + pub(crate) fn new( + req_id: ReqId, + chan_out: &'g SftpOutputProducer<'g, N>, + ) -> Self { + Self { req_id, chan_out: chan_out } + } +} + +#[cfg(test)] +mod enforcing_process_tests { + use crate::sftphandler::{MockWriter, SftpOutputPipe}; + + use super::*; + + #[cfg(test)] + extern crate std; + + #[test] + fn compose_header() { + const N: usize = 512; + + let req_id = ReqId(42); + let data_len = 128; + let mut buffer = [0u8; N]; + let mut sink = SftpSink::new(&mut buffer); + + let payload = + ReadReplyHeader::::encode_data_header(&mut sink, req_id, data_len) + .unwrap(); + + assert_eq!( + data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH, + u32::from_be_bytes(payload[..4].try_into().unwrap()) + ); + } + + #[test] + fn handling_process_eof() { + const N: usize = 512; + let mock_writer = MockWriter::new(); + let req_id = ReqId(42); + let mut buffer = [0u8; N]; + let mut output_pipe = SftpOutputPipe::::new(); + + 
let (consumer, producer) = + output_pipe.split(mock_writer).expect("Error splitting output_pipe"); + let mut header = ReadReplyHeader::new(req_id, &producer); + + // Run the consumer in the background to consume the data sent by the header and avoid deadlocks + let consumer_task = async { + consumer.receive_task().await.expect("Error running consumer task"); + }; + let producer_task = async { + header.send_eof().await.expect("Error sending EOF"); + }; + embassy_futures::block_on(async { + embassy_futures::select!( + _ = consumer_task => {}, + _ = producer_task => {}, + ); + }); + } +} diff --git a/sftp/src/sftpserver.rs b/sftp/src/sftpserver/sftpserver.rs similarity index 99% rename from sftp/src/sftpserver.rs rename to sftp/src/sftpserver/sftpserver.rs index c0912ff2..6c51add1 100644 --- a/sftp/src/sftpserver.rs +++ b/sftp/src/sftpserver/sftpserver.rs @@ -327,8 +327,6 @@ mod read_reply_tests { #[cfg(test)] extern crate std; - // #[cfg(test)] - // use std::println; #[test] fn compose_header() { From cff6b428184d913d8a881e2cb77f8fe6c05e7947 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Wed, 29 Apr 2026 14:21:19 +1000 Subject: [PATCH 41/47] Significant refactor of SFTP read operations This new structures enforce the correct sequence of operations for handling SFTP read requests, guiding the library user to provide the necessary information in simple types in the correct order. Other changes are related to the sftpoutputchannelhandler, where the OutChan has been replaced by a trait bound Write with an associated error type. This helps testing. 
--- demo/sftp/std/src/demosftpserver.rs | 165 ++++++----- sftp/src/lib.rs | 3 +- sftp/src/sftphandler/sftphandler.rs | 28 +- .../sftphandler/sftpoutputchannelhandler.rs | 38 ++- sftp/src/sftpserver/mod.rs | 3 +- sftp/src/sftpserver/readreplies.rs | 267 ++++++++++++------ sftp/src/sftpserver/sftpserver.rs | 131 +-------- 7 files changed, 318 insertions(+), 317 deletions(-) diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index b3da7d3f..60789066 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -7,7 +7,8 @@ use sunset_sftp::handles::{ use sunset_sftp::protocol::{Attrs, Filename, NameEntry, PFlags, StatusCode}; use sunset_sftp::server::helpers::DirEntriesCollection; use sunset_sftp::server::{ - DirReply, ReadReply, ReadStatus, SftpOpResult, SftpServer, + DirReply, ReadHeaderReply, ReadReplyFinished, ReadStatus, SftpOpResult, + SftpServer, }; #[allow(unused_imports)] @@ -25,7 +26,7 @@ use std::fs; use std::os::unix::fs::PermissionsExt; use std::{fs::File, os::unix::fs::FileExt, path::Path}; -// Used during read operations +/// Used during read operations const ARBITRARY_READ_BUFFER_LENGTH: usize = 1024; #[derive(Debug)] @@ -283,96 +284,106 @@ impl SftpServer opaque_file_handle: &OFH, offset: u64, len: u32, - reply: &mut ReadReply<'_, N>, - ) -> SftpResult<()> { - if let PrivatePathHandle::File(private_file_handle) = self + reply: ReadHeaderReply<'_, N>, + ) -> SftpResult { + let PrivatePathHandle::File(private_file_handle) = self .handles_manager .get_private_as_mut_ref(opaque_file_handle) .ok_or(StatusCode::SSH_FX_FAILURE)? 
- { - log::debug!( + else { + return Err(StatusCode::SSH_FX_PERMISSION_DENIED.into()); + }; + + log::debug!( "SftpServer Read operation: handle = {:?}, filepath = {:?}, offset = {:?}, len = {:?}", opaque_file_handle, private_file_handle.path, offset, len ); - let permissions_poxit = private_file_handle.permissions.unwrap_or(0o000); - if (permissions_poxit & 0o444) == 0 { - error!( - "No read permissions for file {:?}", - private_file_handle.path - ); - return Err(StatusCode::SSH_FX_PERMISSION_DENIED.into()); - }; - let file_len = private_file_handle - .file - .metadata() - .map_err(|err| { - error!("Could not read the file length: {:?}", err); - StatusCode::SSH_FX_FAILURE - })? - .len(); + let permissions_poxit = private_file_handle.permissions.unwrap_or(0o000); + if (permissions_poxit & 0o444) == 0 { + error!("No read permissions for file {:?}", private_file_handle.path); + return Err(StatusCode::SSH_FX_PERMISSION_DENIED.into()); + }; - if offset >= file_len { - info!( - "offset is larger than file length, sending EOF for {:?}", - private_file_handle.path - ); - reply.send_eof().await.map_err(|err| { - error!("Could not sent EOF: {:?}", err); - StatusCode::SSH_FX_FAILURE - })?; - return Ok(()); - } + let file_len = private_file_handle + .file + .metadata() + .map_err(|err| { + error!("Could not read the file length: {:?}", err); + StatusCode::SSH_FX_FAILURE + })? + .len(); - let read_len = if file_len >= len as u64 + offset { - len - } else { - debug!("Read operation: length + offset > file length. 
Clipping ( {:?} + {:?} > {:?})", - len, offset, file_len); - (file_len - offset).try_into().unwrap_or(u32::MAX) - }; + if offset >= file_len { + info!( + "offset is larger than file length, sending EOF for {:?}", + private_file_handle.path + ); + let finished = reply.send_eof().await.map_err(|err| { + error!("Could not sent EOF: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + return Ok(finished); + } - reply.send_header(read_len).await?; - - let mut read_buff = [0u8; ARBITRARY_READ_BUFFER_LENGTH]; - - let mut running_offset = offset; - let mut remaining = read_len as usize; - - debug!("Starting reading loop: remaining = {}", remaining); - while remaining > 0 { - let next_read_len: usize = remaining.min(read_buff.len()); - trace!("next_read_len = {}", next_read_len); - let br = private_file_handle - .file - .read_at(&mut read_buff[..next_read_len], running_offset) - .map_err(|err| { - error!("read error: {:?}", err); - StatusCode::SSH_FX_FAILURE - })?; - trace!("{} bytes readed", br); - reply.send_data(&read_buff[..br.min(remaining)]).await?; - trace!("Read sent {} bytes", br.min(remaining)); - trace!("remaining {} bytes. {} byte read", remaining, br); - - remaining = - remaining.checked_sub(br).ok_or(StatusCode::SSH_FX_FAILURE)?; - trace!( - "after subtracting {} bytes, there are {} bytes remaining", - br, - remaining + let read_len = match file_len { + // Greater or equal than len + offset + file_len if file_len >= len as u64 + offset => { + debug!( + "File length ({:?}) is greater than offset + len ({:?} + {:?}). Will read the announced length", + file_len, offset, len ); - running_offset = running_offset - .checked_add(br as u64) - .ok_or(StatusCode::SSH_FX_FAILURE)?; + len } - debug!("Finished sending data"); - return Ok(()); - } - Err(StatusCode::SSH_FX_PERMISSION_DENIED.into()) + _ => { + debug!( + "File length ({:?}) is smaller than offset + len ({:?} + {:?}). 
Will read until the end of the file", + file_len, offset, len + ); + (file_len - offset).try_into().unwrap_or(u32::MAX) + } + }; + + let data_reply = reply.send_header(read_len).await?; + + let mut read_buff = [0u8; ARBITRARY_READ_BUFFER_LENGTH]; + + let mut accumulated_offset = offset; + + let finished = data_reply + .send_data(|limited_sender| async move { + loop { + match limited_sender.completed() { + Some(completed_token) => return Ok(completed_token), + None => { + let br = private_file_handle + .file + .read_at(&mut read_buff, accumulated_offset) + .map_err(|err| { + error!("read error: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + if br == 0 { + error!( + "Unexpected EOF while reading the file {:?}", + private_file_handle.path + ); + return Err(StatusCode::SSH_FX_FAILURE)?; + } + let _sw = + limited_sender.send_data(&read_buff[..br]).await?; + accumulated_offset = accumulated_offset + .checked_add(br as u64) + .ok_or(StatusCode::SSH_FX_FAILURE)?; + } + } + } + }) + .await?; + return Ok(finished); } async fn write( diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs index 4d61e324..a9185912 100644 --- a/sftp/src/lib.rs +++ b/sftp/src/lib.rs @@ -78,7 +78,8 @@ pub use sftpsource::SftpSource; pub mod server { pub use crate::sftpserver::DirReply; - pub use crate::sftpserver::ReadReply; + pub use crate::sftpserver::{ReadHeaderReply, ReadReplyFinished}; + pub use crate::sftpserver::ReadStatus; pub use crate::sftpserver::SftpOpResult; pub use crate::sftpserver::SftpServer; diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs index 97f7c55a..027fc230 100644 --- a/sftp/src/sftphandler/sftphandler.rs +++ b/sftp/src/sftphandler/sftphandler.rs @@ -4,13 +4,13 @@ use crate::proto::{ self, InitVersionClient, InitVersionLowest, LStat, MAX_REQUEST_LEN, ReqId, SFTP_VERSION, SftpNum, SftpPacket, Stat, StatusCode, }; -use crate::server::{DirReply, ReadReply}; +use crate::server::DirReply; use crate::sftperror::SftpResult; use 
crate::sftphandler::requestholder::{RequestHolder, RequestHolderError}; use crate::sftphandler::sftpoutputchannelhandler::{ SftpOutputPipe, SftpOutputProducer, }; -use crate::sftpserver::SftpServer; +use crate::sftpserver::{ReadHeaderReply, SftpServer}; use crate::sftpsource::SftpSource; use embassy_futures::select::select; @@ -428,15 +428,15 @@ where SftpPacket::Read(req_id, ref read) => { debug!("Read request: {:?}", request); - let mut reply = - ReadReply::new(req_id, output_producer); + let reply = + ReadHeaderReply::new(req_id, output_producer); if let Err(error) = self .file_server .read( &T::try_from(&read.handle)?, read.offset, read.len, - &mut reply, + reply, ) .await { @@ -461,24 +461,6 @@ where } }; - match reply.read_diff() { - diff if diff > 0 => { - debug!( - "ReadReply not completed after read operation. Still need to send {} bytes", - diff - ); - return Err(SunsetError::Bug.into()); - } - diff if diff < 0 => { - error!( - "ReadReply has sent more data than announced: {} bytes extra", - -diff - ); - return Err(SunsetError::Bug.into()); - } - _ => {} - } - self.state = HandlerState::Idle; } SftpPacket::LStat(req_id, LStat { file_path: path }) => { diff --git a/sftp/src/sftphandler/sftpoutputchannelhandler.rs b/sftp/src/sftphandler/sftpoutputchannelhandler.rs index a4803965..72f5552d 100644 --- a/sftp/src/sftphandler/sftpoutputchannelhandler.rs +++ b/sftp/src/sftphandler/sftpoutputchannelhandler.rs @@ -51,7 +51,10 @@ impl SftpOutputPipe { /// ## Lifetimes /// The lifetime `'a` ties the pipe reader (from `self`) to the consumer. /// The writer `W` carries its own lifetime if needed (e.g. `ChanOut<'a>`). 
- pub fn split<'a, W>(&'a mut self, ssh_chan_out: W) -> SftpResult<(SftpOutputConsumer<'a, W, N>, SftpOutputProducer<'a, N>)> + pub fn split<'a, W>( + &'a mut self, + ssh_chan_out: W, + ) -> SftpResult<(SftpOutputConsumer<'a, W, N>, SftpOutputProducer<'a, N>)> where W: Write + ErrorType, { @@ -138,6 +141,33 @@ where debug!("Output Consumer: Finished writing all bytes in read buffer"); } } + + /// Consumes the consumer and returns the underlying writer. + /// Useful in tests to inspect what was written. + #[cfg(test)] + pub fn into_inner(self) -> W { + self.ssh_chan_out + } + + /// Reads one batch of bytes from the pipe and writes them to the inner + /// writer. Unlike [`receive_task`], this does not loop — it returns after + /// a single read, so it won't block waiting for more data. + /// + /// Intended for tests only. + #[cfg(test)] + pub async fn receive_once(&mut self) -> SftpResult<()> { + let mut buf = [0u8; N]; + let rl = self.pipe_reader.read(&mut buf).await; + if rl == 0 { + return Ok(()); + } + let mut scanning_buffer = &buf[..rl]; + while scanning_buffer.len() > 0 { + let wl = self.ssh_chan_out.write(scanning_buffer).await?; + scanning_buffer = &scanning_buffer[wl..]; + } + Ok(()) + } } /// Producer used to send data to a [`ChanOut`] without the restrictions @@ -146,7 +176,6 @@ where /// Under the hood it uses an /// [embassy_sync Pipe](https://docs.embassy.dev/embassy-sync/git/default/pipe/struct.Pipe.html) /// where N is the pipe buffer length in bytes -#[derive(Clone)] pub struct SftpOutputProducer<'a, const N: usize> { writer: PipeWriter<'a, SunsetRawMutex, N>, #[cfg(debug_assertions)] @@ -256,11 +285,6 @@ pub mod mock { pub fn new() -> Self { Self { buffer: Vec::new(), error: None } } - - /// Pre-load an error that will be returned on the next `write` call. 
-    pub fn inject_error(&mut self, e: SunsetError) {
-        self.error = Some(e);
-    }
 }
 
 impl ErrorType for MockWriter {
diff --git a/sftp/src/sftpserver/mod.rs b/sftp/src/sftpserver/mod.rs
index a48e519e..ff0a6add 100644
--- a/sftp/src/sftpserver/mod.rs
+++ b/sftp/src/sftpserver/mod.rs
@@ -7,7 +7,6 @@ pub use sftpserver::SftpServer;
 pub use sftpserver::ReadStatus;
 
 pub use sftpserver::DirReply;
-pub use sftpserver::ReadReply;
 pub use sftpserver::SftpOpResult;
 
 pub use sftpserver::helpers;
@@ -16,3 +15,5 @@ pub use sftpserver::helpers;
 pub use sftpserver::DirEntriesCollection;
 #[cfg(feature = "std")]
 pub use sftpserver::get_file_attrs;
+
+pub use readreplies::{ReadHeaderReply, ReadReplyFinished};
diff --git a/sftp/src/sftpserver/readreplies.rs b/sftp/src/sftpserver/readreplies.rs
index 566a92c4..856d065f 100644
--- a/sftp/src/sftpserver/readreplies.rs
+++ b/sftp/src/sftpserver/readreplies.rs
@@ -2,37 +2,44 @@ use crate::{
     error::{SftpError, SftpResult},
     proto::{ENCODED_SSH_FXP_DATA_MIN_LENGTH, ReqId, SftpNum},
     protocol::StatusCode,
-    server::{ReadReply, SftpSink},
+    server::SftpSink,
     sftphandler::SftpOutputProducer,
 };
 
 use sunset::sshwire::SSHEncode;
 
-use log::{debug, error};
+use log::debug;
 
-pub struct ReadReplyHeader<'g, const N: usize> {
-    /// The request Id that will be use`d in the response
+/// Used to provide the header of a read reply, including
+/// only the fundamental information such as being an EOF reply or a data reply
+///
+/// The corresponding method call will return either a [`ReadDataReply`] or a [`ReadReplyFinished`],
+/// which makes it easy to implement correct behavior.
+pub struct ReadHeaderReply<'g, const N: usize> { + /// The request Id that will be used in the response req_id: ReqId, /// Immutable writer chan_out: &'g SftpOutputProducer<'g, N>, - /// Length of data to be sent as announced in [`ReadReply::send_header`] - data_len: u32, - /// Length of data sent so far using [`ReadReply::send_data`] - data_sent_len: u32, } -impl<'g, const N: usize> ReadReplyHeader<'g, N> { +impl<'g, const N: usize> ReadHeaderReply<'g, N> { + /// Creates a new ReadHeaderReply with the given request ID and output channel. + /// + /// It is meant to be called in [`SftpHandler`] and used to call a method of the [`SftpServer`] that requires a read reply header, such as [`SftpServer::read`] pub(crate) fn new( req_id: ReqId, chan_out: &'g SftpOutputProducer<'g, N>, ) -> Self { - Self { req_id, chan_out, data_len: 0, data_sent_len: 0 } + Self { req_id, chan_out } } + /// Sends the header for a read reply with the given data length. + /// + /// Once used, the only way to obtain a [`ReadReplyFinished`] is by using its returned value. pub async fn send_header( - &mut self, + self, data_len: u32, - ) -> SftpResult> { + ) -> SftpResult> { debug!( "ReadReply: Sending header for request id {:?}: data length = {:?}", self.req_id, data_len @@ -40,7 +47,7 @@ impl<'g, const N: usize> ReadReplyHeader<'g, N> { let mut s = [0u8; N]; let mut sink = SftpSink::new(&mut s); - let payload = ReadReplyHeader::::encode_data_header( + let payload = ReadHeaderReply::::encode_data_header( &mut sink, self.req_id, data_len, @@ -53,13 +60,16 @@ impl<'g, const N: usize> ReadReplyHeader<'g, N> { ); // Sending payload_slice since we are not making use of the sink sftpPacket length calculation self.chan_out.send_data(payload).await?; - self.data_len = data_len; - Ok(ReadReplyData::new(self.req_id, self.chan_out)) + + Ok(ReadDataReply::new(self.req_id, data_len, self.chan_out)) } - pub async fn send_eof(&self) -> SftpResult> { + /// Sends an EOF status response for the read request. 
+ /// + /// It will return a [`ReadReplyFinished`] that can be used to represent the state of the successful read reply. + pub async fn send_eof(&self) -> SftpResult { self.chan_out.send_status(self.req_id, StatusCode::SSH_FX_EOF, "").await?; - Ok(ReadReplyFinished::new(self.req_id, self.chan_out)) + Ok(ReadReplyFinished::new(self.req_id)) } fn encode_data_header( @@ -79,71 +89,99 @@ impl<'g, const N: usize> ReadReplyHeader<'g, N> { } } -pub struct ReadReplyData<'g, const N: usize> { - /// The request Id that will be use`d in the response - req_id: ReqId, - /// Immutable writer +/// Helper struct to manage the sending of data in a read reply, ensuring that +/// no more than the announced data length is sent. +/// +/// It is used as an argument in the closure passed to [`ReadDataReply::send_data`] +/// and it is meant to be used by the user to send the data of a read reply in chunks, +/// without having to worry about sending more data than the announced length. +pub struct LimitedSender<'g, const N: usize> { chan_out: &'g SftpOutputProducer<'g, N>, - /// Length of data to be sent as announced in [`ReadReply::send_header`] - data_len: u32, - /// Length of data sent so far using [`ReadReply::send_data`] - data_sent_len: u32, + remaining: core::cell::Cell, } -impl<'g, const N: usize> ReadReplyData<'g, N> { - pub(crate) fn new( - req_id: ReqId, - chan_out: &'g SftpOutputProducer<'g, N>, - ) -> Self { - Self { req_id, chan_out, data_len: 0, data_sent_len: 0 } +impl<'g, const N: usize> LimitedSender<'g, N> { + fn new(chan_out: &'g SftpOutputProducer<'g, N>, limit: u32) -> Self { + Self { chan_out, remaining: core::cell::Cell::new(limit) } } + /// Sends a chunk of data, ensuring that no more than the announced data length is sent. + /// + /// It returns the remaining data length that can be sent after this call. 
+ pub async fn send_data(&self, buff: &[u8]) -> SftpResult { + let mut remaining = self.remaining.get(); - pub async fn send_data( - &mut self, - buff: &[u8], - ) -> SftpResult> { - if buff.len() as u32 > (self.data_len - self.data_sent_len) { - error!( - "Trying to send more data than announced in the header: \ - data_len = {:?}, data_sent_len = {:?}, buff_len = {:?}", - self.data_len, - self.data_sent_len, - buff.len() - ); - return Err(SftpError::FileServerError(StatusCode::SSH_FX_FAILURE)); - } - - self.chan_out.send_data(buff).await?; - self.data_sent_len += buff.len() as u32; - - if self.data_len == self.data_sent_len { - Ok(ReadReplyDataResult::Finished(ReadReplyFinished::new( - self.req_id, - self.chan_out, - ))) - } else { - Ok(ReadReplyDataResult::MoreData) - } + let length_to_send = remaining.min(buff.len() as u32); + self.chan_out.send_data(&buff[..length_to_send as usize]).await?; + remaining -= length_to_send; + self.remaining.set(remaining); + Ok(remaining) + } + + fn is_complete(&self) -> bool { + self.remaining.get() == 0 } -} -pub enum ReadReplyDataResult<'g, const N: usize> { - MoreData, - Finished(ReadReplyFinished<'g, N>), + /// Obtains a [`CompletedDataSent`] if the announced data length has been completely sent, otherwise returns None. 
+ pub fn completed(&self) -> Option { + if self.is_complete() { Some(CompletedDataSent) } else { None } + } } -pub struct ReadReplyFinished<'g, const N: usize> { - /// The request Id that will be use`d in the response + +/// A marker struct to represent the completion of the data sending in a read reply +pub struct CompletedDataSent; + +/// This struct is used to represent the state of a read reply after the header has been sent +/// but before the data has been completely sent or an EOF has been sent +pub struct ReadDataReply<'g, const N: usize> { + /// The request Id that will be used in the response req_id: ReqId, /// Immutable writer chan_out: &'g SftpOutputProducer<'g, N>, + /// Length of data to be sent as announced in [`ReadHeaderReply::send_header`] + data_len: u32, } -impl<'g, const N: usize> ReadReplyFinished<'g, N> { +impl<'g, const N: usize> ReadDataReply<'g, N> { pub(crate) fn new( req_id: ReqId, + data_len: u32, chan_out: &'g SftpOutputProducer<'g, N>, ) -> Self { - Self { req_id, chan_out: chan_out } + Self { req_id, chan_out, data_len } + } + + /// Sends a chunk of data, ensuring that no more than the announced data length is sent. + /// It provides a closure-based API where the user can send multiple chunks of data until the announced data length is reached. + /// + /// It can only be called once, since it consumes self, and it returns a [`ReadReplyFinished`] + /// that can be used to represent the state of the successful read reply. 
+ pub async fn send_data(self, f: F) -> SftpResult + where + F: FnOnce(LimitedSender<'g, N>) -> Fut, + Fut: core::future::Future>, + { + let sender = LimitedSender::new(self.chan_out, self.data_len); + f(sender).await?; + + // sender is consumed by f, but we need to check remaining + // Instead pass by ref: + // see below + Ok(ReadReplyFinished::new(self.req_id)) + } +} + +/// This struct is used to represent the state of a read reply after +/// the header and the data have been sent +/// +/// It is used as a mandatory return value for a successful [`SftpServer::read`] +/// operation +pub struct ReadReplyFinished { + _req_id: ReqId, +} + +impl ReadReplyFinished { + pub(crate) fn new(_req_id: ReqId) -> Self { + Self { _req_id } } } @@ -153,7 +191,6 @@ mod enforcing_process_tests { use super::*; - #[cfg(test)] extern crate std; #[test] @@ -166,7 +203,7 @@ mod enforcing_process_tests { let mut sink = SftpSink::new(&mut buffer); let payload = - ReadReplyHeader::::encode_data_header(&mut sink, req_id, data_len) + ReadHeaderReply::::encode_data_header(&mut sink, req_id, data_len) .unwrap(); assert_eq!( @@ -178,27 +215,89 @@ mod enforcing_process_tests { #[test] fn handling_process_eof() { const N: usize = 512; - let mock_writer = MockWriter::new(); + + let req_id = ReqId(42); + let mut output_pipe = SftpOutputPipe::::new(); + let mock = MockWriter::new(); + let (mut consumer, producer) = + output_pipe.split(mock).expect("split should succeed"); + + embassy_futures::block_on(async { + { + let header_reply = ReadHeaderReply::::new(req_id, &producer); + let _finished = header_reply + .send_eof() + .await + .expect("send_eof should succeed returning ReadReplyFinished"); + } + drop(producer); + // Read exactly the one packet written by send_eof; does not loop. 
+ consumer.receive_once().await.unwrap(); + }); + + // SSH_FXP_STATUS (101) packet for SSH_FX_EOF (1) with req_id 42: + // [len:4][type:1=101][req_id:4][code:4][msg_len:4][msg][lang_len:4][lang] + let mock = consumer.into_inner(); + let buf = &mock.buffer; + // packet type byte should be 101 (SSH_FXP_STATUS) + assert_eq!(buf[4], 101, "expected SSH_FXP_STATUS packet type"); + // status code should be 1 (SSH_FX_EOF) + let code = u32::from_be_bytes(buf[9..13].try_into().unwrap()); + assert_eq!(code, 1, "expected SSH_FX_EOF status code"); + } + #[test] + fn handling_process_data() { + const N: usize = 512; + let req_id = ReqId(42); - let mut buffer = [0u8; N]; let mut output_pipe = SftpOutputPipe::::new(); + let mock = MockWriter::new(); + let (mut consumer, producer) = + output_pipe.split(mock).expect("split should succeed"); - let (consumer, producer) = - output_pipe.split(mock_writer).expect("Error splitting output_pipe"); - let mut header = ReadReplyHeader::new(req_id, &producer); - - // Run the consumer in the background to consume the data sent by the header and avoid deadlocks - let consumer_task = async { - consumer.receive_task().await.expect("Error running consumer task"); - }; - let producer_task = async { - header.send_eof().await.expect("Error sending EOF"); - }; embassy_futures::block_on(async { - embassy_futures::select!( - _ = consumer_task => {}, - _ = producer_task => {}, - ); + { + let header_reply = ReadHeaderReply::::new(req_id, &producer); + + let data_reply = header_reply + .send_header(10) + .await + .expect("send_eof should succeed returning ReadReplyData"); + + let _read_reply_finished = data_reply + .send_data(|limited_sender| { + // Simulate sending data in two chunks, first 5 bytes then the remaining 5 bytes + async move { + while !limited_sender.is_complete() { + limited_sender.send_data(&[0u8; 3]).await?; + } + Ok(CompletedDataSent) + } + }) + .await + .expect("send_data should succeed returning ReadReplyFinished"); + } + drop(producer); 
+ // Read exactly the one packet written by send_eof; does not loop. + consumer.receive_once().await.expect("receive_once should succeed"); }); + + let mock = consumer.into_inner(); + let buf = &mock.buffer; + // packet type byte should be 103 (SSH_FXP_DATA) + assert_eq!(buf[4], 103, "expected SSH_FXP_DATA packet type"); + + // data length should be 10 + let data_len = u32::from_be_bytes( + buf[9..13] + .try_into() + .expect("data length should be present in the packet"), + ); + assert_eq!(data_len, 10, "expected data length of 10 bytes"); + assert_eq!( + buf.len(), + 13 + 10, + "expected packet length to be header (13 bytes) + data (10 bytes)" + ); } } diff --git a/sftp/src/sftpserver/sftpserver.rs b/sftp/src/sftpserver/sftpserver.rs index 6c51add1..70c31a90 100644 --- a/sftp/src/sftpserver/sftpserver.rs +++ b/sftp/src/sftpserver/sftpserver.rs @@ -1,10 +1,11 @@ use crate::error::{SftpError, SftpResult}; use crate::proto::{ - ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, ENCODED_SSH_FXP_DATA_MIN_LENGTH, - MAX_NAME_ENTRY_SIZE, NameEntry, PFlags, SftpNum, + ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, MAX_NAME_ENTRY_SIZE, NameEntry, PFlags, + SftpNum, }; use crate::server::SftpSink; use crate::sftphandler::SftpOutputProducer; +use crate::sftpserver::{ReadHeaderReply, ReadReplyFinished}; use crate::{ handles::OpaqueFileHandle, proto::{Attrs, ReqId, StatusCode}, @@ -101,8 +102,8 @@ where opaque_file_handle: &T, offset: u64, len: u32, - reply: &mut ReadReply<'_, N>, - ) -> impl core::future::Future> { + reply: ReadHeaderReply<'_, N>, + ) -> impl core::future::Future> { async move { log::error!( "SftpServer Read operation not defined: handle = {:?}, offset = {:?}, len = {:?}", @@ -228,124 +229,6 @@ where /// /// Parameter N is the length for the SftpOutputProducer buffer /// -pub struct ReadReply<'g, const N: usize> { - /// The request Id that will be use`d in the response - req_id: ReqId, - - /// Immutable writer - chan_out: &'g SftpOutputProducer<'g, N>, - /// Length of data to be 
sent as announced in [`ReadReply::send_header`] - data_len: u32, - /// Length of data sent so far using [`ReadReply::send_data`] - data_sent_len: u32, -} - -impl<'g, const N: usize> ReadReply<'g, N> { - /// New instances can only be created within the crate. Users can only - /// use other public methods to use it. - pub(crate) fn new( - req_id: ReqId, - chan_out: &'g SftpOutputProducer<'g, N>, - ) -> Self { - ReadReply { req_id, chan_out, data_len: 0, data_sent_len: 0 } - } - - // TODO Make this enforceable - // TODO Automate encoding the SftpPacket - /// Sends a header for `SSH_FXP_DATA` response. This includes the total - /// response length, the packet type, request id and data length - /// - /// The packet data content, excluding the length must be sent using - /// [`ReadReply::send_data`] - pub async fn send_header(&mut self, data_len: u32) -> SftpResult<()> { - debug!( - "ReadReply: Sending header for request id {:?}: data length = {:?}", - self.req_id, data_len - ); - let mut s = [0u8; N]; - let mut sink = SftpSink::new(&mut s); - - let payload = - ReadReply::::encode_data_header(&mut sink, self.req_id, data_len)?; - - debug!( - "Sending header: len = {:?}, content = {:?}", - payload.len(), - payload - ); - // Sending payload_slice since we are not making use of the sink sftpPacket length calculation - self.chan_out.send_data(payload).await?; - self.data_len = data_len; - Ok(()) - } - - /// Sends a buffer with data. 
Call it as many times as needed to send - /// the announced data length - /// - /// **Important**: Call this after you have called `send_header` - pub async fn send_data(&mut self, buff: &[u8]) -> SftpResult<()> { - self.chan_out.send_data(buff).await?; - self.data_sent_len += buff.len() as u32; - Ok(()) - } - - /// Sends EOF meaning that there is no more data to be sent - /// - pub async fn send_eof(&self) -> SftpResult<()> { - self.chan_out.send_status(self.req_id, StatusCode::SSH_FX_EOF, "").await - } - - /// Indicates whether all the data announced in the header has been sent - /// - /// returns 0 when all data has been sent - /// returns >0 when there is still data to be sent - /// returns <0 when too much data has been sent - pub fn read_diff(&self) -> i32 { - (self.data_len as i32) - (self.data_sent_len as i32) - } - - fn encode_data_header( - sink: &'g mut SftpSink<'g>, - req_id: ReqId, - data_len: u32, - ) -> Result<&'g [u8], SftpError> { - // length field - (data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH).enc(sink)?; - // packet type (1) - u8::from(SftpNum::SSH_FXP_DATA).enc(sink)?; - // request id (4) - req_id.enc(sink)?; - // data length (4) - data_len.enc(sink)?; - Ok(sink.payload_slice()) - } -} - -#[cfg(test)] -mod read_reply_tests { - use super::*; - - #[cfg(test)] - extern crate std; - - #[test] - fn compose_header() { - const N: usize = 512; - - let req_id = ReqId(42); - let data_len = 128; - let mut buffer = [0u8; N]; - let mut sink = SftpSink::new(&mut buffer); - - let payload = - ReadReply::::encode_data_header(&mut sink, req_id, data_len).unwrap(); - - assert_eq!( - data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH, - u32::from_be_bytes(payload[..4].try_into().unwrap()) - ); - } -} /// Uses for [`DirReply`] to: /// @@ -367,9 +250,9 @@ pub struct DirReply<'g, const N: usize> { req_id: ReqId, /// Immutable writer chan_out: &'g SftpOutputProducer<'g, N>, - /// Length of data to be sent as announced in [`ReadReply::send_header`] + /// Length of data to be 
sent as announced in [`DirReply::send_header`] data_len: u32, - /// Length of data sent so far using [`ReadReply::send_data`] + /// Length of data sent so far using [`DirReply::send_data`] data_sent_len: u32, } From ce07e39ae749891433993a08abdbdc1244dbe268 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 30 Apr 2026 07:18:37 +1000 Subject: [PATCH 42/47] Removing dead docs & Minimal rearrangement of methods --- sftp/src/sftpserver/readreplies.rs | 11 ++++------- sftp/src/sftpserver/sftpserver.rs | 18 ------------------ 2 files changed, 4 insertions(+), 25 deletions(-) diff --git a/sftp/src/sftpserver/readreplies.rs b/sftp/src/sftpserver/readreplies.rs index 856d065f..09aa187c 100644 --- a/sftp/src/sftpserver/readreplies.rs +++ b/sftp/src/sftpserver/readreplies.rs @@ -117,14 +117,14 @@ impl<'g, const N: usize> LimitedSender<'g, N> { Ok(remaining) } - fn is_complete(&self) -> bool { - self.remaining.get() == 0 - } - /// Obtains a [`CompletedDataSent`] if the announced data length has been completely sent, otherwise returns None. pub fn completed(&self) -> Option { if self.is_complete() { Some(CompletedDataSent) } else { None } } + + fn is_complete(&self) -> bool { + self.remaining.get() == 0 + } } /// A marker struct to represent the completion of the data sending in a read reply @@ -163,9 +163,6 @@ impl<'g, const N: usize> ReadDataReply<'g, N> { let sender = LimitedSender::new(self.chan_out, self.data_len); f(sender).await?; - // sender is consumed by f, but we need to check remaining - // Instead pass by ref: - // see below Ok(ReadReplyFinished::new(self.req_id)) } } diff --git a/sftp/src/sftpserver/sftpserver.rs b/sftp/src/sftpserver/sftpserver.rs index 70c31a90..9c225c34 100644 --- a/sftp/src/sftpserver/sftpserver.rs +++ b/sftp/src/sftpserver/sftpserver.rs @@ -212,24 +212,6 @@ where } } -/// A reference structure passed to the [`SftpServer::read()`] method to -/// allow replying with the read data. 
-/// Uses for [`ReadReply`] to: -/// -/// - In case of no more data avaliable to be sent, call `reply.send_eof()` -/// - There is data to be sent from an open file: -/// 1. Call `reply.send_header()` with the length of data to be sent -/// 2. Call `reply.send_data()` as many times as needed to complete a -/// sent of data of the announced length -/// 3. Do not call `reply.send_eof()` during this [`read`] method call -/// -/// It handles immutable sending data via the underlying sftp-channel -/// [`sunset_async::async_channel::ChanOut`] used in the context of an -/// SFTP Session. -/// -/// Parameter N is the length for the SftpOutputProducer buffer -/// - /// Uses for [`DirReply`] to: /// /// - In case of no more items in the directory to be sent, call `reply.send_eof()` From 28cf46cef773897f109f47e93b31926f8b3926a0 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 30 Apr 2026 07:26:02 +1000 Subject: [PATCH 43/47] Adding readdirreplies.rs to guide response composition for readdir requests - Base structure for readdir replies, similar to readreplies.rs - Added a test to demonstrate how the structure can be used to compose a readdir response - Refactoring helpers - Cleaning comments --- sftp/src/sftpserver/mod.rs | 1 + sftp/src/sftpserver/readdirreplies.rs | 509 ++++++++++++++++++++++++++ sftp/src/sftpserver/readreplies.rs | 13 +- 3 files changed, 516 insertions(+), 7 deletions(-) create mode 100644 sftp/src/sftpserver/readdirreplies.rs diff --git a/sftp/src/sftpserver/mod.rs b/sftp/src/sftpserver/mod.rs index ff0a6add..96f1ddfc 100644 --- a/sftp/src/sftpserver/mod.rs +++ b/sftp/src/sftpserver/mod.rs @@ -1,5 +1,6 @@ mod sftpserver; +mod readdirreplies; mod readreplies; pub use sftpserver::SftpServer; diff --git a/sftp/src/sftpserver/readdirreplies.rs b/sftp/src/sftpserver/readdirreplies.rs new file mode 100644 index 00000000..fe517f46 --- /dev/null +++ b/sftp/src/sftpserver/readdirreplies.rs @@ -0,0 +1,509 @@ +use crate::{ + error::{SftpError, SftpResult}, + 
proto::{ + ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, ENCODED_SSH_FXP_DATA_MIN_LENGTH, + MAX_NAME_ENTRY_SIZE, NameEntry, ReqId, SftpNum, + }, + protocol::StatusCode, + server::SftpSink, + sftphandler::SftpOutputProducer, +}; + +use sunset::sshwire::SSHEncode; + +use log::{debug, error}; + +pub struct DirReadHeaderReply<'g, const N: usize> { + /// The request Id that will be use`d in the response + req_id: ReqId, + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, +} + +impl<'g, const N: usize> DirReadHeaderReply<'g, N> { + /// Creates a new DirReadHeaderReply with the given request ID and output channel. + /// + /// It is meant to be called in [`SftpHandler`] and used to call a method of the [`SftpServer`] that requires a read reply header, such as [`SftpServer::readdir`] + pub(crate) fn new( + req_id: ReqId, + chan_out: &'g SftpOutputProducer<'g, N>, + ) -> Self { + Self { req_id, chan_out } + } + + /// Sends the header for a read reply with the given data length. + /// + /// Once used, the only way to obtain a [`DirReadReplyFinished`] is by using its returned value. + pub async fn send_header( + self, + data_len: u32, + ) -> SftpResult> { + debug!( + "DirReadReply: Sending header for request id {:?}: data length = {:?}", + self.req_id, data_len + ); + let mut s = [0u8; N]; + let mut sink = SftpSink::new(&mut s); + + let payload = DirReadHeaderReply::::encode_data_header( + &mut sink, + self.req_id, + data_len, + )?; + + debug!( + "Sending header: len = {:?}, content = {:?}", + payload.len(), + payload + ); + // Sending payload_slice since we are not making use of the sink sftpPacket length calculation + self.chan_out.send_data(payload).await?; + + Ok(DirReadDataReply::new(self.req_id, data_len, self.chan_out)) + } + + /// Sends an EOF status response for the read request. + /// + /// It will return a [`DirReadReplyFinished`] that can be used to represent the state of the successful read reply. 
+ pub async fn send_eof(&self) -> SftpResult { + self.chan_out.send_status(self.req_id, StatusCode::SSH_FX_EOF, "").await?; + Ok(DirReadReplyFinished::new(self.req_id)) + } + + fn encode_data_header( + sink: &'g mut SftpSink<'g>, + req_id: ReqId, + data_len: u32, + ) -> Result<&'g [u8], SftpError> { + // length field + (data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH).enc(sink)?; + // packet type (1) + u8::from(SftpNum::SSH_FXP_DATA).enc(sink)?; + // request id (4) + req_id.enc(sink)?; + // data length (4) + data_len.enc(sink)?; + Ok(sink.payload_slice()) + } +} + +pub struct DirReadReplyFinished { + /// The request Id that will be use`d in the response + _req_id: ReqId, +} + +impl DirReadReplyFinished { + pub(crate) fn new(req_id: ReqId) -> Self { + Self { _req_id: req_id } + } +} + +pub struct LimitedDirSender<'g, const N: usize> { + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, + /// remaining data length to be sent as announced in [`DirReply::send_header`] + remaining: core::cell::Cell, +} + +impl<'g, const N: usize> LimitedDirSender<'g, N> { + fn new(chan_out: &'g SftpOutputProducer<'g, N>, limit: u32) -> Self { + Self { chan_out, remaining: core::cell::Cell::new(limit) } + } + + /// Sends a directory item to the client as a [`NameEntry`] + /// + /// Call this + pub async fn send_item( + &mut self, + name_entry: &NameEntry<'_>, + ) -> SftpResult { + let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; + let mut sftp_sink = SftpSink::new(&mut buffer); + name_entry.enc(&mut sftp_sink).map_err(|err| { + error!("WireError: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + + self.send_data(sftp_sink.payload_slice()).await + } + /// Obtains a [`CompleteDirDataSent`] if the announced data length has been completely sent, otherwise returns None. 
+ pub fn completed(&self) -> Option { + if self.is_complete() { Some(CompleteDirDataSent) } else { None } + } + + async fn send_data(&self, buff: &[u8]) -> SftpResult { + let mut remaining = self.remaining.get(); + + let length_to_send = remaining.min(buff.len() as u32); + self.chan_out.send_data(&buff[..length_to_send as usize]).await?; + remaining -= length_to_send; + self.remaining.set(remaining); + Ok(remaining) + } + + fn is_complete(&self) -> bool { + self.remaining.get() == 0 + } +} + +pub struct CompleteDirDataSent; + +pub struct DirReadDataReply<'g, const N: usize> { + /// The request Id that will be use`d in the response + req_id: ReqId, + /// Length of data to be sent as announced in [`DirReadHeaderReply::send_header`] + data_len: u32, + /// Immutable writer + chan_out: &'g SftpOutputProducer<'g, N>, +} + +impl<'g, const N: usize> DirReadDataReply<'g, N> { + pub(crate) fn new( + req_id: ReqId, + data_len: u32, + chan_out: &'g SftpOutputProducer<'g, N>, + ) -> Self { + Self { req_id, chan_out, data_len } + } + + /// It provides a closure-based API where the user can send multiple [`NameEntry`]s of data until the announced data length is reached. + /// + /// It can only be called once, since it consumes self, and it returns a [`DirReadReplyFinished`] + /// that can be used to represent the state of the successful read reply. 
+ pub async fn send_data(self, f: F) -> SftpResult + where + F: FnOnce(LimitedDirSender<'g, N>) -> Fut, + Fut: core::future::Future>, + { + let dir_sender = LimitedDirSender::new(self.chan_out, self.data_len); + f(dir_sender).await?; + + Ok(DirReadReplyFinished::new(self.req_id)) + } +} + +#[cfg(test)] +mod enforcing_process_tests { + + use super::*; + + use crate::{ + proto::{Attrs, Filename, NameEntry}, + server::helpers, + sftphandler::{MockWriter, SftpOutputPipe}, + }; + + extern crate alloc; + extern crate std; + use alloc::vec; + use std::vec::Vec; + + #[test] + fn compose_header() { + const N: usize = 512; + + let req_id = ReqId(42); + let data_len = 128; + let mut buffer = [0u8; N]; + let mut sink = SftpSink::new(&mut buffer); + + let payload = + DirReadHeaderReply::::encode_data_header(&mut sink, req_id, data_len) + .unwrap(); + + assert_eq!( + data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH, + u32::from_be_bytes(payload[..4].try_into().unwrap()) + ); + } + + #[test] + fn handling_process_eof() { + const N: usize = 512; + + let req_id = ReqId(42); + let mut output_pipe = SftpOutputPipe::::new(); + let mock = MockWriter::new(); + let (mut consumer, producer) = + output_pipe.split(mock).expect("split should succeed"); + + embassy_futures::block_on(async { + { + let dir_header_reply = + DirReadHeaderReply::::new(req_id, &producer); + let _finished = dir_header_reply + .send_eof() + .await + .expect("send_eof should succeed returning ReadReplyFinished"); + } + drop(producer); + // Read exactly the one packet written by send_eof; does not loop. 
+ consumer.receive_once().await.unwrap(); + }); + + // SSH_FXP_STATUS (101) packet for SSH_FX_EOF (1) with req_id 42: + // [len:4][type:1=101][req_id:4][code:4][msg_len:4][msg][lang_len:4][lang] + let mock = consumer.into_inner(); + let buf = &mock.buffer; + // packet type byte should be 101 (SSH_FXP_STATUS) + assert_eq!(buf[4], 101, "expected SSH_FXP_STATUS packet type"); + // status code should be 1 (SSH_FX_EOF) + let code = u32::from_be_bytes(buf[9..13].try_into().unwrap()); + assert_eq!(code, 1, "expected SSH_FX_EOF status code"); + } + + #[test] + fn handling_process_data() { + const N: usize = 2048; + + let req_id = ReqId(42); + let mut output_pipe = SftpOutputPipe::::new(); + let mock = MockWriter::new(); + let (mut consumer, producer) = + output_pipe.split(mock).expect("split should succeed"); + + // 1. Put together a collection of synthetic directory entries + let filenames = vec!["file1", "file2", "file3"]; + let name_entries: Vec> = filenames + .iter() + .map(|name| NameEntry { + filename: Filename::from(*name), + _longname: Filename::from(""), + attrs: Attrs::default(), + }) + .collect(); + + // 2. Obtain the length of the data to be sent by encoding these synthetic directory entries and summing their lengths + let items_encoded_len = name_entries.iter().fold(0u32, |acc: u32, entry| { + let len = helpers::get_name_entry_len(entry) + .expect("Decoding should not fail"); + acc.checked_add(len) + .expect("Length overflow when calculating total encoded length") + }); + + embassy_futures::block_on(async { + { + let dir_header_reply = + DirReadHeaderReply::::new(req_id, &producer); + + // 3. 
Call send_header with the length of the data to be sent + let dir_read_data_reply = dir_header_reply + .send_header(items_encoded_len) + .await + .expect("send_eof should succeed returning ReadReplyData"); + + let _dir_read_reply_finished = dir_read_data_reply + .send_data(|mut limited_sender| async move { + for entry in name_entries.iter() { + limited_sender.send_item(entry).await?; + } + match limited_sender.completed() { + Some(completed_token) => Ok(completed_token), + None => Err(SftpError::FileServerError( + StatusCode::SSH_FX_FAILURE, + )), + } + }) + .await + .expect("send_data should succeed returning ReadReplyFinished"); + } + drop(producer); + // Read exactly the one packet written by send_eof; does not loop. + consumer.receive_once().await.expect("receive_once should succeed"); + }); + + let mock = consumer.into_inner(); + let buf = &mock.buffer; + // packet type byte should be 103 (SSH_FXP_DATA) + assert_eq!(buf[4], 103, "expected SSH_FXP_DATA packet type"); + + // data length should be 10 + let data_len = u32::from_be_bytes( + buf[9..13] + .try_into() + .expect("data length should be present in the packet"), + ); + assert_eq!( + data_len, items_encoded_len, + "expected data length to match encoded length" + ); + assert_eq!( + buf.len(), + 13 + items_encoded_len as usize, + "expected packet length to be header (13 bytes) + data (items_encoded_len bytes)" + ); + } +} + +/// no_std compatible helpers to perform common tasks using solely sunset and sunset-sftp resources +pub mod no_std_helpers { + use crate::{ + error::SftpResult, + proto::{MAX_NAME_ENTRY_SIZE, NameEntry}, + server::SftpSink, + }; + + use sunset::sshwire::SSHEncode; + + /// Helper function to get the length of a given [`NameEntry`] + /// as it would be serialized to the wire. 
+ /// + /// Use this function to calculate the total length of a collection + /// of `NameEntry`s in order to send a correct response Name header + pub fn get_name_entry_len(name_entry: &NameEntry<'_>) -> SftpResult { + let mut buf = [0u8; MAX_NAME_ENTRY_SIZE]; + let mut temp_sink = SftpSink::new(&mut buf); + name_entry.enc(&mut temp_sink)?; + Ok(temp_sink.payload_len() as u32) + } +} + +/// Helpers structures intended to for environment with `std` available, specially linux. +/// +/// The collection helps with directory and directory items enumeration, description +/// and organizing. Providing means to translate them into [`sunset-sftp`] structures +/// +#[cfg(feature = "std")] +pub mod std_helpers { + use crate::{ + proto::{Attrs, Filename}, + server::{ReadStatus, SftpOpResult}, + }; + + use std::{ + fs::{DirEntry, Metadata, ReadDir}, + os::{linux::fs::MetadataExt, unix::fs::PermissionsExt}, + time::SystemTime, + }; + /// This is a helper structure to make ReadDir into something manageable for + /// [`DirReply`] + #[derive(Debug)] + pub struct DirEntriesCollection { + /// Number of elements + count: u32, + /// Computed length of all the encoded elements + encoded_length: u32, + /// The actual entries. As you can see these are DirEntry. 
This is a std choice + entries: Vec, + } + + #[cfg(feature = "std")] + impl DirEntriesCollection { + /// Creates this DirEntriesCollection so linux std users do not need to + /// translate `std` directory elements into Sftp structures before sending a response + /// back to the client + pub fn new(dir_iterator: ReadDir) -> SftpOpResult { + use log::info; + + let mut encoded_length = 0; + + let entries: Vec = dir_iterator + .filter_map(|entry_result| { + let entry = entry_result.ok()?; + let filename = entry.file_name().to_string_lossy().into_owned(); + let name_entry = NameEntry { + filename: Filename::from(filename.as_str()), + _longname: Filename::from(""), + attrs: Self::get_attrs_or_empty(entry.metadata()), + }; + + let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; + let mut sftp_sink = SftpSink::new(&mut buffer); + name_entry.enc(&mut sftp_sink).ok()?; + encoded_length += u32::try_from(sftp_sink.payload_len()) + .map_err(|_| StatusCode::SSH_FX_FAILURE) + .ok()?; + Some(entry) + }) + .collect(); + + let count = u32::try_from(entries.len()) + .map_err(|_| StatusCode::SSH_FX_FAILURE)?; + + info!( + "Processed {} entries, estimated serialized length: {}", + count, encoded_length + ); + + Ok(Self { count, encoded_length, entries }) + } + + /// Using the provided [`DirReply`] sends a response taking care of + /// composing a SFTP Entry header and sending everything in the right order + /// + /// Returns a [`ReadStatus`] + pub async fn send_response( + &self, + reply: &mut DirReply<'_, N>, + ) -> SftpOpResult { + self.send_entries_header(reply).await?; + self.send_entries(reply).await?; + Ok(ReadStatus::EndOfFile) + } + /// Sends a header for all the elements in the ReadDir iterator + /// + /// It will take care of counting them and finding the serialized length of each + /// element + async fn send_entries_header( + &self, + reply: &mut DirReply<'_, N>, + ) -> SftpOpResult<()> { + reply.send_header(self.count, self.encoded_length).await.map_err(|e| { + debug!("Could not 
send header {e:?}"); + StatusCode::SSH_FX_FAILURE + }) + } + + /// Sends the entries in the ReadDir iterator back to the client + async fn send_entries( + &self, + reply: &mut DirReply<'_, N>, + ) -> SftpOpResult<()> { + for entry in &self.entries { + let filename = entry.file_name().to_string_lossy().into_owned(); + let attrs = Self::get_attrs_or_empty(entry.metadata()); + let name_entry = NameEntry { + filename: Filename::from(filename.as_str()), + _longname: Filename::from(""), + attrs, + }; + debug!("Sending new item: {:?}", name_entry); + reply.send_item(&name_entry).await.map_err(|err| { + error!("SftpError: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; + } + Ok(()) + } + + fn get_attrs_or_empty( + maybe_metadata: Result, + ) -> Attrs { + maybe_metadata.map(get_file_attrs).unwrap_or_default() + } + } + + #[cfg(feature = "std")] + /// [`std`] helper function to get [`Attrs`] from a [`Metadata`]. + pub fn get_file_attrs(metadata: Metadata) -> Attrs { + let time_to_u32 = |time_result: std::io::Result| { + time_result + .ok()? + .duration_since(SystemTime::UNIX_EPOCH) + .ok()? 
+ .as_secs() + .try_into() + .ok() + }; + + Attrs { + size: Some(metadata.len()), + uid: Some(metadata.st_uid()), + gid: Some(metadata.st_gid()), + permissions: Some(metadata.permissions().mode()), + atime: time_to_u32(metadata.accessed()), + mtime: time_to_u32(metadata.modified()), + ext_count: None, + } + } +} diff --git a/sftp/src/sftpserver/readreplies.rs b/sftp/src/sftpserver/readreplies.rs index 09aa187c..0dfae469 100644 --- a/sftp/src/sftpserver/readreplies.rs +++ b/sftp/src/sftpserver/readreplies.rs @@ -262,13 +262,12 @@ mod enforcing_process_tests { .expect("send_eof should succeed returning ReadReplyData"); let _read_reply_finished = data_reply - .send_data(|limited_sender| { - // Simulate sending data in two chunks, first 5 bytes then the remaining 5 bytes - async move { - while !limited_sender.is_complete() { - limited_sender.send_data(&[0u8; 3]).await?; - } - Ok(CompletedDataSent) + .send_data(|limited_sender| async move { + loop { + match limited_sender.completed() { + Some(token) => return Ok(token), + None => limited_sender.send_data(&[0u8; 3]).await?, + }; } }) .await From f68c79edff838ff00d34ba10f3f91578ea2db50e Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Thu, 30 Apr 2026 15:05:17 +1000 Subject: [PATCH 44/47] WIP: Starting to replace DirReply with Enforcing readdirreplies --- sftp/src/lib.rs | 6 +- sftp/src/sftphandler/sftphandler.rs | 75 +++--- sftp/src/sftpserver/mod.rs | 15 +- sftp/src/sftpserver/readdirreplies.rs | 13 +- sftp/src/sftpserver/sftpserver.rs | 371 +------------------------- 5 files changed, 69 insertions(+), 411 deletions(-) diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs index a9185912..5af22b1e 100644 --- a/sftp/src/lib.rs +++ b/sftp/src/lib.rs @@ -77,7 +77,9 @@ pub use sftpsource::SftpSource; /// and has the task of executing client requests in the underlying system pub mod server { - pub use crate::sftpserver::DirReply; + pub use crate::sftpserver::{ + DirReadDataReply, DirReadHeaderReply, DirReadReplyFinished, + }; pub use 
crate::sftpserver::{ReadHeaderReply, ReadReplyFinished}; pub use crate::sftpserver::ReadStatus; @@ -86,7 +88,7 @@ pub mod server { /// Helpers to reduce error prone tasks and hide some details that /// add complexity when implementing an [`SftpServer`] pub mod helpers { - pub use crate::sftpserver::helpers::*; + pub use crate::sftpserver::no_std_helpers::*; #[cfg(feature = "std")] pub use crate::sftpserver::DirEntriesCollection; diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs index 027fc230..b4156239 100644 --- a/sftp/src/sftphandler/sftphandler.rs +++ b/sftp/src/sftphandler/sftphandler.rs @@ -4,7 +4,7 @@ use crate::proto::{ self, InitVersionClient, InitVersionLowest, LStat, MAX_REQUEST_LEN, ReqId, SFTP_VERSION, SftpNum, SftpPacket, Stat, StatusCode, }; -use crate::server::DirReply; +use crate::server::DirReadHeaderReply; use crate::sftperror::SftpResult; use crate::sftphandler::requestholder::{RequestHolder, RequestHolderError}; use crate::sftphandler::sftpoutputchannelhandler::{ @@ -532,13 +532,13 @@ where self.state = HandlerState::Idle; } SftpPacket::ReadDir(req_id, read_dir) => { - let mut reply = - DirReply::new(req_id, output_producer); + let dir_read_header_reply = + DirReadHeaderReply::new(req_id, output_producer); if let Err(status) = self .file_server .readdir( &T::try_from(&read_dir.handle)?, - &mut reply, + dir_read_header_reply, ) .await { @@ -552,23 +552,23 @@ where ) .await?; }; - match reply.read_diff() { - diff if diff > 0 => { - debug!( - "DirReply not completed after read operation. Still need to send {} bytes", - diff - ); - return Err(SunsetError::Bug.into()); - } - diff if diff < 0 => { - error!( - "DirReply has sent more data than announced: {} bytes extra", - -diff - ); - return Err(SunsetError::Bug.into()); - } - _ => {} - } + // match reply.read_diff() { + // diff if diff > 0 => { + // debug!( + // "DirReply not completed after read operation. 
Still need to send {} bytes", + // diff + // ); + // return Err(SunsetError::Bug.into()); + // } + // diff if diff < 0 => { + // error!( + // "DirReply has sent more data than announced: {} bytes extra", + // -diff + // ); + // return Err(SunsetError::Bug.into()); + // } + // _ => {} + // } self.state = HandlerState::Idle; } SftpPacket::OpenDir(req_id, open_dir) => { @@ -675,10 +675,13 @@ where .await { Ok(name_entry) => { - let mut dir_reply = - DirReply::new(req_id, output_producer); + let dir_read_header_reply = + DirReadHeaderReply::new( + req_id, + output_producer, + ); let encoded_len = - crate::sftpserver::helpers::get_name_entry_len(&name_entry)?; + crate::sftpserver::no_std_helpers::get_name_entry_len(&name_entry)?; debug!( "PathInfo encoded length: {:?}", encoded_len @@ -687,16 +690,22 @@ where "PathInfo Response content: {:?}", encoded_len ); - dir_reply - .send_header(1, encoded_len) + let dir_read_data_reply = + dir_read_header_reply + .send_header(encoded_len) + .await?; + dir_read_data_reply + .send_data(|mut sender| async move { + sender + .send_item(&name_entry) + .await?; + sender.completed().ok_or( + SftpError::WireError( + WireError::Bug, + ), + ) + }) .await?; - dir_reply.send_item(&name_entry).await?; - if dir_reply.read_diff() != 0 { - error!( - "PathInfo reply not completed after sending the only item" - ); - return Err(SunsetError::Bug.into()); - } } Err(code) => { output_producer diff --git a/sftp/src/sftpserver/mod.rs b/sftp/src/sftpserver/mod.rs index 96f1ddfc..5c340b3f 100644 --- a/sftp/src/sftpserver/mod.rs +++ b/sftp/src/sftpserver/mod.rs @@ -7,14 +7,17 @@ pub use sftpserver::SftpServer; pub use sftpserver::ReadStatus; -pub use sftpserver::DirReply; - pub use sftpserver::SftpOpResult; -pub use sftpserver::helpers; + +pub use crate::sftpserver::readdirreplies::no_std_helpers; #[cfg(feature = "std")] -pub use sftpserver::DirEntriesCollection; -#[cfg(feature = "std")] -pub use sftpserver::get_file_attrs; +pub use 
crate::sftpserver::readdirreplies::std_helpers::{ + DirEntriesCollection, get_file_attrs, +}; pub use readreplies::{ReadHeaderReply, ReadReplyFinished}; + +pub use readdirreplies::{ + DirReadDataReply, DirReadHeaderReply, DirReadReplyFinished, +}; diff --git a/sftp/src/sftpserver/readdirreplies.rs b/sftp/src/sftpserver/readdirreplies.rs index fe517f46..3517952a 100644 --- a/sftp/src/sftpserver/readdirreplies.rs +++ b/sftp/src/sftpserver/readdirreplies.rs @@ -1,8 +1,8 @@ use crate::{ error::{SftpError, SftpResult}, proto::{ - ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, ENCODED_SSH_FXP_DATA_MIN_LENGTH, - MAX_NAME_ENTRY_SIZE, NameEntry, ReqId, SftpNum, + ENCODED_SSH_FXP_DATA_MIN_LENGTH, MAX_NAME_ENTRY_SIZE, NameEntry, ReqId, + SftpNum, }, protocol::StatusCode, server::SftpSink, @@ -367,14 +367,19 @@ pub mod no_std_helpers { pub mod std_helpers { use crate::{ proto::{Attrs, Filename}, - server::{ReadStatus, SftpOpResult}, + protocol::{NameEntry, StatusCode, constants::MAX_NAME_ENTRY_SIZE}, + server::{ReadStatus, SftpOpResult, SftpSink}, }; + use sunset::sshwire::SSHEncode; + + use log::{debug, error, info}; use std::{ fs::{DirEntry, Metadata, ReadDir}, os::{linux::fs::MetadataExt, unix::fs::PermissionsExt}, time::SystemTime, }; + /// This is a helper structure to make ReadDir into something manageable for /// [`DirReply`] #[derive(Debug)] @@ -393,8 +398,6 @@ pub mod std_helpers { /// translate `std` directory elements into Sftp structures before sending a response /// back to the client pub fn new(dir_iterator: ReadDir) -> SftpOpResult { - use log::info; - let mut encoded_length = 0; let entries: Vec = dir_iterator diff --git a/sftp/src/sftpserver/sftpserver.rs b/sftp/src/sftpserver/sftpserver.rs index 9c225c34..7594fb47 100644 --- a/sftp/src/sftpserver/sftpserver.rs +++ b/sftp/src/sftpserver/sftpserver.rs @@ -1,18 +1,12 @@ use crate::error::{SftpError, SftpResult}; -use crate::proto::{ - ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, MAX_NAME_ENTRY_SIZE, NameEntry, PFlags, - 
SftpNum, -}; -use crate::server::SftpSink; -use crate::sftphandler::SftpOutputProducer; +use crate::proto::{NameEntry, PFlags}; +use crate::server::DirReadHeaderReply; use crate::sftpserver::{ReadHeaderReply, ReadReplyFinished}; use crate::{ handles::OpaqueFileHandle, - proto::{Attrs, ReqId, StatusCode}, + proto::{Attrs, StatusCode}, }; -use sunset::sshwire::SSHEncode; - #[allow(unused_imports)] use log::{debug, error, info, log, trace, warn}; @@ -80,22 +74,7 @@ where Err(StatusCode::SSH_FX_OP_UNSUPPORTED) } } - /// Reads from a file that has previously being opened for reading - /// - /// ## Notes to the implementer: - /// - /// The implementer is expected to use the parameter `reply` [`DirReply`] to: - /// - /// - In case of no more data is to be sent, call `reply.send_eof()` - /// - There is more data to be sent from an open file: - /// 1. Call `reply.send_header()` with the length of data to be sent - /// 2. Call `reply.send_data()` once or multiple times to send all the data announced - /// 3. Do not call `reply.send_eof()` during this [`readdir`] method call - /// - /// If the length communicated in the header does not match the total length of the data - /// sent using `reply.send_data()`, the SFTP session will be broken. - /// #[allow(unused)] fn read( &mut self, @@ -114,6 +93,7 @@ where Err(SftpError::FileServerError(StatusCode::SSH_FX_OP_UNSUPPORTED)) } } + /// Writes to a file that has previously being opened for writing fn write( &mut self, @@ -128,7 +108,7 @@ where offset, buf ); - Ok(()) + Err(StatusCode::SSH_FX_OP_UNSUPPORTED) } } @@ -143,33 +123,11 @@ where } } - /// Reads the list of items in a directory and returns them using the [`DirReply`] - /// parameter. - /// - /// ## Notes to the implementer: - /// - /// The implementer is expected to use the parameter `reply` [`DirReply`] to: - /// - /// - In case of no more items in the directory to send, call `reply.send_eof()` - /// - There are more items in the directory: - /// 1. 
Call `reply.send_header()` with the number of items and the [`SSHEncode`] - /// length of all the items to be sent - /// 2. Call `reply.send_item()` for each of the items announced to be sent - /// 3. Do not call `reply.send_eof()` during this [`readdir`] method call - /// - /// If the length communicated in the header does not match the total length of all - /// the items sent using `reply.send_item()`, the SFTP session will be - /// broken. - /// - /// The server is expected to keep track of the number of items that remain to be sent - /// to the client since the client will only stop asking for more elements in the - /// directory when a read dir request is answer with an reply.send_eof() - /// #[allow(unused_variables)] fn readdir( &mut self, opaque_dir_handle: &T, - reply: &mut DirReply<'_, N>, + reply: DirReadHeaderReply<'_, N>, ) -> impl core::future::Future> { async move { log::error!( @@ -211,320 +169,3 @@ where } } } - -/// Uses for [`DirReply`] to: -/// -/// - In case of no more items in the directory to be sent, call `reply.send_eof()` -/// - There are more items in the directory to be sent: -/// 1. Call `reply.send_header()` with the number of items and the [`SSHEncode`] -/// length of all the items to be sent -/// 2. Call `reply.send_item()` for each of the items announced to be sent -/// 3. Do not call `reply.send_eof()` during this [`readdir`] method call -/// -/// It handles immutable sending data via the underlying sftp-channel -/// [`sunset_async::async_channel::ChanOut`] used in the context of an -/// SFTP Session. 
-/// -/// Parameter N is the length for the SftpOutputProducer buffer -/// -pub struct DirReply<'g, const N: usize> { - /// The request Id that will be use`d in the response - req_id: ReqId, - /// Immutable writer - chan_out: &'g SftpOutputProducer<'g, N>, - /// Length of data to be sent as announced in [`DirReply::send_header`] - data_len: u32, - /// Length of data sent so far using [`DirReply::send_data`] - data_sent_len: u32, -} - -impl<'g, const N: usize> DirReply<'g, N> { - // const ENCODED_NAME_SFTP_PACKET_LENGTH: u32 = 9; - - /// New instances can only be created within the crate. Users can only - /// use other public methods to use it. - pub(crate) fn new( - req_id: ReqId, - chan_out: &'g SftpOutputProducer<'g, N>, - ) -> Self { - // DirReply { chan_out: chan_out_wrapper, req_id } - DirReply { req_id, chan_out, data_len: 0, data_sent_len: 0 } - } - - // TODO Make this enforceable - // TODO Automate encoding the SftpPacket - /// Sends the header to the client with the number of files as [`NameEntry`] and the [`SSHEncode`] - /// length of all these [`NameEntry`] items - pub async fn send_header( - &mut self, - count: u32, - items_encoded_len: u32, - ) -> SftpResult<()> { - debug!( - "I will send the header here for request id {:?}: count = {:?}, length = {:?}", - self.req_id, count, items_encoded_len - ); - let mut s = [0u8; N]; - let mut sink = SftpSink::new(&mut s); - - let payload = DirReply::::encode_data_header( - &mut sink, - self.req_id, - items_encoded_len, - count, - )?; - - debug!( - "Sending header: len = {:?}, content = {:?}", - payload.len(), - payload - ); - self.chan_out.send_data(payload).await?; - self.data_len = items_encoded_len; - Ok(()) - } - - /// Sends a directory item to the client as a [`NameEntry`] - /// - /// Call this - pub async fn send_item(&mut self, name_entry: &NameEntry<'_>) -> SftpResult<()> { - let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; - let mut sftp_sink = SftpSink::new(&mut buffer); - name_entry.enc(&mut 
sftp_sink).map_err(|err| { - error!("WireError: {:?}", err); - StatusCode::SSH_FX_FAILURE - })?; - - self.chan_out.send_data(sftp_sink.payload_slice()).await?; - self.data_sent_len += sftp_sink.payload_len() as u32; - Ok(()) - } - - /// Sends EOF meaning that there is no more files in the directory - pub async fn send_eof(&self) -> SftpResult<()> { - self.chan_out.send_status(self.req_id, StatusCode::SSH_FX_EOF, "").await - } - - /// Indicates whether all the data announced in the header has been sent - /// - /// returns 0 when all data has been sent - /// returns >0 when there is still data to be sent - /// returns <0 when too much data has been sent - pub fn read_diff(&self) -> i32 { - (self.data_len as i32) - (self.data_sent_len as i32) - } - - fn encode_data_header( - sink: &'g mut SftpSink<'g>, - req_id: ReqId, - items_encoded_len: u32, - count: u32, - ) -> Result<&'g [u8], SftpError> { - // We need to consider the packet type, Id and count fields - // This way I collect data required for the header and collect - // valid entries into a vector (only std) - (items_encoded_len + ENCODED_BASE_NAME_SFTP_PACKET_LENGTH).enc(sink)?; - u8::from(SftpNum::SSH_FXP_NAME).enc(sink)?; - req_id.enc(sink)?; - count.enc(sink)?; - - Ok(sink.payload_slice()) - } -} - -#[cfg(test)] -mod dir_reply_tests { - use super::*; - - #[cfg(test)] - extern crate std; - // #[cfg(test)] - // use std::println; - - #[test] - fn compose_header() { - const N: usize = 512; - - let req_id = ReqId(42); - let data_len = 128; - let count = 128; - let mut buffer = [0u8; N]; - let mut sink = SftpSink::new(&mut buffer); - - let payload = - DirReply::::encode_data_header(&mut sink, req_id, data_len, count) - .unwrap(); - - // println!("{payload:?}"); - - // println!("{:?}", &u32::from_be_bytes(payload[..4].try_into().unwrap())); - assert_eq!( - data_len + ENCODED_BASE_NAME_SFTP_PACKET_LENGTH, - u32::from_be_bytes(payload[..4].try_into().unwrap()) - ); - } -} - -pub mod helpers { - use crate::{ - 
error::SftpResult, - proto::{MAX_NAME_ENTRY_SIZE, NameEntry}, - server::SftpSink, - }; - - use sunset::sshwire::SSHEncode; - - /// Helper function to get the length of a given [`NameEntry`] - /// as it would be serialized to the wire. - /// - /// Use this function to calculate the total length of a collection - /// of `NameEntry`s in order to send a correct response Name header - pub fn get_name_entry_len(name_entry: &NameEntry<'_>) -> SftpResult { - let mut buf = [0u8; MAX_NAME_ENTRY_SIZE]; - let mut temp_sink = SftpSink::new(&mut buf); - name_entry.enc(&mut temp_sink)?; - Ok(temp_sink.payload_len() as u32) - } -} - -#[cfg(feature = "std")] -use crate::proto::Filename; -#[cfg(feature = "std")] -use std::{ - fs::{DirEntry, Metadata, ReadDir}, - os::{linux::fs::MetadataExt, unix::fs::PermissionsExt}, - time::SystemTime, -}; - -#[cfg(feature = "std")] -/// This is a helper structure to make ReadDir into something manageable for -/// [`DirReply`] -#[derive(Debug)] -pub struct DirEntriesCollection { - /// Number of elements - count: u32, - /// Computed length of all the encoded elements - encoded_length: u32, - /// The actual entries. As you can see these are DirEntry. 
This is a std choice - entries: Vec, -} - -#[cfg(feature = "std")] -impl DirEntriesCollection { - /// Creates this DirEntriesCollection so linux std users do not need to - /// translate `std` directory elements into Sftp structures before sending a response - /// back to the client - pub fn new(dir_iterator: ReadDir) -> SftpOpResult { - use log::info; - - let mut encoded_length = 0; - - let entries: Vec = dir_iterator - .filter_map(|entry_result| { - let entry = entry_result.ok()?; - let filename = entry.file_name().to_string_lossy().into_owned(); - let name_entry = NameEntry { - filename: Filename::from(filename.as_str()), - _longname: Filename::from(""), - attrs: Self::get_attrs_or_empty(entry.metadata()), - }; - - let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; - let mut sftp_sink = SftpSink::new(&mut buffer); - name_entry.enc(&mut sftp_sink).ok()?; - encoded_length += u32::try_from(sftp_sink.payload_len()) - .map_err(|_| StatusCode::SSH_FX_FAILURE) - .ok()?; - Some(entry) - }) - .collect(); - - let count = - u32::try_from(entries.len()).map_err(|_| StatusCode::SSH_FX_FAILURE)?; - - info!( - "Processed {} entries, estimated serialized length: {}", - count, encoded_length - ); - - Ok(Self { count, encoded_length, entries }) - } - - /// Using the provided [`DirReply`] sends a response taking care of - /// composing a SFTP Entry header and sending everything in the right order - /// - /// Returns a [`ReadStatus`] - pub async fn send_response( - &self, - reply: &mut DirReply<'_, N>, - ) -> SftpOpResult { - self.send_entries_header(reply).await?; - self.send_entries(reply).await?; - Ok(ReadStatus::EndOfFile) - } - /// Sends a header for all the elements in the ReadDir iterator - /// - /// It will take care of counting them and finding the serialized length of each - /// element - async fn send_entries_header( - &self, - reply: &mut DirReply<'_, N>, - ) -> SftpOpResult<()> { - reply.send_header(self.count, self.encoded_length).await.map_err(|e| { - debug!("Could not send 
header {e:?}"); - StatusCode::SSH_FX_FAILURE - }) - } - - /// Sends the entries in the ReadDir iterator back to the client - async fn send_entries( - &self, - reply: &mut DirReply<'_, N>, - ) -> SftpOpResult<()> { - for entry in &self.entries { - let filename = entry.file_name().to_string_lossy().into_owned(); - let attrs = Self::get_attrs_or_empty(entry.metadata()); - let name_entry = NameEntry { - filename: Filename::from(filename.as_str()), - _longname: Filename::from(""), - attrs, - }; - debug!("Sending new item: {:?}", name_entry); - reply.send_item(&name_entry).await.map_err(|err| { - error!("SftpError: {:?}", err); - StatusCode::SSH_FX_FAILURE - })?; - } - Ok(()) - } - - fn get_attrs_or_empty( - maybe_metadata: Result, - ) -> Attrs { - maybe_metadata.map(get_file_attrs).unwrap_or_default() - } -} - -#[cfg(feature = "std")] -/// [`std`] helper function to get [`Attrs`] from a [`Metadata`]. -pub fn get_file_attrs(metadata: Metadata) -> Attrs { - let time_to_u32 = |time_result: std::io::Result| { - time_result - .ok()? - .duration_since(SystemTime::UNIX_EPOCH) - .ok()? - .as_secs() - .try_into() - .ok() - }; - - Attrs { - size: Some(metadata.len()), - uid: Some(metadata.st_uid()), - gid: Some(metadata.st_gid()), - permissions: Some(metadata.permissions().mode()), - atime: time_to_u32(metadata.accessed()), - mtime: time_to_u32(metadata.modified()), - ext_count: None, - } -} From bb2baf060de26fc4f027500ce2206e29de066712 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 1 May 2026 15:57:02 +1000 Subject: [PATCH 45/47] Final touches for the new guided dirread replies I moved the std_helpers to the demo to keep the library more readable and not force the user into a particular implementation. Fixed an issue caused by copy-paste the readreplies.rs where I was not sending the right response back. Refactored sftphandle to use the new interfaces. 
Refreshing lib.rs, renaming some constants, etc --- demo/sftp/std/src/demosftpserver.rs | 53 +++-- demo/sftp/std/src/main.rs | 1 + demo/sftp/std/src/stdhelpers.rs | 145 ++++++++++++++ sftp/src/lib.rs | 11 +- sftp/src/proto.rs | 31 +-- sftp/src/sftphandler/sftphandler.rs | 6 +- sftp/src/sftpserver/mod.rs | 9 +- sftp/src/sftpserver/readdirreplies.rs | 274 ++++++++------------------ sftp/src/sftpserver/readreplies.rs | 12 +- sftp/src/sftpserver/sftpserver.rs | 30 ++- 10 files changed, 315 insertions(+), 257 deletions(-) create mode 100644 demo/sftp/std/src/stdhelpers.rs diff --git a/demo/sftp/std/src/demosftpserver.rs b/demo/sftp/std/src/demosftpserver.rs index 60789066..13eb2a4c 100644 --- a/demo/sftp/std/src/demosftpserver.rs +++ b/demo/sftp/std/src/demosftpserver.rs @@ -1,14 +1,17 @@ use crate::demofilehandlemanager::DemoFileHandleManager; - -use sunset_sftp::error::SftpResult; -use sunset_sftp::handles::{ - InitFileHandler, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, -}; -use sunset_sftp::protocol::{Attrs, Filename, NameEntry, PFlags, StatusCode}; -use sunset_sftp::server::helpers::DirEntriesCollection; -use sunset_sftp::server::{ - DirReply, ReadHeaderReply, ReadReplyFinished, ReadStatus, SftpOpResult, - SftpServer, +use crate::stdhelpers::{get_file_attrs, DirEntriesCollection}; + +use sunset_sftp::server::DirReadReplyFinished; +use sunset_sftp::{ + error::SftpResult, + handles::{ + InitFileHandler, OpaqueFileHandle, OpaqueFileHandleManager, PathFinder, + }, + protocol::{Attrs, Filename, NameEntry, PFlags, StatusCode}, + server::{ + DirReadHeaderReply, ReadHeaderReply, ReadReplyFinished, ReadStatus, + SftpOpResult, SftpServer, + }, }; #[allow(unused_imports)] @@ -19,7 +22,6 @@ use strict_path::StrictPath; /// See [the mix up problem](https://dk26.github.io/strict-path-rs/tutorial/chapter2_mixup_problem.html) /// and [markers to the rescue](https://dk26.github.io/strict-path-rs/tutorial/chapter3_markers.html) /// if you are not familiar with marker 
types - struct SftpDir; use std::fs; @@ -435,8 +437,8 @@ impl SftpServer async fn readdir( &mut self, opaque_dir_handle: &OFH, - reply: &mut DirReply<'_, N>, - ) -> SftpOpResult<()> { + reply: DirReadHeaderReply<'_, N>, + ) -> SftpOpResult { info!("read dir for {:?}", opaque_dir_handle); if let PrivatePathHandle::Directory(dir) = self @@ -445,11 +447,11 @@ impl SftpServer .ok_or(StatusCode::SSH_FX_NO_SUCH_FILE)? { if dir.read_status == ReadStatus::EndOfFile { - reply.send_eof().await.map_err(|error| { + let finish_token = reply.send_eof().await.map_err(|error| { error!("{:?}", error); StatusCode::SSH_FX_FAILURE })?; - return Ok(()); + return Ok(finish_token); } let path_str = dir.path.clone(); @@ -467,11 +469,20 @@ impl SftpServer let name_entry_collection = DirEntriesCollection::new(dir_iterator)?; - let response_read_status = - name_entry_collection.send_response(reply).await?; + let encoded_length = name_entry_collection.encoded_length(); + let items_count = name_entry_collection.count(); + + let data_reply = reply + .send_header(encoded_length, items_count) + .await + .map_err(|_| StatusCode::SSH_FX_OP_UNSUPPORTED)?; + + let finish_token = + name_entry_collection.send_entries(data_reply).await?; + + dir.read_status = ReadStatus::EndOfFile; - dir.read_status = response_read_status; - return Ok(()); + return Ok(finish_token); } else { error!("the path is not a directory = {:?}", dir_path); return Err(StatusCode::SSH_FX_NO_SUCH_FILE); @@ -501,9 +512,9 @@ impl SftpServer })?; if file_path.is_file() { - return Ok(sunset_sftp::server::helpers::get_file_attrs(metadata)); + return Ok(get_file_attrs(metadata)); } else if file_path.is_symlink() { - return Ok(sunset_sftp::server::helpers::get_file_attrs(metadata)); + return Ok(get_file_attrs(metadata)); } else { return Err(StatusCode::SSH_FX_NO_SUCH_FILE); } diff --git a/demo/sftp/std/src/main.rs b/demo/sftp/std/src/main.rs index 1152189d..4662c105 100644 --- a/demo/sftp/std/src/main.rs +++ b/demo/sftp/std/src/main.rs @@ 
-25,6 +25,7 @@ use log::{debug, error, info, log, trace, warn}; mod demofilehandlemanager; mod demoopaquefilehandle; mod demosftpserver; +mod stdhelpers; const NUM_LISTENERS: usize = 4; // +1 for dhcp diff --git a/demo/sftp/std/src/stdhelpers.rs b/demo/sftp/std/src/stdhelpers.rs new file mode 100644 index 00000000..1493e743 --- /dev/null +++ b/demo/sftp/std/src/stdhelpers.rs @@ -0,0 +1,145 @@ +/// Helpers structures intended to for environment with `std` available, specially linux. +/// +/// The collection helps with directory and directory items enumeration, description +/// and organizing. Providing means to translate them into [`sunset-sftp`] structures +/// +use sunset_sftp::{ + error::SftpError, + protocol::{ + constants::MAX_NAME_ENTRY_SIZE, Attrs, Filename, NameEntry, StatusCode, + }, + server::{DirReadDataReply, DirReadReplyFinished, SftpOpResult, SftpSink}, +}; + +use sunset::sshwire::SSHEncode; + +use log::{debug, error, info}; +use std::{ + fs::{DirEntry, Metadata, ReadDir}, + os::{linux::fs::MetadataExt, unix::fs::PermissionsExt}, + time::SystemTime, +}; + +/// This is a helper structure to make ReadDir into something manageable for +/// [`DirReply`] +#[derive(Debug)] +pub struct DirEntriesCollection { + /// Number of elements + count: u32, + /// Computed length of all the encoded elements + encoded_length: u32, + /// The actual entries. As you can see these are DirEntry. 
This is a std choice + entries: Vec, +} + +impl DirEntriesCollection { + /// Creates this DirEntriesCollection so linux std users do not need to + /// translate `std` directory elements into Sftp structures before sending a response + /// back to the client + pub fn new(dir_iterator: ReadDir) -> SftpOpResult { + let mut encoded_length = 0; + + let entries: Vec = dir_iterator + .filter_map(|entry_result| { + let entry = entry_result.ok()?; + let filename = entry.file_name().to_string_lossy().into_owned(); + let name_entry = NameEntry { + filename: Filename::from(filename.as_str()), + _longname: Filename::from(""), + attrs: Self::get_attrs_or_empty(entry.metadata()), + }; + + let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; + let mut sftp_sink = SftpSink::new(&mut buffer); + name_entry.enc(&mut sftp_sink).ok()?; + encoded_length += u32::try_from(sftp_sink.payload_len()) + .map_err(|_| StatusCode::SSH_FX_FAILURE) + .ok()?; + Some(entry) + }) + .collect(); + + let count = + u32::try_from(entries.len()).map_err(|_| StatusCode::SSH_FX_FAILURE)?; + + info!( + "Processed {} entries, estimated serialized length: {}", + count, encoded_length + ); + + Ok(Self { count, encoded_length, entries }) + } + + pub(crate) fn encoded_length(&self) -> u32 { + self.encoded_length + } + pub(crate) fn count(&self) -> u32 { + self.count + } + + pub(crate) async fn send_entries( + &self, + data_reply: DirReadDataReply<'_, N>, + ) -> SftpOpResult { + if self.entries.is_empty() { + return Err(StatusCode::SSH_FX_EOF); + } + + let Ok(token) = data_reply + .send_data(|mut limited_dir_sender| async move { + for entry in &self.entries { + let filename = entry.file_name().to_string_lossy().into_owned(); + let attrs = Self::get_attrs_or_empty(entry.metadata()); + let name_entry = NameEntry { + filename: Filename::from(filename.as_str()), + _longname: Filename::from(""), + attrs, + }; + debug!("Sending new item: {:?}", name_entry); + + limited_dir_sender.send_item(&name_entry).await?; + } + match 
limited_dir_sender.completed() { + Some(completed_token) => Ok(completed_token), + None => { + Err(SftpError::FileServerError(StatusCode::SSH_FX_FAILURE)) + } + } + }) + .await + else { + error!("Failed to send directory entries"); + return Err(StatusCode::SSH_FX_FAILURE); + }; + Ok(token) + } + + fn get_attrs_or_empty( + maybe_metadata: Result, + ) -> Attrs { + maybe_metadata.map(get_file_attrs).unwrap_or_default() + } +} + +/// [`std`] helper function to get [`Attrs`] from a [`Metadata`]. +pub fn get_file_attrs(metadata: Metadata) -> Attrs { + let time_to_u32 = |time_result: std::io::Result| { + time_result + .ok()? + .duration_since(SystemTime::UNIX_EPOCH) + .ok()? + .as_secs() + .try_into() + .ok() + }; + + Attrs { + size: Some(metadata.len()), + uid: Some(metadata.st_uid()), + gid: Some(metadata.st_gid()), + permissions: Some(metadata.permissions().mode()), + atime: time_to_u32(metadata.accessed()), + mtime: time_to_u32(metadata.modified()), + ext_count: None, + } +} diff --git a/sftp/src/lib.rs b/sftp/src/lib.rs index 5af22b1e..da056129 100644 --- a/sftp/src/lib.rs +++ b/sftp/src/lib.rs @@ -80,7 +80,7 @@ pub mod server { pub use crate::sftpserver::{ DirReadDataReply, DirReadHeaderReply, DirReadReplyFinished, }; - pub use crate::sftpserver::{ReadHeaderReply, ReadReplyFinished}; + pub use crate::sftpserver::{ReadDataReply, ReadHeaderReply, ReadReplyFinished}; pub use crate::sftpserver::ReadStatus; pub use crate::sftpserver::SftpOpResult; @@ -88,12 +88,7 @@ pub mod server { /// Helpers to reduce error prone tasks and hide some details that /// add complexity when implementing an [`SftpServer`] pub mod helpers { - pub use crate::sftpserver::no_std_helpers::*; - - #[cfg(feature = "std")] - pub use crate::sftpserver::DirEntriesCollection; - #[cfg(feature = "std")] - pub use crate::sftpserver::get_file_attrs; + pub use crate::sftpserver::helpers::*; } pub use crate::sftpsink::SftpSink; pub use sunset::sshwire::SSHEncode; @@ -123,6 +118,8 @@ pub mod protocol { /// 
Constants that might be useful for SFTP developers pub mod constants { pub use crate::proto::MAX_NAME_ENTRY_SIZE; + pub use crate::proto::MAX_PATH_LEN; + pub use crate::proto::SFTP_FIELD_LEN_LENGTH; } } diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index 90397962..a1c2eb31 100644 --- a/sftp/src/proto.rs +++ b/sftp/src/proto.rs @@ -285,7 +285,7 @@ pub struct Data<'a> { pub data: BinString<'a>, } -/// This is the encoded length for the [`Data`] Sftp Response. +/// This is the encoded length for the [`Data`] Sftp Response Header. /// /// This considers the Packet type (1), the request ID (4), and the data string /// length (4) @@ -297,7 +297,21 @@ pub struct Data<'a> { /// encoded [`SftpPacket::Data`] variant /// /// See [Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) -pub(crate) const ENCODED_SSH_FXP_DATA_MIN_LENGTH: u32 = 1 + 4 + 4; +pub(crate) const ENCODED_SSH_FXP_DATA_HEADER: u32 = 1 + 4 + 4; + +/// This is the encoded length for the [`Name`] Sftp Response Header. +/// +/// This considers the Packet type (1), the request ID (4), and the count +/// of NameEntry (4) +/// +/// - It excludes explicitly length field for the SftpPacket +/// - It excludes explicitly length of the data string content +/// +/// It is defined a single source of truth for what is the length for the +/// encoded [`SftpPacket::Name`] variant +/// +/// See [Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-6.4) +pub(crate) const ENCODED_SSH_FXP_NAME_HEADER: u32 = 1 + 4 + 4; /// Struct to hold `SSH_FXP_NAME` response. /// See [SSH_FXP_NAME in Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) @@ -315,19 +329,6 @@ pub struct NameEntry<'a> { pub attrs: Attrs, } -/// This is the encoded length for the Name Sftp Response. 
-/// -/// This considers the Packet type (1), the Request Id (4) and -/// count of [`NameEntry`] that will follow -/// -/// It excludes the length of [`NameEntry`] explicitly -/// -/// It is defined a single source of truth for what is the length for the -/// encoded [`SftpPacket::Name`] variant -/// -/// See [Responses from the Server to the Client](https://datatracker.ietf.org/doc/html/draft-ietf-secsh-filexfer-02#section-7) -pub(crate) const ENCODED_BASE_NAME_SFTP_PACKET_LENGTH: u32 = 9; - // TODO Will a Vector be an issue for no_std? // Maybe we should migrate this to heapless::Vec and let the user decide // the number of elements via features flags? diff --git a/sftp/src/sftphandler/sftphandler.rs b/sftp/src/sftphandler/sftphandler.rs index b4156239..ba9fc02d 100644 --- a/sftp/src/sftphandler/sftphandler.rs +++ b/sftp/src/sftphandler/sftphandler.rs @@ -65,7 +65,7 @@ enum HandlerState { /// Parameter (T): Is a type that implements [`crate::handles::OpaqueFileHandle`] that **must** match the type used in the [`crate::sftpserver::SftpServer`] provided in (S) /// /// The compiler time constant `BUFFER_OUT_SIZE` is used to define the -/// size of the output buffer for the subsystem [`Embassy-sync::pipe`] used +/// size of the output buffer for the subsystem [`embassy_sync::pipe::Pipe`] used /// to send responses safely across the instantiated structure. 
/// pub struct SftpHandler<'a, T, S, const BUFFER_OUT_SIZE: usize> @@ -681,7 +681,7 @@ where output_producer, ); let encoded_len = - crate::sftpserver::no_std_helpers::get_name_entry_len(&name_entry)?; + crate::sftpserver::helpers::get_name_entry_len(&name_entry)?; debug!( "PathInfo encoded length: {:?}", encoded_len @@ -692,7 +692,7 @@ where ); let dir_read_data_reply = dir_read_header_reply - .send_header(encoded_len) + .send_header(encoded_len, 1) .await?; dir_read_data_reply .send_data(|mut sender| async move { diff --git a/sftp/src/sftpserver/mod.rs b/sftp/src/sftpserver/mod.rs index 5c340b3f..2c914fa4 100644 --- a/sftp/src/sftpserver/mod.rs +++ b/sftp/src/sftpserver/mod.rs @@ -9,14 +9,9 @@ pub use sftpserver::ReadStatus; pub use sftpserver::SftpOpResult; -pub use crate::sftpserver::readdirreplies::no_std_helpers; +pub use crate::sftpserver::readdirreplies::helpers; -#[cfg(feature = "std")] -pub use crate::sftpserver::readdirreplies::std_helpers::{ - DirEntriesCollection, get_file_attrs, -}; - -pub use readreplies::{ReadHeaderReply, ReadReplyFinished}; +pub use readreplies::{ReadDataReply, ReadHeaderReply, ReadReplyFinished}; pub use readdirreplies::{ DirReadDataReply, DirReadHeaderReply, DirReadReplyFinished, diff --git a/sftp/src/sftpserver/readdirreplies.rs b/sftp/src/sftpserver/readdirreplies.rs index 3517952a..f1b13fa2 100644 --- a/sftp/src/sftpserver/readdirreplies.rs +++ b/sftp/src/sftpserver/readdirreplies.rs @@ -1,8 +1,7 @@ use crate::{ error::{SftpError, SftpResult}, proto::{ - ENCODED_SSH_FXP_DATA_MIN_LENGTH, MAX_NAME_ENTRY_SIZE, NameEntry, ReqId, - SftpNum, + ENCODED_SSH_FXP_NAME_HEADER, MAX_NAME_ENTRY_SIZE, NameEntry, ReqId, SftpNum, }, protocol::StatusCode, server::SftpSink, @@ -13,6 +12,10 @@ use sunset::sshwire::SSHEncode; use log::{debug, error}; +/// Structures and helpers to handle the process of sending read replies for readdir operations in a structured way. 
+/// +/// Enforces the correct sequence of sending a DirRead reply, +/// which consists of first sending a header with the announced data length using [`DirReadHeaderReply::send_header`] and then sending the data itself using [`DirReadDataReply::send_data`]. pub struct DirReadHeaderReply<'g, const N: usize> { /// The request Id that will be use`d in the response req_id: ReqId, @@ -31,12 +34,47 @@ impl<'g, const N: usize> DirReadHeaderReply<'g, N> { Self { req_id, chan_out } } + // /// Sends the header for a read reply with the given data length. + // /// + // /// Once used, the only way to obtain a [`DirReadReplyFinished`] is by using its returned value. + // pub async fn send_header( + // self, + // data_len: u32, + // ) -> SftpResult> { + // debug!( + // "DirReadReply: Sending header for request id {:?}: data length = {:?}", + // self.req_id, data_len + // ); + // let mut s = [0u8; N]; + // let mut sink = SftpSink::new(&mut s); + + // let payload = DirReadHeaderReply::::encode_data_header( + // &mut sink, + // self.req_id, + // data_len, + // ) + // .map_err(|err| { + // error!("WireError: {:?}", err); + // StatusCode::SSH_FX_FAILURE + // })?; + + // debug!( + // "Sending header: len = {:?}, content = {:?}", + // payload.len(), + // payload + // ); + // // Sending payload_slice since we are not making use of the sink sftpPacket length calculation + // self.chan_out.send_data(payload).await?; + + // Ok(DirReadDataReply::new(self.req_id, data_len, self.chan_out)) + // } /// Sends the header for a read reply with the given data length. /// /// Once used, the only way to obtain a [`DirReadReplyFinished`] is by using its returned value. 
pub async fn send_header( self, data_len: u32, + count: u32, ) -> SftpResult> { debug!( "DirReadReply: Sending header for request id {:?}: data length = {:?}", @@ -45,11 +83,16 @@ impl<'g, const N: usize> DirReadHeaderReply<'g, N> { let mut s = [0u8; N]; let mut sink = SftpSink::new(&mut s); - let payload = DirReadHeaderReply::::encode_data_header( + let payload = DirReadHeaderReply::::encode_header( &mut sink, self.req_id, data_len, - )?; + count, + ) + .map_err(|err| { + error!("WireError: {:?}", err); + StatusCode::SSH_FX_FAILURE + })?; debug!( "Sending header: len = {:?}, content = {:?}", @@ -70,23 +113,25 @@ impl<'g, const N: usize> DirReadHeaderReply<'g, N> { Ok(DirReadReplyFinished::new(self.req_id)) } - fn encode_data_header( + fn encode_header( sink: &'g mut SftpSink<'g>, req_id: ReqId, data_len: u32, + count: u32, ) -> Result<&'g [u8], SftpError> { // length field - (data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH).enc(sink)?; + (data_len + ENCODED_SSH_FXP_NAME_HEADER).enc(sink)?; // packet type (1) - u8::from(SftpNum::SSH_FXP_DATA).enc(sink)?; + u8::from(SftpNum::SSH_FXP_NAME).enc(sink)?; // request id (4) req_id.enc(sink)?; - // data length (4) - data_len.enc(sink)?; + count.enc(sink)?; Ok(sink.payload_slice()) } } +/// Represents the state of a successful read reply for a readdir operation after the +/// header has been sent and the data has been completely sent or an EOF status has been sent. pub struct DirReadReplyFinished { /// The request Id that will be use`d in the response _req_id: ReqId, @@ -98,10 +143,13 @@ impl DirReadReplyFinished { } } +/// Helper struct to enforce the correct sequence of sending directory items in a readdir +/// reply, which consists of sending items until the announced data length is reached. 
pub struct LimitedDirSender<'g, const N: usize> { /// Immutable writer chan_out: &'g SftpOutputProducer<'g, N>, - /// remaining data length to be sent as announced in [`DirReply::send_header`] + /// remaining data length to be sent as announced in [`DirReadDataReply::send_data`] + /// when calling the closure with this LimitedDirSender as an argument. remaining: core::cell::Cell, } @@ -119,10 +167,7 @@ impl<'g, const N: usize> LimitedDirSender<'g, N> { ) -> SftpResult { let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; let mut sftp_sink = SftpSink::new(&mut buffer); - name_entry.enc(&mut sftp_sink).map_err(|err| { - error!("WireError: {:?}", err); - StatusCode::SSH_FX_FAILURE - })?; + name_entry.enc(&mut sftp_sink)?; self.send_data(sftp_sink.payload_slice()).await } @@ -146,8 +191,20 @@ impl<'g, const N: usize> LimitedDirSender<'g, N> { } } +/// Token struct to represent the state of having sent all the announced data for a readdir reply. +/// +/// It can only be obtained by calling [`LimitedDirSender::completed`] after having +/// sent items with [`LimitedDirSender::send_item`] until the announced data length is reached. +/// +/// It is used to guarantee that all the announced data has been sent in the closure +/// provided to [`DirReadDataReply::send_data`] before being able to return a [`DirReadReplyFinished`] +/// and thus completing the readdir reply process. pub struct CompleteDirDataSent; +/// Helper struct to enforce the correct sequence of sending a readdir reply, +/// which consists of first sending a header with the announced data length +/// using [`DirReadHeaderReply::send_header`] and then sending the data +/// itself using [`DirReadDataReply::send_data`]. 
pub struct DirReadDataReply<'g, const N: usize> { /// The request Id that will be use`d in the response req_id: ReqId, @@ -198,25 +255,6 @@ mod enforcing_process_tests { use alloc::vec; use std::vec::Vec; - #[test] - fn compose_header() { - const N: usize = 512; - - let req_id = ReqId(42); - let data_len = 128; - let mut buffer = [0u8; N]; - let mut sink = SftpSink::new(&mut buffer); - - let payload = - DirReadHeaderReply::::encode_data_header(&mut sink, req_id, data_len) - .unwrap(); - - assert_eq!( - data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH, - u32::from_be_bytes(payload[..4].try_into().unwrap()) - ); - } - #[test] fn handling_process_eof() { const N: usize = 512; @@ -281,6 +319,9 @@ mod enforcing_process_tests { .expect("Length overflow when calculating total encoded length") }); + let items_count = + u32::try_from(name_entries.len()).expect("Count should fit in u32"); + embassy_futures::block_on(async { { let dir_header_reply = @@ -288,7 +329,7 @@ mod enforcing_process_tests { // 3. 
Call send_header with the length of the data to be sent let dir_read_data_reply = dir_header_reply - .send_header(items_encoded_len) + .send_header(items_encoded_len, items_count) .await .expect("send_eof should succeed returning ReadReplyData"); @@ -314,17 +355,17 @@ mod enforcing_process_tests { let mock = consumer.into_inner(); let buf = &mock.buffer; - // packet type byte should be 103 (SSH_FXP_DATA) - assert_eq!(buf[4], 103, "expected SSH_FXP_DATA packet type"); + // packet type byte should be 104 (SSH_FXP_NAME) + assert_eq!(buf[4], 104, "expected SSH_FXP_NAME packet type"); - // data length should be 10 - let data_len = u32::from_be_bytes( + // data length should be + let items = u32::from_be_bytes( buf[9..13] .try_into() .expect("data length should be present in the packet"), ); assert_eq!( - data_len, items_encoded_len, + items, items_count, "expected data length to match encoded length" ); assert_eq!( @@ -336,7 +377,7 @@ mod enforcing_process_tests { } /// no_std compatible helpers to perform common tasks using solely sunset and sunset-sftp resources -pub mod no_std_helpers { +pub mod helpers { use crate::{ error::SftpResult, proto::{MAX_NAME_ENTRY_SIZE, NameEntry}, @@ -357,156 +398,3 @@ pub mod no_std_helpers { Ok(temp_sink.payload_len() as u32) } } - -/// Helpers structures intended to for environment with `std` available, specially linux. -/// -/// The collection helps with directory and directory items enumeration, description -/// and organizing. 
Providing means to translate them into [`sunset-sftp`] structures -/// -#[cfg(feature = "std")] -pub mod std_helpers { - use crate::{ - proto::{Attrs, Filename}, - protocol::{NameEntry, StatusCode, constants::MAX_NAME_ENTRY_SIZE}, - server::{ReadStatus, SftpOpResult, SftpSink}, - }; - - use sunset::sshwire::SSHEncode; - - use log::{debug, error, info}; - use std::{ - fs::{DirEntry, Metadata, ReadDir}, - os::{linux::fs::MetadataExt, unix::fs::PermissionsExt}, - time::SystemTime, - }; - - /// This is a helper structure to make ReadDir into something manageable for - /// [`DirReply`] - #[derive(Debug)] - pub struct DirEntriesCollection { - /// Number of elements - count: u32, - /// Computed length of all the encoded elements - encoded_length: u32, - /// The actual entries. As you can see these are DirEntry. This is a std choice - entries: Vec, - } - - #[cfg(feature = "std")] - impl DirEntriesCollection { - /// Creates this DirEntriesCollection so linux std users do not need to - /// translate `std` directory elements into Sftp structures before sending a response - /// back to the client - pub fn new(dir_iterator: ReadDir) -> SftpOpResult { - let mut encoded_length = 0; - - let entries: Vec = dir_iterator - .filter_map(|entry_result| { - let entry = entry_result.ok()?; - let filename = entry.file_name().to_string_lossy().into_owned(); - let name_entry = NameEntry { - filename: Filename::from(filename.as_str()), - _longname: Filename::from(""), - attrs: Self::get_attrs_or_empty(entry.metadata()), - }; - - let mut buffer = [0u8; MAX_NAME_ENTRY_SIZE]; - let mut sftp_sink = SftpSink::new(&mut buffer); - name_entry.enc(&mut sftp_sink).ok()?; - encoded_length += u32::try_from(sftp_sink.payload_len()) - .map_err(|_| StatusCode::SSH_FX_FAILURE) - .ok()?; - Some(entry) - }) - .collect(); - - let count = u32::try_from(entries.len()) - .map_err(|_| StatusCode::SSH_FX_FAILURE)?; - - info!( - "Processed {} entries, estimated serialized length: {}", - count, encoded_length - ); - - 
Ok(Self { count, encoded_length, entries }) - } - - /// Using the provided [`DirReply`] sends a response taking care of - /// composing a SFTP Entry header and sending everything in the right order - /// - /// Returns a [`ReadStatus`] - pub async fn send_response( - &self, - reply: &mut DirReply<'_, N>, - ) -> SftpOpResult { - self.send_entries_header(reply).await?; - self.send_entries(reply).await?; - Ok(ReadStatus::EndOfFile) - } - /// Sends a header for all the elements in the ReadDir iterator - /// - /// It will take care of counting them and finding the serialized length of each - /// element - async fn send_entries_header( - &self, - reply: &mut DirReply<'_, N>, - ) -> SftpOpResult<()> { - reply.send_header(self.count, self.encoded_length).await.map_err(|e| { - debug!("Could not send header {e:?}"); - StatusCode::SSH_FX_FAILURE - }) - } - - /// Sends the entries in the ReadDir iterator back to the client - async fn send_entries( - &self, - reply: &mut DirReply<'_, N>, - ) -> SftpOpResult<()> { - for entry in &self.entries { - let filename = entry.file_name().to_string_lossy().into_owned(); - let attrs = Self::get_attrs_or_empty(entry.metadata()); - let name_entry = NameEntry { - filename: Filename::from(filename.as_str()), - _longname: Filename::from(""), - attrs, - }; - debug!("Sending new item: {:?}", name_entry); - reply.send_item(&name_entry).await.map_err(|err| { - error!("SftpError: {:?}", err); - StatusCode::SSH_FX_FAILURE - })?; - } - Ok(()) - } - - fn get_attrs_or_empty( - maybe_metadata: Result, - ) -> Attrs { - maybe_metadata.map(get_file_attrs).unwrap_or_default() - } - } - - #[cfg(feature = "std")] - /// [`std`] helper function to get [`Attrs`] from a [`Metadata`]. - pub fn get_file_attrs(metadata: Metadata) -> Attrs { - let time_to_u32 = |time_result: std::io::Result| { - time_result - .ok()? - .duration_since(SystemTime::UNIX_EPOCH) - .ok()? 
- .as_secs() - .try_into() - .ok() - }; - - Attrs { - size: Some(metadata.len()), - uid: Some(metadata.st_uid()), - gid: Some(metadata.st_gid()), - permissions: Some(metadata.permissions().mode()), - atime: time_to_u32(metadata.accessed()), - mtime: time_to_u32(metadata.modified()), - ext_count: None, - } - } -} diff --git a/sftp/src/sftpserver/readreplies.rs b/sftp/src/sftpserver/readreplies.rs index 0dfae469..98c6035e 100644 --- a/sftp/src/sftpserver/readreplies.rs +++ b/sftp/src/sftpserver/readreplies.rs @@ -1,6 +1,6 @@ use crate::{ error::{SftpError, SftpResult}, - proto::{ENCODED_SSH_FXP_DATA_MIN_LENGTH, ReqId, SftpNum}, + proto::{ENCODED_SSH_FXP_DATA_HEADER, ReqId, SftpNum}, protocol::StatusCode, server::SftpSink, sftphandler::SftpOutputProducer, @@ -13,7 +13,7 @@ use log::debug; /// Used to provide the header of a read reply, including /// only the fundamental information such as being an EOF reply or a data reply /// -/// On the corresponding method call will return either a [`ReadDataReply`] or a [`ReadReplyFinished`] +/// On the corresponding method call will return either a [`crate::sftpserver::ReadDataReply`] or a [`crate::sftpserver::ReadReplyFinished`] /// which makes easy to implement correct behavior. pub struct ReadHeaderReply<'g, const N: usize> { /// The request Id that will be used in the response @@ -25,7 +25,7 @@ pub struct ReadHeaderReply<'g, const N: usize> { impl<'g, const N: usize> ReadHeaderReply<'g, N> { /// Creates a new ReadHeaderReply with the given request ID and output channel. 
/// - /// It is meant to be called in [`SftpHandler`] and used to call a method of the [`SftpServer`] that requires a read reply header, such as [`SftpServer::read`] + /// It is meant to be called in [`crate::SftpHandler`] and used to call a method of the [`crate::sftpserver::SftpServer`] that requires a read reply header, such as [`crate::sftpserver::SftpServer::read`] pub(crate) fn new( req_id: ReqId, chan_out: &'g SftpOutputProducer<'g, N>, @@ -78,7 +78,7 @@ impl<'g, const N: usize> ReadHeaderReply<'g, N> { data_len: u32, ) -> Result<&'g [u8], SftpError> { // length field - (data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH).enc(sink)?; + (data_len + ENCODED_SSH_FXP_DATA_HEADER).enc(sink)?; // packet type (1) u8::from(SftpNum::SSH_FXP_DATA).enc(sink)?; // request id (4) @@ -170,7 +170,7 @@ impl<'g, const N: usize> ReadDataReply<'g, N> { /// This struct is used to represent the state of a read reply after /// the header and the data have been sent /// -/// It is used as a mandatory return value for a successful [`SftpServer::read`] +/// It is used as a mandatory return value for a successful [`crate::sftpserver::SftpServer::read`] /// operation pub struct ReadReplyFinished { _req_id: ReqId, @@ -204,7 +204,7 @@ mod enforcing_process_tests { .unwrap(); assert_eq!( - data_len + ENCODED_SSH_FXP_DATA_MIN_LENGTH, + data_len + ENCODED_SSH_FXP_DATA_HEADER, u32::from_be_bytes(payload[..4].try_into().unwrap()) ); } diff --git a/sftp/src/sftpserver/sftpserver.rs b/sftp/src/sftpserver/sftpserver.rs index 7594fb47..01897f88 100644 --- a/sftp/src/sftpserver/sftpserver.rs +++ b/sftp/src/sftpserver/sftpserver.rs @@ -1,6 +1,6 @@ use crate::error::{SftpError, SftpResult}; use crate::proto::{NameEntry, PFlags}; -use crate::server::DirReadHeaderReply; +use crate::server::{DirReadHeaderReply, DirReadReplyFinished}; use crate::sftpserver::{ReadHeaderReply, ReadReplyFinished}; use crate::{ handles::OpaqueFileHandle, @@ -15,7 +15,7 @@ pub type SftpOpResult = core::result::Result; /// To finish 
read requests the server needs to answer to /// **subsequent READ requests** after all the data has been sent already -/// with a [`SftpPacket`] including a status code [`StatusCode::SSH_FX_EOF`]. +/// with a [`crate::proto::SftpPacket`] including a status code [`StatusCode::SSH_FX_EOF`]. /// /// [`ReadStatus`] enum has been implemented to keep record of these exhausted /// read operations. @@ -31,7 +31,7 @@ pub enum ReadStatus { #[default] PendingData, /// The server has provided all the data requested therefore the [`SftpServer`] - /// will send a [`SftpPacket`] including a status code [`StatusCode::SSH_FX_EOF`] + /// will send a [`crate::proto::SftpPacket`] including a status code [`crate::proto::StatusCode::SSH_FX_EOF`] /// in the next read request. EndOfFile, } @@ -39,7 +39,7 @@ pub enum ReadStatus { /// All trait functions are optional in the SFTP protocol. /// Some less core operations have a Provided implementation returning /// returns `SSH_FX_OP_UNSUPPORTED`. Common operations must be implemented, -/// but may return `Err(StatusCode::SSH_FX_OP_UNSUPPORTED)`. +/// but may return `Err(crate::proto::StatusCode::SSH_FX_OP_UNSUPPORTED)`. pub trait SftpServer where T: OpaqueFileHandle, @@ -75,6 +75,12 @@ where } } + /// Reads from a file that has previously been opened for reading + /// + /// The opaque_file_handle is a handle that the server can use to identify the file being read. It must have been set in the [`crate::sftpserver::SftpServer::open`] function. + /// The offset is the position in the file from which to start reading. + /// The len is the number of bytes to read. + /// The reply is a structure that facilitates the task of sending the response back correctly. See [`ReadHeaderReply`] for more details. #[allow(unused)] fn read( &mut self, @@ -95,6 +101,10 @@ } /// Writes to a file that has previously being opened for writing + /// + /// The opaque_file_handle is a handle that the server can use to identify the file being written. 
It must have been set in the [`crate::sftpserver::SftpServer::open`] function. + /// The offset is the position in the file from which to start writing. + /// The buf is the data to be written. fn write( &mut self, opaque_file_handle: &T, offset: u64, @@ -113,6 +123,8 @@ } /// Opens a directory and returns a handle + /// + /// The dir is the path of the directory to open. The returned handle can be used in subsequent calls to [`crate::sftpserver::SftpServer::readdir`] to read the contents of the directory. fn opendir( &mut self, dir: &str, @@ -123,12 +135,20 @@ } } + /// Reads the contents of a directory that has previously been opened with [`crate::sftpserver::SftpServer::opendir`] + /// + /// Parameters: + /// - The opaque_dir_handle is a handle that the server can use to identify the directory being read. It must have been set in the [`crate::sftpserver::SftpServer::opendir`] function. + /// - The reply is a structure that facilitates the task of sending the response back correctly. See [`DirReadHeaderReply`] for more details. + /// - N is the allocated size for the buffer that will be used to send the response back. 
+ /// + /// #[allow(unused_variables)] fn readdir( &mut self, opaque_dir_handle: &T, reply: DirReadHeaderReply<'_, N>, - ) -> impl core::future::Future> { + ) -> impl core::future::Future> { async move { log::error!( "SftpServer ReadDir operation not defined: handle = {:?}", From 9f7aeb6c31c0e3abf9a8e36d0ccbf2005768e044 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 1 May 2026 15:58:13 +1000 Subject: [PATCH 46/47] Adding sunset-sftp testing and doc building to ci.sh This made me find many problems in the docs and now the tests are finally part of the CI --- demo/sftp/std/Cargo.toml | 4 ---- sftp/src/proto.rs | 9 +++++---- sftp/src/sftpsink.rs | 8 ++------ testing/ci.sh | 7 +++++++ 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/demo/sftp/std/Cargo.toml b/demo/sftp/std/Cargo.toml index 5bfadc5e..d39cab27 100644 --- a/demo/sftp/std/Cargo.toml +++ b/demo/sftp/std/Cargo.toml @@ -44,10 +44,6 @@ heapless = "0.8" libc = "0.2.101" async-io = "1.6.0" -# using local fork -# menu = "0.3" - - critical-section = "1.1" rand = { version = "0.8", features = ["getrandom"] } diff --git a/sftp/src/proto.rs b/sftp/src/proto.rs index a1c2eb31..5969649e 100644 --- a/sftp/src/proto.rs +++ b/sftp/src/proto.rs @@ -52,9 +52,10 @@ pub const SFTP_WRITE_REQID_INDEX: usize = 5; /// for handle maximum length pub const _SSH_FXP_HANDLE_MAX_LEN: u32 = 256; -/// The maximum size for full paths is only limited by the u32 where ssh strings lengths are contained. This causes that different platforms use different maximum path lengths. -/// We need to make a choice in this implementation. Since it is targeting embedded devices I am going to set it short, since influence the length of the [[requestHolder]] that needs to be allocated -/// to compose fragmented requests. +/// The maximum size for full paths is only limited by the u32 where ssh strings lengths are contained. This causes that +/// different platforms use different maximum path lengths. 
We need to make a choice in this implementation. +/// Since it is targeting embedded devices I am going to set it short, since it influences the length of an internal buffer +/// used to compose requests received in fragments. #[cfg(not(any(feature = "long-paths-4096", feature = "long-paths-1024")))] pub const MAX_PATH_LEN: usize = 256; #[cfg(feature = "long-paths-1024")] @@ -442,7 +443,7 @@ impl SSHEncode for StatusCode { } // TODO: Implement extensions. Low in priority -/// Provided to provide a mechanism to implement extensions +/// Provided to implement extensions // #[derive(Debug, SSHEncode, SSHDecode)] // pub struct ExtPair<'a> { // pub name: &'a str, diff --git a/sftp/src/sftpsink.rs b/sftp/src/sftpsink.rs index 31fb0c76..4e16ad92 100644 --- a/sftp/src/sftpsink.rs +++ b/sftp/src/sftpsink.rs @@ -8,8 +8,6 @@ use log::{debug, error, info, log, trace, warn}; /// A implementation fo [`SSHSink`] that observes some constraints for /// SFTP packets /// -/// **Important**: It needs to be [`SftpSink::finalize`] to add the packet -/// len #[derive(Default)] pub struct SftpSink<'g> { buffer: &'g mut [u8], @@ -19,8 +17,8 @@ impl<'g> SftpSink<'g> { /// Initializes the Sink, with the particularity that it will leave /// [`crate::proto::SFTP_FIELD_LEN_LENGTH`] bytes empty at the - /// start of the buffer that will contain the total packet length - /// once the [`SftpSink::finalize`] method is called + /// start of the buffer. That will contain the total packet length + /// when [`SftpSink::push()`] is called pub fn new(s: &'g mut [u8]) -> Self { SftpSink { buffer: s, index: SFTP_FIELD_LEN_LENGTH } } @@ -60,8 +58,6 @@ impl<'g> SftpSink<'g> { /// Auxiliary method to allow an immutable reference to the full used /// data (includes the prepended length field) - /// - /// **Important:** Call this after [`SftpSink::finalize()`] pub fn used_slice(&self) -> &[u8] { debug!( "SftpSink used_slice called, total len: {}. 
Index: {}", diff --git a/testing/ci.sh b/testing/ci.sh index 4af02757..e70c4df7 100755 --- a/testing/ci.sh +++ b/testing/ci.sh @@ -55,6 +55,13 @@ cargo test --doc cargo doc ) +( +cd sftp +cargo test +cargo test --doc +cargo doc +) + ( cd demo/std cargo build From 671428325dc8faa2387d95dc747ed039466965f9 Mon Sep 17 00:00:00 2001 From: jubeormk1 Date: Fri, 1 May 2026 18:11:45 +1000 Subject: [PATCH 47/47] [skip ci] Adding sunset-sftp crate with basic SFTP server implementation - lib.rs: Now it contains the main library code for the sunset-sftp crate, including module declarations and public exports. Updated documentation to reflect the current state of the library and its features including issue #40. Main additions include: - sftphandler module: Implementation of the main entrypoint for the SFTP server, which will handle incoming SFTP requests and manage the server's state. - sftpserver.rs: Contains the trait definition for the SFTP server that is to be implemented by the user of the library, defining the required methods for handling SFTP operations. - sftperror.rs: Defines error types and handling for the SFTP server operations. Additional files: - sftpsink.rs: An implementation of SSHSink with extra functionality for handling SFTP packets - opaquefilehandle.rs: Collection of traits that a filehandle is expected to implement. About SftpHandler: Main entry point for the SFTP server. It requires taking ownership of an async_channel.rs::ChanInOut in order to write long responses to the client. This makes it not exactly sans-io and not completely observable, but this compromise facilitates the implementation of the SftpServer trait thanks to an internal embassy pipe (See sftpoutputchannelhandler.rs). After that I have tested all my ./demo/sftp/std/testing/test_*.sh scripts and all worked fine except test_get_file_long.sh which triggers a known bug #40 I edited error.rs to avoid a CI check failure around unused use statements. 
All tests passed after this --- src/error.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/error.rs b/src/error.rs index 0ac33552..42333e8b 100644 --- a/src/error.rs +++ b/src/error.rs @@ -6,8 +6,6 @@ use core::fmt::Arguments; use snafu::prelude::*; -use heapless::String; - use crate::channel::ChanNum; #[allow(unused_imports)]