mirror of
https://github.com/awfixers-stuff/src.git
synced 2026-03-27 12:56:19 +00:00
create src
This commit is contained in:
1665
src-packetline/CHANGELOG.md
Normal file
1665
src-packetline/CHANGELOG.md
Normal file
File diff suppressed because it is too large
Load Diff
63
src-packetline/Cargo.toml
Normal file
63
src-packetline/Cargo.toml
Normal file
@@ -0,0 +1,63 @@
|
||||
# Inherit lint configuration from the workspace root.
lints.workspace = true

[package]
name = "src-packetline"
version = "0.21.1"
repository = "https://github.com/GitoxideLabs/gitoxide"
license = "MIT OR Apache-2.0"
description = "A crate of the gitoxide project implementing the pkt-line serialization format"
authors = ["Sebastian Thiel <sebastian.thiel@icloud.com>"]
edition = "2021"
include = ["src/**/*", "LICENSE-*"]
rust-version = "1.82"

[lib]
doctest = false

[features]
#! By default, all IO related capabilities will be missing unless one of the following is chosen.
default = []

## Enable blocking I/O API.
blocking-io = []
## Enable async I/O API using IO traits from `futures-io`.
# no `dep:` for futures-lite (https://github.com/rust-secure-code/cargo-auditable/issues/124)
async-io = ["dep:futures-io", "futures-lite", "dep:pin-project-lite"]

#! ### Other
## Data structures implement `serde::Serialize` and `serde::Deserialize`.
serde = ["dep:serde", "bstr/serde", "faster-hex/serde"]

[[test]]
name = "async-packetline"
path = "tests/async-packetline.rs"
required-features = ["async-io"]

[[test]]
name = "blocking-packetline"
path = "tests/blocking-packetline.rs"
required-features = ["blocking-io", "maybe-async/is_sync"]

[dependencies]
src-trace = { version = "^0.1.18", path = "../src-trace" }

serde = { version = "1.0.114", optional = true, default-features = false, features = ["std", "derive"] }
thiserror = "2.0.18"
faster-hex = { version = "0.10.0", default-features = false, features = ["std"] }
bstr = { version = "1.12.0", default-features = false, features = ["std"] }
# async support
futures-io = { version = "0.3.32", optional = true }
futures-lite = { version = "2.1.0", optional = true }
pin-project-lite = { version = "0.2.6", optional = true }

document-features = { version = "0.2.0", optional = true }

[dev-dependencies]
src-odb = { path = "../src-odb" }
src-pack-for-configuration = { package = "src-pack", path = "../src-pack", default-features = false, features = ["streaming-input"] }
src-hash = { path = "../src-hash", features = ["sha1"] }
async-std = { version = "1.9.0", features = ["attributes"] }
maybe-async = "0.2.6"

[package.metadata.docs.rs]
features = ["document-features", "blocking-io", "serde"]
1
src-packetline/LICENSE-APACHE
Symbolic link
1
src-packetline/LICENSE-APACHE
Symbolic link
@@ -0,0 +1 @@
|
||||
../LICENSE-APACHE
|
||||
1
src-packetline/LICENSE-MIT
Symbolic link
1
src-packetline/LICENSE-MIT
Symbolic link
@@ -0,0 +1 @@
|
||||
../LICENSE-MIT
|
||||
249
src-packetline/src/async_io/encode.rs
Normal file
249
src-packetline/src/async_io/encode.rs
Normal file
@@ -0,0 +1,249 @@
|
||||
use std::{
|
||||
io,
|
||||
pin::Pin,
|
||||
task::{ready, Context, Poll},
|
||||
};
|
||||
|
||||
use futures_io::AsyncWrite;
|
||||
use futures_lite::AsyncWriteExt;
|
||||
|
||||
use crate::{
|
||||
encode::{u16_to_hex, Error},
|
||||
BandRef, Channel, ErrorRef, PacketLineRef, TextRef, DELIMITER_LINE, ERR_PREFIX, FLUSH_LINE, MAX_DATA_LEN,
|
||||
RESPONSE_END_LINE,
|
||||
};
|
||||
|
||||
pin_project_lite::pin_project! {
    /// A way of writing packet lines asynchronously.
    pub struct LineWriter<'a, W> {
        // The underlying asynchronous writer; pinned so `poll_write` can be forwarded.
        #[pin]
        pub(crate) writer: W,
        // Bytes written right after the 4-byte hex length header; may be empty.
        pub(crate) prefix: &'a [u8],
        // Bytes written right after the payload; may be empty.
        pub(crate) suffix: &'a [u8],
        // Progress of the current line write; `State::Idle` when no write is in flight.
        state: State<'a>,
    }
}
|
||||
|
||||
/// The phases of writing a single packet line; each variant tracks how far the write got
/// so a `Pending` inner writer can be resumed exactly where it left off.
enum State<'a> {
    /// No write is in progress.
    Idle,
    /// Writing the 4 hex digits of the length header; second field is bytes written so far.
    WriteHexLen([u8; 4], usize),
    /// Writing the remaining, not-yet-written part of the prefix.
    WritePrefix(&'a [u8]),
    /// Writing the payload; field is bytes of `data` written so far.
    WriteData(usize),
    /// Writing the remaining, not-yet-written part of the suffix.
    WriteSuffix(&'a [u8]),
}
|
||||
|
||||
impl<'a, W: AsyncWrite + Unpin> LineWriter<'a, W> {
    /// Create a new line writer writing data with a `prefix` and `suffix`.
    ///
    /// Keep the additional `prefix` or `suffix` buffers empty if no prefix or suffix should be written.
    pub fn new(writer: W, prefix: &'a [u8], suffix: &'a [u8]) -> Self {
        LineWriter {
            writer,
            prefix,
            suffix,
            state: State::Idle,
        }
    }

    /// Consume self and reveal the inner writer.
    pub fn into_inner(self) -> W {
        self.writer
    }
}
|
||||
|
||||
impl<W: AsyncWrite + Unpin> AsyncWrite for LineWriter<'_, W> {
    /// Write `data` as one packet line: `<4 hex len><prefix><data><suffix>`.
    ///
    /// The state machine below allows resuming after the inner writer returns `Pending`.
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, data: &[u8]) -> Poll<io::Result<usize>> {
        let mut this = self.project();
        loop {
            match &mut this.state {
                State::Idle => {
                    // Validate the total payload size before emitting any bytes.
                    let data_len = this.prefix.len() + data.len() + this.suffix.len();
                    if data_len > MAX_DATA_LEN {
                        let err = Error::DataLengthLimitExceeded {
                            length_in_bytes: data_len,
                        };
                        return Poll::Ready(Err(io::Error::other(err)));
                    }
                    if data.is_empty() {
                        let err = Error::DataIsEmpty;
                        return Poll::Ready(Err(io::Error::other(err)));
                    }
                    // The length header counts its own 4 hex bytes as well.
                    let data_len = data_len + 4;
                    let len_buf = u16_to_hex(data_len as u16);
                    *this.state = State::WriteHexLen(len_buf, 0);
                }
                State::WriteHexLen(hex_len, written) => {
                    while *written != hex_len.len() {
                        let n = ready!(this.writer.as_mut().poll_write(cx, &hex_len[*written..]))?;
                        if n == 0 {
                            return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
                        }
                        *written += n;
                    }
                    if this.prefix.is_empty() {
                        *this.state = State::WriteData(0);
                    } else {
                        *this.state = State::WritePrefix(this.prefix);
                    }
                }
                State::WritePrefix(buf) => {
                    while !buf.is_empty() {
                        let n = ready!(this.writer.as_mut().poll_write(cx, buf))?;
                        if n == 0 {
                            return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
                        }
                        // Advance the remaining slice past the bytes just written.
                        let (_, rest) = std::mem::take(buf).split_at(n);
                        *buf = rest;
                    }
                    *this.state = State::WriteData(0);
                }
                State::WriteData(written) => {
                    while *written != data.len() {
                        let n = ready!(this.writer.as_mut().poll_write(cx, &data[*written..]))?;
                        if n == 0 {
                            return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
                        }
                        *written += n;
                    }
                    if this.suffix.is_empty() {
                        // NOTE(review): the returned count includes the 4 header bytes and the
                        // prefix, i.e. it can exceed `data.len()` — callers must expect
                        // packet-line semantics rather than strict `AsyncWrite` semantics here.
                        let written = 4 + this.prefix.len() + *written;
                        *this.state = State::Idle;
                        return Poll::Ready(Ok(written));
                    } else {
                        *this.state = State::WriteSuffix(this.suffix);
                    }
                }
                State::WriteSuffix(buf) => {
                    while !buf.is_empty() {
                        let n = ready!(this.writer.as_mut().poll_write(cx, buf))?;
                        if n == 0 {
                            return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
                        }
                        let (_, rest) = std::mem::take(buf).split_at(n);
                        *buf = rest;
                    }
                    *this.state = State::Idle;
                    // Report the full serialized line length, header included (see NOTE above).
                    return Poll::Ready(Ok(4 + this.prefix.len() + data.len() + this.suffix.len()));
                }
            }
        }
    }

    /// Flushing merely delegates to the inner writer.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let this = self.project();
        this.writer.poll_flush(cx)
    }

    /// Closing merely delegates to the inner writer.
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        let this = self.project();
        this.writer.poll_close(cx)
    }
}
|
||||
|
||||
/// Write `prefix`, `data` and `suffix` as one packet line to `out`, returning the number of
/// bytes written including the 4-byte hex length header.
///
/// Fails with [`Error::DataLengthLimitExceeded`] if the combined payload exceeds `MAX_DATA_LEN`,
/// and with [`Error::DataIsEmpty`] if `data` is empty.
async fn prefixed_and_suffixed_data_to_write(
    prefix: &[u8],
    data: &[u8],
    suffix: &[u8],
    mut out: impl AsyncWrite + Unpin,
) -> io::Result<usize> {
    let data_len = prefix.len() + data.len() + suffix.len();
    if data_len > MAX_DATA_LEN {
        let err = Error::DataLengthLimitExceeded {
            length_in_bytes: data_len,
        };
        return Err(io::Error::other(err));
    }
    if data.is_empty() {
        let err = Error::DataIsEmpty;
        return Err(io::Error::other(err));
    }

    // The hex length header counts its own 4 bytes.
    let data_len = data_len + 4;
    let buf = u16_to_hex(data_len as u16);

    out.write_all(&buf).await?;
    if !prefix.is_empty() {
        out.write_all(prefix).await?;
    }
    out.write_all(data).await?;
    if !suffix.is_empty() {
        out.write_all(suffix).await?;
    }
    Ok(data_len)
}

/// Like [`prefixed_and_suffixed_data_to_write()`], but without a suffix.
async fn prefixed_data_to_write(prefix: &[u8], data: &[u8], out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    prefixed_and_suffixed_data_to_write(prefix, data, &[], out).await
}
|
||||
|
||||
/// Write a `text` message to `out`, which is assured to end in a newline.
pub async fn text_to_write(text: &[u8], out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    // A `\n` suffix is always appended here.
    prefixed_and_suffixed_data_to_write(&[], text, b"\n", out).await
}

/// Write a `data` message to `out`.
pub async fn data_to_write(data: &[u8], out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    prefixed_data_to_write(&[], data, out).await
}

/// Write an error `message` to `out`.
pub async fn error_to_write(message: &[u8], out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    // The `ERR_PREFIX` marker lets the receiving side recognize this line as an error.
    prefixed_data_to_write(ERR_PREFIX, message, out).await
}
|
||||
|
||||
// The three special packet lines below consist solely of their 4-byte header,
// hence each of these functions returns `Ok(4)` on success.

/// Write a response-end message to `out`.
pub async fn response_end_to_write(mut out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    out.write_all(RESPONSE_END_LINE).await?;
    Ok(4)
}

/// Write a delim message to `out`.
pub async fn delim_to_write(mut out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    out.write_all(DELIMITER_LINE).await?;
    Ok(4)
}

/// Write a flush message to `out`.
pub async fn flush_to_write(mut out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    out.write_all(FLUSH_LINE).await?;
    Ok(4)
}
|
||||
|
||||
/// Write `data` of `kind` to `out` using sideband encoding.
pub async fn band_to_write(kind: Channel, data: &[u8], out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    // The single-byte channel id is prepended to the payload.
    prefixed_data_to_write(&[kind as u8], data, out).await
}
|
||||
|
||||
/// Serialize `band` to `out`, returning the amount of bytes written.
///
/// The data written to `out` can be decoded with [`crate::PacketLineRef::decode_band()`].
pub async fn write_band(band: &BandRef<'_>, out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    // Dispatch to the matching channel; the future is awaited once for all arms.
    match band {
        BandRef::Data(d) => band_to_write(Channel::Data, d, out),
        BandRef::Progress(d) => band_to_write(Channel::Progress, d, out),
        BandRef::Error(d) => band_to_write(Channel::Error, d, out),
    }
    .await
}
|
||||
|
||||
/// Serialize `text` to `out` as a text line with a trailing newline, returning the amount of bytes written.
// NOTE(review): the original doc said "Serialize `band`" — this function writes a `TextRef`,
// and `text_to_write` always appends a `\n` suffix.
pub async fn write_text(text: &TextRef<'_>, out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    text_to_write(text.0, out).await
}
|
||||
|
||||
/// Serialize `error` to `out`.
///
/// This includes a marker to allow decoding it outside a sideband channel, returning the amount of bytes written.
pub async fn write_error(error: &ErrorRef<'_>, out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    error_to_write(error.0, out).await
}
|
||||
|
||||
/// Serialize `line` to `out` in git `packetline` format, returning the amount of bytes written to `out`.
pub async fn write_packet_line(line: &PacketLineRef<'_>, out: impl AsyncWrite + Unpin) -> io::Result<usize> {
    match line {
        PacketLineRef::Data(d) => data_to_write(d, out).await,
        PacketLineRef::Flush => flush_to_write(out).await,
        PacketLineRef::Delimiter => delim_to_write(out).await,
        PacketLineRef::ResponseEnd => response_end_to_write(out).await,
    }
}
|
||||
238
src-packetline/src/async_io/read.rs
Normal file
238
src-packetline/src/async_io/read.rs
Normal file
@@ -0,0 +1,238 @@
|
||||
use std::{
|
||||
io,
|
||||
ops::{Deref, DerefMut},
|
||||
};
|
||||
|
||||
use bstr::ByteSlice;
|
||||
use futures_io::AsyncRead;
|
||||
use futures_lite::AsyncReadExt;
|
||||
|
||||
pub use super::sidebands::WithSidebands;
|
||||
use crate::{
|
||||
decode,
|
||||
read::{ExhaustiveOutcome, ProgressAction, StreamingPeekableIterState},
|
||||
PacketLineRef, MAX_LINE_LEN, U16_HEX_BYTES,
|
||||
};
|
||||
|
||||
/// Read pack lines one after another, without consuming more than needed from the underlying
/// [`AsyncRead`]. [`Flush`](PacketLineRef::Flush) lines cause the reader to stop producing lines forever,
/// leaving [`AsyncRead`] at the start of whatever comes next.
///
/// This implementation tries hard not to allocate at all which leads to quite some added complexity and plenty of extra memory copies.
pub struct StreamingPeekableIter<T> {
    // Shared iterator state; exposed to callers through the `Deref`/`DerefMut` impls below.
    pub(super) state: StreamingPeekableIterState<T>,
}
|
||||
|
||||
/// Non-IO methods
|
||||
impl<T> StreamingPeekableIter<T>
|
||||
where
|
||||
T: AsyncRead + Unpin,
|
||||
{
|
||||
/// Return a new instance from `read` which will stop decoding packet lines when receiving one of the given `delimiters`.
|
||||
/// If `trace` is `true`, all packetlines received or sent will be passed to the facilities of the `src-trace` crate.
|
||||
pub fn new(read: T, delimiters: &'static [PacketLineRef<'static>], trace: bool) -> Self {
|
||||
Self {
|
||||
state: StreamingPeekableIterState::new(read, delimiters, trace),
|
||||
}
|
||||
}
|
||||
|
||||
async fn read_line_inner<'a>(
|
||||
reader: &mut T,
|
||||
buf: &'a mut [u8],
|
||||
) -> io::Result<Result<PacketLineRef<'a>, decode::Error>> {
|
||||
let (hex_bytes, data_bytes) = buf.split_at_mut(4);
|
||||
reader.read_exact(hex_bytes).await?;
|
||||
let num_data_bytes = match decode::hex_prefix(hex_bytes) {
|
||||
Ok(decode::PacketLineOrWantedSize::Line(line)) => return Ok(Ok(line)),
|
||||
Ok(decode::PacketLineOrWantedSize::Wanted(additional_bytes)) => additional_bytes as usize,
|
||||
Err(err) => return Ok(Err(err)),
|
||||
};
|
||||
|
||||
let (data_bytes, _) = data_bytes.split_at_mut(num_data_bytes);
|
||||
reader.read_exact(data_bytes).await?;
|
||||
match decode::to_data_line(data_bytes) {
|
||||
Ok(line) => Ok(Ok(line)),
|
||||
Err(err) => Ok(Err(err)),
|
||||
}
|
||||
}
|
||||
|
||||
/// This function is needed to help the borrow checker allow us to return references all the time
|
||||
/// It contains a bunch of logic shared between peek and `read_line` invocations.
|
||||
async fn read_line_inner_exhaustive<'a>(
|
||||
reader: &mut T,
|
||||
buf: &'a mut Vec<u8>,
|
||||
delimiters: &[PacketLineRef<'static>],
|
||||
fail_on_err_lines: bool,
|
||||
buf_resize: bool,
|
||||
trace: bool,
|
||||
) -> ExhaustiveOutcome<'a> {
|
||||
(
|
||||
false,
|
||||
None,
|
||||
Some(match Self::read_line_inner(reader, buf).await {
|
||||
Ok(Ok(line)) => {
|
||||
if trace {
|
||||
match line {
|
||||
#[allow(unused_variables)]
|
||||
PacketLineRef::Data(d) => {
|
||||
gix_trace::trace!("<< {}", d.as_bstr().trim().as_bstr());
|
||||
}
|
||||
PacketLineRef::Flush => {
|
||||
gix_trace::trace!("<< FLUSH");
|
||||
}
|
||||
PacketLineRef::Delimiter => {
|
||||
gix_trace::trace!("<< DELIM");
|
||||
}
|
||||
PacketLineRef::ResponseEnd => {
|
||||
gix_trace::trace!("<< RESPONSE_END");
|
||||
}
|
||||
}
|
||||
}
|
||||
if delimiters.contains(&line) {
|
||||
let stopped_at = delimiters.iter().find(|l| **l == line).copied();
|
||||
buf.clear();
|
||||
return (true, stopped_at, None);
|
||||
} else if fail_on_err_lines {
|
||||
if let Some(err) = line.check_error() {
|
||||
let err = err.0.as_bstr().to_owned();
|
||||
buf.clear();
|
||||
return (
|
||||
true,
|
||||
None,
|
||||
Some(Err(io::Error::other(crate::read::Error { message: err }))),
|
||||
);
|
||||
}
|
||||
}
|
||||
let len = line.as_slice().map_or(U16_HEX_BYTES, |s| s.len() + U16_HEX_BYTES);
|
||||
if buf_resize {
|
||||
buf.resize(len, 0);
|
||||
}
|
||||
Ok(Ok(crate::decode(buf).expect("only valid data here")))
|
||||
}
|
||||
Ok(Err(err)) => {
|
||||
buf.clear();
|
||||
Ok(Err(err))
|
||||
}
|
||||
Err(err) => {
|
||||
buf.clear();
|
||||
Err(err)
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// Read a packet line into the internal buffer and return it.
|
||||
///
|
||||
/// Returns `None` if the end of iteration is reached because of one of the following:
|
||||
///
|
||||
/// * natural EOF
|
||||
/// * ERR packet line encountered if [`fail_on_err_lines()`](StreamingPeekableIterState::fail_on_err_lines()) is true.
|
||||
/// * A `delimiter` packet line encountered
|
||||
pub async fn read_line(&mut self) -> Option<io::Result<Result<PacketLineRef<'_>, decode::Error>>> {
|
||||
let state = &mut self.state;
|
||||
if state.is_done {
|
||||
return None;
|
||||
}
|
||||
if !state.peek_buf.is_empty() {
|
||||
std::mem::swap(&mut state.peek_buf, &mut state.buf);
|
||||
state.peek_buf.clear();
|
||||
Some(Ok(Ok(crate::decode(&state.buf).expect("only valid data in peek buf"))))
|
||||
} else {
|
||||
if state.buf.len() != MAX_LINE_LEN {
|
||||
state.buf.resize(MAX_LINE_LEN, 0);
|
||||
}
|
||||
let (is_done, stopped_at, res) = Self::read_line_inner_exhaustive(
|
||||
&mut state.read,
|
||||
&mut state.buf,
|
||||
state.delimiters,
|
||||
state.fail_on_err_lines,
|
||||
false,
|
||||
state.trace,
|
||||
)
|
||||
.await;
|
||||
state.is_done = is_done;
|
||||
state.stopped_at = stopped_at;
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
/// Peek the next packet line without consuming it. Returns `None` if a stop-packet or an error
|
||||
/// was encountered.
|
||||
///
|
||||
/// Multiple calls to peek will return the same packet line, if there is one.
|
||||
pub async fn peek_line(&mut self) -> Option<io::Result<Result<PacketLineRef<'_>, decode::Error>>> {
|
||||
let state = &mut self.state;
|
||||
if state.is_done {
|
||||
return None;
|
||||
}
|
||||
if state.peek_buf.is_empty() {
|
||||
state.peek_buf.resize(MAX_LINE_LEN, 0);
|
||||
let (is_done, stopped_at, res) = Self::read_line_inner_exhaustive(
|
||||
&mut state.read,
|
||||
&mut state.peek_buf,
|
||||
state.delimiters,
|
||||
state.fail_on_err_lines,
|
||||
true,
|
||||
state.trace,
|
||||
)
|
||||
.await;
|
||||
state.is_done = is_done;
|
||||
state.stopped_at = stopped_at;
|
||||
res
|
||||
} else {
|
||||
Some(Ok(Ok(crate::decode(&state.peek_buf).expect("only valid data here"))))
|
||||
}
|
||||
}
|
||||
|
||||
/// Same as [`as_read_with_sidebands(…)`](StreamingPeekableIter::as_read_with_sidebands()), but for channels without side band support.
|
||||
///
|
||||
/// Due to the preconfigured function type this method can be called without 'turbofish'.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn as_read(&mut self) -> WithSidebands<'_, T, fn(bool, &[u8]) -> ProgressAction> {
|
||||
WithSidebands::new(self)
|
||||
}
|
||||
|
||||
/// Return this instance as implementor of [`Read`](io::Read) assuming sidebands to be used in all received packet lines.
|
||||
/// Each invocation of [`read_line()`](io::BufRead::read_line()) returns a packet line.
|
||||
///
|
||||
/// Progress or error information will be passed to the given `handle_progress(is_error, text)` function, with `is_error: bool`
|
||||
/// being true in case the `text` is to be interpreted as error.
|
||||
///
|
||||
/// _Please note_ that sidebands need to be negotiated with the server.
|
||||
pub fn as_read_with_sidebands<F: FnMut(bool, &[u8]) -> ProgressAction + Unpin>(
|
||||
&mut self,
|
||||
handle_progress: F,
|
||||
) -> WithSidebands<'_, T, F> {
|
||||
WithSidebands::with_progress_handler(self, handle_progress)
|
||||
}
|
||||
|
||||
/// Same as [`as_read_with_sidebands(…)`](StreamingPeekableIter::as_read_with_sidebands()), but for channels without side band support.
|
||||
///
|
||||
/// The type parameter `F` needs to be configured for this method to be callable using the 'turbofish' operator.
|
||||
/// Use [`as_read()`](StreamingPeekableIter::as_read()).
|
||||
pub fn as_read_without_sidebands<F: FnMut(bool, &[u8]) -> ProgressAction + Unpin>(
|
||||
&mut self,
|
||||
) -> WithSidebands<'_, T, F> {
|
||||
WithSidebands::without_progress_handler(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> StreamingPeekableIter<T> {
    /// Return the inner read
    pub fn into_inner(self) -> T {
        self.state.read
    }
}
|
||||
|
||||
// Expose the shared state's accessors directly on the iterator.
impl<T> Deref for StreamingPeekableIter<T> {
    type Target = StreamingPeekableIterState<T>;
    fn deref(&self) -> &Self::Target {
        &self.state
    }
}
|
||||
|
||||
// Mutable counterpart of the `Deref` impl above.
impl<T> DerefMut for StreamingPeekableIter<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.state
    }
}
|
||||
366
src-packetline/src/async_io/sidebands.rs
Normal file
366
src-packetline/src/async_io/sidebands.rs
Normal file
@@ -0,0 +1,366 @@
|
||||
use std::{
|
||||
future::Future,
|
||||
pin::Pin,
|
||||
task::{ready, Context, Poll},
|
||||
};
|
||||
|
||||
use futures_io::{AsyncBufRead, AsyncRead};
|
||||
|
||||
use super::read::StreamingPeekableIter;
|
||||
use crate::{decode, read::ProgressAction, BandRef, PacketLineRef, TextRef, U16_HEX_BYTES};
|
||||
|
||||
// Shorthand for the nested result produced by `StreamingPeekableIter::read_line()`.
type ReadLineResult<'a> = Option<std::io::Result<Result<PacketLineRef<'a>, decode::Error>>>;
/// An implementor of [`AsyncBufRead`] yielding packet lines on each call to `read_line()`.
/// It's also possible to hide the underlying packet lines using the [`Read`](AsyncRead) implementation which is useful
/// if they represent binary data, like the one of a pack file.
pub struct WithSidebands<'a, T, F>
where
    T: AsyncRead,
{
    // Either the idle parent iterator, or an in-flight boxed `read_line` future.
    state: State<'a, T>,
    // Called for progress/error band lines; `None` means only plain data lines are accepted.
    handle_progress: Option<F>,
    // Current read offset into the parent's line buffer.
    pos: usize,
    // One-past-the-end offset of valid data in the parent's line buffer.
    cap: usize,
}
|
||||
|
||||
impl<T, F> Drop for WithSidebands<'_, T, F>
where
    T: AsyncRead,
{
    fn drop(&mut self) {
        // Reset the parent iterator so it can be reused for packetline reading afterwards.
        // When a `read_line` future is still in flight there is no parent reference to reset.
        if let State::Idle { ref mut parent } = self.state {
            parent
                .as_mut()
                .expect("parent is always available if we are idle")
                .reset();
        }
    }
}
|
||||
|
||||
impl<'a, T> WithSidebands<'a, T, fn(bool, &[u8]) -> ProgressAction>
where
    T: AsyncRead,
{
    /// Create a new instance with the given provider as `parent`.
    pub fn new(parent: &'a mut StreamingPeekableIter<T>) -> Self {
        WithSidebands {
            state: State::Idle { parent: Some(parent) },
            // No progress handler: sideband lines are not expected/processed.
            handle_progress: None,
            pos: 0,
            cap: 0,
        }
    }
}
|
||||
|
||||
/// Whether the parent iterator is directly accessible, or currently owned by an in-flight future.
enum State<'a, T> {
    Idle {
        // `Some` while idle; taken when transitioning to `ReadLine`.
        parent: Option<&'a mut StreamingPeekableIter<T>>,
    },
    ReadLine {
        // The boxed `read_line` future which currently borrows the parent.
        read_line: Pin<Box<dyn Future<Output = ReadLineResult<'a>> + 'a>>,
        // Raw pointer used to recover the parent reference once the future completed.
        parent_inactive: Option<*mut StreamingPeekableIter<T>>,
    },
}
|
||||
|
||||
/// # SAFETY
/// It's safe because T is `Send` and we have a test that assures that our `StreamingPeekableIter` is `Send` as well,
/// hence the `*mut _` is `Send`.
/// `read_line` isn't send and we can't declare it as such as it forces `Send` in all places (BUT WHY IS THAT A PROBLEM, I don't recall).
/// However, it's only used when pinned and thus isn't actually sent anywhere, it's a secondary state of the future used after it was Send
/// to a thread possibly.
// TODO: Is it possible to declare it as it should be?
#[allow(unsafe_code, clippy::non_send_fields_in_send_ty)]
unsafe impl<T> Send for State<'_, T> where T: Send {}
|
||||
|
||||
impl<'a, T, F> WithSidebands<'a, T, F>
where
    T: AsyncRead + Unpin,
    F: FnMut(bool, &[u8]) -> ProgressAction + Unpin,
{
    /// Create a new instance with the given `parent` provider and the `handle_progress` function.
    ///
    /// Progress or error information will be passed to the given `handle_progress(is_error, text)` function, with `is_error: bool`
    /// being true in case the `text` is to be interpreted as error.
    pub fn with_progress_handler(parent: &'a mut StreamingPeekableIter<T>, handle_progress: F) -> Self {
        WithSidebands {
            state: State::Idle { parent: Some(parent) },
            handle_progress: Some(handle_progress),
            pos: 0,
            cap: 0,
        }
    }

    /// Create a new instance without a progress handler.
    pub fn without_progress_handler(parent: &'a mut StreamingPeekableIter<T>) -> Self {
        WithSidebands {
            state: State::Idle { parent: Some(parent) },
            handle_progress: None,
            pos: 0,
            cap: 0,
        }
    }

    /// Forwards to the parent [`StreamingPeekableIter::reset_with()`](crate::read::StreamingPeekableIterState::reset_with()).
    pub fn reset_with(&mut self, delimiters: &'static [PacketLineRef<'static>]) {
        // Only possible while idle; an in-flight future exclusively owns the parent.
        if let State::Idle { ref mut parent } = self.state {
            parent
                .as_mut()
                .expect("parent is always available if we are idle")
                .reset_with(delimiters);
        }
    }

    /// Forwards to the parent [`StreamingPeekableIterState::stopped_at()`](crate::read::StreamingPeekableIterState::stopped_at()).
    pub fn stopped_at(&self) -> Option<PacketLineRef<'static>> {
        match self.state {
            State::Idle { ref parent } => {
                parent
                    .as_ref()
                    .expect("parent is always available if we are idle")
                    .stopped_at
            }
            _ => None,
        }
    }

    /// Set or unset the progress handler.
    pub fn set_progress_handler(&mut self, handle_progress: Option<F>) {
        self.handle_progress = handle_progress;
    }

    /// Effectively forwards to the parent [`StreamingPeekableIter::peek_line()`], allowing to see what would be returned
    /// next on a call to `read_line()`.
    ///
    /// # Warning
    ///
    /// This skips all sideband handling and may return an unprocessed line with sidebands still contained in it.
    pub async fn peek_data_line(&mut self) -> Option<std::io::Result<Result<&[u8], decode::Error>>> {
        match self.state {
            State::Idle { ref mut parent } => match parent
                .as_mut()
                .expect("parent is always available if we are idle")
                .peek_line()
                .await
            {
                // Only data lines are surfaced; any other line kind yields `None`.
                Some(Ok(Ok(PacketLineRef::Data(line)))) => Some(Ok(Ok(line))),
                Some(Ok(Err(err))) => Some(Ok(Err(err))),
                Some(Err(err)) => Some(Err(err)),
                _ => None,
            },
            _ => None,
        }
    }

    /// Read a packet line as string line.
    pub fn read_line_to_string<'b>(&'b mut self, buf: &'b mut String) -> ReadLineFuture<'a, 'b, T, F> {
        ReadLineFuture { parent: self, buf }
    }

    /// Read a packet line from the underlying packet reader, returning empty lines if a stop-packetline was reached.
    ///
    /// # Warning
    ///
    /// This skips all sideband handling and may return an unprocessed line with sidebands still contained in it.
    pub async fn read_data_line(&mut self) -> Option<std::io::Result<Result<PacketLineRef<'_>, decode::Error>>> {
        match &mut self.state {
            State::Idle { parent: Some(parent) } => {
                assert_eq!(
                    self.cap, 0,
                    "we don't support partial buffers right now - read-line must be used consistently"
                );
                parent.read_line().await
            }
            _ => None,
        }
    }
}
|
||||
|
||||
/// Future that reads one raw data line into a byte buffer, resolving to the number of bytes read.
#[allow(dead_code)]
pub struct ReadDataLineFuture<'a, 'b, T: AsyncRead, F> {
    // The sideband reader supplying the lines.
    parent: &'b mut WithSidebands<'a, T, F>,
    // Destination buffer; cleared and refilled on each poll completion.
    buf: &'b mut Vec<u8>,
}
|
||||
|
||||
impl<T, F> Future for ReadDataLineFuture<'_, '_, T, F>
where
    T: AsyncRead + Unpin,
    F: FnMut(bool, &[u8]) -> ProgressAction + Unpin,
{
    type Output = std::io::Result<usize>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        assert_eq!(
            self.parent.cap, 0,
            "we don't support partial buffers right now - read-line must be used consistently"
        );
        let Self { buf, parent } = &mut *self;
        // Obtain the next full line from the sideband reader and copy it out.
        let line = ready!(Pin::new(parent).poll_fill_buf(cx))?;
        buf.clear();
        buf.extend_from_slice(line);
        let bytes = line.len();
        // Mark the internal buffer as fully consumed.
        self.parent.cap = 0;
        Poll::Ready(Ok(bytes))
    }
}
|
||||
|
||||
/// Future that reads one line as UTF-8 text into a `String`, resolving to the number of bytes read.
pub struct ReadLineFuture<'a, 'b, T: AsyncRead, F> {
    // The sideband reader supplying the lines.
    parent: &'b mut WithSidebands<'a, T, F>,
    // Destination string; cleared and refilled on each poll completion.
    buf: &'b mut String,
}
|
||||
|
||||
impl<T, F> Future for ReadLineFuture<'_, '_, T, F>
where
    T: AsyncRead + Unpin,
    F: FnMut(bool, &[u8]) -> ProgressAction + Unpin,
{
    type Output = std::io::Result<usize>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        assert_eq!(
            self.parent.cap, 0,
            "we don't support partial buffers right now - read-line must be used consistently"
        );
        let Self { buf, parent } = &mut *self;
        // Non-UTF-8 line content is surfaced as an `io::Error`.
        let line = std::str::from_utf8(ready!(Pin::new(parent).poll_fill_buf(cx))?).map_err(std::io::Error::other)?;
        buf.clear();
        buf.push_str(line);
        let bytes = line.len();
        // Mark the internal buffer as fully consumed.
        self.parent.cap = 0;
        Poll::Ready(Ok(bytes))
    }
}
|
||||
|
||||
impl<T, F> AsyncBufRead for WithSidebands<'_, T, F>
|
||||
where
|
||||
T: AsyncRead + Unpin,
|
||||
F: FnMut(bool, &[u8]) -> ProgressAction + Unpin,
|
||||
{
|
||||
/// Fill the internal buffer with the payload of the next packet line, transparently handling
/// side-band decoding if a progress handler is installed.
///
/// The future returned by `parent.read_line()` borrows the parent mutably, so while it is pending
/// the parent is only reachable through the raw pointer stashed in `State::ReadLine`; once the
/// future resolves the pointer is turned back into the mutable reference (see SAFETY below).
fn poll_fill_buf(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<&[u8]>> {
    use std::io;
    use futures_lite::FutureExt;
    {
        let this = self.as_mut().get_mut();
        // Only pull the next line once the previously buffered payload was fully consumed.
        if this.pos >= this.cap {
            // `ofs` is the payload offset within the parent's buffer, `cap` the payload length.
            let (ofs, cap) = loop {
                match this.state {
                    State::Idle { ref mut parent } => {
                        let parent = parent.take().expect("parent to be present here");
                        let inactive = std::ptr::from_mut(parent);
                        this.state = State::ReadLine {
                            read_line: parent.read_line().boxed_local(),
                            parent_inactive: Some(inactive),
                        }
                    }
                    State::ReadLine {
                        ref mut read_line,
                        ref mut parent_inactive,
                    } => {
                        let line = ready!(read_line.poll(cx));

                        // Transition back to `Idle` first so the parent reference is available
                        // again while the completed line is interpreted below.
                        this.state = {
                            let parent = parent_inactive.take().expect("parent pointer always set");
                            // SAFETY: It's safe to recover the original mutable reference (from which
                            // the `read_line` future was created as the latter isn't accessible anymore
                            // once the state is set to Idle. In other words, either one or the other are
                            // accessible, never both at the same time.
                            // Also: We keep a pointer around which is protected by borrowcheck since it's created
                            // from a legal mutable reference which is moved into the read_line future - if it was manually
                            // implemented we would be able to re-obtain it from there.
                            #[allow(unsafe_code)]
                            let parent = unsafe { &mut *parent };
                            State::Idle { parent: Some(parent) }
                        };

                        let line = match line {
                            Some(line) => line?.map_err(io::Error::other)?,
                            // End of input - expose an empty buffer to the caller.
                            None => break (0, 0),
                        };

                        match this.handle_progress.as_mut() {
                            Some(handle_progress) => {
                                // Side-band mode: data passes through, progress/error text is
                                // routed to the handler which may abort the stream.
                                let band = line.decode_band().map_err(io::Error::other)?;
                                // One band-indicator byte precedes the payload on the wire.
                                const ENCODED_BAND: usize = 1;
                                match band {
                                    BandRef::Data(d) => {
                                        if d.is_empty() {
                                            continue;
                                        }
                                        break (U16_HEX_BYTES + ENCODED_BAND, d.len());
                                    }
                                    BandRef::Progress(d) => {
                                        let text = TextRef::from(d).0;
                                        if handle_progress(false, text).is_break() {
                                            return Poll::Ready(Err(io::Error::other("interrupted by user")));
                                        }
                                    }
                                    BandRef::Error(d) => {
                                        let text = TextRef::from(d).0;
                                        if handle_progress(true, text).is_break() {
                                            return Poll::Ready(Err(io::Error::other("interrupted by user")));
                                        }
                                    }
                                }
                            }
                            None => {
                                // Without a progress handler only plain data lines are acceptable.
                                break match line.as_slice() {
                                    Some(d) => (U16_HEX_BYTES, d.len()),
                                    None => {
                                        return Poll::Ready(Err(io::Error::other(
                                            "encountered non-data line in a data-line only context",
                                        )))
                                    }
                                }
                            }
                        }
                    }
                }
            };
            this.cap = cap + ofs;
            this.pos = ofs;
        }
    }
    // Borrow the payload range out of the parent's internal buffer.
    let range = self.pos..self.cap;
    match &self.get_mut().state {
        State::Idle { parent } => Poll::Ready(Ok(&parent.as_ref().expect("parent always available").buf[range])),
        State::ReadLine { .. } => unreachable!("at least in theory"),
    }
}
|
||||
|
||||
/// Advance the read position by `amt` bytes, saturating at the end of the buffered payload.
fn consume(self: Pin<&mut Self>, amt: usize) {
    let this = self.get_mut();
    this.pos = this.cap.min(this.pos + amt);
}
|
||||
}
|
||||
|
||||
impl<T, F> AsyncRead for WithSidebands<'_, T, F>
where
    T: AsyncRead + Unpin,
    F: FnMut(bool, &[u8]) -> ProgressAction + Unpin,
{
    /// Read from the current packet line payload, refilling it via `poll_fill_buf()` as needed.
    fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8]) -> Poll<std::io::Result<usize>> {
        // `Read` on a `&[u8]` copies as many bytes as fit into `buf`; `consume()` then
        // advances our own cursor by the same amount.
        use std::io::Read;
        let mut rem = ready!(self.as_mut().poll_fill_buf(cx))?;
        let nread = rem.read(buf)?;
        self.consume(nread);
        Poll::Ready(Ok(nread))
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Compile-time probe: accepts any value whose type is `Send`.
    fn assert_send<T: Send>(_value: T) {}

    /// We want to declare items containing pointers of `StreamingPeekableIter` `Send` as well, so it must be `Send` itself.
    #[test]
    fn streaming_peekable_iter_is_send() {
        assert_send(StreamingPeekableIter::new(&[][..], &[], false));
    }

    /// The internal state type must be `Send` too, as it holds a reference to the iterator.
    #[test]
    fn state_is_send() {
        let mut iter = StreamingPeekableIter::new(&[][..], &[], false);
        assert_send(State::Idle { parent: Some(&mut iter) });
    }
}
|
||||
97
src-packetline/src/async_io/write.rs
Normal file
97
src-packetline/src/async_io/write.rs
Normal file
@@ -0,0 +1,97 @@
|
||||
use std::{
|
||||
io,
|
||||
pin::Pin,
|
||||
task::{ready, Context, Poll},
|
||||
};
|
||||
|
||||
use futures_io::AsyncWrite;
|
||||
|
||||
use super::encode::LineWriter;
|
||||
use crate::{MAX_DATA_LEN, U16_HEX_BYTES};
|
||||
|
||||
pin_project_lite::pin_project! {
    /// An implementor of [`Write`][io::Write] which passes all input to an inner `Write` in packet line data encoding,
    /// one line per `write(…)` call or as many lines as it takes if the data doesn't fit into the maximum allowed line length.
    pub struct Writer<T> {
        // The packet-line encoder wrapping the actual `AsyncWrite` implementation.
        #[pin]
        inner: LineWriter<'static, T>,
        // Tracks the progress of a write that may span multiple polls.
        state: State,
    }
}
|
||||
|
||||
// The state machine driving `Writer::poll_write()`.
enum State {
    // No write operation is in progress.
    Idle,
    // A buffer is currently being written; the payload is the amount of input bytes written so far.
    WriteData(usize),
}
|
||||
|
||||
impl<T: AsyncWrite + Unpin> Writer<T> {
|
||||
/// Create a new instance from the given `write`
|
||||
pub fn new(write: T) -> Self {
|
||||
Writer {
|
||||
inner: LineWriter::new(write, &[], &[]),
|
||||
state: State::Idle,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the inner writer, consuming self.
|
||||
pub fn into_inner(self) -> T {
|
||||
self.inner.into_inner()
|
||||
}
|
||||
|
||||
/// Return a mutable reference to the inner writer, useful if packet lines should be serialized directly.
|
||||
pub fn inner_mut(&mut self) -> &mut T {
|
||||
&mut self.inner.writer
|
||||
}
|
||||
}
|
||||
|
||||
/// Non-IO methods
|
||||
impl<T> Writer<T> {
|
||||
/// If called, each call to [`write()`][io::Write::write()] will write bytes as is.
|
||||
pub fn enable_binary_mode(&mut self) {
|
||||
self.inner.suffix = &[];
|
||||
}
|
||||
/// If called, each call to [`write()`][io::Write::write()] will write the input as text, appending a trailing newline
|
||||
/// if needed before writing.
|
||||
pub fn enable_text_mode(&mut self) {
|
||||
self.inner.suffix = b"\n";
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: AsyncWrite + Unpin> AsyncWrite for Writer<T> {
    /// Write `buf` as one or more packet lines, resuming a partially finished write on re-poll.
    fn poll_write(self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll<io::Result<usize>> {
        let mut this = self.project();
        loop {
            match this.state {
                State::Idle => {
                    // A zero-payload line would serialize to '0004', which the format forbids.
                    if buf.is_empty() {
                        return Poll::Ready(Err(io::Error::other(
                            "empty packet lines are not permitted as '0004' is invalid",
                        )));
                    }
                    *this.state = State::WriteData(0);
                }
                State::WriteData(written) => {
                    while *written != buf.len() {
                        // Each packet line can carry at most MAX_DATA_LEN payload bytes.
                        let data = &buf[*written..*written + (buf.len() - *written).min(MAX_DATA_LEN)];
                        let n = ready!(this.inner.as_mut().poll_write(cx, data))?;
                        if n == 0 {
                            return Poll::Ready(Err(io::ErrorKind::WriteZero.into()));
                        }
                        *written += n;
                        // The count reported by the line writer includes the hex length prefix and
                        // the configured suffix - subtract them to track progress in `buf` bytes only.
                        *written -= U16_HEX_BYTES + this.inner.suffix.len();
                    }
                    *this.state = State::Idle;
                    return Poll::Ready(Ok(buf.len()));
                }
            }
        }
    }

    /// Flush the underlying writer.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.project().inner.poll_flush(cx)
    }

    /// Close the underlying writer.
    fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        self.project().inner.poll_close(cx)
    }
}
|
||||
109
src-packetline/src/blocking_io/encode.rs
Normal file
109
src-packetline/src/blocking_io/encode.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
use std::io;
|
||||
|
||||
use crate::{
|
||||
encode::{u16_to_hex, Error},
|
||||
BandRef, Channel, ErrorRef, PacketLineRef, TextRef, DELIMITER_LINE, ERR_PREFIX, FLUSH_LINE, MAX_DATA_LEN,
|
||||
RESPONSE_END_LINE,
|
||||
};
|
||||
|
||||
/// Write a response-end message to `out`.
|
||||
pub fn response_end_to_write(mut out: impl io::Write) -> io::Result<usize> {
|
||||
out.write_all(RESPONSE_END_LINE).map(|_| 4)
|
||||
}
|
||||
|
||||
/// Write a delim message to `out`.
|
||||
pub fn delim_to_write(mut out: impl io::Write) -> io::Result<usize> {
|
||||
out.write_all(DELIMITER_LINE).map(|_| 4)
|
||||
}
|
||||
|
||||
/// Write a flush message to `out`.
|
||||
pub fn flush_to_write(mut out: impl io::Write) -> io::Result<usize> {
|
||||
out.write_all(FLUSH_LINE).map(|_| 4)
|
||||
}
|
||||
|
||||
/// Write an error `message` to `out`.
///
/// The message is encoded as a data line carrying the `ERR` marker so it can be
/// recognized outside of a side-band channel.
pub fn error_to_write(message: &[u8], out: impl io::Write) -> io::Result<usize> {
    prefixed_data_to_write(ERR_PREFIX, message, out)
}
|
||||
|
||||
/// Serialize `error` to `out`.
///
/// This includes a marker to allow decoding it outside a sideband channel, returning the amount of bytes written.
pub fn write_error(error: &ErrorRef<'_>, out: impl io::Write) -> io::Result<usize> {
    // `ErrorRef` is a thin wrapper around the raw message bytes.
    error_to_write(error.0, out)
}
|
||||
|
||||
/// Write `data` of `kind` to `out` using sideband encoding.
///
/// The channel is encoded as a single byte directly after the hex length prefix.
pub fn band_to_write(kind: Channel, data: &[u8], out: impl io::Write) -> io::Result<usize> {
    prefixed_data_to_write(&[kind as u8], data, out)
}
|
||||
|
||||
/// Serialize `band` to `out`, returning the amount of bytes written.
|
||||
///
|
||||
/// The data written to `out` can be decoded with [`PacketLineRef::decode_band()`].
|
||||
pub fn write_band(band: &BandRef<'_>, out: impl io::Write) -> io::Result<usize> {
|
||||
match band {
|
||||
BandRef::Data(d) => band_to_write(Channel::Data, d, out),
|
||||
BandRef::Progress(d) => band_to_write(Channel::Progress, d, out),
|
||||
BandRef::Error(d) => band_to_write(Channel::Error, d, out),
|
||||
}
|
||||
}
|
||||
|
||||
/// Write a `data` message to `out`.
///
/// Data lines carry neither a band prefix nor a suffix.
pub fn data_to_write(data: &[u8], out: impl io::Write) -> io::Result<usize> {
    prefixed_data_to_write(&[], data, out)
}
|
||||
|
||||
/// Serialize `line` to `out` in git `packetline` format, returning the amount of bytes written to `out`.
pub fn write_packet_line(line: &PacketLineRef<'_>, out: impl io::Write) -> io::Result<usize> {
    // Flush, delimiter and response-end are fixed four-byte sequences; only `Data` carries payload.
    match line {
        PacketLineRef::Data(d) => data_to_write(d, out),
        PacketLineRef::Flush => flush_to_write(out),
        PacketLineRef::Delimiter => delim_to_write(out),
        PacketLineRef::ResponseEnd => response_end_to_write(out),
    }
}
|
||||
|
||||
/// Write a `text` message to `out`, which is assured to end in a newline.
pub fn text_to_write(text: &[u8], out: impl io::Write) -> io::Result<usize> {
    // The newline suffix is appended by the encoder and accounted for in the length prefix.
    prefixed_and_suffixed_data_to_write(&[], text, b"\n", out)
}
|
||||
|
||||
/// Serialize `text` to `out`, appending a newline if there is none, returning the amount of bytes written.
pub fn write_text(text: &TextRef<'_>, out: impl io::Write) -> io::Result<usize> {
    // `TextRef` is a thin wrapper around the raw text bytes.
    text_to_write(text.0, out)
}
|
||||
|
||||
// Convenience wrapper for data with an optional prefix but no suffix.
fn prefixed_data_to_write(prefix: &[u8], data: &[u8], out: impl io::Write) -> io::Result<usize> {
    prefixed_and_suffixed_data_to_write(prefix, data, &[], out)
}
|
||||
|
||||
// The shared encoder: writes `<4-byte hex length><prefix><data><suffix>` to `out` and returns
// the total amount of bytes written, including the hex length prefix itself.
fn prefixed_and_suffixed_data_to_write(
    prefix: &[u8],
    data: &[u8],
    suffix: &[u8],
    mut out: impl io::Write,
) -> io::Result<usize> {
    let data_len = prefix.len() + data.len() + suffix.len();
    if data_len > MAX_DATA_LEN {
        return Err(io::Error::other(Error::DataLengthLimitExceeded {
            length_in_bytes: data_len,
        }));
    }
    // Empty payloads would serialize to the invalid '0004' line.
    if data.is_empty() {
        return Err(io::Error::other(Error::DataIsEmpty));
    }

    // The announced length includes the four hex digits themselves.
    let data_len = data_len + 4;
    let buf = u16_to_hex(data_len as u16);

    out.write_all(&buf)?;
    if !prefix.is_empty() {
        out.write_all(prefix)?;
    }
    out.write_all(data)?;
    if !suffix.is_empty() {
        out.write_all(suffix)?;
    }
    Ok(data_len)
}
|
||||
230
src-packetline/src/blocking_io/read.rs
Normal file
230
src-packetline/src/blocking_io/read.rs
Normal file
@@ -0,0 +1,230 @@
|
||||
use std::{
|
||||
io,
|
||||
ops::{Deref, DerefMut},
|
||||
};
|
||||
|
||||
use bstr::ByteSlice;
|
||||
|
||||
pub use super::sidebands::WithSidebands;
|
||||
use crate::{
|
||||
decode,
|
||||
read::{ExhaustiveOutcome, ProgressAction, StreamingPeekableIterState},
|
||||
PacketLineRef, MAX_LINE_LEN, U16_HEX_BYTES,
|
||||
};
|
||||
|
||||
/// Read pack lines one after another, without consuming more than needed from the underlying
/// [`Read`][std::io::Read]. [`Flush`][PacketLineRef::Flush] lines cause the reader to stop producing lines forever,
/// leaving [`Read`][std::io::Read] at the start of whatever comes next.
///
/// This implementation tries hard not to allocate at all which leads to quite some added complexity and plenty of extra memory copies.
pub struct StreamingPeekableIter<T> {
    // Shared state (reader, buffers, delimiters, flags), also exposed via Deref/DerefMut.
    pub(super) state: StreamingPeekableIterState<T>,
}
|
||||
|
||||
/// Non-IO methods
|
||||
impl<T> StreamingPeekableIter<T>
|
||||
where
|
||||
T: io::Read,
|
||||
{
|
||||
/// Return a new instance from `read` which will stop decoding packet lines when receiving one of the given `delimiters`.
|
||||
/// If `trace` is `true`, all packetlines received or sent will be passed to the facilities of the `src-trace` crate.
|
||||
pub fn new(read: T, delimiters: &'static [PacketLineRef<'static>], trace: bool) -> Self {
|
||||
Self {
|
||||
state: StreamingPeekableIterState::new(read, delimiters, trace),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_line_inner<'a>(reader: &mut T, buf: &'a mut [u8]) -> io::Result<Result<PacketLineRef<'a>, decode::Error>> {
|
||||
let (hex_bytes, data_bytes) = buf.split_at_mut(4);
|
||||
reader.read_exact(hex_bytes)?;
|
||||
let num_data_bytes = match decode::hex_prefix(hex_bytes) {
|
||||
Ok(decode::PacketLineOrWantedSize::Line(line)) => return Ok(Ok(line)),
|
||||
Ok(decode::PacketLineOrWantedSize::Wanted(additional_bytes)) => additional_bytes as usize,
|
||||
Err(err) => return Ok(Err(err)),
|
||||
};
|
||||
|
||||
let (data_bytes, _) = data_bytes.split_at_mut(num_data_bytes);
|
||||
reader.read_exact(data_bytes)?;
|
||||
match decode::to_data_line(data_bytes) {
|
||||
Ok(line) => Ok(Ok(line)),
|
||||
Err(err) => Ok(Err(err)),
|
||||
}
|
||||
}
|
||||
|
||||
/// This function is needed to help the borrow checker allow us to return references all the time
|
||||
/// It contains a bunch of logic shared between peek and `read_line` invocations.
|
||||
fn read_line_inner_exhaustive<'a>(
|
||||
reader: &mut T,
|
||||
buf: &'a mut Vec<u8>,
|
||||
delimiters: &[PacketLineRef<'static>],
|
||||
fail_on_err_lines: bool,
|
||||
buf_resize: bool,
|
||||
trace: bool,
|
||||
) -> ExhaustiveOutcome<'a> {
|
||||
(
|
||||
false,
|
||||
None,
|
||||
Some(match Self::read_line_inner(reader, buf) {
|
||||
Ok(Ok(line)) => {
|
||||
if trace {
|
||||
match line {
|
||||
#[allow(unused_variables)]
|
||||
PacketLineRef::Data(d) => {
|
||||
gix_trace::trace!("<< {}", d.as_bstr().trim().as_bstr());
|
||||
}
|
||||
PacketLineRef::Flush => {
|
||||
gix_trace::trace!("<< FLUSH");
|
||||
}
|
||||
PacketLineRef::Delimiter => {
|
||||
gix_trace::trace!("<< DELIM");
|
||||
}
|
||||
PacketLineRef::ResponseEnd => {
|
||||
gix_trace::trace!("<< RESPONSE_END");
|
||||
}
|
||||
}
|
||||
}
|
||||
if delimiters.contains(&line) {
|
||||
let stopped_at = delimiters.iter().find(|l| **l == line).copied();
|
||||
buf.clear();
|
||||
return (true, stopped_at, None);
|
||||
} else if fail_on_err_lines {
|
||||
if let Some(err) = line.check_error() {
|
||||
let err = err.0.as_bstr().to_owned();
|
||||
buf.clear();
|
||||
return (
|
||||
true,
|
||||
None,
|
||||
Some(Err(io::Error::other(crate::read::Error { message: err }))),
|
||||
);
|
||||
}
|
||||
}
|
||||
let len = line.as_slice().map_or(U16_HEX_BYTES, |s| s.len() + U16_HEX_BYTES);
|
||||
if buf_resize {
|
||||
buf.resize(len, 0);
|
||||
}
|
||||
// TODO(borrowchk): remove additional decoding of internal buffer which is needed only to make it past borrowchk
|
||||
Ok(Ok(crate::decode(buf).expect("only valid data here")))
|
||||
}
|
||||
Ok(Err(err)) => {
|
||||
buf.clear();
|
||||
Ok(Err(err))
|
||||
}
|
||||
Err(err) => {
|
||||
buf.clear();
|
||||
Err(err)
|
||||
}
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// Read a packet line into the internal buffer and return it.
|
||||
///
|
||||
/// Returns `None` if the end of iteration is reached because of one of the following:
|
||||
///
|
||||
/// * natural EOF
|
||||
/// * ERR packet line encountered if [`fail_on_err_lines()`](StreamingPeekableIterState::fail_on_err_lines()) is true.
|
||||
/// * A `delimiter` packet line encountered
|
||||
pub fn read_line(&mut self) -> Option<io::Result<Result<PacketLineRef<'_>, decode::Error>>> {
|
||||
let state = &mut self.state;
|
||||
if state.is_done {
|
||||
return None;
|
||||
}
|
||||
if !state.peek_buf.is_empty() {
|
||||
std::mem::swap(&mut state.peek_buf, &mut state.buf);
|
||||
state.peek_buf.clear();
|
||||
Some(Ok(Ok(crate::decode(&state.buf).expect("only valid data in peek buf"))))
|
||||
} else {
|
||||
if state.buf.len() != MAX_LINE_LEN {
|
||||
state.buf.resize(MAX_LINE_LEN, 0);
|
||||
}
|
||||
let (is_done, stopped_at, res) = Self::read_line_inner_exhaustive(
|
||||
&mut state.read,
|
||||
&mut state.buf,
|
||||
state.delimiters,
|
||||
state.fail_on_err_lines,
|
||||
false,
|
||||
state.trace,
|
||||
);
|
||||
state.is_done = is_done;
|
||||
state.stopped_at = stopped_at;
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
/// Peek the next packet line without consuming it. Returns `None` if a stop-packet or an error
|
||||
/// was encountered.
|
||||
///
|
||||
/// Multiple calls to peek will return the same packet line, if there is one.
|
||||
pub fn peek_line(&mut self) -> Option<io::Result<Result<PacketLineRef<'_>, decode::Error>>> {
|
||||
let state = &mut self.state;
|
||||
if state.is_done {
|
||||
return None;
|
||||
}
|
||||
if state.peek_buf.is_empty() {
|
||||
state.peek_buf.resize(MAX_LINE_LEN, 0);
|
||||
let (is_done, stopped_at, res) = Self::read_line_inner_exhaustive(
|
||||
&mut state.read,
|
||||
&mut state.peek_buf,
|
||||
state.delimiters,
|
||||
state.fail_on_err_lines,
|
||||
true,
|
||||
state.trace,
|
||||
);
|
||||
state.is_done = is_done;
|
||||
state.stopped_at = stopped_at;
|
||||
res
|
||||
} else {
|
||||
Some(Ok(Ok(crate::decode(&state.peek_buf).expect("only valid data here"))))
|
||||
}
|
||||
}
|
||||
|
||||
/// Return this instance as implementor of [`Read`](io::Read) assuming side bands to be used in all received packet lines.
|
||||
/// Each invocation of [`read_line()`](io::BufRead::read_line()) returns a packet line.
|
||||
///
|
||||
/// Progress or error information will be passed to the given `handle_progress(is_error, text)` function, with `is_error: bool`
|
||||
/// being true in case the `text` is to be interpreted as error.
|
||||
///
|
||||
/// _Please note_ that side bands need to be negotiated with the server.
|
||||
pub fn as_read_with_sidebands<F: FnMut(bool, &[u8]) -> ProgressAction>(
|
||||
&mut self,
|
||||
handle_progress: F,
|
||||
) -> WithSidebands<'_, T, F> {
|
||||
WithSidebands::with_progress_handler(self, handle_progress)
|
||||
}
|
||||
|
||||
/// Same as [`as_read_with_sidebands(…)`](StreamingPeekableIter::as_read_with_sidebands()), but for channels without side band support.
|
||||
///
|
||||
/// The type parameter `F` needs to be configured for this method to be callable using the 'turbofish' operator.
|
||||
/// Use [`as_read()`][StreamingPeekableIter::as_read()].
|
||||
pub fn as_read_without_sidebands<F: FnMut(bool, &[u8]) -> ProgressAction>(&mut self) -> WithSidebands<'_, T, F> {
|
||||
WithSidebands::without_progress_handler(self)
|
||||
}
|
||||
|
||||
/// Same as [`as_read_with_sidebands(…)`](StreamingPeekableIter::as_read_with_sidebands()), but for channels without side band support.
|
||||
///
|
||||
/// Due to the preconfigured function type this method can be called without 'turbofish'.
|
||||
#[allow(clippy::type_complexity)]
|
||||
pub fn as_read(&mut self) -> WithSidebands<'_, T, fn(bool, &[u8]) -> ProgressAction> {
|
||||
WithSidebands::new(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> StreamingPeekableIter<T> {
|
||||
/// Return the inner read
|
||||
pub fn into_inner(self) -> T {
|
||||
self.state.read
|
||||
}
|
||||
}
|
||||
|
||||
// Expose the shared state read-only, so state accessors work directly on the iterator.
impl<T> Deref for StreamingPeekableIter<T> {
    type Target = StreamingPeekableIterState<T>;
    fn deref(&self) -> &Self::Target {
        &self.state
    }
}
|
||||
|
||||
// Expose the shared state mutably, so configuration methods work directly on the iterator.
impl<T> DerefMut for StreamingPeekableIter<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.state
    }
}
|
||||
203
src-packetline/src/blocking_io/sidebands.rs
Normal file
203
src-packetline/src/blocking_io/sidebands.rs
Normal file
@@ -0,0 +1,203 @@
|
||||
use std::{io, io::BufRead};
|
||||
|
||||
use super::read::StreamingPeekableIter;
|
||||
use crate::{read::ProgressAction, BandRef, PacketLineRef, TextRef, U16_HEX_BYTES};
|
||||
|
||||
/// An implementor of [`BufRead`][io::BufRead] yielding packet lines on each call to [`read_line()`][io::BufRead::read_line()].
/// It's also possible to hide the underlying packet lines using the [`Read`][io::Read] implementation which is useful
/// if they represent binary data, like the one of a pack file.
pub struct WithSidebands<'a, T, F>
where
    T: io::Read,
{
    // The packet line provider whose internal buffer we borrow from in `fill_buf()`.
    parent: &'a mut StreamingPeekableIter<T>,
    // Invoked with `(is_error, text)` for progress/error bands; `None` means data-only mode.
    handle_progress: Option<F>,
    // Current read offset into the parent's buffer.
    pos: usize,
    // End of the current payload within the parent's buffer; `pos >= cap` means "refill needed".
    cap: usize,
}
|
||||
|
||||
impl<T, F> Drop for WithSidebands<'_, T, F>
where
    T: io::Read,
{
    fn drop(&mut self) {
        // Re-arm the parent so it can produce lines again after this adapter stopped it.
        self.parent.reset();
    }
}
|
||||
|
||||
impl<'a, T> WithSidebands<'a, T, fn(bool, &[u8]) -> ProgressAction>
|
||||
where
|
||||
T: io::Read,
|
||||
{
|
||||
/// Create a new instance with the given provider as `parent`.
|
||||
pub fn new(parent: &'a mut StreamingPeekableIter<T>) -> Self {
|
||||
WithSidebands {
|
||||
parent,
|
||||
handle_progress: None,
|
||||
pos: 0,
|
||||
cap: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, F> WithSidebands<'a, T, F>
where
    T: io::Read,
    F: FnMut(bool, &[u8]) -> ProgressAction,
{
    /// Create a new instance with the given `parent` provider and the `handle_progress` function.
    ///
    /// Progress or error information will be passed to the given `handle_progress(is_error, text)` function, with `is_error: bool`
    /// being true in case the `text` is to be interpreted as error.
    pub fn with_progress_handler(parent: &'a mut StreamingPeekableIter<T>, handle_progress: F) -> Self {
        WithSidebands {
            parent,
            handle_progress: Some(handle_progress),
            pos: 0,
            cap: 0,
        }
    }

    /// Create a new instance without a progress handler.
    pub fn without_progress_handler(parent: &'a mut StreamingPeekableIter<T>) -> Self {
        WithSidebands {
            parent,
            handle_progress: None,
            pos: 0,
            cap: 0,
        }
    }

    /// Forwards to the parent [`crate::read::StreamingPeekableIterState::reset_with()`]
    pub fn reset_with(&mut self, delimiters: &'static [PacketLineRef<'static>]) {
        self.parent.reset_with(delimiters);
    }

    /// Forwards to the parent [`StreamingPeekableIterState::stopped_at()`][crate::read::StreamingPeekableIterState::stopped_at()]
    pub fn stopped_at(&self) -> Option<PacketLineRef<'static>> {
        self.parent.stopped_at
    }

    /// Set or unset the progress handler.
    pub fn set_progress_handler(&mut self, handle_progress: Option<F>) {
        self.handle_progress = handle_progress;
    }

    /// Effectively forwards to the parent [`StreamingPeekableIter::peek_line()`], allowing to see what would be returned
    /// next on a call to [`read_line()`][io::BufRead::read_line()].
    ///
    /// # Warning
    ///
    /// This skips all sideband handling and may return an unprocessed line with sidebands still contained in it.
    pub fn peek_data_line(&mut self) -> Option<io::Result<Result<&[u8], crate::decode::Error>>> {
        // Non-data lines (flush, delimiter, response-end) are reported as `None`.
        match self.parent.peek_line() {
            Some(Ok(Ok(PacketLineRef::Data(line)))) => Some(Ok(Ok(line))),
            Some(Ok(Err(err))) => Some(Ok(Err(err))),
            Some(Err(err)) => Some(Err(err)),
            _ => None,
        }
    }

    /// Read a whole packetline from the underlying reader, with empty lines indicating a stop packetline.
    ///
    /// # Warning
    ///
    /// This skips all sideband handling and may return an unprocessed line with sidebands still contained in it.
    pub fn read_data_line(&mut self) -> Option<io::Result<Result<PacketLineRef<'_>, crate::decode::Error>>> {
        // `cap != 0` would mean buffered payload from `fill_buf()` would be silently skipped.
        assert_eq!(
            self.cap, 0,
            "we don't support partial buffers right now - read-line must be used consistently"
        );
        self.parent.read_line()
    }

    /// Like `BufRead::read_line()`, but will only read one packetline at a time.
    ///
    /// It will also be easier to call as sometimes it's unclear which implementation we get on a type like this with
    /// plenty of generic parameters.
    pub fn read_line_to_string(&mut self, buf: &mut String) -> io::Result<usize> {
        assert_eq!(
            self.cap, 0,
            "we don't support partial buffers right now - read-line must be used consistently"
        );
        let line = std::str::from_utf8(self.fill_buf()?).map_err(io::Error::other)?;
        buf.push_str(line);
        let bytes = line.len();
        // Mark the whole line as consumed so the next call refills.
        self.cap = 0;
        Ok(bytes)
    }
}
|
||||
|
||||
impl<T, F> BufRead for WithSidebands<'_, T, F>
where
    T: io::Read,
    F: FnMut(bool, &[u8]) -> ProgressAction,
{
    /// Refill from the parent's buffer with the payload of the next (side-band decoded) data line.
    fn fill_buf(&mut self) -> io::Result<&[u8]> {
        if self.pos >= self.cap {
            // `ofs` is the payload offset within the parent's buffer, `cap` the payload length.
            let (ofs, cap) = loop {
                let line = match self.parent.read_line() {
                    Some(line) => line?.map_err(io::Error::other)?,
                    // End of input - expose an empty buffer to the caller.
                    None => break (0, 0),
                };
                match self.handle_progress.as_mut() {
                    Some(handle_progress) => {
                        // Side-band mode: data passes through, progress/error text is routed
                        // to the handler which may abort the stream.
                        let band = line.decode_band().map_err(io::Error::other)?;
                        // One band-indicator byte precedes the payload on the wire.
                        const ENCODED_BAND: usize = 1;
                        match band {
                            BandRef::Data(d) => {
                                if d.is_empty() {
                                    continue;
                                }
                                break (U16_HEX_BYTES + ENCODED_BAND, d.len());
                            }
                            BandRef::Progress(d) => {
                                let text = TextRef::from(d).0;
                                if handle_progress(false, text).is_break() {
                                    return Err(std::io::Error::other("interrupted by user"));
                                }
                            }
                            BandRef::Error(d) => {
                                let text = TextRef::from(d).0;
                                if handle_progress(true, text).is_break() {
                                    return Err(std::io::Error::other("interrupted by user"));
                                }
                            }
                        }
                    }
                    None => {
                        // Without a progress handler only plain data lines are acceptable.
                        break match line.as_slice() {
                            Some(d) => (U16_HEX_BYTES, d.len()),
                            None => {
                                return Err(io::Error::new(
                                    io::ErrorKind::UnexpectedEof,
                                    "encountered non-data line in a data-line only context",
                                ))
                            }
                        }
                    }
                }
            };
            self.cap = cap + ofs;
            self.pos = ofs;
        }
        // Borrow the payload range out of the parent's internal buffer.
        Ok(&self.parent.buf[self.pos..self.cap])
    }

    /// Advance the read position by `amt` bytes, never past the end of the buffered line.
    fn consume(&mut self, amt: usize) {
        self.pos = std::cmp::min(self.pos + amt, self.cap);
    }
}
|
||||
|
||||
impl<T, F> io::Read for WithSidebands<'_, T, F>
|
||||
where
|
||||
T: io::Read,
|
||||
F: FnMut(bool, &[u8]) -> ProgressAction,
|
||||
{
|
||||
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
|
||||
let mut rem = self.fill_buf()?;
|
||||
let nread = rem.read(buf)?;
|
||||
self.consume(nread);
|
||||
Ok(nread)
|
||||
}
|
||||
}
|
||||
70
src-packetline/src/blocking_io/write.rs
Normal file
70
src-packetline/src/blocking_io/write.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
use std::io;
|
||||
|
||||
use crate::{blocking_io::encode, MAX_DATA_LEN, U16_HEX_BYTES};
|
||||
|
||||
/// An implementor of [`Write`][io::Write] which passes all input to an inner `Write` in packet line data encoding,
/// one line per `write(…)` call or as many lines as it takes if the data doesn't fit into the maximum allowed line length.
pub struct Writer<T> {
    /// the `Write` implementation to which to propagate packet lines
    inner: T,
    // `true` writes raw data lines, `false` writes text lines with a trailing newline.
    binary: bool,
}
|
||||
|
||||
impl<T: io::Write> Writer<T> {
|
||||
/// Create a new instance from the given `write`
|
||||
pub fn new(write: T) -> Self {
|
||||
Writer {
|
||||
inner: write,
|
||||
binary: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Non-IO methods
impl<T> Writer<T> {
    /// If called, each call to [`write()`][io::Write::write()] will write bytes as is.
    pub fn enable_binary_mode(&mut self) {
        self.binary = true;
    }
    /// If called, each call to [`write()`][io::Write::write()] will write the input as text, appending a trailing newline
    /// if needed before writing.
    pub fn enable_text_mode(&mut self) {
        self.binary = false;
    }
    /// Return the inner writer, consuming self.
    pub fn into_inner(self) -> T {
        self.inner
    }
    /// Return a mutable reference to the inner writer, useful if packet lines should be serialized directly.
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.inner
    }
}
|
||||
|
||||
impl<T: io::Write> io::Write for Writer<T> {
    /// Write `buf` as one or more packet lines, returning the amount of payload bytes written.
    fn write(&mut self, mut buf: &[u8]) -> io::Result<usize> {
        // A zero-payload line would serialize to '0004', which the format forbids.
        if buf.is_empty() {
            return Err(io::Error::other(
                "empty packet lines are not permitted as '0004' is invalid",
            ));
        }

        let mut written = 0;
        while !buf.is_empty() {
            // Each packet line can carry at most MAX_DATA_LEN payload bytes.
            let (data, rest) = buf.split_at(buf.len().min(MAX_DATA_LEN));
            written += if self.binary {
                encode::data_to_write(data, &mut self.inner)
            } else {
                encode::text_to_write(data, &mut self.inner)
            }?;
            // subtract header (and trailing NL) because write-all can't handle writing more than it passes in
            written -= U16_HEX_BYTES + usize::from(!self.binary);
            buf = rest;
        }
        Ok(written)
    }

    /// Flush the underlying writer.
    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}
|
||||
145
src-packetline/src/decode.rs
Normal file
145
src-packetline/src/decode.rs
Normal file
@@ -0,0 +1,145 @@
|
||||
use bstr::BString;
|
||||
|
||||
use crate::{PacketLineRef, DELIMITER_LINE, FLUSH_LINE, MAX_DATA_LEN, MAX_LINE_LEN, RESPONSE_END_LINE, U16_HEX_BYTES};
|
||||
|
||||
/// The error used in the [`decode`][mod@crate::decode] module
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
    // The four-byte length prefix was not valid hexadecimal.
    #[error("Failed to decode the first four hex bytes indicating the line length: {err}")]
    HexDecode { err: String },
    #[error("The data received claims to be larger than the maximum allowed size: got {length_in_bytes}, exceeds {MAX_DATA_LEN}")]
    DataLengthLimitExceeded { length_in_bytes: usize },
    // A '0004' prefix - an empty payload is not allowed by the format.
    #[error("Received an invalid empty line")]
    DataIsEmpty,
    // A '0003' prefix, which has no valid meaning.
    #[error("Received an invalid line of length 3")]
    InvalidLineLength,
    #[error("{data:?} - consumed {bytes_consumed} bytes")]
    Line { data: BString, bytes_consumed: usize },
    // Used by streaming decoding to request more input.
    #[error("Needing {bytes_needed} additional bytes to decode the line successfully")]
    NotEnoughData { bytes_needed: usize },
}
|
||||
|
||||
/// Errors related to decoding of side-band channel information.
pub mod band {
    /// The error used in [`PacketLineRef::decode_band()`][super::PacketLineRef::decode_band()].
    #[derive(Debug, thiserror::Error)]
    #[allow(missing_docs)]
    pub enum Error {
        #[error("attempt to decode a non-side channel line or input was malformed: {band_id}")]
        InvalidSideBand { band_id: u8 },
        #[error("attempt to decode a non-data line into a side-channel band")]
        NonDataLine,
    }
}
|
||||
|
||||
/// A utility return type to support incremental parsing of packet lines, as produced by [`streaming()`].
#[derive(Debug, Clone)]
pub enum Stream<'a> {
    /// Indicate a single packet line was parsed completely
    Complete {
        /// The parsed packet line
        line: PacketLineRef<'a>,
        /// The amount of bytes consumed from input
        bytes_consumed: usize,
    },
    /// A packet line could not yet be parsed due to missing bytes
    Incomplete {
        /// The amount of additional bytes needed for the parsing to complete
        bytes_needed: usize,
    },
}
|
||||
|
||||
/// The result of [`hex_prefix()`] indicating either a special packet line or the amount of wanted bytes
pub enum PacketLineOrWantedSize<'a> {
    /// The special kind of packet line decoded from the hex prefix. It never contains actual data.
    Line(PacketLineRef<'a>),
    /// The amount of bytes indicated by the hex prefix of the packet line.
    ///
    /// This is the payload size, i.e. the announced length minus the four prefix bytes.
    Wanted(u16),
}
|
||||
|
||||
/// Decode the `four_bytes` packet line prefix provided in hexadecimal form and check it for validity.
pub fn hex_prefix(four_bytes: &[u8]) -> Result<PacketLineOrWantedSize<'_>, Error> {
    debug_assert_eq!(four_bytes.len(), 4, "need four hex bytes");
    // The special data-less lines are fixed byte sequences - check these first.
    for (line_bytes, line_type) in &[
        (FLUSH_LINE, PacketLineRef::Flush),
        (DELIMITER_LINE, PacketLineRef::Delimiter),
        (RESPONSE_END_LINE, PacketLineRef::ResponseEnd),
    ] {
        if four_bytes == *line_bytes {
            return Ok(PacketLineOrWantedSize::Line(*line_type));
        }
    }

    let mut buf = [0u8; U16_HEX_BYTES / 2];
    faster_hex::hex_decode(four_bytes, &mut buf).map_err(|err| Error::HexDecode { err: err.to_string() })?;
    let wanted_bytes = u16::from_be_bytes(buf);

    // '0003' has no valid meaning, and '0004' would be an empty line - both are invalid.
    if wanted_bytes == 3 {
        return Err(Error::InvalidLineLength);
    }
    if wanted_bytes == 4 {
        return Err(Error::DataIsEmpty);
    }
    debug_assert!(
        wanted_bytes as usize > U16_HEX_BYTES,
        "by now there should be more wanted bytes than prefix bytes"
    );
    // The announced length includes the prefix itself - return only the payload size.
    Ok(PacketLineOrWantedSize::Wanted(wanted_bytes - U16_HEX_BYTES as u16))
}
|
||||
|
||||
/// Obtain a `PacketLine` from `data` after assuring `data` is small enough to fit.
|
||||
pub fn to_data_line(data: &[u8]) -> Result<PacketLineRef<'_>, Error> {
|
||||
if data.len() > MAX_LINE_LEN {
|
||||
return Err(Error::DataLengthLimitExceeded {
|
||||
length_in_bytes: data.len(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(PacketLineRef::Data(data))
|
||||
}
|
||||
|
||||
/// Decode `data` as packet line while reporting whether the data is complete or not using a [`Stream`].
|
||||
pub fn streaming(data: &[u8]) -> Result<Stream<'_>, Error> {
|
||||
let data_len = data.len();
|
||||
if data_len < U16_HEX_BYTES {
|
||||
return Ok(Stream::Incomplete {
|
||||
bytes_needed: U16_HEX_BYTES - data_len,
|
||||
});
|
||||
}
|
||||
let wanted_bytes = match hex_prefix(&data[..U16_HEX_BYTES])? {
|
||||
PacketLineOrWantedSize::Wanted(s) => s as usize,
|
||||
PacketLineOrWantedSize::Line(line) => {
|
||||
return Ok(Stream::Complete {
|
||||
line,
|
||||
bytes_consumed: 4,
|
||||
})
|
||||
}
|
||||
} + U16_HEX_BYTES;
|
||||
if wanted_bytes > MAX_LINE_LEN {
|
||||
return Err(Error::DataLengthLimitExceeded {
|
||||
length_in_bytes: wanted_bytes,
|
||||
});
|
||||
}
|
||||
if data_len < wanted_bytes {
|
||||
return Ok(Stream::Incomplete {
|
||||
bytes_needed: wanted_bytes - data_len,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(Stream::Complete {
|
||||
line: to_data_line(&data[U16_HEX_BYTES..wanted_bytes])?,
|
||||
bytes_consumed: wanted_bytes,
|
||||
})
|
||||
}
|
||||
|
||||
/// Decode an entire packet line from data or fail.
|
||||
///
|
||||
/// Note that failure also happens if there is not enough data to parse a complete packet line, as opposed to [`streaming()`] decoding
|
||||
/// succeeds in that case, stating how much more bytes are required.
|
||||
pub fn all_at_once(data: &[u8]) -> Result<PacketLineRef<'_>, Error> {
|
||||
match streaming(data)? {
|
||||
Stream::Complete { line, .. } => Ok(line),
|
||||
Stream::Incomplete { bytes_needed } => Err(Error::NotEnoughData { bytes_needed }),
|
||||
}
|
||||
}
|
||||
17
src-packetline/src/encode.rs
Normal file
17
src-packetline/src/encode.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
use super::MAX_DATA_LEN;

/// The error returned by most functions in the [`encode`](crate::encode) module
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
pub enum Error {
    /// The payload exceeded [`MAX_DATA_LEN`], the largest size representable by a packet line.
    #[error("Cannot encode more than {MAX_DATA_LEN} bytes, got {length_in_bytes}")]
    DataLengthLimitExceeded { length_in_bytes: usize },
    /// Data lines must carry at least one byte of payload.
    #[error("Empty lines are invalid")]
    DataIsEmpty,
}
|
||||
|
||||
pub(crate) fn u16_to_hex(value: u16) -> [u8; 4] {
|
||||
let mut buf = [0u8; 4];
|
||||
faster_hex::hex_encode(&value.to_be_bytes(), &mut buf).expect("two bytes to 4 hex chars never fails");
|
||||
buf
|
||||
}
|
||||
186
src-packetline/src/lib.rs
Normal file
186
src-packetline/src/lib.rs
Normal file
@@ -0,0 +1,186 @@
|
||||
//! Read and write the git packet line wire format without copying it.
//!
//! ## Feature Flags
// The doubled `all(doc, …)` predicate was redundant: `all(doc, all(doc, X))` is `all(doc, X)`.
#![cfg_attr(
    all(doc, feature = "document-features"),
    doc = ::document_features::document_features!()
)]
#![cfg_attr(all(doc, feature = "document-features"), feature(doc_cfg))]
#![deny(missing_docs, rust_2018_idioms, unsafe_code)]
|
||||
|
||||
use bstr::BStr;
|
||||
|
||||
/// An implementation of reading and writing packet lines based on `futures-io` traits, available with the `async-io` feature.
#[cfg(feature = "async-io")]
pub mod async_io {
    /// Utilities to encode packet lines.
    pub mod encode;
    mod read;
    pub use read::StreamingPeekableIter;
    mod sidebands;
    pub use sidebands::WithSidebands;
    mod write;
    pub use write::Writer;
}
|
||||
|
||||
/// An implementation of reading and writing packet lines based on blocking `std::io` traits, available with the `blocking-io` feature.
#[cfg(feature = "blocking-io")]
pub mod blocking_io {
    /// Utilities to encode packet lines.
    pub mod encode;
    mod read;
    pub use read::StreamingPeekableIter;
    mod sidebands;
    pub use sidebands::WithSidebands;
    mod write;
    pub use write::Writer;
}
|
||||
|
||||
/// Various utilities for `io::Read` trait implementation.
///
/// Only useful in conjunction with the `async-io` and `blocking-io` cargo features.
pub mod read;

// Size of the hexadecimal length prefix of every packet line.
const U16_HEX_BYTES: usize = 4;
// Maximum amount of payload bytes within a single packet line.
const MAX_DATA_LEN: usize = 65516;
// Maximum total size of a packet line on the wire, i.e. payload plus hex prefix.
const MAX_LINE_LEN: usize = MAX_DATA_LEN + U16_HEX_BYTES;
// The special prefix denoting a flush packet.
const FLUSH_LINE: &[u8] = b"0000";
// The special prefix denoting a delimiter packet.
const DELIMITER_LINE: &[u8] = b"0001";
// The special prefix denoting the end of a response.
const RESPONSE_END_LINE: &[u8] = b"0002";
// The prefix of error lines as transmitted on the wire.
const ERR_PREFIX: &[u8] = b"ERR ";
|
||||
|
||||
/// One of three sideband types allowing to multiplex information over a single connection.
///
/// The discriminant values correspond to the band marker byte leading each sideband data line.
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum Channel {
    /// The usable data itself in any format.
    Data = 1,
    /// Progress information in a user-readable format.
    Progress = 2,
    /// Error information in a user readable format. Receiving it usually terminates the connection.
    Error = 3,
}
|
||||
|
||||
/// A borrowed packet line as it refers to a slice of data by reference.
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum PacketLineRef<'a> {
    /// A chunk of raw data.
    Data(&'a [u8]),
    /// A flush packet.
    Flush,
    /// A delimiter packet.
    Delimiter,
    /// The end of the response.
    ResponseEnd,
}
|
||||
|
||||
impl<'a> PacketLineRef<'a> {
|
||||
/// Return this instance as slice if it's [`Data`](PacketLineRef::Data).
|
||||
pub fn as_slice(&self) -> Option<&'a [u8]> {
|
||||
match self {
|
||||
PacketLineRef::Data(d) => Some(d),
|
||||
PacketLineRef::Flush | PacketLineRef::Delimiter | PacketLineRef::ResponseEnd => None,
|
||||
}
|
||||
}
|
||||
/// Return this instance's [`as_slice()`](PacketLineRef::as_slice()) as [`BStr`].
|
||||
pub fn as_bstr(&self) -> Option<&'a BStr> {
|
||||
self.as_slice().map(Into::into)
|
||||
}
|
||||
/// Interpret this instance's [`as_slice()`](PacketLineRef::as_slice()) as [`ErrorRef`].
|
||||
///
|
||||
/// This works for any data received in an error [channel](crate::Channel).
|
||||
///
|
||||
/// Note that this creates an unchecked error using the slice verbatim, which is useful to serialize it.
|
||||
/// See [`check_error()`](PacketLineRef::check_error()) for a version that assures the error information is in the expected format.
|
||||
pub fn as_error(&self) -> Option<ErrorRef<'a>> {
|
||||
self.as_slice().map(ErrorRef)
|
||||
}
|
||||
/// Check this instance's [`as_slice()`](PacketLineRef::as_slice()) is a valid [`ErrorRef`] and return it.
|
||||
///
|
||||
/// This works for any data received in an error [channel](crate::Channel).
|
||||
pub fn check_error(&self) -> Option<ErrorRef<'a>> {
|
||||
self.as_slice().and_then(|data| {
|
||||
if data.len() >= ERR_PREFIX.len() && &data[..ERR_PREFIX.len()] == ERR_PREFIX {
|
||||
Some(ErrorRef(&data[ERR_PREFIX.len()..]))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
}
|
||||
/// Return this instance as text, with the trailing newline truncated if present.
|
||||
pub fn as_text(&self) -> Option<TextRef<'a>> {
|
||||
self.as_slice().map(Into::into)
|
||||
}
|
||||
|
||||
/// Interpret the data in this [`slice`](PacketLineRef::as_slice()) as [`BandRef`] according to the given `kind` of channel.
|
||||
///
|
||||
/// Note that this is only relevant in a sideband channel.
|
||||
/// See [`decode_band()`](PacketLineRef::decode_band()) in case `kind` is unknown.
|
||||
pub fn as_band(&self, kind: Channel) -> Option<BandRef<'a>> {
|
||||
self.as_slice().map(|d| match kind {
|
||||
Channel::Data => BandRef::Data(d),
|
||||
Channel::Progress => BandRef::Progress(d),
|
||||
Channel::Error => BandRef::Error(d),
|
||||
})
|
||||
}
|
||||
|
||||
/// Decode the band of this [`slice`](PacketLineRef::as_slice())
|
||||
pub fn decode_band(&self) -> Result<BandRef<'a>, decode::band::Error> {
|
||||
let d = self.as_slice().ok_or(decode::band::Error::NonDataLine)?;
|
||||
Ok(match d[0] {
|
||||
1 => BandRef::Data(&d[1..]),
|
||||
2 => BandRef::Progress(&d[1..]),
|
||||
3 => BandRef::Error(&d[1..]),
|
||||
band => return Err(decode::band::Error::InvalidSideBand { band_id: band }),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// A packet line representing an Error in a sideband channel.
///
/// Contains the error message with the `ERR ` prefix stripped when obtained via
/// [`check_error()`](PacketLineRef::check_error()).
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct ErrorRef<'a>(pub &'a [u8]);
|
||||
|
||||
/// A packet line representing text, which may include a trailing newline.
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct TextRef<'a>(pub &'a [u8]);
|
||||
|
||||
impl<'a> TextRef<'a> {
|
||||
/// Return this instance's data.
|
||||
pub fn as_slice(&self) -> &'a [u8] {
|
||||
self.0
|
||||
}
|
||||
/// Return this instance's data as [`BStr`].
|
||||
pub fn as_bstr(&self) -> &'a BStr {
|
||||
self.0.into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a [u8]> for TextRef<'a> {
|
||||
fn from(d: &'a [u8]) -> Self {
|
||||
let d = if d[d.len() - 1] == b'\n' { &d[..d.len() - 1] } else { d };
|
||||
TextRef(d)
|
||||
}
|
||||
}
|
||||
|
||||
/// A band in a sideband channel.
///
/// Produced by [`PacketLineRef::as_band()`] and [`PacketLineRef::decode_band()`].
#[derive(PartialEq, Eq, Debug, Hash, Ord, PartialOrd, Clone, Copy)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum BandRef<'a> {
    /// A band carrying data.
    Data(&'a [u8]),
    /// A band carrying user readable progress information.
    Progress(&'a [u8]),
    /// A band carrying user readable errors.
    Error(&'a [u8]),
}
|
||||
|
||||
/// Utilities to help decoding packet lines
pub mod decode;
// Convenience re-export so the most common entry point is available as `crate::decode(..)`.
#[doc(inline)]
pub use decode::all_at_once as decode;

/// Utilities to encode different kinds of packet lines
pub mod encode;
|
||||
124
src-packetline/src/read.rs
Normal file
124
src-packetline/src/read.rs
Normal file
@@ -0,0 +1,124 @@
|
||||
#[cfg(any(feature = "blocking-io", feature = "async-io"))]
use crate::MAX_LINE_LEN;
use crate::{PacketLineRef, U16_HEX_BYTES};

/// Allow the read-progress handler to determine how to continue.
///
/// Use [`std::ops::ControlFlow::Continue`] to continue reading the next progress if available.
/// Use [`std::ops::ControlFlow::Break`] to abort all IO even if more would be available, claiming the operation was interrupted.
pub type ProgressAction = std::ops::ControlFlow<()>;

// Internal return type bundling all state a read operation can produce at once.
#[cfg(any(feature = "blocking-io", feature = "async-io"))]
pub(crate) type ExhaustiveOutcome<'a> = (
    bool,                                                                     // is_done
    Option<PacketLineRef<'static>>,                                           // stopped_at
    Option<std::io::Result<Result<PacketLineRef<'a>, crate::decode::Error>>>, // actual method result
);
|
||||
|
||||
mod error {
    use std::fmt::{Debug, Display, Formatter};

    use bstr::BString;

    /// The error representing an ERR packet line, as possibly wrapped into an `std::io::Error`.
    #[derive(Debug)]
    pub struct Error {
        /// The contents of the ERR line, with `ERR` portion stripped.
        pub message: BString,
    }

    impl Display for Error {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            // Show only the message provided by the remote side.
            Display::fmt(&self.message, f)
        }
    }

    impl std::error::Error for Error {}
}
pub use error::Error;
|
||||
|
||||
/// State for `StreamingPeekableIter` implementations.
pub struct StreamingPeekableIterState<T> {
    // The underlying reader packet lines are obtained from.
    pub(crate) read: T,
    // Holds a peeked packet line, including its four-byte hex prefix.
    pub(crate) peek_buf: Vec<u8>,
    #[cfg(any(feature = "blocking-io", feature = "async-io"))]
    // Scratch buffer sized to hold the largest possible packet line.
    pub(crate) buf: Vec<u8>,
    // If `true`, encountering an `ERR` packet line ends the iteration with an error.
    pub(crate) fail_on_err_lines: bool,
    // Packet lines which end the current iteration when encountered.
    pub(crate) delimiters: &'static [PacketLineRef<'static>],
    // `true` once iteration stopped; cleared by `reset()`/`reset_with()`.
    pub(crate) is_done: bool,
    // The delimiter line that ended the iteration, if any.
    pub(crate) stopped_at: Option<PacketLineRef<'static>>,
    #[cfg_attr(all(not(feature = "async-io"), not(feature = "blocking-io")), allow(dead_code))]
    // If `true`, all packet lines are passed to the `src-trace` facilities.
    pub(crate) trace: bool,
}
|
||||
|
||||
impl<T> StreamingPeekableIterState<T> {
    /// Return a new instance from `read` which will stop decoding packet lines when receiving one of the given `delimiters`.
    /// If `trace` is `true`, all packetlines received or sent will be passed to the facilities of the `src-trace` crate.
    #[cfg(any(feature = "blocking-io", feature = "async-io"))]
    pub(crate) fn new(read: T, delimiters: &'static [PacketLineRef<'static>], trace: bool) -> Self {
        Self {
            read,
            #[cfg(any(feature = "blocking-io", feature = "async-io"))]
            buf: vec![0; MAX_LINE_LEN],
            peek_buf: Vec::new(),
            delimiters,
            fail_on_err_lines: false,
            is_done: false,
            stopped_at: None,
            trace,
        }
    }

    /// Modify the peek buffer, overwriting the byte at `position` with the given byte to `replace_with` while truncating
    /// it to contain only bytes until the newly replaced `position`.
    ///
    /// This is useful if you would want to remove 'special bytes' hidden behind, say a NULL byte to disappear and allow
    /// standard line readers to read the next line as usual.
    ///
    /// **Note** that `position` does not include the 4 bytes prefix (they are invisible outside the reader)
    pub fn peek_buffer_replace_and_truncate(&mut self, position: usize, replace_with: u8) {
        // Translate the caller-visible position into the buffer position behind the hex prefix.
        let position = position + U16_HEX_BYTES;
        self.peek_buf[position] = replace_with;

        let new_len = position + 1;
        self.peek_buf.truncate(new_len);
        // Rewrite the hex length prefix so the truncated buffer remains a valid packet line.
        self.peek_buf[..4].copy_from_slice(&crate::encode::u16_to_hex((new_len) as u16));
    }

    /// Returns the packet line that stopped the iteration, or
    /// `None` if the end wasn't reached yet, on EOF, or if [`fail_on_err_lines()`][StreamingPeekableIterState::fail_on_err_lines()] was true.
    pub fn stopped_at(&self) -> Option<PacketLineRef<'static>> {
        self.stopped_at
    }

    /// Reset all iteration state allowing to continue a stopped iteration that is not yet at EOF.
    ///
    /// This can happen once a delimiter is reached.
    pub fn reset(&mut self) {
        let delimiters = std::mem::take(&mut self.delimiters);
        self.reset_with(delimiters);
    }

    /// Similar to [`reset()`][StreamingPeekableIterState::reset()] with support to changing the `delimiters`.
    pub fn reset_with(&mut self, delimiters: &'static [PacketLineRef<'static>]) {
        self.delimiters = delimiters;
        self.is_done = false;
        self.stopped_at = None;
    }

    /// If `value` is `true` the provider will check for special `ERR` packet lines and stop iteration when one is encountered.
    ///
    /// Use [`stopped_at()`][StreamingPeekableIterState::stopped_at()] to inspect the cause of the end of the iteration.
    pub fn fail_on_err_lines(&mut self, value: bool) {
        self.fail_on_err_lines = value;
    }

    /// Replace the reader used with the given `read`, resetting all other iteration state as well.
    pub fn replace(&mut self, read: T) -> T {
        let prev = std::mem::replace(&mut self.read, read);
        self.reset();
        self.fail_on_err_lines = false;
        prev
    }
}
|
||||
20
src-packetline/tests/async-packetline.rs
Normal file
20
src-packetline/tests/async-packetline.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
/// The catch-all result type used by all test functions in this crate.
pub type Result = std::result::Result<(), Box<dyn std::error::Error>>;

/// Assert that `res` is an error whose `Display` output equals `expected`, panicking otherwise.
pub fn assert_err_display<T: std::fmt::Debug, E: std::error::Error>(
    res: std::result::Result<T, E>,
    expected: impl AsRef<str>,
) {
    let expected = expected.as_ref();
    match res {
        Err(actual) => assert_eq!(actual.to_string(), expected),
        Ok(v) => panic!("Expected error '{expected}', got value {v:?}"),
    }
}
|
||||
|
||||
// These suites require an IO implementation; in this integration test they are built
// only for `async-io` when `blocking-io` is not enabled at the same time.
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
mod decode;
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
mod encode;
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
mod read;
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
mod write;
||||
20
src-packetline/tests/blocking-packetline.rs
Normal file
20
src-packetline/tests/blocking-packetline.rs
Normal file
@@ -0,0 +1,20 @@
|
||||
/// The catch-all result type used by all test functions in this crate.
pub type Result = std::result::Result<(), Box<dyn std::error::Error>>;

/// Assert that `res` is an error whose `Display` output equals `expected`, panicking otherwise.
pub fn assert_err_display<T: std::fmt::Debug, E: std::error::Error>(
    res: std::result::Result<T, E>,
    expected: impl AsRef<str>,
) {
    let expected = expected.as_ref();
    match res {
        Err(actual) => assert_eq!(actual.to_string(), expected),
        Ok(v) => panic!("Expected error '{expected}', got value {v:?}"),
    }
}
|
||||
|
||||
// These suites require the blocking IO implementation to be available.
#[cfg(feature = "blocking-io")]
mod decode;
#[cfg(feature = "blocking-io")]
mod encode;
#[cfg(feature = "blocking-io")]
mod read;
#[cfg(feature = "blocking-io")]
mod write;
|
||||
169
src-packetline/tests/decode/mod.rs
Normal file
169
src-packetline/tests/decode/mod.rs
Normal file
@@ -0,0 +1,169 @@
|
||||
mod streaming {
|
||||
use gix_packetline::{
|
||||
decode::{self, streaming, Stream},
|
||||
ErrorRef, PacketLineRef,
|
||||
};
|
||||
|
||||
use crate::assert_err_display;
|
||||
|
||||
fn assert_complete(
|
||||
res: Result<Stream, decode::Error>,
|
||||
expected_consumed: usize,
|
||||
expected_value: PacketLineRef,
|
||||
) -> crate::Result {
|
||||
match res? {
|
||||
Stream::Complete { line, bytes_consumed } => {
|
||||
assert_eq!(bytes_consumed, expected_consumed);
|
||||
assert_eq!(line.as_bstr(), expected_value.as_bstr());
|
||||
}
|
||||
Stream::Incomplete { .. } => panic!("expected parsing to be complete, not partial"),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
mod round_trip {
|
||||
use bstr::ByteSlice;
|
||||
use gix_packetline::{decode, decode::streaming, Channel, PacketLineRef};
|
||||
|
||||
use crate::decode::streaming::assert_complete;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::encode as encode_io;
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::encode as encode_io;
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn trailing_line_feeds_are_removed_explicitly() -> crate::Result {
|
||||
let line = decode::all_at_once(b"0006a\n")?;
|
||||
assert_eq!(line.as_text().expect("text").0.as_bstr(), b"a".as_bstr());
|
||||
let mut out = Vec::new();
|
||||
encode_io::write_text(&line.as_text().expect("text"), &mut out)
|
||||
.await
|
||||
.expect("write to memory works");
|
||||
assert_eq!(out, b"0006a\n", "it appends a newline in text mode");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn all_kinds_of_packetlines() -> crate::Result {
|
||||
for (line, bytes) in &[
|
||||
(PacketLineRef::ResponseEnd, 4),
|
||||
(PacketLineRef::Delimiter, 4),
|
||||
(PacketLineRef::Flush, 4),
|
||||
(PacketLineRef::Data(b"hello there"), 15),
|
||||
] {
|
||||
let mut out = Vec::new();
|
||||
encode_io::write_packet_line(line, &mut out).await?;
|
||||
assert_complete(streaming(&out), *bytes, *line)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn error_line() -> crate::Result {
|
||||
let mut out = Vec::new();
|
||||
encode_io::write_error(
|
||||
&PacketLineRef::Data(b"the error").as_error().expect("data line"),
|
||||
&mut out,
|
||||
)
|
||||
.await?;
|
||||
let line = decode::all_at_once(&out)?;
|
||||
assert_eq!(line.check_error().expect("err").0, b"the error");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn side_bands() -> crate::Result {
|
||||
for channel in &[Channel::Data, Channel::Error, Channel::Progress] {
|
||||
let mut out = Vec::new();
|
||||
let band = PacketLineRef::Data(b"band data")
|
||||
.as_band(*channel)
|
||||
.expect("data is valid for band");
|
||||
encode_io::write_band(&band, &mut out).await?;
|
||||
let line = decode::all_at_once(&out)?;
|
||||
assert_eq!(line.decode_band().expect("valid band"), band);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flush() -> crate::Result {
|
||||
assert_complete(streaming(b"0000someotherstuff"), 4, PacketLineRef::Flush)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn trailing_line_feeds_are_not_removed_automatically() -> crate::Result {
|
||||
assert_complete(streaming(b"0006a\n"), 6, PacketLineRef::Data(b"a\n"))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ignore_extra_bytes() -> crate::Result {
|
||||
assert_complete(streaming(b"0006a\nhello"), 6, PacketLineRef::Data(b"a\n"))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error_on_oversized_line() {
|
||||
assert_err_display(
|
||||
streaming(b"ffff"),
|
||||
"The data received claims to be larger than the maximum allowed size: got 65535, exceeds 65516",
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error_on_error_line() -> crate::Result {
|
||||
let line = PacketLineRef::Data(b"ERR the error");
|
||||
assert_complete(
|
||||
streaming(b"0011ERR the error-and just ignored because not part of the size"),
|
||||
17,
|
||||
line,
|
||||
)?;
|
||||
assert_eq!(
|
||||
line.check_error().expect("error to be parsed here"),
|
||||
ErrorRef(b"the error")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error_on_invalid_hex() {
|
||||
assert_err_display(
|
||||
streaming(b"fooo"),
|
||||
"Failed to decode the first four hex bytes indicating the line length: Invalid character",
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error_on_empty_line() {
|
||||
assert_err_display(streaming(b"0004"), "Received an invalid empty line");
|
||||
}
|
||||
|
||||
mod incomplete {
|
||||
use gix_packetline::decode::{self, streaming, Stream};
|
||||
|
||||
fn assert_incomplete(res: Result<Stream, decode::Error>, expected_missing: usize) -> crate::Result {
|
||||
match res? {
|
||||
Stream::Complete { .. } => {
|
||||
panic!("expected parsing to be partial, not complete");
|
||||
}
|
||||
Stream::Incomplete { bytes_needed } => {
|
||||
assert_eq!(bytes_needed, expected_missing);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_hex_bytes() -> crate::Result {
|
||||
assert_incomplete(streaming(b"0"), 3)?;
|
||||
assert_incomplete(streaming(b"00"), 2)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn missing_data_bytes() -> crate::Result {
|
||||
assert_incomplete(streaming(b"0005"), 1)?;
|
||||
assert_incomplete(streaming(b"0006a"), 1)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
114
src-packetline/tests/encode/mod.rs
Normal file
114
src-packetline/tests/encode/mod.rs
Normal file
@@ -0,0 +1,114 @@
|
||||
mod data_to_write {
|
||||
#[cfg(feature = "blocking-io")]
|
||||
use std::io;
|
||||
|
||||
use bstr::ByteSlice;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use futures_lite::io;
|
||||
|
||||
use crate::assert_err_display;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::encode::data_to_write;
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::encode::data_to_write;
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn binary_and_non_binary() -> crate::Result {
|
||||
let mut out = Vec::new();
|
||||
let res = data_to_write(b"\0", &mut out).await?;
|
||||
assert_eq!(res, 5);
|
||||
assert_eq!(out.as_bstr(), b"0005\0".as_bstr());
|
||||
|
||||
out.clear();
|
||||
let res = data_to_write("hello world, it works\n".as_bytes(), &mut out).await?;
|
||||
assert_eq!(res, 26);
|
||||
assert_eq!(out.as_bstr(), b"001ahello world, it works\n".as_bstr());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn error_if_data_exceeds_limit() {
|
||||
fn vec_sized(size: usize) -> Vec<u8> {
|
||||
vec![0; size]
|
||||
}
|
||||
|
||||
let res = data_to_write(&vec_sized(65516 + 1), io::sink()).await;
|
||||
assert_err_display(res, "Cannot encode more than 65516 bytes, got 65517");
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn error_if_data_is_empty() {
|
||||
assert_err_display(data_to_write(&[], io::sink()).await, "Empty lines are invalid");
|
||||
}
|
||||
}
|
||||
|
||||
mod text_to_write {
|
||||
use bstr::ByteSlice;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::encode::text_to_write;
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::encode::text_to_write;
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn always_appends_a_newline() -> crate::Result {
|
||||
let mut out = Vec::new();
|
||||
let res = text_to_write(b"a", &mut out).await?;
|
||||
assert_eq!(res, 6);
|
||||
assert_eq!(out.as_bstr(), b"0006a\n".as_bstr());
|
||||
|
||||
out.clear();
|
||||
let res = text_to_write(b"a\n", &mut out).await?;
|
||||
assert_eq!(res, 7);
|
||||
assert_eq!(
|
||||
out.as_bstr(),
|
||||
b"0007a\n\n".as_bstr(),
|
||||
"newline must be appended, as the receiving end is likely to remove it"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
mod error {
|
||||
use bstr::ByteSlice;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::encode::error_to_write;
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::encode::error_to_write;
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn write_line() -> crate::Result {
|
||||
let mut out = Vec::new();
|
||||
let res = error_to_write(b"hello error", &mut out).await?;
|
||||
assert_eq!(res, 19);
|
||||
assert_eq!(out.as_bstr(), b"0013ERR hello error".as_bstr());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
mod flush_delim_response_end {
|
||||
use bstr::ByteSlice;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::encode::{delim_to_write, flush_to_write, response_end_to_write};
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::encode::{delim_to_write, flush_to_write, response_end_to_write};
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn success_flush_delim_response_end() -> crate::Result {
|
||||
let mut out = Vec::new();
|
||||
let res = flush_to_write(&mut out).await?;
|
||||
assert_eq!(res, 4);
|
||||
assert_eq!(out.as_bstr(), b"0000".as_bstr());
|
||||
|
||||
out.clear();
|
||||
let res = delim_to_write(&mut out).await?;
|
||||
assert_eq!(res, 4);
|
||||
assert_eq!(out.as_bstr(), b"0001".as_bstr());
|
||||
|
||||
out.clear();
|
||||
let res = response_end_to_write(&mut out).await?;
|
||||
assert_eq!(res, 4);
|
||||
assert_eq!(out.as_bstr(), b"0002".as_bstr());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
BIN
src-packetline/tests/fixtures/v1/01-clone.combined-output
vendored
Normal file
BIN
src-packetline/tests/fixtures/v1/01-clone.combined-output
vendored
Normal file
Binary file not shown.
BIN
src-packetline/tests/fixtures/v1/01-clone.combined-output-no-binary
vendored
Normal file
BIN
src-packetline/tests/fixtures/v1/01-clone.combined-output-no-binary
vendored
Normal file
Binary file not shown.
BIN
src-packetline/tests/fixtures/v1/01.request
vendored
Normal file
BIN
src-packetline/tests/fixtures/v1/01.request
vendored
Normal file
Binary file not shown.
BIN
src-packetline/tests/fixtures/v1/fetch/01-many-refs.request
vendored
Normal file
BIN
src-packetline/tests/fixtures/v1/fetch/01-many-refs.request
vendored
Normal file
Binary file not shown.
BIN
src-packetline/tests/fixtures/v1/fetch/01-many-refs.response
vendored
Normal file
BIN
src-packetline/tests/fixtures/v1/fetch/01-many-refs.response
vendored
Normal file
Binary file not shown.
BIN
src-packetline/tests/fixtures/v2/clone.all-received
vendored
Normal file
BIN
src-packetline/tests/fixtures/v2/clone.all-received
vendored
Normal file
Binary file not shown.
BIN
src-packetline/tests/fixtures/v2/clone.all-sent
vendored
Normal file
BIN
src-packetline/tests/fixtures/v2/clone.all-sent
vendored
Normal file
Binary file not shown.
234
src-packetline/tests/read/mod.rs
Normal file
234
src-packetline/tests/read/mod.rs
Normal file
@@ -0,0 +1,234 @@
|
||||
mod sideband;
|
||||
|
||||
pub mod streaming_peek_iter {
|
||||
use std::{io, path::PathBuf};
|
||||
|
||||
use bstr::ByteSlice;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::StreamingPeekableIter;
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::StreamingPeekableIter;
|
||||
use gix_packetline::PacketLineRef;
|
||||
|
||||
/// Resolve `path` relative to this crate's fixture directory.
fn fixture_path(path: &str) -> PathBuf {
    let mut p = PathBuf::from("tests/fixtures");
    p.push(path);
    p
}

/// Load a fixture file fully into memory, panicking if it cannot be read.
pub fn fixture_bytes(path: &str) -> Vec<u8> {
    std::fs::read(fixture_path(path)).expect("readable fixture")
}
|
||||
|
||||
// The first ref-advertisement line of the fixture conversation, used as expected value in tests.
fn first_line() -> PacketLineRef<'static> {
    PacketLineRef::Data(b"7814e8a05a59c0cf5fb186661d1551c75d1299b5 HEAD\0multi_ack thin-pack side-band side-band-64k ofs-delta shallow deepen-since deepen-not deepen-relative no-progress include-tag multi_ack_detailed symref=HEAD:refs/heads/master object-format=sha1 agent=git/2.28.0\n")
}
|
||||
|
||||
// Verify that peeking honors delimiter handling exactly like `read_line()` does,
// including `stopped_at()` tracking and continuing after `reset()`.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn peek_follows_read_line_delimiter_logic() -> crate::Result {
    let mut rd = StreamingPeekableIter::new(&b"0005a00000005b"[..], &[PacketLineRef::Flush], false);
    let res = rd.peek_line().await;
    assert_eq!(res.expect("line")??, PacketLineRef::Data(b"a"));
    rd.read_line().await;

    let res = rd.peek_line().await;
    assert!(res.is_none(), "we hit the delimiter, and thus are EOF");
    assert_eq!(
        rd.stopped_at(),
        Some(PacketLineRef::Flush),
        "Stopped tracking is done even when peeking"
    );
    let res = rd.peek_line().await;
    assert!(res.is_none(), "we are still done, no way around it");
    rd.reset();
    let res = rd.peek_line().await;
    assert_eq!(
        res.expect("line")??,
        PacketLineRef::Data(b"b"),
        "after resetting, we get past the delimiter"
    );
    Ok(())
}
|
||||
|
||||
// Verify that with `fail_on_err_lines(true)`, peeking surfaces ERR lines as io errors,
// and that iteration can be resumed with `reset()` until the following delimiter.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn peek_follows_read_line_err_logic() -> crate::Result {
    let mut rd = StreamingPeekableIter::new(&b"0005a0009ERR e0000"[..], &[PacketLineRef::Flush], false);
    rd.fail_on_err_lines(true);
    let res = rd.peek_line().await;
    assert_eq!(res.expect("line")??, PacketLineRef::Data(b"a"));
    rd.read_line().await;
    let res = rd.peek_line().await;
    assert_eq!(
        res.expect("line").unwrap_err().to_string(),
        "e",
        "io errors are used to communicate remote errors when peeking"
    );
    let res = rd.peek_line().await;
    assert!(res.is_none(), "we are still done, no way around it");
    assert_eq!(rd.stopped_at(), None, "we stopped not because of a delimiter");
    rd.reset();
    let res = rd.peek_line().await;
    assert!(res.is_none(), "it should stop due to the delimiter");
    assert_eq!(
        rd.stopped_at(),
        Some(PacketLineRef::Flush),
        "Stopped tracking is done even when peeking"
    );
    Ok(())
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn peek_eof_is_none() -> crate::Result {
|
||||
let mut rd = StreamingPeekableIter::new(&b"0005a0009ERR e0000"[..], &[PacketLineRef::Flush], false);
|
||||
rd.fail_on_err_lines(false);
|
||||
let res = rd.peek_line().await;
|
||||
assert_eq!(res.expect("line")??, PacketLineRef::Data(b"a"));
|
||||
rd.read_line().await;
|
||||
let res = rd.peek_line().await;
|
||||
assert_eq!(
|
||||
res.expect("line")??,
|
||||
PacketLineRef::Data(b"ERR e"),
|
||||
"we read the ERR but it's not interpreted as such"
|
||||
);
|
||||
rd.read_line().await;
|
||||
|
||||
let res = rd.peek_line().await;
|
||||
assert!(res.is_none(), "we peek into the flush packet, which is EOF");
|
||||
assert_eq!(rd.stopped_at(), Some(PacketLineRef::Flush));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn peek_non_data() -> crate::Result {
|
||||
let mut rd = StreamingPeekableIter::new(&b"000000010002"[..], &[PacketLineRef::ResponseEnd], false);
|
||||
let res = rd.read_line().await;
|
||||
assert_eq!(res.expect("line")??, PacketLineRef::Flush);
|
||||
let res = rd.read_line().await;
|
||||
assert_eq!(res.expect("line")??, PacketLineRef::Delimiter);
|
||||
rd.reset_with(&[PacketLineRef::Flush]);
|
||||
let res = rd.read_line().await;
|
||||
assert_eq!(res.expect("line")??, PacketLineRef::ResponseEnd);
|
||||
for _ in 0..2 {
|
||||
let res = rd.peek_line().await;
|
||||
assert_eq!(
|
||||
res.expect("error").unwrap_err().kind(),
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"peeks on error/eof repeat the error"
|
||||
);
|
||||
}
|
||||
assert_eq!(
|
||||
rd.stopped_at(),
|
||||
None,
|
||||
"The reader is configured to ignore ResponseEnd, and thus hits the end of stream"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// `fail_on_err_lines(true)` turns ERR lines into `io::Error`s and ends iteration,
/// while `replace()` swaps the input and resets that flag back to its default.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn fail_on_err_lines() -> crate::Result {
    // A delimiter packet, an ERR line, then a response-end packet.
    let input = b"00010009ERR e0002";

    // Phase 1: default behavior — the ERR line is just data.
    let mut rd = StreamingPeekableIter::new(&input[..], &[], false);
    let res = rd.read_line().await;
    assert_eq!(res.expect("line")??, PacketLineRef::Delimiter);
    let res = rd.read_line().await;
    assert_eq!(
        res.expect("line")??.as_bstr(),
        Some(b"ERR e".as_bstr()),
        "by default no special handling"
    );

    // Phase 2: with the flag enabled, the ERR line becomes an error and stops iteration.
    let mut rd = StreamingPeekableIter::new(&input[..], &[], false);
    rd.fail_on_err_lines(true);
    let res = rd.read_line().await;
    assert_eq!(res.expect("line")??, PacketLineRef::Delimiter);
    let res = rd.read_line().await;
    assert_eq!(
        res.expect("line").unwrap_err().to_string(),
        "e",
        "io errors are used to communicate remote errors"
    );
    let res = rd.read_line().await;
    assert!(res.is_none(), "iteration is done after the first error");

    // Phase 3: `replace()` installs fresh input and resets the error handling flag.
    rd.replace(input);
    let res = rd.read_line().await;
    assert_eq!(res.expect("line")??, PacketLineRef::Delimiter);
    let res = rd.read_line().await;
    assert_eq!(
        res.expect("line")??.as_bstr(),
        Some(b"ERR e".as_bstr()),
        "a 'replace' also resets error handling to the default: false"
    );
    Ok(())
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn peek() -> crate::Result {
|
||||
let bytes = fixture_bytes("v1/fetch/01-many-refs.response");
|
||||
let mut rd = StreamingPeekableIter::new(&bytes[..], &[PacketLineRef::Flush], false);
|
||||
let res = rd.peek_line().await;
|
||||
assert_eq!(res.expect("line")??, first_line(), "peek returns first line");
|
||||
let res = rd.peek_line().await;
|
||||
assert_eq!(
|
||||
res.expect("line")??,
|
||||
first_line(),
|
||||
"peeked lines are never exhausted, unless they are finally read"
|
||||
);
|
||||
let res = rd.read_line().await;
|
||||
assert_eq!(res.expect("line")??, first_line(), "read_line returns the peek once");
|
||||
let res = rd.read_line().await;
|
||||
assert_eq!(
|
||||
res.expect("line")??.as_bstr(),
|
||||
Some(b"7814e8a05a59c0cf5fb186661d1551c75d1299b5 refs/heads/master\n".as_bstr()),
|
||||
"the next read_line returns the next line"
|
||||
);
|
||||
let res = rd.peek_line().await;
|
||||
assert_eq!(
|
||||
res.expect("line")??.as_bstr(),
|
||||
Some(b"7814e8a05a59c0cf5fb186661d1551c75d1299b5 refs/remotes/origin/HEAD\n".as_bstr()),
|
||||
"peek always gets the next line verbatim"
|
||||
);
|
||||
let res = exhaust(&mut rd).await;
|
||||
assert_eq!(res, 1559);
|
||||
assert_eq!(
|
||||
rd.stopped_at(),
|
||||
Some(PacketLineRef::Flush),
|
||||
"A flush packet line ends every pack file"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Two identical responses concatenated: after exhausting the first (up to its flush),
/// a `reset()` lets the reader continue into the second; resetting past true EOF errors.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn read_from_file_and_reader_advancement() -> crate::Result {
    let mut bytes = fixture_bytes("v1/fetch/01-many-refs.response");
    // Append a second copy of the same response so the reader can be advanced into it.
    bytes.extend(fixture_bytes("v1/fetch/01-many-refs.response"));
    let mut rd = StreamingPeekableIter::new(&bytes[..], &[PacketLineRef::Flush], false);
    let res = rd.read_line().await;
    assert_eq!(res.expect("line")??, first_line());
    // +1 accounts for the line already consumed above.
    let res = exhaust(&mut rd).await;
    assert_eq!(res + 1, 1561, "it stops after seeing the flush byte");
    rd.reset();
    let res = exhaust(&mut rd).await;
    assert_eq!(
        res, 1561,
        "it should read the second part of the identical file from the previously advanced reader"
    );

    // this reset will cause actual io::Errors to occur
    rd.reset();
    let res = rd.read_line().await;
    assert_eq!(
        res.expect("some error").unwrap_err().kind(),
        io::ErrorKind::UnexpectedEof,
        "trying to keep reading from exhausted input results in Some() containing the original error"
    );
    Ok(())
}
|
||||
|
||||
#[maybe_async::maybe_async]
|
||||
async fn exhaust(rd: &mut StreamingPeekableIter<&[u8]>) -> i32 {
|
||||
let mut count = 0;
|
||||
while rd.read_line().await.is_some() {
|
||||
count += 1;
|
||||
}
|
||||
count
|
||||
}
|
||||
}
|
||||
278
src-packetline/tests/read/sideband.rs
Normal file
278
src-packetline/tests/read/sideband.rs
Normal file
@@ -0,0 +1,278 @@
|
||||
#[cfg(feature = "blocking-io")]
|
||||
use std::io::Read;
|
||||
|
||||
use bstr::{BString, ByteSlice};
|
||||
#[cfg(all(not(feature = "blocking-io"), feature = "async-io"))]
|
||||
use futures_lite::io::AsyncReadExt;
|
||||
use gix_odb::pack;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::StreamingPeekableIter;
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::StreamingPeekableIter;
|
||||
use gix_packetline::{read::ProgressAction, PacketLineRef};
|
||||
|
||||
use crate::read::streaming_peek_iter::fixture_bytes;
|
||||
|
||||
#[cfg(all(not(feature = "blocking-io"), feature = "async-io"))]
mod util {
    use std::{io::Result, pin::Pin};

    use futures_io::{AsyncBufRead, AsyncRead};
    use futures_lite::{future, AsyncBufReadExt, AsyncReadExt};

    /// Adapter that exposes an async reader through the blocking `std::io` traits
    /// by driving each operation to completion with `future::block_on`.
    /// Used to feed async sideband output into blocking-only pack consumers.
    pub struct BlockOn<T>(pub T);

    impl<T: AsyncRead + Unpin> std::io::Read for BlockOn<T> {
        // Block on a single async read and report the bytes transferred.
        fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
            future::block_on(self.0.read(buf))
        }
    }

    impl<T: AsyncBufRead + Unpin> std::io::BufRead for BlockOn<T> {
        // Block until the inner reader's buffer is filled, returning a view of it.
        fn fill_buf(&mut self) -> Result<&[u8]> {
            future::block_on(self.0.fill_buf())
        }

        // `consume` is synchronous on AsyncBufRead, but requires a pinned receiver.
        fn consume(&mut self, amt: usize) {
            Pin::new(&mut self.0).consume(amt);
        }
    }
}
|
||||
|
||||
/// End-to-end: consume a captured clone response — first the ref advertisement without
/// sideband decoding, then the pack stream with progress messages extracted from sidebands.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn read_pack_with_progress_extraction() -> crate::Result {
    let buf = fixture_bytes("v1/01-clone.combined-output");
    let mut rd = StreamingPeekableIter::new(&buf[..], &[PacketLineRef::Flush], false);

    // Read without sideband decoding
    let mut out = Vec::new();
    rd.as_read().read_to_end(&mut out).await?;
    assert_eq!(out.as_bstr(), b"808e50d724f604f69ab93c6da2919c014667bedb HEAD\0multi_ack thin-pack side-band side-band-64k ofs-delta shallow deepen-since deepen-not deepen-relative no-progress include-tag multi_ack_detailed symref=HEAD:refs/heads/master object-format=sha1 agent=git/2.28.0\n808e50d724f604f69ab93c6da2919c014667bedb refs/heads/master\n".as_bstr());

    // The reader stopped at the flush; the next plain line is the NAK acknowledgement.
    let res = rd.read_line().await;
    assert_eq!(
        res.expect("line")??.as_text().expect("data line").0.as_bstr(),
        b"NAK".as_bstr()
    );
    // Collect every progress message delivered on the sideband channel.
    // NOTE(review): `ProgressAction` appears to be `ControlFlow<…>` — `Continue` keeps reading.
    let mut seen_texts = Vec::<BString>::new();
    let mut do_nothing = |is_err: bool, data: &[u8]| -> ProgressAction {
        assert!(!is_err);
        seen_texts.push(data.as_bstr().into());
        std::ops::ControlFlow::Continue(())
    };
    let pack_read = rd.as_read_with_sidebands(&mut do_nothing);
    // The pack parser is blocking-only, so in async mode the reader is bridged via BlockOn.
    #[cfg(all(not(feature = "blocking-io"), feature = "async-io"))]
    let mut pack_entries = pack::data::input::BytesToEntriesIter::new_from_header(
        util::BlockOn(pack_read),
        pack::data::input::Mode::Verify,
        pack::data::input::EntryDataMode::Ignore,
        gix_hash::Kind::Sha1,
    )?;
    #[cfg(feature = "blocking-io")]
    let mut pack_entries = pack::data::input::BytesToEntriesIter::new_from_header(
        pack_read,
        pack::data::input::Mode::Verify,
        pack::data::input::EntryDataMode::Ignore,
        gix_hash::Kind::Sha1,
    )?;
    // Skip to the final entry, which carries the pack trailer checksum.
    let all_but_last = pack_entries.size_hint().0 - 1;
    let last = pack_entries.nth(all_but_last).expect("last entry")?;
    drop(pack_entries);

    assert_eq!(
        last.trailer
            .expect("trailer to exist on last entry")
            .to_hex()
            .to_string(),
        "150a1045f04dc0fc2dbf72313699fda696bf4126"
    );
    // The sideband handler must have observed every progress line in order.
    assert_eq!(
        seen_texts,
        [
            "Enumerating objects: 3, done.",
            "Counting objects: 33% (1/3)\r",
            "Counting objects: 66% (2/3)\r",
            "Counting objects: 100% (3/3)\r",
            "Counting objects: 100% (3/3), done.",
            "Total 3 (delta 0), reused 0 (delta 0), pack-reused 0"
        ]
        .iter()
        .map(|v| v.as_bytes().as_bstr().to_owned())
        .collect::<Vec<_>>()
    );
    Ok(())
}
|
||||
|
||||
/// The `read_line_to_string` trait method yields exactly one packet line per call,
/// returns empty strings once a flush is hit, and resumes after a `reset()`.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn read_line_trait_method_reads_one_packet_line_at_a_time() -> crate::Result {
    let buf = fixture_bytes("v1/01-clone.combined-output-no-binary");

    let mut rd = StreamingPeekableIter::new(&buf[..], &[PacketLineRef::Flush], false);

    let mut out = String::new();
    let mut r = rd.as_read();
    r.read_line_to_string(&mut out).await?;
    assert_eq!(out, "808e50d724f604f69ab93c6da2919c014667bedb HEAD\0multi_ack thin-pack side-band side-band-64k ofs-delta shallow deepen-since deepen-not deepen-relative no-progress include-tag multi_ack_detailed symref=HEAD:refs/heads/master object-format=sha1 agent=git/2.28.0\n");
    out.clear();
    r.read_line_to_string(&mut out).await?;
    assert_eq!(out, "808e50d724f604f69ab93c6da2919c014667bedb refs/heads/master\n");
    out.clear();
    r.read_line_to_string(&mut out).await?;
    assert_eq!(out, "", "flush means empty lines…");
    out.clear();
    r.read_line_to_string(&mut out).await?;
    assert_eq!(out, "", "…which can't be overcome unless the reader is reset");
    assert_eq!(
        r.stopped_at(),
        Some(PacketLineRef::Flush),
        "it knows what stopped the reader"
    );

    // Drop the borrowing reader before resetting the underlying iterator.
    drop(r);
    rd.reset();

    let mut r = rd.as_read();
    r.read_line_to_string(&mut out).await?;
    assert_eq!(out, "NAK\n");

    drop(r);

    // With a sideband handler installed, data-channel payloads come through as-is.
    let mut r = rd.as_read_with_sidebands(|_, _| std::ops::ControlFlow::Continue(()));
    out.clear();
    r.read_line_to_string(&mut out).await?;
    // "&" is the first data-channel payload of this particular fixture.
    assert_eq!(out, "&");

    out.clear();
    r.read_line_to_string(&mut out).await?;
    assert_eq!(out, "");

    Ok(())
}
|
||||
|
||||
/// `read_data_line()` yields exactly one packet line per call and returns `None`
/// at a flush; with a no-op sideband handler, sideband bytes stay embedded in the line.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn readline_reads_one_packet_line_at_a_time() -> crate::Result {
    let buf = fixture_bytes("v1/01-clone.combined-output-no-binary");

    let mut rd = StreamingPeekableIter::new(&buf[..], &[PacketLineRef::Flush], false);

    let mut r = rd.as_read();
    let line = r.read_data_line().await.unwrap()??.as_bstr().unwrap();
    assert_eq!(line, "808e50d724f604f69ab93c6da2919c014667bedb HEAD\0multi_ack thin-pack side-band side-band-64k ofs-delta shallow deepen-since deepen-not deepen-relative no-progress include-tag multi_ack_detailed symref=HEAD:refs/heads/master object-format=sha1 agent=git/2.28.0\n");
    let line = r.read_data_line().await.unwrap()??.as_bstr().unwrap();
    assert_eq!(line, "808e50d724f604f69ab93c6da2919c014667bedb refs/heads/master\n");
    let line = r.read_data_line().await;
    assert!(line.is_none(), "flush means `None`");
    let line = r.read_data_line().await;
    assert!(line.is_none(), "…which can't be overcome unless the reader is reset");
    assert_eq!(
        r.stopped_at(),
        Some(PacketLineRef::Flush),
        "it knows what stopped the reader"
    );

    // Drop the borrowing reader before resetting the underlying iterator.
    drop(r);
    rd.reset();

    let mut r = rd.as_read();
    let line = r.read_data_line().await.unwrap()??.as_bstr().unwrap();
    assert_eq!(line.as_bstr(), "NAK\n");

    drop(r);

    // The handler ignores sidebands, so the leading channel byte (\x02 = progress)
    // remains part of the returned line.
    let mut r = rd.as_read_with_sidebands(|_, _| std::ops::ControlFlow::Continue(()));
    let line = r.read_data_line().await.unwrap()??.as_bstr().unwrap();
    assert_eq!(
        line.as_bstr(),
        "\x02Enumerating objects: 3, done.\n",
        "sidebands are ignored entirely here"
    );
    // Skip the remaining progress lines of the fixture.
    for _ in 0..6 {
        let _discard_more_progress = r.read_data_line().await.unwrap()??.as_bstr().unwrap();
    }
    let line = r.read_data_line().await;
    assert!(line.is_none(), "and we have reached the end");

    Ok(())
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn peek_past_an_actual_eof_is_an_error() -> crate::Result {
|
||||
let input = b"0009ERR e";
|
||||
let mut rd = StreamingPeekableIter::new(&input[..], &[], false);
|
||||
let mut reader = rd.as_read();
|
||||
let res = reader.peek_data_line().await;
|
||||
assert_eq!(res.expect("one line")??, b"ERR e");
|
||||
|
||||
let mut buf = String::new();
|
||||
reader.read_line_to_string(&mut buf).await?;
|
||||
assert_eq!(
|
||||
buf, "ERR e",
|
||||
"by default ERR lines won't propagate as failure but are merely text"
|
||||
);
|
||||
|
||||
let res = reader.peek_data_line().await;
|
||||
assert_eq!(
|
||||
res.expect("an err").expect_err("foo").kind(),
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"peeking past the end is not an error as the caller should make sure we don't try 'invalid' reads"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn peek_past_a_delimiter_is_no_error() -> crate::Result {
|
||||
let input = b"0009hello0000";
|
||||
let mut rd = StreamingPeekableIter::new(&input[..], &[PacketLineRef::Flush], false);
|
||||
let mut reader = rd.as_read();
|
||||
let res = reader.peek_data_line().await;
|
||||
assert_eq!(res.expect("one line")??, b"hello");
|
||||
|
||||
let mut buf = String::new();
|
||||
reader.read_line_to_string(&mut buf).await?;
|
||||
assert_eq!(buf, "hello");
|
||||
|
||||
let res = reader.peek_data_line().await;
|
||||
assert!(
|
||||
res.is_none(),
|
||||
"peeking past a flush packet is a 'natural' event that should not cause an error"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// ERR lines surface through the `Read` adapter as `io::Error`s whose inner error
/// downcasts to the crate's read error; reads stop afterwards until a reset.
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
async fn handling_of_err_lines() {
    // Two ERR lines followed by a flush packet.
    let input = b"0009ERR e0009ERR x0000";
    let mut rd = StreamingPeekableIter::new(&input[..], &[], false);
    rd.fail_on_err_lines(true);
    let mut buf = [0u8; 2];
    let mut reader = rd.as_read();
    let res = reader.read(buf.as_mut()).await;
    let err = res.unwrap_err();
    assert_eq!(err.to_string(), "e", "it respects errors and passes them on");
    // The io::Error wraps the crate's typed read error, carrying the remote message.
    assert_eq!(
        err.into_inner()
            .expect("inner err")
            .downcast::<gix_packetline::read::Error>()
            .expect("it's this type")
            .message,
        "e",
    );
    let res = reader.read(buf.as_mut()).await;
    assert_eq!(
        res.expect("read to succeed - EOF"),
        0,
        "it stops reading after an error despite there being more to read"
    );
    // Resetting with a new stop-packet set resumes reading from the second ERR line.
    reader.reset_with(&[PacketLineRef::Flush]);
    let res = reader.read(buf.as_mut()).await;
    assert_eq!(
        res.unwrap_err().to_string(),
        "x",
        "after a reset it continues reading, but retains the 'fail_on_err_lines' setting"
    );
    assert_eq!(
        reader.stopped_at(),
        None,
        "An error can also be the reason, which is not distinguishable from an EOF"
    );
}
|
||||
61
src-packetline/tests/write/mod.rs
Normal file
61
src-packetline/tests/write/mod.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
#[cfg(feature = "blocking-io")]
|
||||
use std::io::Write;
|
||||
|
||||
use bstr::ByteSlice;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use futures_lite::prelude::*;
|
||||
#[cfg(all(feature = "async-io", not(feature = "blocking-io")))]
|
||||
use gix_packetline::async_io::Writer;
|
||||
#[cfg(all(feature = "blocking-io", not(feature = "async-io")))]
|
||||
use gix_packetline::blocking_io::Writer;
|
||||
|
||||
/// Maximum number of payload bytes a single pkt-line may carry.
const MAX_DATA_LEN: usize = 65516;
/// Maximum encoded length of one pkt-line: 4 hex length digits plus the payload.
const MAX_LINE_LEN: usize = 4 + MAX_DATA_LEN;
|
||||
|
||||
#[allow(clippy::unused_io_amount)] // under test
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn each_write_results_in_one_line() -> crate::Result {
|
||||
let mut w = Writer::new(Vec::new());
|
||||
w.write_all(b"hello").await?;
|
||||
w.write(b"world!").await?;
|
||||
let buf = w.into_inner();
|
||||
assert_eq!(buf.as_bstr(), b"0009hello000aworld!".as_bstr());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::unused_io_amount)] // under test
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn write_text_and_write_binary() -> crate::Result {
|
||||
let buf = {
|
||||
let mut w = Writer::new(Vec::new());
|
||||
w.enable_text_mode();
|
||||
w.write_all(b"hello").await?;
|
||||
w.enable_binary_mode();
|
||||
w.write(b"world").await?;
|
||||
w.into_inner()
|
||||
};
|
||||
assert_eq!(buf.as_bstr(), b"000ahello\n0009world".as_bstr());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::unused_io_amount)] // under test
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn huge_writes_are_split_into_lines() -> crate::Result {
|
||||
let buf = {
|
||||
let data = vec![0u8; MAX_DATA_LEN * 2];
|
||||
let mut w = Writer::new(Vec::new());
|
||||
w.write(&data).await?;
|
||||
w.into_inner()
|
||||
};
|
||||
assert_eq!(buf.len(), MAX_LINE_LEN * 2);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[maybe_async::test(feature = "blocking-io", async(feature = "async-io", async_std::test))]
|
||||
async fn empty_writes_fail_with_error() {
|
||||
let res = Writer::new(Vec::new()).write(&[]).await;
|
||||
assert_eq!(
|
||||
res.unwrap_err().to_string(),
|
||||
"empty packet lines are not permitted as '0004' is invalid"
|
||||
);
|
||||
}
|
||||
Reference in New Issue
Block a user