create src

This commit is contained in:
awfixer
2026-03-11 02:04:19 -07:00
commit 52f7a22bf2
2595 changed files with 402870 additions and 0 deletions

1173
src-archive/CHANGELOG.md Normal file

File diff suppressed because it is too large Load Diff

60
src-archive/Cargo.toml Normal file
View File

@@ -0,0 +1,60 @@
[package]
name = "src-archive"
version = "0.29.0"
repository = "https://github.com/GitoxideLabs/gitoxide"
license = "MIT OR Apache-2.0"
# Fixed typo: was "archive generation from of a worktree stream".
description = "archive generation from a worktree stream"
authors = ["Sebastian Thiel <sebastian.thiel@icloud.com>"]
edition = "2021"
rust-version = "1.82"
include = ["src/**/*", "LICENSE-*"]

# Inherit the workspace-wide lint configuration.
# (Was a top-level `lints.workspace = true` dotted key placed before `[package]`,
# which is legal TOML but easily misread as belonging to another table.)
[lints]
workspace = true

[lib]
doctest = false

[features]
default = ["tar", "tar_gz", "zip"]
## Enable support for the SHA-1 hash by forwarding the feature to dependencies.
sha1 = ["src-worktree-stream/sha1", "src-object/sha1"]
## Enable the `tar` archive format. It has support for all information, except for object ids.
tar = ["dep:tar", "dep:src-path"]
## Enable the `tar.gz` archive format.
tar_gz = ["tar", "dep:flate2"]
## Enable the `zip` archive format.
zip = ["dep:rawzip", "dep:flate2"]

[dependencies]
src-worktree-stream = { version = "^0.29.0", path = "../src-worktree-stream" }
src-object = { version = "^0.57.0", path = "../src-object" }
src-path = { version = "^0.11.1", path = "../src-path", optional = true }
src-date = { version = "^0.15.0", path = "../src-date" }
flate2 = { version = "1.1.9", optional = true, default-features = false, features = ["zlib-rs"] }
rawzip = { version = "0.4.3", optional = true }
src-error = { version = "^0.2.0", path = "../src-error" }
bstr = { version = "1.12.0", default-features = false }
tar = { version = "0.4.38", optional = true }
document-features = { version = "0.2.0", optional = true }

[dev-dependencies]
src-testtools = { path = "../tests/tools" }
src-odb = { path = "../src-odb" }
src-worktree = { path = "../src-worktree", default-features = false, features = [
    "attributes",
] }
src-hash = { path = "../src-hash", features = ["sha1"] }
src-attributes = { path = "../src-attributes" }
src-object = { path = "../src-object" }
src-filter = { path = "../src-filter" }

[package.metadata.docs.rs]
all-features = true
features = ["sha1", "document-features"]

1
src-archive/LICENSE-APACHE Symbolic link
View File

@@ -0,0 +1 @@
../LICENSE-APACHE

1
src-archive/LICENSE-MIT Symbolic link
View File

@@ -0,0 +1 @@
../LICENSE-MIT

90
src-archive/src/lib.rs Normal file
View File

@@ -0,0 +1,90 @@
//! The implementation of creating an archive from a worktree stream, similar to `git archive`.
//!
//! ## Deviation
//!
//! This implementation is early and just does the basics. Git does more to support more context when filtering and to keep
//! more information about entries in the various archive formats.
//! `tar` is implemented in a very basic fashion only.
//!
//! ## Feature Flags
//! All features are related to which container formats are available.
#![cfg_attr(
all(doc, feature = "document-features"),
doc = ::document_features::document_features!()
)]
#![cfg_attr(all(doc, feature = "document-features"), feature(doc_cfg))]
#![deny(rust_2018_idioms, missing_docs)]
#![forbid(unsafe_code)]
use bstr::BString;
/// The error returned by [`write_stream()`] and [`write_stream_seek()`].
pub type Error = gix_error::Exn<gix_error::Message>;
/// The supported container formats for use in [`write_stream()`].
#[derive(Default, PartialEq, Eq, Copy, Clone, Debug)]
pub enum Format {
    /// An internal format that is suitable only for intra-process communication.
    ///
    /// All transformations in the options are ignored. Calling [`write_stream`] is disallowed
    /// as it's more efficient to call [gix_worktree_stream::Stream::into_read()] right away.
    /// It is provided here as a basis available without extra dependencies, and as a debugging tool.
    #[default]
    InternalTransientNonPersistable,
    /// A standard `tar` archive.
    ///
    /// Use it as well if a custom container format is desired. The idea is to decode it on a separate thread
    /// to rewrite the data to the desired format.
    Tar,
    /// A convenience format that will `gzip` deflate the `tar` stream.
    TarGz {
        /// If `None`, use the default compression level. Otherwise use the given one which
        /// ranges from 0-9 for the deflate algorithm.
        ///
        /// NOTE(review): unlike the zip writer, the tar.gz writer does not clamp this value
        /// before handing it to `flate2` - confirm that levels above 9 are rejected or clamped upstream.
        compression_level: Option<u8>,
    },
    /// A standard `zip` archive. Note that this format silently converts illformed UTF-8 to UTF-8, which will
    /// equal a change of path.
    ///
    /// NOTE(review): the current zip writer actually *errors* on ill-formed UTF-8 paths instead of
    /// converting them (see `append_zip_entry()`) - confirm which behavior is intended and align doc/code.
    ///
    /// Requires the `zip` feature toggle to have an effect.
    ///
    /// ### Shortcoming
    ///
    /// Even though symlinks are stored as such, for some reason at least on MacOS those aren't restored. That works,
    /// however, when letting `git` create the archive.
    Zip {
        /// If `None`, use the default compression level. Otherwise use the given one which
        /// ranges from 0-9 for the deflate algorithm; values outside this range are clamped.
        compression_level: Option<u8>,
    },
}
/// Options for configuring [`write_stream()`].
#[derive(Clone, Debug)]
pub struct Options {
    /// The archive's format.
    pub format: Format,
    /// Given a `path`, originating in the git tree, to place into the archive, put `<prefix>/path` in front of it.
    ///
    /// Note that `/` should be used as separator, and that a prefix directory has to end with `/`.
    /// The prefix is prepended verbatim - no separator is inserted automatically.
    pub tree_prefix: Option<BString>,
    /// The modification time for all entries in the archive as seen since UNIX epoch.
    ///
    /// Defaults to the current time. The caller may set this to the commit time if available.
    pub modification_time: gix_date::SecondsSinceUnixEpoch,
}
impl Default for Options {
fn default() -> Self {
Options {
format: Default::default(),
tree_prefix: None,
modification_time: std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|t| t.as_secs() as i64)
.unwrap_or_default(),
}
}
}
mod write;
pub use write::{write_stream, write_stream_seek};

345
src-archive/src/write.rs Normal file
View File

@@ -0,0 +1,345 @@
#[cfg(any(feature = "tar", feature = "tar_gz", feature = "zip"))]
use gix_error::ResultExt;
use gix_error::{message, ErrorExt};
use gix_worktree_stream::{Entry, Stream};
use crate::{Error, Format, Options};
#[cfg(feature = "zip")]
use std::io::Write;
/// Write all stream entries in `stream` as provided by `next_entry(stream)` to `out` configured according to `opts` which
/// also includes the streaming format.
///
/// ### Performance
///
/// * The caller should be sure `out` is fast enough. If in doubt, wrap in [`std::io::BufWriter`].
/// * Further, big files aren't suitable for archival into `tar` archives as they require the size of the stream to be known
///   prior to writing the header of each entry.
// Without `tar` support compiled in, the tar-specific bindings below are unused and these lints would fire.
#[cfg_attr(not(feature = "tar"), allow(unused_mut, unused_variables))]
pub fn write_stream<NextFn>(
    stream: &mut Stream,
    mut next_entry: NextFn,
    out: impl std::io::Write,
    opts: Options,
) -> Result<(), Error>
where
    NextFn: FnMut(&mut Stream) -> Result<Option<Entry<'_>>, gix_error::Exn>,
{
    // The internal format is a debugging aid only - refuse to persist it as an archive.
    if opts.format == Format::InternalTransientNonPersistable {
        return Err(message("The internal format cannot be used as an archive, it's merely a debugging tool").raise());
    }
    #[cfg(any(feature = "tar", feature = "tar_gz"))]
    {
        // Couples the archive builder for the chosen format with a reusable per-entry scratch buffer.
        enum State<W: std::io::Write> {
            #[cfg(feature = "tar")]
            Tar((tar::Builder<W>, Vec<u8>)),
            #[cfg(feature = "tar_gz")]
            TarGz((tar::Builder<flate2::write::GzEncoder<W>>, Vec<u8>)),
        }
        impl<W: std::io::Write> State<W> {
            /// Create the writer state for `format`, writing to `out`.
            /// `mtime` is only used for the gzip header of `tar.gz` archives.
            pub fn new(format: Format, mtime: gix_date::SecondsSinceUnixEpoch, out: W) -> Result<Self, Error> {
                match format {
                    Format::InternalTransientNonPersistable => unreachable!("handled earlier"),
                    // `zip` needs `std::io::Seek` - callers must use `write_stream_seek()` instead.
                    Format::Zip { .. } => {
                        Err(message("Cannot create a zip archive if output stream does not support seek").raise())
                    }
                    Format::Tar => {
                        #[cfg(feature = "tar")]
                        {
                            Ok(State::Tar((
                                {
                                    let mut ar = tar::Builder::new(out);
                                    // Deterministic headers keep archives reproducible across runs.
                                    ar.mode(tar::HeaderMode::Deterministic);
                                    ar
                                },
                                Vec::with_capacity(64 * 1024),
                            )))
                        }
                        #[cfg(not(feature = "tar"))]
                        {
                            Err(message!("Support for the format '{:?}' was not compiled in", Format::Tar).raise())
                        }
                    }
                    Format::TarGz { compression_level } => {
                        #[cfg(feature = "tar_gz")]
                        {
                            Ok(State::TarGz((
                                {
                                    // NOTE(review): `mtime as u32` truncates negative or post-2106 times - confirm acceptable.
                                    let gz = flate2::GzBuilder::new().mtime(mtime as u32).write(
                                        out,
                                        match compression_level {
                                            None => flate2::Compression::default(),
                                            // NOTE(review): unlike the zip path, the level is not clamped to 0-9 here.
                                            Some(level) => flate2::Compression::new(u32::from(level)),
                                        },
                                    );
                                    let mut ar = tar::Builder::new(gz);
                                    ar.mode(tar::HeaderMode::Deterministic);
                                    ar
                                },
                                Vec::with_capacity(64 * 1024),
                            )))
                        }
                        #[cfg(not(feature = "tar_gz"))]
                        {
                            Err(message!(
                                "Support for the format '{:?}' was not compiled in",
                                Format::TarGz {
                                    compression_level: None,
                                }
                            )
                            .raise())
                        }
                    }
                }
            }
        }
        let mut state = State::new(opts.format, opts.modification_time, out)?;
        // Drain the stream entry by entry, appending each to the archive.
        while let Some(entry) =
            next_entry(stream).or_raise(|| message("Could not retrieve the next entry from the stream"))?
        {
            match &mut state {
                #[cfg(feature = "tar")]
                State::Tar((ar, buf)) => {
                    append_tar_entry(ar, buf, entry, opts.modification_time, &opts)?;
                }
                #[cfg(feature = "tar_gz")]
                State::TarGz((ar, buf)) => {
                    append_tar_entry(ar, buf, entry, opts.modification_time, &opts)?;
                }
            }
        }
        // Finalize the container so trailers (tar EOF blocks, gzip footer) are written out.
        match state {
            #[cfg(feature = "tar")]
            State::Tar((mut ar, _)) => {
                ar.finish().or_raise(|| message("Could not finish tar archive"))?;
            }
            #[cfg(feature = "tar_gz")]
            State::TarGz((ar, _)) => {
                ar.into_inner()
                    .or_raise(|| message("Could not finish tar.gz archive"))?
                    .finish()
                    .or_raise(|| message("Could not finish gzip stream"))?;
            }
        }
    }
    #[cfg(not(any(feature = "tar", feature = "tar_gz")))]
    {
        let _ = (next_entry, out);
        return Err(message!("Support for the format '{:?}' was not compiled in", opts.format).raise());
    }
    // Unreachable only when no tar support is compiled in - then the block above already returned.
    #[allow(unreachable_code)]
    Ok(())
}
/// Like [`write_stream()`], but requires [`std::io::Seek`] for `out`.
///
/// Note that `zip` is able to stream big files, which our `tar` implementation is not able to do, which makes it the
/// only suitable container to support huge files from `git-lfs` without consuming excessive amounts of memory.
#[cfg_attr(not(feature = "zip"), allow(unused_mut, unused_variables))]
pub fn write_stream_seek<NextFn>(
    stream: &mut Stream,
    mut next_entry: NextFn,
    out: impl std::io::Write + std::io::Seek,
    opts: Options,
) -> Result<(), Error>
where
    NextFn: FnMut(&mut Stream) -> Result<Option<Entry<'_>>, gix_error::Exn>,
{
    // Every format but zip is delegated to the non-seeking implementation.
    let compression_level = match opts.format {
        Format::Zip { compression_level } => compression_level.map(i64::from),
        _other => return write_stream(stream, next_entry, out, opts),
    };
    #[cfg(feature = "zip")]
    {
        let mut ar = rawzip::ZipArchiveWriter::new(out);
        // Scratch buffer, reused across entries for reading symlink targets.
        let mut buf = Vec::new();
        let mtime = rawzip::time::UtcDateTime::from_unix(opts.modification_time);
        while let Some(entry) =
            next_entry(stream).or_raise(|| message("Could not retrieve the next entry from the stream"))?
        {
            append_zip_entry(
                &mut ar,
                entry,
                &mut buf,
                mtime,
                compression_level,
                opts.tree_prefix.as_ref(),
            )?;
        }
        // Writes the central directory - without it the archive is unreadable.
        ar.finish().or_raise(|| message("Could not finish zip archive"))?;
    }
    #[cfg(not(feature = "zip"))]
    {
        let _ = compression_level;
        #[allow(clippy::needless_return)]
        return Err(message!(
            "Support for the format '{:?}' was not compiled in",
            Format::Zip {
                compression_level: None
            }
        )
        .raise());
    }
    #[cfg(feature = "zip")]
    Ok(())
}
/// Append a single stream `entry` to the zip archive `ar`.
///
/// `buf` is a scratch buffer reused for reading symlink targets, `mtime` stamps every entry,
/// `compression_level` (0-9, clamped) configures deflate for regular files, and `tree_prefix`,
/// if set, is prepended to each entry path.
#[cfg(feature = "zip")]
fn append_zip_entry<W: std::io::Write + std::io::Seek>(
    ar: &mut rawzip::ZipArchiveWriter<W>,
    mut entry: gix_worktree_stream::Entry<'_>,
    buf: &mut Vec<u8>,
    mtime: rawzip::time::UtcDateTime,
    compression_level: Option<i64>,
    tree_prefix: Option<&bstr::BString>,
) -> Result<(), Error> {
    use bstr::ByteSlice;
    let path = add_prefix(entry.relative_path(), tree_prefix).into_owned();
    // NOTE(review): directories also receive 0o644 here, i.e. without the execute bit,
    // matching the tar writer's behavior - confirm this is intended.
    let unix_permissions = if entry.mode.is_executable() { 0o755 } else { 0o644 };
    // Zip paths must be valid UTF-8; ill-formed paths are rejected with an error here.
    let path = path
        .to_str()
        .or_raise(|| message!("Invalid UTF-8 in entry path: {path:?}"))?;
    match entry.mode.kind() {
        gix_object::tree::EntryKind::Blob | gix_object::tree::EntryKind::BlobExecutable => {
            let file_builder = ar
                .new_file(path)
                .compression_method(rawzip::CompressionMethod::Deflate)
                .last_modified(mtime)
                .unix_permissions(unix_permissions);
            let (mut zip_entry, config) = file_builder
                .start()
                .or_raise(|| message("Could not start zip file entry"))?;
            // Use flate2 for compression. Level 9 is the maximum compression level for deflate.
            let encoder = flate2::write::DeflateEncoder::new(
                &mut zip_entry,
                match compression_level {
                    None => flate2::Compression::default(),
                    Some(level) => flate2::Compression::new(level.clamp(0, 9) as u32),
                },
            );
            let mut writer = config.wrap(encoder);
            std::io::copy(&mut entry, &mut writer).or_raise(|| message("Could not write zip entry data"))?;
            // Finish in order: the rawzip wrapper (yields the data descriptor), then the
            // deflate stream, then the zip entry itself.
            let (encoder, descriptor) = writer
                .finish()
                .or_raise(|| message("Could not finish zip entry writer"))?;
            encoder
                .finish()
                .or_raise(|| message("Could not finish deflate encoder"))?;
            zip_entry
                .finish(descriptor)
                .or_raise(|| message("Could not finish zip entry"))?;
        }
        gix_object::tree::EntryKind::Tree | gix_object::tree::EntryKind::Commit => {
            // rawzip requires directory paths to end with '/'
            let mut dir_path = path.to_owned();
            if !dir_path.ends_with('/') {
                dir_path.push('/');
            }
            ar.new_dir(&dir_path)
                .last_modified(mtime)
                .unix_permissions(unix_permissions)
                .create()
                .or_raise(|| message("Could not create zip directory entry"))?;
        }
        gix_object::tree::EntryKind::Link => {
            buf.clear();
            // The entry's data stream is the link target.
            std::io::copy(&mut entry, buf).or_raise(|| message("Could not read symlink target"))?;
            // For symlinks, we need to create a file with symlink permissions
            let symlink_path = path;
            let target = buf.as_bstr().to_str().or_raise(|| {
                message!(
                    "Invalid UTF-8 in symlink target for entry '{symlink_path}': {:?}",
                    buf.as_bstr()
                )
            })?;
            // Stored (uncompressed) so the target is directly readable from the archive.
            let (mut zip_entry, config) = ar
                .new_file(symlink_path)
                .compression_method(rawzip::CompressionMethod::Store)
                .last_modified(mtime)
                .unix_permissions(0o120644) // Symlink mode
                .start()
                .or_raise(|| message("Could not start zip symlink entry"))?;
            let mut writer = config.wrap(&mut zip_entry);
            writer
                .write_all(target.as_bytes())
                .or_raise(|| message("Could not write symlink target"))?;
            let (_, descriptor) = writer
                .finish()
                .or_raise(|| message("Could not finish zip symlink writer"))?;
            zip_entry
                .finish(descriptor)
                .or_raise(|| message("Could not finish zip symlink entry"))?;
        }
    }
    Ok(())
}
/// Append a single stream `entry` to the tar builder `ar`.
///
/// `buf` is a scratch buffer that receives the entry's data (or, for symlinks, the link target),
/// `mtime_seconds_since_epoch` stamps every entry, and the path prefix comes from `opts.tree_prefix`.
#[cfg(any(feature = "tar", feature = "tar_gz"))]
fn append_tar_entry<W: std::io::Write>(
    ar: &mut tar::Builder<W>,
    buf: &mut Vec<u8>,
    mut entry: gix_worktree_stream::Entry<'_>,
    mtime_seconds_since_epoch: i64,
    opts: &Options,
) -> Result<(), Error> {
    let mut header = tar::Header::new_gnu();
    // NOTE(review): a negative mtime would wrap via `as u64` - confirm pre-epoch times cannot occur here.
    header.set_mtime(mtime_seconds_since_epoch as u64);
    header.set_entry_type(tar_entry_type(entry.mode));
    header.set_mode(if entry.mode.is_executable() { 0o755 } else { 0o644 });
    buf.clear();
    // Tar needs the size upfront, so the entire entry is buffered in memory first.
    std::io::copy(&mut entry, buf).or_raise(|| message("Could not read entry data"))?;
    let path = gix_path::from_bstr(add_prefix(entry.relative_path(), opts.tree_prefix.as_ref()));
    header.set_size(buf.len() as u64);
    if entry.mode.is_link() {
        use bstr::ByteSlice;
        // For symlinks the buffered data is the link target, which lives in the header, not the body.
        let target = gix_path::from_bstr(buf.as_bstr());
        header.set_entry_type(tar::EntryType::Symlink);
        header.set_size(0);
        ar.append_link(&mut header, path, target)
            .or_raise(|| message("Could not append symlink to tar archive"))?;
    } else {
        ar.append_data(&mut header, path, buf.as_slice())
            .or_raise(|| message("Could not append data to tar archive"))?;
    }
    Ok(())
}
/// Map a git tree entry `mode` to the `tar` entry type to store in the archive header.
///
/// Symbolic links map to [`tar::EntryType::Symlink`] - `tar::EntryType::Link` denotes a
/// *hard* link, which git trees cannot represent. The previous mapping to `Link` was latent
/// only because `append_tar_entry()` overrides the type for links before writing.
#[cfg(any(feature = "tar", feature = "tar_gz"))]
fn tar_entry_type(mode: gix_object::tree::EntryMode) -> tar::EntryType {
    use gix_object::tree::EntryKind;
    use tar::EntryType;
    match mode.kind() {
        // Submodules (`Commit`) are represented as directories, just like trees.
        EntryKind::Tree | EntryKind::Commit => EntryType::Directory,
        EntryKind::Blob | EntryKind::BlobExecutable => EntryType::Regular,
        EntryKind::Link => EntryType::Symlink,
    }
}
/// Return `relative_path` with `prefix` prepended verbatim, or the path unchanged if there is no prefix.
///
/// Only allocates when a prefix is present.
#[cfg(any(feature = "tar", feature = "tar_gz", feature = "zip"))]
fn add_prefix<'a>(relative_path: &'a bstr::BStr, prefix: Option<&bstr::BString>) -> std::borrow::Cow<'a, bstr::BStr> {
    use std::borrow::Cow;
    if let Some(prefix) = prefix {
        use bstr::ByteVec;
        let mut prefixed = prefix.clone();
        prefixed.push_str(relative_path);
        Cow::Owned(prefixed)
    } else {
        Cow::Borrowed(relative_path)
    }
}

View File

@@ -0,0 +1,312 @@
/// Parse the 40-character hexadecimal string `hex` into an `ObjectId`, panicking on invalid input.
fn hex_to_id(hex: &str) -> gix_hash::ObjectId {
    let raw = hex.as_bytes();
    gix_hash::ObjectId::from_hex(raw).expect("40 bytes hex")
}
mod from_tree {
use std::{io::Read, path::PathBuf};
use gix_archive::Format;
use gix_attributes::glob::pattern::Case;
use gix_error::{Exn, ResultExt};
use gix_object::tree::EntryKind;
use gix_testtools::bstr::ByteSlice;
use gix_worktree::stack::state::attributes::Source;
use crate::hex_to_id;
#[cfg(target_pointer_width = "64")]
const EXPECTED_BUFFER_LENGTH: usize = 551;
#[cfg(target_pointer_width = "32")]
const EXPECTED_BUFFER_LENGTH: usize = 479;
#[test]
fn basic_usage_internal() -> gix_testtools::Result {
basic_usage(gix_archive::Format::InternalTransientNonPersistable, |buf| {
assert_eq!(buf.len(), EXPECTED_BUFFER_LENGTH);
let mut stream = gix_worktree_stream::Stream::from_read(std::io::Cursor::new(buf));
let mut paths_and_modes = Vec::new();
while let Some(mut entry) = stream.next_entry().expect("entry retrieval does not fail") {
paths_and_modes.push((entry.relative_path().to_owned(), entry.mode.kind(), entry.id));
let mut buf = Vec::new();
entry.read_to_end(&mut buf).expect("stream can always be read");
}
assert_eq!(
paths_and_modes,
&[
(
".gitattributes".into(),
EntryKind::Blob,
hex_to_id("45c160c35c17ad264b96431cceb9793160396e99")
),
(
"a".into(),
EntryKind::Blob,
hex_to_id("45b983be36b73c0788dc9cbcb76cbb80fc7bb057")
),
(
"symlink-to-a".into(),
EntryKind::Link,
hex_to_id("2e65efe2a145dda7ee51d1741299f848e5bf752e")
),
(
"dir/b".into(),
EntryKind::Blob,
hex_to_id("ab4a98190cf776b43cb0fe57cef231fb93fd07e6")
),
(
"dir/subdir/exe".into(),
EntryKind::BlobExecutable,
hex_to_id("e69de29bb2d1d6434b8b29ae775ad8c2e48c5391")
),
(
"extra-file".into(),
EntryKind::Blob,
hex_to_id("0000000000000000000000000000000000000000")
),
(
"extra-exe".into(),
if cfg!(windows) {
EntryKind::Blob
} else {
EntryKind::BlobExecutable
},
hex_to_id("0000000000000000000000000000000000000000")
),
(
"extra-dir-empty".into(),
EntryKind::Tree,
hex_to_id("0000000000000000000000000000000000000000")
),
(
"extra-dir/symlink-to-extra".into(),
EntryKind::Link,
hex_to_id("0000000000000000000000000000000000000000")
)
]
);
Ok(())
})
}
#[test]
#[cfg(feature = "tar")]
fn basic_usage_tar() -> gix_testtools::Result {
basic_usage(gix_archive::Format::Tar, |buf| {
use tar::EntryType;
let mut ar = tar::Archive::new(buf.as_slice());
let mut out = Vec::new();
for entry in ar.entries()? {
let mut entry = entry?;
let copied = std::io::copy(&mut entry, &mut std::io::sink())?;
let header = entry.header();
assert_eq!(
copied,
header.size()?,
"size field matches the size of the actual stream"
);
out.push((
entry.path_bytes().as_bstr().to_owned(),
header.entry_type(),
header.size()?,
header.mode()?,
));
}
assert_eq!(
out,
[
("prefix/.gitattributes", EntryType::Regular, 56, 420),
("prefix/a", EntryType::Regular, 3, 420),
("prefix/symlink-to-a", EntryType::Symlink, 0, 420),
("prefix/dir/b", EntryType::Regular, 3, 420),
("prefix/dir/subdir/exe", EntryType::Regular, 0, 493),
("prefix/extra-file", EntryType::Regular, 21, 420),
(
"prefix/extra-exe",
EntryType::Regular,
0,
if cfg!(windows) { 420 } else { 493 }
),
("prefix/extra-dir-empty", EntryType::Directory, 0, 420),
("prefix/extra-dir/symlink-to-extra", EntryType::Symlink, 0, 420)
]
.into_iter()
.map(|(path, b, c, d)| (bstr::BStr::new(path).to_owned(), b, c, d))
.collect::<Vec<_>>()
);
Ok(())
})
}
#[test]
#[cfg(feature = "tar_gz")]
fn basic_usage_tar_gz() -> gix_testtools::Result {
basic_usage(
gix_archive::Format::TarGz {
compression_level: Some(9),
},
|buf| {
assert!(
buf.len() < 340,
"quite a bit smaller than uncompressed: {} < 340",
buf.len()
);
Ok(())
},
)
}
#[test]
#[cfg(feature = "zip")]
fn basic_usage_zip() -> gix_testtools::Result {
basic_usage(
gix_archive::Format::Zip {
compression_level: Some(9),
},
|buf| {
assert!(
buf.len() < 1400,
"much bigger than uncompressed for some reason (565): {} < 1400",
buf.len()
);
let ar = rawzip::ZipArchive::from_slice(buf.as_slice())?;
assert_eq!(
{
let mut n: Vec<_> = Vec::new();
for entry_result in ar.entries() {
let entry = entry_result?;
n.push(String::from_utf8_lossy(entry.file_path().as_ref()).to_string());
}
n.sort();
n
},
&[
"prefix/.gitattributes",
"prefix/a",
"prefix/dir/b",
"prefix/dir/subdir/exe",
"prefix/extra-dir-empty/",
"prefix/extra-dir/symlink-to-extra",
"prefix/extra-exe",
"prefix/extra-file",
"prefix/symlink-to-a"
]
);
// assertions for the symlink entry.
let ar = rawzip::ZipArchive::from_slice(buf.as_slice())?;
let mut found_link = false;
for entry_result in ar.entries() {
let entry = entry_result?;
if String::from_utf8_lossy(entry.file_path().as_ref()) == "prefix/symlink-to-a" {
let mode = entry.mode();
assert!(mode.is_symlink(), "symlinks are supported as well, but only on Unix");
assert_eq!(mode.value(), 0o120644, "the mode specifies what it should be");
let wayfinder = entry.wayfinder();
let zip_entry = ar.get_entry(wayfinder)?;
let data = zip_entry.data();
assert_eq!(
data.as_bstr(),
"a",
"For symlinks stored with Store compression, the data is uncompressed"
);
found_link = true;
break;
}
}
assert!(found_link, "symlink entry should be found");
Ok(())
},
)
}
    /// Run the archive pipeline for `format` over the `basic.sh` fixture and hand the
    /// resulting archive bytes to `make_assertion`.
    ///
    /// Besides the committed tree, four extra entries are streamed from the worktree
    /// via `add_entry_from_path()`.
    fn basic_usage(
        format: gix_archive::Format,
        make_assertion: impl FnOnce(Vec<u8>) -> gix_testtools::Result,
    ) -> gix_testtools::Result {
        let (dir, head_tree, odb, mut cache) = basic()?;
        let mut stream = gix_worktree_stream::from_tree(
            head_tree,
            odb.clone(),
            noop_pipeline(),
            // Attribute lookup so `export-ignore` entries are excluded from the stream.
            move |rela_path, mode, attrs| {
                cache
                    .at_entry(rela_path, Some(mode.into()), &odb)
                    .map(|entry| entry.matching_attributes(attrs))
                    .map(|_| ())
            },
        );
        stream
            .add_entry_from_path(&dir, &dir.join("extra-file"))?
            .add_entry_from_path(&dir, &dir.join("extra-exe"))?
            .add_entry_from_path(&dir, &dir.join("extra-dir-empty"))?
            .add_entry_from_path(&dir, &dir.join("extra-dir").join("symlink-to-extra"))?;
        let mut buf = Vec::new();
        if format == Format::InternalTransientNonPersistable {
            // The internal format is read directly - `write_stream()` rejects it.
            std::io::copy(&mut stream.into_read(), &mut buf)?;
        } else {
            if matches!(format, Format::Zip { .. }) {
                // zip requires a seekable writer, hence the `Cursor`.
                gix_archive::write_stream_seek(
                    &mut stream,
                    |s| s.next_entry().or_erased(),
                    std::io::Cursor::new(&mut buf),
                    gix_archive::Options {
                        format,
                        tree_prefix: Some("prefix/".into()),
                        modification_time: 1820000000, // needs to be within a certain bound to be a valid MSDos time!
                    },
                )
                .map_err(Exn::into_error)?;
            } else {
                gix_archive::write_stream(
                    &mut stream,
                    |s| s.next_entry().or_erased(),
                    &mut buf,
                    gix_archive::Options {
                        format,
                        tree_prefix: Some("prefix/".into()),
                        modification_time: 120,
                    },
                )
                .map_err(Exn::into_error)?;
            }
            assert!(
                stream.next_entry().map_err(Exn::into_error)?.is_none(),
                "stream is exhausted, all written to buf"
            );
        }
        make_assertion(buf).expect("all tests pass");
        Ok(())
    }
    /// Prepare the `basic.sh` fixture: return its directory, the id of the tree to archive,
    /// an object database handle, and an attribute stack for `export-ignore` lookups.
    fn basic() -> gix_testtools::Result<(PathBuf, gix_hash::ObjectId, gix_odb::HandleArc, gix_worktree::Stack)> {
        let dir = gix_testtools::scripted_fixture_read_only("basic.sh")?;
        // The fixture script wrote the tree id to `head.hex`.
        let head = {
            let hex = std::fs::read(dir.join("head.hex"))?;
            gix_hash::ObjectId::from_hex(hex.trim())?
        };
        let odb = gix_odb::at(dir.join(".git").join("objects"))?;
        let mut collection = Default::default();
        let mut buf = Default::default();
        let attributes = gix_worktree::stack::state::Attributes::new(
            gix_attributes::Search::new_globals(None::<PathBuf>, &mut buf, &mut collection)?,
            None,
            Source::WorktreeThenIdMapping,
            collection,
        );
        let state = gix_worktree::stack::State::AttributesStack(attributes);
        let cache = gix_worktree::Stack::new(&dir, state, Case::Sensitive, Default::default(), Default::default());
        Ok((dir, head, odb.into_arc()?, cache))
    }
fn noop_pipeline() -> gix_filter::Pipeline {
gix_filter::Pipeline::new(Default::default(), Default::default())
}
}

View File

@@ -0,0 +1,31 @@
#!/usr/bin/env bash
set -eu -o pipefail
# Fixture for the archive tests: a repository with tracked content, export-ignored
# content, and extra untracked entries that tests stream in manually.
# NOTE(review): gix-testtools presumably keys the cached fixture archive (basic.tar) on
# this script's content, so any edit - even a comment - regenerates fixtures; confirm.
git init
# Content excluded from archives via the export-ignore attributes written below.
mkdir dir-ignored
touch dir-ignored/file-ignored-transitively
touch file-ignored
# Tracked content: a file, a nested file, an executable and a symlink.
echo "hi" > a
mkdir dir
echo "ho" > dir/b
mkdir dir/subdir
touch dir/subdir/exe
chmod +x dir/subdir/exe
ln -s a symlink-to-a
echo "/dir-ignored/ export-ignore" > .gitattributes
echo "/file-ignored export-ignore" >> .gitattributes
git add .
git update-index --chmod=+x dir/subdir/exe # For Windows.
git commit -m "init"
# Untracked content that tests add to the stream via `add_entry_from_path()`.
echo "extra to be streamed" > extra-file
touch extra-exe && chmod +x extra-exe
mkdir extra-dir-empty extra-dir
ln -s ../extra-file extra-dir/symlink-to-extra
# Record the id of the tree to archive.
git rev-parse @^{tree} > head.hex

View File

@@ -0,0 +1 @@
basic.tar