mirror of https://github.com/ouch-org/ouch.git
synced 2025-07-18 23:50:35 +00:00

feat: add decompressing support for squashfs

parent bcdff0f46b
commit 87cf8529f2
@@ -1,8 +1,19 @@
-use std::path::Path;
+use std::{
+    fs,
+    io::{self, BufWriter, Write},
+    path::Path,
+};
 
-use backhand::{FilesystemReader, InnerNode};
+use backhand::{FilesystemReader, InnerNode, SquashfsFileReader};
+use filetime_creation::{set_file_handle_times, set_file_mtime, FileTime};
 
-use crate::list::FileInArchive;
+use crate::{
+    list::FileInArchive,
+    utils::{
+        logger::{info, warning},
+        Bytes,
+    },
+};
 
 pub fn list_archive<'a>(archive: FilesystemReader<'a>) -> impl Iterator<Item = crate::Result<FileInArchive>> + 'a {
     archive.root.nodes.into_iter().filter_map(move |f| {
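Note: the new filetime_creation import carries the timestamp handling used by unpack_archive below. A minimal standalone sketch of the same set_file_handle_times(..., None, Some(mtime), None) call this commit adds; the stamp helper and its arguments are hypothetical:

use std::fs::File;

use filetime_creation::{set_file_handle_times, FileTime};

// Hypothetical helper: squashfs stores whole-second mtimes, so the
// nanosecond part is zero; atime and creation time are passed as None
// and left untouched.
fn stamp(file: &File, unix_secs: i64) -> std::io::Result<()> {
    let mtime = FileTime::from_unix_time(unix_secs, 0);
    set_file_handle_times(file, None, Some(mtime), None)
}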
@@ -21,3 +32,102 @@ pub fn list_archive<'a>(archive: FilesystemReader<'a>) -> impl Iterator<Item = c
         }))
     })
 }
+
+pub fn unpack_archive(archive: FilesystemReader<'_>, output_folder: &Path, quiet: bool) -> crate::Result<usize> {
+    let mut unpacked_files = 0usize;
+
+    for f in archive.files() {
+        // `output_folder` should already be created.
+        if f.fullpath == Path::new("/") {
+            continue;
+        }
+
+        let relative_path = f.fullpath.strip_prefix("/").expect("paths must be absolute");
+        let file_path = output_folder.join(relative_path);
+
+        let mtime = FileTime::from_unix_time(f.header.mtime.into(), 0);
+
+        let warn_ignored = |inode_type: &str| {
+            warning(format!("ignored {inode_type} in archive {relative_path:?}"));
+        };
+
+        match &f.inner {
+            InnerNode::Dir(_) => {
+                if !quiet {
+                    info(format!("extracting directory {file_path:?}"));
+                }
+                fs::create_dir(&file_path)?;
+                // Directory mtime is not recovered. It will be overwritten by
+                // the creation of inner files. We would need a second pass to do so.
+            }
+            InnerNode::File(file) => {
+                if !quiet {
+                    let file_size = Bytes::new(match file {
+                        SquashfsFileReader::Basic(f) => f.file_size.into(),
+                        SquashfsFileReader::Extended(f) => f.file_size,
+                    });
+                    info(format!("extracting file ({file_size}) {file_path:?}"));
+                }
+
+                let mut reader = archive.file(file).reader();
+                let output_file = fs::File::create(&file_path)?;
+                let mut output_file = BufWriter::new(output_file);
+                io::copy(&mut reader, &mut output_file)?;
+                output_file.flush()?;
+                set_file_handle_times(output_file.get_ref(), None, Some(mtime), None)?;
+            }
+            InnerNode::Symlink(symlink) => {
+                if !quiet {
+                    info(format!("extracting symlink {file_path:?}"));
+                }
+
+                let target = &symlink.link;
+                #[cfg(unix)]
+                {
+                    std::os::unix::fs::symlink(&target, &file_path)?;
+                    filetime_creation::set_symlink_file_times(&file_path, mtime, mtime, mtime)?;
+                    // Note: Symlink permissions are ignored on *NIX anyway. No need to set them.
+                }
+
+                #[cfg(windows)]
+                std::os::windows::fs::symlink_file(&target, &file_path)?;
+
+                // Symlink mtime is specially handled above. Skip the normal handler.
+                unpacked_files += 1;
+                continue;
+            }
+
+            // TODO: Named pipes and sockets *CAN* be created by unprivileged users.
+            // Should we extract them by default?
+            InnerNode::NamedPipe => {
+                warn_ignored("named pipe");
+                continue;
+            }
+            InnerNode::Socket => {
+                warn_ignored("socket");
+                continue;
+            }
+
+            // Not possible without root permission.
+            InnerNode::CharacterDevice(_) => {
+                warn_ignored("character device");
+                continue;
+            }
+            InnerNode::BlockDevice(_) => {
+                warn_ignored("block device");
+                continue;
+            }
+        }
+
+        #[cfg(unix)]
+        {
+            use std::os::unix::fs::PermissionsExt;
+
+            fs::set_permissions(&file_path, fs::Permissions::from_mode(f.header.permissions.into()))?;
+        }
+
+        unpacked_files += 1;
+    }
+
+    Ok(unpacked_files)
+}
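For context, a minimal sketch of driving the new function outside decompress_file; the extract_sqfs helper and its arguments are hypothetical, while FilesystemReader::from_reader and unpack_archive are the calls this commit adds:

use std::{fs::File, io::BufReader, path::Path};

use backhand::FilesystemReader;

// Hypothetical crate-internal driver: backhand wants a buffered, seekable
// source (its `BufReadSeek` bound), so the file is wrapped in a BufReader.
fn extract_sqfs(input: &Path, output_folder: &Path) -> crate::Result<usize> {
    let reader = BufReader::new(File::open(input)?);
    let archive = FilesystemReader::from_reader(reader)?;
    // `output_folder` must already exist; unpack_archive skips the "/" root.
    crate::archive::squashfs::unpack_archive(archive, output_folder, false)
}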
@@ -4,6 +4,7 @@ use std::{
     path::{Path, PathBuf},
 };
 
+use backhand::BufReadSeek;
 use fs_err as fs;
 
 #[cfg(not(feature = "bzip3"))]
@@ -25,9 +26,6 @@ use crate::{
     QuestionAction, QuestionPolicy, BUFFER_CAPACITY,
 };
 
-trait ReadSeek: Read + io::Seek {}
-impl<T: Read + io::Seek> ReadSeek for T {}
-
 pub struct DecompressOptions<'a> {
     pub input_file_path: &'a Path,
     pub formats: Vec<Extension>,
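Note: the deleted ReadSeek helper existed only because a trait object may name a single non-auto trait, so Box<dyn Read + io::Seek> is not a valid type. backhand's BufReadSeek (roughly BufRead + Seek as supertraits) fills the same role and is what FilesystemReader::from_reader expects. A minimal sketch of the pattern, using an illustrative name rather than the crate's own:

use std::io::{Read, Seek};

// `dyn Read + Seek` is rejected by the compiler, so the workaround is a
// blanket-implemented super-trait that any Read + Seek type satisfies.
trait ReadSeekDemo: Read + Seek {}
impl<T: Read + Seek> ReadSeekDemo for T {}

fn open_boxed(buf: Vec<u8>) -> Box<dyn ReadSeekDemo> {
    // A Cursor over owned bytes is both Read and Seek.
    Box::new(std::io::Cursor::new(buf))
}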
@@ -59,21 +57,32 @@ pub fn decompress_file(options: DecompressOptions) -> crate::Result<()> {
     //
     // Any other Zip decompression done can take up the whole RAM and freeze ouch.
     if let [Extension {
-        compression_formats: [Zip],
+        compression_formats: [archive_format @ (Zip | Squashfs)],
         ..
     }] = options.formats.as_slice()
     {
+        let is_zip = matches!(archive_format, Zip);
+
         let mut vec = vec![];
-        let reader: Box<dyn ReadSeek> = if input_is_stdin {
-            warn_user_about_loading_in_memory(".zip");
+        let reader: Box<dyn BufReadSeek> = if input_is_stdin {
+            warn_user_about_loading_in_memory(if is_zip { ".zip" } else { ".sqfs" });
             io::copy(&mut io::stdin(), &mut vec)?;
             Box::new(io::Cursor::new(vec))
         } else {
-            Box::new(fs::File::open(options.input_file_path)?)
+            let file = fs::File::open(options.input_file_path)?;
+            let file = BufReader::new(file);
+            Box::new(file)
         };
-        let zip_archive = zip::ZipArchive::new(reader)?;
         let files_unpacked = if let ControlFlow::Continue(files) = execute_decompression(
-            |output_dir| crate::archive::zip::unpack_archive(zip_archive, output_dir, options.password, options.quiet),
+            |output_dir| {
+                if is_zip {
+                    let zip_archive = zip::ZipArchive::new(reader)?;
+                    crate::archive::zip::unpack_archive(zip_archive, output_dir, options.password, options.quiet)
+                } else {
+                    let archive = backhand::FilesystemReader::from_reader(reader)?;
+                    crate::archive::squashfs::unpack_archive(archive, output_dir, options.quiet)
+                }
+            },
             options.output_dir,
             &options.output_file_path,
             options.question_policy,
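Note: both readers need Seek (zip's central directory sits at the end of the archive, and squashfs locates its metadata tables by offsets from the superblock), and stdin cannot seek. So the stdin path buffers the whole input into memory, hence the warning, while the file path now streams through a BufReader instead of being read up front. A self-contained sketch of the stdin half, with an illustrative seekable_stdin name:

use std::io::{self, Cursor, Read};

// Illustrative only: restore Seek for non-seekable stdin by reading it
// fully into memory, which is exactly why ouch warns before doing this.
fn seekable_stdin() -> io::Result<Cursor<Vec<u8>>> {
    let mut buf = Vec::new();
    io::stdin().read_to_end(&mut buf)?;
    Ok(Cursor::new(buf))
}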
@@ -174,14 +183,15 @@ pub fn decompress_file(options: DecompressOptions) -> crate::Result<()> {
                 return Ok(());
             }
         }
-        Squashfs => todo!(),
-        Zip => {
+        Zip | Squashfs => {
+            let is_zip = matches!(first_extension, Zip);
+
             if options.formats.len() > 1 {
                 // Locking necessary to guarantee that warning and question
                 // messages stay adjacent
                 let _locks = lock_and_flush_output_stdio();
 
-                warn_user_about_loading_in_memory(".zip");
+                warn_user_about_loading_in_memory(if is_zip { ".zip" } else { ".sqfs" });
                 if !user_wants_to_continue(
                     options.input_file_path,
                     options.question_policy,
@@ -193,11 +203,17 @@ pub fn decompress_file(options: DecompressOptions) -> crate::Result<()> {
 
             let mut vec = vec![];
             io::copy(&mut reader, &mut vec)?;
-            let zip_archive = zip::ZipArchive::new(io::Cursor::new(vec))?;
+            let reader = io::Cursor::new(vec);
+
             if let ControlFlow::Continue(files) = execute_decompression(
-                |output_dir| {
-                    crate::archive::zip::unpack_archive(zip_archive, output_dir, options.password, options.quiet)
+                move |output_dir| {
+                    if is_zip {
+                        let archive = zip::ZipArchive::new(reader)?;
+                        crate::archive::zip::unpack_archive(archive, output_dir, options.password, options.quiet)
+                    } else {
+                        let archive = backhand::FilesystemReader::from_reader(reader)?;
+                        crate::archive::squashfs::unpack_archive(archive, output_dir, options.quiet)
+                    }
                 },
                 options.output_dir,
                 &options.output_file_path,