X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/917832191acb189705762e13aab3b7c5e6e26653..4dc77823d360158d6870a5710cc8c17064f1308f:/apt-inst/contrib/extracttar.cc?ds=sidebyside

diff --git a/apt-inst/contrib/extracttar.cc b/apt-inst/contrib/extracttar.cc
index 68c871a5d..2c86d0d01 100644
--- a/apt-inst/contrib/extracttar.cc
+++ b/apt-inst/contrib/extracttar.cc
@@ -6,7 +6,7 @@
    Extract a Tar - Tar Extractor
 
    Some performance measurements showed that zlib performed quite poorly
-   in comparision to a forked gzip process. This tar extractor makes use
+   in comparison to a forked gzip process. This tar extractor makes use
    of the fact that dup'd file descriptors have the same seek pointer
    and that gzip will not read past the end of a compressed stream, even
    if there is more data. We use the dup property to track extraction
@@ -16,18 +16,23 @@
    ##################################################################### */
 									/*}}}*/
 // Include Files							/*{{{*/
-#include 
+#include 
+#include 
+#include 
 
 #include 
 #include 
 #include 
-#include 
+#include 
 
-#include 
+#include 
+#include 
+#include 
 #include 
 #include 
 #include 
 #include 
+
 #include 
 									/*}}}*/
 
@@ -55,12 +60,10 @@ struct ExtractTar::TarHeader
 // ExtractTar::ExtractTar - Constructor					/*{{{*/
 // ---------------------------------------------------------------------
 /* */
-ExtractTar::ExtractTar(FileFd &Fd,unsigned long Max,string DecompressionProgram) : File(Fd),
-		       MaxInSize(Max), DecompressProg(DecompressionProgram)
-
+ExtractTar::ExtractTar(FileFd &Fd,unsigned long long Max,string DecompressionProgram)
+   : File(Fd), MaxInSize(Max), DecompressProg(DecompressionProgram)
 {
    GZPid = -1;
-   InFd = -1;
    Eof = false;
 }
 									/*}}}*/
@@ -109,10 +112,16 @@ bool ExtractTar::Done(bool Force)
    gzip will efficiently ignore the extra bits. */
 bool ExtractTar::StartGzip()
 {
+   if (DecompressProg.empty())
+   {
+      InFd.OpenDescriptor(File.Fd(), FileFd::ReadOnly, FileFd::None, false);
+      return true;
+   }
+
    int Pipes[2];
    if (pipe(Pipes) != 0)
      return _error->Errno("pipe",_("Failed to create pipes"));
-   
+
    // Fork off the process
    GZPid = ExecFork();
 
@@ -128,9 +137,9 @@ bool ExtractTar::StartGzip()
      dup2(Fd,STDERR_FILENO);
      close(Fd);
      SetCloseExec(STDOUT_FILENO,false);
-      SetCloseExec(STDIN_FILENO,false);      
+      SetCloseExec(STDIN_FILENO,false);
      SetCloseExec(STDERR_FILENO,false);
-      
+
      const char *Args[3];
      string confvar = string("dir::bin::") + DecompressProg;
      string argv0 = _config->Find(confvar.c_str(),DecompressProg.c_str());
@@ -143,7 +152,7 @@ bool ExtractTar::StartGzip()
    }
 
    // Fix up our FDs
-   InFd.Fd(Pipes[0]);
+   InFd.OpenDescriptor(Pipes[0], FileFd::ReadOnly, FileFd::None, true);
    close(Pipes[1]);
    return true;
 }
@@ -159,8 +168,8 @@ bool ExtractTar::Go(pkgDirStream &Stream)
      return false;
 
   // Loop over all blocks
-   string LastLongLink;
-   string LastLongName;
+   string LastLongLink, ItemLink;
+   string LastLongName, ItemName;
   while (1)
   {
      bool BadRecord = false;
@@ -195,32 +204,34 @@ bool ExtractTar::Go(pkgDirStream &Stream)
      // Decode all of the fields
      pkgDirStream::Item Itm;
      if (StrToNum(Tar->Mode,Itm.Mode,sizeof(Tar->Mode),8) == false ||
-	  StrToNum(Tar->UserID,Itm.UID,sizeof(Tar->UserID),8) == false ||
-	  StrToNum(Tar->GroupID,Itm.GID,sizeof(Tar->GroupID),8) == false ||
-	  StrToNum(Tar->Size,Itm.Size,sizeof(Tar->Size),8) == false ||
-	  StrToNum(Tar->MTime,Itm.MTime,sizeof(Tar->MTime),8) == false ||
+	  (Base256ToNum(Tar->UserID,Itm.UID,8) == false &&
+	   StrToNum(Tar->UserID,Itm.UID,sizeof(Tar->UserID),8) == false) ||
+	  (Base256ToNum(Tar->GroupID,Itm.GID,8) == false &&
+	   StrToNum(Tar->GroupID,Itm.GID,sizeof(Tar->GroupID),8) == false) ||
+	  (Base256ToNum(Tar->Size,Itm.Size,12) == false &&
+	   StrToNum(Tar->Size,Itm.Size,sizeof(Tar->Size),8) == false) ||
+	  (Base256ToNum(Tar->MTime,Itm.MTime,12) == false &&
+	   StrToNum(Tar->MTime,Itm.MTime,sizeof(Tar->MTime),8) == false) ||
	  StrToNum(Tar->Major,Itm.Major,sizeof(Tar->Major),8) == false ||
	  StrToNum(Tar->Minor,Itm.Minor,sizeof(Tar->Minor),8) == false)
	 return _error->Error(_("Corrupted archive"));
-      
-      // Grab the filename
+
+      // Grab the filename and link target: use last long name if one was
+      // set, otherwise use the header value as-is, but remember that it may
+      // fill the entire 100-byte block and needs to be zero-terminated.
+      // See Debian Bug #689582.
      if (LastLongName.empty() == false)
	 Itm.Name = (char *)LastLongName.c_str();
      else
-      {
-	 Tar->Name[sizeof(Tar->Name)] = 0;
-	 Itm.Name = Tar->Name;
-      }
+	 Itm.Name = (char *)ItemName.assign(Tar->Name, sizeof(Tar->Name)).c_str();
      if (Itm.Name[0] == '.' && Itm.Name[1] == '/' && Itm.Name[2] != 0)
	 Itm.Name += 2;
-      
-      // Grab the link target
-      Tar->Name[sizeof(Tar->LinkName)] = 0;
-      Itm.LinkTarget = Tar->LinkName;
 
      if (LastLongLink.empty() == false)
	 Itm.LinkTarget = (char *)LastLongLink.c_str();
-      
+      else
+	 Itm.LinkTarget = (char *)ItemLink.assign(Tar->LinkName, sizeof(Tar->LinkName)).c_str();
+
      // Convert the type over
      switch (Tar->LinkFlag)
      {
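The largest functional change in the hunk above is the header-field decoding: the old code accepted only the octal ASCII form, while the new code first tries GNU tar's base-256 encoding (used when a value such as a file size no longer fits the 8- or 12-byte octal field, e.g. members of 8 GiB and larger) and only falls back to octal if that fails. A minimal sketch of the decoding rule, with an assumed helper name rather than apt's own Base256ToNum/StrToNum, could look like this:

#include <cstddef>
#include <cstdlib>
#include <cstring>

// Decode a tar numeric header field: base-256 if the first byte carries the
// 0x80 marker bit (a GNU extension for values too large for octal),
// otherwise octal ASCII terminated by a space or NUL.
static bool DecodeTarNumber(const char *Field, size_t Len, unsigned long long &Out)
{
   if (Len == 0)
      return false;

   if (static_cast<unsigned char>(Field[0]) & 0x80)
   {
      // Base-256: strip the marker bit, then read the remaining bytes
      // as a big-endian binary number.
      Out = static_cast<unsigned char>(Field[0]) & 0x7F;
      for (size_t I = 1; I < Len; ++I)
      {
         if (Out > (~0ULL >> 8))
            return false;                        // would overflow
         Out = (Out << 8) | static_cast<unsigned char>(Field[I]);
      }
      return true;
   }

   // Octal: copy into a NUL-terminated buffer so strtoull can parse it.
   char Buf[32];
   if (Len >= sizeof(Buf))
      return false;
   std::memcpy(Buf, Field, Len);
   Buf[Len] = '\0';
   char *End = 0;
   Out = std::strtoull(Buf, &End, 8);
   return End != Buf;
}

Since the marker bit on the first byte is what distinguishes the two forms, trying the base-256 parse first and falling back to octal, as the hunk does, is safe; presumably Base256ToNum reports failure when the marker is absent.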
@@ -255,7 +266,7 @@ bool ExtractTar::Go(pkgDirStream &Stream)
 
	 case GNU_LongLink:
	 {
-	    unsigned long Length = Itm.Size;
+	    unsigned long long Length = Itm.Size;
	    unsigned char Block[512];
	    while (Length > 0)
	    {
@@ -274,7 +285,7 @@ bool ExtractTar::Go(pkgDirStream &Stream)
 
	 case GNU_LongName:
	 {
-	    unsigned long Length = Itm.Size;
+	    unsigned long long Length = Itm.Size;
	    unsigned char Block[512];
	    while (Length > 0)
	    {
@@ -303,11 +314,11 @@ bool ExtractTar::Go(pkgDirStream &Stream)
	 return false;
 
      // Copy the file over the FD
-      unsigned long Size = Itm.Size;
+      unsigned long long Size = Itm.Size;
      while (Size != 0)
      {
	 unsigned char Junk[32*1024];
-	 unsigned long Read = min(Size,(unsigned long)sizeof(Junk));
+	 unsigned long Read = min(Size, (unsigned long long)sizeof(Junk));
	 if (InFd.Read(Junk,((Read+511)/512)*512) == false)
	    return false;
 
@@ -332,7 +343,7 @@ bool ExtractTar::Go(pkgDirStream &Stream)
      }
 
      // And finish up
-      if (Itm.Size >= 0 && BadRecord == false)
+      if (BadRecord == false)
	 if (Stream.FinishedFile(Itm,Fd) == false)
	    return false;
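The last hunks are the copy loop, which leans on tar's fixed block size: a member's data is Size bytes long but is stored padded out to whole 512-byte blocks, which is what the ((Read+511)/512)*512 rounding accounts for, and the switch to unsigned long long keeps that arithmetic correct for very large members. A standalone sketch of the same pattern on plain POSIX descriptors (names such as CopyTarPayload are illustrative, not apt's FileFd/pkgDirStream interfaces):

#include <algorithm>
#include <unistd.h>

// Read exactly Want bytes; pipes (such as the gzip pipe set up earlier)
// may legitimately return short reads, so loop until everything arrives.
static bool ReadAll(int Fd, unsigned char *Buf, size_t Want)
{
   while (Want != 0)
   {
      ssize_t Got = read(Fd, Buf, Want);
      if (Got <= 0)
         return false;
      Buf += Got;
      Want -= static_cast<size_t>(Got);
   }
   return true;
}

// Copy Size bytes of member data from the tar stream to OutFd, always
// consuming whole 512-byte blocks and dropping the padding at the end.
static bool CopyTarPayload(int TarFd, int OutFd, unsigned long long Size)
{
   unsigned char Junk[32 * 1024];            // a multiple of 512
   while (Size != 0)
   {
      unsigned long long Read = std::min(Size, (unsigned long long)sizeof(Junk));
      if (ReadAll(TarFd, Junk, ((Read + 511) / 512) * 512) == false)
         return false;
      if (write(OutFd, Junk, Read) != static_cast<ssize_t>(Read))
         return false;
      Size -= Read;
   }
   return true;
}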