#include <signal.h>
#include <errno.h>
#include <glob.h>
+#include <pwd.h>
+#include <grp.h>
#include <set>
#include <algorithm>
#include <endian.h>
#include <stdint.h>
+#if __gnu_linux__
+#include <sys/prctl.h>
+#endif
+
#include <apti18n.h>
/*}}}*/
return Dir + '/' + File;
}
/*}}}*/
+// flAbsPath - Return the absolute path of the filename /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+string flAbsPath(string File)
+{
+ char *p = realpath(File.c_str(), NULL);
+ if (p == NULL)
+ {
+ _error->Errno("realpath", "flAbsPath failed");
+ return "";
+ }
+ std::string AbsPath(p);
+ free(p);
+ return AbsPath;
+}
+ /*}}}*/
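+// Usage sketch (hypothetical caller, not part of this change): realpath(3)
+// requires the file to exist, so an empty result means _error holds the cause.
+//    std::string const abs = flAbsPath("./lists/partial");
+//    if (abs.empty() == true)
+//       return _error->Error("resolving the path failed");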
// SetCloseExec - Set the close on exec flag /*{{{*/
// ---------------------------------------------------------------------
/* */
signal(SIGCONT,SIG_DFL);
signal(SIGTSTP,SIG_DFL);
+ long ScOpenMax = sysconf(_SC_OPEN_MAX);
// Close all of our FDs - just in case
- for (int K = 3; K != sysconf(_SC_OPEN_MAX); K++)
+ for (int K = 3; K != ScOpenMax; K++)
{
if(KeepFDs.find(K) == KeepFDs.end())
fcntl(K,F_SETFD,FD_CLOEXEC);
return true;
}
/*}}}*/
+// StartsWithGPGClearTextSignature - Check if a file is PGP/GPG clearsigned /*{{{*/
+bool StartsWithGPGClearTextSignature(string const &FileName)
+{
+ static const char* SIGMSG = "-----BEGIN PGP SIGNED MESSAGE-----\n";
+ char buffer[strlen(SIGMSG)+1];
+ FILE* gpg = fopen(FileName.c_str(), "r");
+ if (gpg == NULL)
+ return false;
+
+ char const * const test = fgets(buffer, sizeof(buffer), gpg);
+ fclose(gpg);
+ if (test == NULL || strcmp(buffer, SIGMSG) != 0)
+ return false;
+
+ return true;
+}
+ /*}}}*/
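+// Usage sketch (hypothetical, for illustration): pick the code path for a
+// Release file depending on whether it carries an inline signature.
+//    if (StartsWithGPGClearTextSignature(File) == true)
+//       ; // handle as InRelease-style clearsigned file
+//    else
+//       ; // expect a detached Release.gpg signature instead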
+// ChangeOwnerAndPermissionOfFile - set file attributes to requested values /*{{{*/
+bool ChangeOwnerAndPermissionOfFile(char const * const requester, char const * const file, char const * const user, char const * const group, mode_t const mode)
+{
+ if (strcmp(file, "/dev/null") == 0)
+ return true;
+ bool Res = true;
+ if (getuid() == 0 && strlen(user) != 0 && strlen(group) != 0) // if we aren't root, we can't chown, so don't try it
+ {
+ // ensure the file is owned by root and has good permissions
+ struct passwd const * const pw = getpwnam(user);
+ struct group const * const gr = getgrnam(group);
+ if (pw != NULL && gr != NULL && chown(file, pw->pw_uid, gr->gr_gid) != 0)
+ Res &= _error->WarningE(requester, "chown to %s:%s of file %s failed", user, group, file);
+ }
+ if (chmod(file, mode) != 0)
+ Res &= _error->WarningE(requester, "chmod 0%o of file %s failed", mode, file);
+ return Res;
+}
+ /*}}}*/
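+// Usage sketch (hypothetical values): hand a partial file to the _apt user
+// with owner-only permissions; the requester string only tags the warnings.
+//    ChangeOwnerAndPermissionOfFile("Item::QueueURI", DestFile.c_str(),
+//                                   "_apt", "root", 0600);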
class FileFdPrivate { /*{{{*/
public:
bool eof;
bool compressing;
- LZMAFILE() : file(NULL), eof(false), compressing(false) {}
+ LZMAFILE() : file(NULL), eof(false), compressing(false) { buffer[0] = '\0'; }
~LZMAFILE() {
if (compressing == true)
{
int err;
char const * const errmsg = BZ2_bzerror(d->bz2, &err);
if (err != BZ_IO_ERROR)
- return FileFdError("BZ2_bzread: %s (%d: %s)", _("Read error"), err, errmsg);
+ return FileFdError("BZ2_bzread: %s %s (%d: %s)", FileName.c_str(), _("Read error"), err, errmsg);
}
#endif
#ifdef HAVE_LZMA
{
if ((Flags & Compressed) != Compressed && iFd > 0 && close(iFd) != 0)
Res &= _error->Errno("close",_("Problem closing the file %s"), FileName.c_str());
-
if (d != NULL)
{
Res &= d->CloseDown(FileName);
#endif
}
-
-// Glob - wrapper around "glob()" /*{{{*/
-// ---------------------------------------------------------------------
-/* */
+// Glob - wrapper around "glob()" /*{{{*/
std::vector<std::string> Glob(std::string const &pattern, int flags)
{
std::vector<std::string> result;
return result;
}
/*}}}*/
-
-std::string GetTempDir()
+std::string GetTempDir() /*{{{*/
{
const char *tmpdir = getenv("TMPDIR");
tmpdir = P_tmpdir;
#endif
- // check that tmpdir is set and exists
struct stat st;
- if (!tmpdir || strlen(tmpdir) == 0 || stat(tmpdir, &st) != 0)
+ if (!tmpdir || strlen(tmpdir) == 0 || // tmpdir is set
+ stat(tmpdir, &st) != 0 || (st.st_mode & S_IFDIR) == 0 || // exists and is directory
+ access(tmpdir, R_OK | W_OK | X_OK) != 0 // current user has rwx access to directory
+ )
tmpdir = "/tmp";
return string(tmpdir);
}
+ /*}}}*/
+FileFd* GetTempFile(std::string const &Prefix, bool ImmediateUnlink) /*{{{*/
+{
+ char fn[512];
+ FileFd *Fd = new FileFd();
+
+ std::string tempdir = GetTempDir();
+ snprintf(fn, sizeof(fn), "%s/%s.XXXXXX",
+ tempdir.c_str(), Prefix.c_str());
+ int fd = mkstemp(fn);
+ if(ImmediateUnlink)
+ unlink(fn);
+ if (fd < 0)
+ {
+ _error->Errno("GetTempFile",_("Unable to mkstemp %s"), fn);
+ delete Fd;
+ return NULL;
+ }
+ if (!Fd->OpenDescriptor(fd, FileFd::WriteOnly, FileFd::None, true))
+ {
+ _error->Errno("GetTempFile",_("Unable to write to %s"),fn);
+ delete Fd;
+ return NULL;
+ }
+ return Fd;
+}
+ /*}}}*/
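+// Usage sketch (hypothetical): with ImmediateUnlink=true the result is an
+// anonymous scratch file that disappears once the FileFd is closed.
+//    FileFd * const tmp = GetTempFile("apt-dump", true);
+//    if (tmp == NULL)
+//       return false;
+//    tmp->Write("data", strlen("data"));
+//    delete tmp; // closes the fd; the unlinked file is gone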
-bool Rename(std::string From, std::string To)
+bool Rename(std::string From, std::string To) /*{{{*/
{
if (rename(From.c_str(),To.c_str()) != 0)
{
_error->Error(_("rename failed, %s (%s -> %s)."),strerror(errno),
From.c_str(),To.c_str());
return false;
- }
+ }
return true;
}
+ /*}}}*/
+bool Popen(const char* Args[], FileFd &Fd, pid_t &Child, FileFd::OpenMode Mode)/*{{{*/
+{
+ int fd;
+ if (Mode != FileFd::ReadOnly && Mode != FileFd::WriteOnly)
+ return _error->Error("Popen supports ReadOnly (x)or WriteOnly mode only");
+
+ int Pipe[2] = {-1, -1};
+ if(pipe(Pipe) != 0)
+ return _error->Errno("pipe", _("Failed to create subprocess IPC"));
+
+ std::set<int> keep_fds;
+ keep_fds.insert(Pipe[0]);
+ keep_fds.insert(Pipe[1]);
+ Child = ExecFork(keep_fds);
+ if(Child < 0)
+ return _error->Errno("fork", "Failed to fork");
+ if(Child == 0)
+ {
+ if(Mode == FileFd::ReadOnly)
+ {
+ close(Pipe[0]);
+ fd = Pipe[1];
+ }
+ else if(Mode == FileFd::WriteOnly)
+ {
+ close(Pipe[1]);
+ fd = Pipe[0];
+ }
+
+ if(Mode == FileFd::ReadOnly)
+ {
+ dup2(fd, 1);
+ dup2(fd, 2);
+ } else if(Mode == FileFd::WriteOnly)
+ dup2(fd, 0);
+
+ execv(Args[0], (char**)Args);
+ _exit(100);
+ }
+ if(Mode == FileFd::ReadOnly)
+ {
+ close(Pipe[1]);
+ fd = Pipe[0];
+ } else if(Mode == FileFd::WriteOnly)
+ {
+ close(Pipe[0]);
+ fd = Pipe[1];
+ }
+ Fd.OpenDescriptor(fd, Mode, FileFd::None, true);
+
+ return true;
+}
+ /*}}}*/
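+// Usage sketch (hypothetical arguments): ReadOnly captures the child's
+// stdout+stderr; WriteOnly would feed its stdin instead. Reap with ExecWait.
+//    const char* Args[] = {"/bin/echo", "hello", NULL};
+//    FileFd PFd;
+//    pid_t Child;
+//    if (Popen(Args, PFd, Child, FileFd::ReadOnly) == true)
+//    {
+//       char Buf[64];
+//       unsigned long long Got = 0;
+//       PFd.Read(Buf, sizeof(Buf) - 1, &Got);
+//       ExecWait(Child, "echo", true);
+//    }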
+bool DropPrivileges() /*{{{*/
+{
+ if(_config->FindB("Debug::NoDropPrivs", false) == true)
+ return true;
+
+#if __gnu_linux__
+#if defined(PR_SET_NO_NEW_PRIVS) && ( PR_SET_NO_NEW_PRIVS != 38 )
+#error "PR_SET_NO_NEW_PRIVS is defined, but with a different value than expected!"
+#endif
+ // see prctl(2), needs Linux 3.5 at runtime - magic constant to avoid it at buildtime
+ int ret = prctl(38, 1, 0, 0, 0);
+ // ignore EINVAL - kernel is too old to understand the option
+ if(ret < 0 && errno != EINVAL)
+ _error->Warning("PR_SET_NO_NEW_PRIVS failed with %i", ret);
+#endif
+
+ // empty setting disables privilege dropping - this also ensures
+ // backward compatibility, see bug #764506
+ const std::string toUser = _config->Find("APT::Sandbox::User");
+ if (toUser.empty())
+ return true;
+
+ // uid will be 0 in the end, but gid might be different anyway
+ uid_t const old_uid = getuid();
+ gid_t const old_gid = getgid();
+
+ if (old_uid != 0)
+ return true;
+
+ struct passwd *pw = getpwnam(toUser.c_str());
+ if (pw == NULL)
+ return _error->Error("No user %s, can not drop rights", toUser.c_str());
+
+ // Do not change the order here, it might break things
+ if (setgroups(1, &pw->pw_gid))
+ return _error->Errno("setgroups", "Failed to setgroups");
+
+ if (setegid(pw->pw_gid) != 0)
+ return _error->Errno("setegid", "Failed to setegid");
+
+ if (setgid(pw->pw_gid) != 0)
+ return _error->Errno("setgid", "Failed to setgid");
+
+ if (setuid(pw->pw_uid) != 0)
+ return _error->Errno("setuid", "Failed to setuid");
+
+ // the seteuid() is probably unneeded (at least that's what the Linux
+ // man-page says about setuid(2)) but we cargo-culted it anyway
+ if (seteuid(pw->pw_uid) != 0)
+ return _error->Errno("seteuid", "Failed to seteuid");
+
+ // Verify that the user has only a single group, and the correct one
+ gid_t groups[1];
+ if (getgroups(1, groups) != 1)
+ return _error->Errno("getgroups", "Could not get new groups");
+ if (groups[0] != pw->pw_gid)
+ return _error->Error("Could not switch group");
+
+ // Verify that gid, egid, uid, and euid changed
+ if (getgid() != pw->pw_gid)
+ return _error->Error("Could not switch group");
+ if (getegid() != pw->pw_gid)
+ return _error->Error("Could not switch effective group");
+ if (getuid() != pw->pw_uid)
+ return _error->Error("Could not switch user");
+ if (geteuid() != pw->pw_uid)
+ return _error->Error("Could not switch effective user");
+
+#ifdef HAVE_GETRESUID
+ // verify that the saved set-user-id was changed as well
+ uid_t ruid = 0;
+ uid_t euid = 0;
+ uid_t suid = 0;
+ if (getresuid(&ruid, &euid, &suid))
+ return _error->Errno("getresuid", "Could not get saved set-user-ID");
+ if (suid != pw->pw_uid)
+ return _error->Error("Could not switch saved set-user-ID");
+#endif
+
+#ifdef HAVE_GETRESGID
+ // verify that the saved set-group-id was changed as well
+ gid_t rgid = 0;
+ gid_t egid = 0;
+ gid_t sgid = 0;
+ if (getresgid(&rgid, &egid, &sgid))
+ return _error->Errno("getresuid", "Could not get saved set-group-ID");
+ if (sgid != pw->pw_gid)
+ return _error->Error("Could not switch saved set-group-ID");
+#endif
+
+ // Check that uid and gid changes do not work anymore
+ if (pw->pw_gid != old_gid && (setgid(old_gid) != -1 || setegid(old_gid) != -1))
+ return _error->Error("Could restore a gid to root, privilege dropping did not work");
+
+ if (pw->pw_uid != old_uid && (setuid(old_uid) != -1 || seteuid(old_uid) != -1))
+ return _error->Error("Could restore a uid to root, privilege dropping did not work");
+
+ return true;
+}
+ /*}}}*/
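+// Configuration sketch: the target user comes from APT::Sandbox::User
+// ("_apt" in the default install); an empty value keeps full rights.
+//    _config->Set("APT::Sandbox::User", "_apt"); // hypothetical test setup
+//    if (DropPrivileges() == false)
+//       return _error->Error("dropping privileges failed");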
/* Whenever the structures change the major version should be bumped,
whenever the generator changes the minor version should be bumped. */
- MajorVersion = 8;
-#if (APT_PKG_MAJOR >= 4 && APT_PKG_MINOR >= 13)
- MinorVersion = 2;
-#else
- MinorVersion = 1;
-#endif
+ MajorVersion = 10;
+ MinorVersion = 0;
Dirty = false;
HeaderSz = sizeof(pkgCache::Header);
MaxDescFileSize = 0;
FileList = 0;
- StringList = 0;
+#if APT_PKG_ABI < 413
+ APT_IGNORE_DEPRECATED(StringList = 0;)
+#endif
VerSysName = 0;
Architecture = 0;
- memset(PkgHashTable,0,sizeof(PkgHashTable));
- memset(GrpHashTable,0,sizeof(GrpHashTable));
+ SetArchitectures(0);
+ SetHashTableSize(_config->FindI("APT::Cache-HashTableSize", 10 * 1048));
memset(Pools,0,sizeof(Pools));
CacheFileSize = 0;
// Cache::pkgCache - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
+APT_IGNORE_DEPRECATED_PUSH
pkgCache::pkgCache(MMap *Map, bool DoMap) : Map(*Map)
{
// call getArchitectures() with cached=false to ensure that the
if (DoMap == true)
ReMap();
}
+APT_IGNORE_DEPRECATED_POP
/*}}}*/
// Cache::ReMap - Reopen the cache file /*{{{*/
// ---------------------------------------------------------------------
DescP = (Description *)Map.Data();
ProvideP = (Provides *)Map.Data();
DepP = (Dependency *)Map.Data();
- StringItemP = (StringItem *)Map.Data();
StrP = (char *)Map.Data();
if (Errorchecks == false)
if (Map.Size() < HeaderP->CacheFileSize)
return _error->Error(_("The package cache file is corrupted, it is too small"));
+ if (HeaderP->VerSysName == 0 || HeaderP->Architecture == 0 || HeaderP->GetArchitectures() == 0)
+ return _error->Error(_("The package cache file is corrupted"));
+
// Locate our VS..
- if (HeaderP->VerSysName == 0 ||
- (VS = pkgVersioningSystem::GetVS(StrP + HeaderP->VerSysName)) == 0)
+ if ((VS = pkgVersioningSystem::GetVS(StrP + HeaderP->VerSysName)) == 0)
return _error->Error(_("This APT does not support the versioning system '%s'"),StrP + HeaderP->VerSysName);
- // Chcek the arhcitecture
- if (HeaderP->Architecture == 0 ||
- _config->Find("APT::Architecture") != StrP + HeaderP->Architecture)
- return _error->Error(_("The package cache was built for a different architecture"));
+ // Check the architecture
+ std::vector<std::string> archs = APT::Configuration::getArchitectures();
+ std::vector<std::string>::const_iterator a = archs.begin();
+ std::string list = *a;
+ for (++a; a != archs.end(); ++a)
+ list.append(",").append(*a);
+ if (_config->Find("APT::Architecture") != StrP + HeaderP->Architecture ||
+ list != StrP + HeaderP->GetArchitectures())
+ return _error->Error(_("The package cache was built for different architectures: %s vs %s"), StrP + HeaderP->GetArchitectures(), list.c_str());
+
return true;
}
/*}}}*/
/* This is used to generate the hash entries for the HashTable. With my
package list from bo this function gets 94% table usage on a 512 item
table (480 used items) */
-unsigned long pkgCache::sHash(const string &Str) const
+map_id_t pkgCache::sHash(const string &Str) const
{
unsigned long Hash = 0;
for (string::const_iterator I = Str.begin(); I != Str.end(); ++I)
Hash = 41 * Hash + tolower_ascii(*I);
- return Hash % _count(HeaderP->PkgHashTable);
+ return Hash % HeaderP->GetHashTableSize();
}
-unsigned long pkgCache::sHash(const char *Str) const
+map_id_t pkgCache::sHash(const char *Str) const
{
unsigned long Hash = tolower_ascii(*Str);
for (const char *I = Str + 1; *I != 0; ++I)
Hash = 41 * Hash + tolower_ascii(*I);
- return Hash % _count(HeaderP->PkgHashTable);
+ return Hash % HeaderP->GetHashTableSize();
}
/*}}}*/
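+// Worked example (illustrative): for "apt" the char* variant computes
+// Hash = 'a' (97), then 41*97 + 'p' (112) = 4089, then 41*4089 + 't' (116)
+// = 167765, and the bucket is 167765 % HeaderP->GetHashTableSize().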
// Cache::SingleArchFindPkg - Locate a package by name /*{{{*/
pkgCache::PkgIterator pkgCache::SingleArchFindPkg(const string &Name)
{
// Look at the hash bucket
- Package *Pkg = PkgP + HeaderP->PkgHashTable[Hash(Name)];
+ Package *Pkg = PkgP + HeaderP->PkgHashTableP()[Hash(Name)];
for (; Pkg != PkgP; Pkg = PkgP + Pkg->NextPackage)
{
- if (unlikely(Pkg->Name == 0))
- continue;
-
- int const cmp = strcasecmp(Name.c_str(), StrP + Pkg->Name);
+ int const cmp = strcmp(Name.c_str(), StrP + (GrpP + Pkg->Group)->Name);
if (cmp == 0)
return PkgIterator(*this, Pkg);
else if (cmp < 0)
pkgCache::PkgIterator pkgCache::FindPkg(const string &Name) {
size_t const found = Name.find(':');
if (found == string::npos)
- {
- if (MultiArchCache() == false)
- return SingleArchFindPkg(Name);
- else
- return FindPkg(Name, "native");
- }
+ return FindPkg(Name, "native");
string const Arch = Name.substr(found+1);
/* Beware: This is specialcased to handle pkg:any in dependencies as
these are linked to virtual pkg:any named packages with all archs.
// ---------------------------------------------------------------------
/* Returns 0 on error, pointer to the package otherwise */
pkgCache::PkgIterator pkgCache::FindPkg(const string &Name, string const &Arch) {
- if (MultiArchCache() == false && Arch != "none") {
- if (Arch == "native" || Arch == "all" || Arch == "any" ||
- Arch == NativeArch())
- return SingleArchFindPkg(Name);
- else
- return PkgIterator(*this,0);
- }
/* We make a detour via the GrpIterator here as
on a multi-arch environment a group is easier to
find than a package (less entries in the buckets) */
return GrpIterator(*this,0);
// Look at the hash bucket for the group
- Group *Grp = GrpP + HeaderP->GrpHashTable[sHash(Name)];
+ Group *Grp = GrpP + HeaderP->GrpHashTableP()[sHash(Name)];
for (; Grp != GrpP; Grp = GrpP + Grp->Next) {
- if (unlikely(Grp->Name == 0))
- continue;
-
- int const cmp = strcasecmp(Name.c_str(), StrP + Grp->Name);
+ int const cmp = strcmp(Name.c_str(), StrP + Grp->Name);
if (cmp == 0)
return GrpIterator(*this, Grp);
else if (cmp < 0)
last one we check, so we do it now. */
if (Arch == "native" || Arch == myArch || Arch == "all") {
pkgCache::Package *Pkg = Owner->PkgP + S->LastPackage;
- if (strcasecmp(myArch, Owner->StrP + Pkg->Arch) == 0)
+ if (strcmp(myArch, Owner->StrP + Pkg->Arch) == 0)
return PkgIterator(*Owner, Pkg);
Arch = myArch;
}
- /* Iterate over the list to find the matching arch
- unfortunately this list includes "package noise"
- (= different packages with same calculated hash),
- so we need to check the name also */
+ // Iterate over the list to find the matching arch
for (pkgCache::Package *Pkg = PackageList(); Pkg != Owner->PkgP;
Pkg = Owner->PkgP + Pkg->NextPackage) {
- if (S->Name == Pkg->Name &&
- stringcasecmp(Arch, Owner->StrP + Pkg->Arch) == 0)
+ if (stringcmp(Arch, Owner->StrP + Pkg->Arch) == 0)
return PkgIterator(*Owner, Pkg);
if ((Owner->PkgP + S->LastPackage) == Pkg)
break;
S = Owner->GrpP + S->Next;
// Follow the hash table
- while (S == Owner->GrpP && (HashIndex+1) < (signed)_count(Owner->HeaderP->GrpHashTable))
+ while (S == Owner->GrpP && (HashIndex+1) < (signed)Owner->HeaderP->GetHashTableSize())
{
HashIndex++;
- S = Owner->GrpP + Owner->HeaderP->GrpHashTable[HashIndex];
+ S = Owner->GrpP + Owner->HeaderP->GrpHashTableP()[HashIndex];
}
}
/*}}}*/
S = Owner->PkgP + S->NextPackage;
// Follow the hash table
- while (S == Owner->PkgP && (HashIndex+1) < (signed)_count(Owner->HeaderP->PkgHashTable))
+ while (S == Owner->PkgP && (HashIndex+1) < (signed)Owner->HeaderP->GetHashTableSize())
{
HashIndex++;
- S = Owner->PkgP + Owner->HeaderP->PkgHashTable[HashIndex];
+ S = Owner->PkgP + Owner->HeaderP->PkgHashTableP()[HashIndex];
}
}
/*}}}*/
{
pkgCache::PkgIterator const Owner = OwnerPkg();
pkgCache::PkgIterator const Parent = ParentPkg();
- if (strcmp(Owner.Arch(), Parent.Arch()) != 0 || Owner->Name == Parent->Name)
+ if (strcmp(Owner.Arch(), Parent.Arch()) != 0 || Owner.Group()->Name == Parent.Group()->Name)
return true;
return false;
}
+apt (1.1~exp8) experimental; urgency=medium
+
+ [ Michael Vogt ]
+ * merge unstable upload version 1.0.9.3
+ * Ensure /etc/apt/auth.conf has _apt:root owner
+ * Use sysconf(_SC_ARG_MAX) to find the size of Dpkg::MaxArgBytes
+ * Only support Translation-* that are listed in the {In,}Release file
+ * Call "Dequeue()" for items in AbortTransaction() to fix race
+ * prepare ABI for feature/socketpair
+ * Bump ABI to 4.15
+
+ [ David Kalnischkies ]
+ * reenable support for -s (and co) in apt-get source (Closes: 742578)
+ * run acquire transactions only once
+ * aborted reverify restores file owner and permission
+ * test if TMPDIR is accessible before using (Closes: 765951)
+ * chown finished partial files earlier
+ * promote filesize to a hashstring
+
+ -- Michael Vogt <mvo@debian.org> Thu, 06 Nov 2014 10:01:21 +0100
+
+apt (1.1~exp7) experimental; urgency=medium
+
+ [ David Kalnischkies ]
+ * don't cleanup cdrom files in apt-get update (Closes: 765458)
+ * ignore Acquire::GzipIndexes for cdrom sources
+
+ -- David Kalnischkies <david@kalnischkies.de> Wed, 15 Oct 2014 20:12:15 +0200
+
+apt (1.1~exp6) experimental; urgency=medium
+
+ [ josch ]
+ * implement the updated build profile spec
+
+ [ Michael Vogt ]
+ * methods/rsh.cc: replace strcat with std::string (Closes: #76442)
+ * Add new configallowinsecurerepositories to the test framework
+
+ [ Guillem Jover ]
+ * Update Status field values handling
+
+ [ David Kalnischkies ]
+ * don't drop privileges if _apt has not enough rights
+ * check for available space, excluding root reserved blocks
+
+ -- Michael Vogt <mvo@debian.org> Wed, 15 Oct 2014 07:47:36 +0200
+
+apt (1.1~exp5) experimental; urgency=medium
+
+ [ Michael Vogt ]
+ * Only rename StatError files in AbortTransaction()
+ * Document Acquire{MaxReleaseFileSize,AllowInsecureRepositories,
+ AllowDowngradeToInsecureRepositories} and
+ --no-allow-insecure-repositories
+ * Fix backward compatibility of the new pkgAcquireMethod::DropPrivsOrDie()
+ * Change default of Acquire::AllowInsecureRepositories to "true"
+ so that this change is less disruptive, this will be switched
+ to "false" again after jessie
+
+ [ David Kalnischkies ]
+ * remove useless pdiff filename output (Closes: 764737)
+ * make --allow-insecure-repositories message an error
+ * display a warning for unsigned repos
+ * trusted=yes sources are secure, we just don't know why
+
+ -- Michael Vogt <mvo@debian.org> Mon, 13 Oct 2014 16:15:22 +0200
+
+apt (1.1~exp4) experimental; urgency=medium
+
+ [ Michael Vogt ]
+ * Merge sid version 1.0.9.2
+ * feature/acq-trans:
+ - Make apt-get update more transactional by keeping all data from
+ a sources.list line in partial/ until all data is good and only
+ then move it into lists/ in one step
+ - add new -o Debug::Acquire::Transaction=1 debug option
+ * feature/expected-size:
+ Do not download more data in the methods than expected if we know
+ the size. For the InRelease/Release/Release.gpg add new
+ Acquire::MaxReleaseFileSize that defaults to 10Mb for now
+ * Verify the hashes of the downloaded compressed files early
+ * Only load unauthenticated data into our parsers when the user
+ explicitly asked for it via --allow-insecure-repositories
+ (Acquire::AllowInsecureRepositories)
+ * Print warning when trying to use unauthenticated repositories
+ * Use /var/empty as the homedir for _apt
+ * Revert making pkgAcquire::Item::DescURI() "const" to not break
+ API
+ * Do not allow going from an authenticated to unauthenticated repository
+ * Add missing "adduser" dependency (for the new _apt user)
+ Thanks to Russ Allbery (Closes: #763004)
+ * Test if TMPDIR is a directory in apt-key and if not unset it
+ * add early verification for the .diff/Index download
+ * Bump library version to libapt-pkg4.14
+ * Rework pkgAcqMeta{Index,Sig,ClearSig}::{Done,Failed}() for readability
+ * Ignore EINVAL from prctl(PR_SET_NO_NEW_PRIVS) (closes: 764066)
+
+ [ David Kalnischkies ]
+ * deprecate Pkg->Name in favor of Grp->Name
+ * drop stored StringItems in favor of in-memory mappings
+ * de-duplicate version strings in the cache
+ * fix progress output for (dist-)upgrade calculation
+ * move PCI::From* methods into CacheSetHelper class (Closes: 686221)
+ * add a (hidden) --quiet option for apt-key
+ * only create new trusted.gpg if directory is writeable
+ * support (multiple) arguments properly in apt-key
+ * set a primary-keyring only if we have access to it
+ * merge fragment keyrings in apt-key to avoid hitting gpg limits
+ (Closes: 733028)
+ * use apt-key adv (+ gnupg) instead of gpgv for verify
+ * support gnupg2 as drop-in replacement for gnupg
+ * allow to specify fingerprints in 'apt-key del'
+ * use only one --keyring in gpg interactions
+ * add and use 'apt-key verify' which prefers gpgv over gpg
+ * remove empty keyrings in trusted.gpg.d on upgrade
+ * store source name and version in binary cache
+ * allow fetcher setup without directory creation (Closes: 762898)
+ * cleanup partial directory of lists in apt-get clean (Closes: #762889)
+ * allow options between command and -- on commandline
+ * update symbols file
+ * support parsing of all hashes for pdiff
+ * ensure world-readability for trusted.gpg in postinst (Closes: 647001)
+ * ensure partial dirs are 0700 and owned by _apt:root
+ * use _apt:root only for partial directories
+ * display errortext for all Err
+ * set PR_SET_NO_NEW_PRIVS also if run as non-root
+
+ [ James McCoy ]
+ * ensure apt-key del handles 16-byte key ids (Closes: 754436)
+
+ [ Kenshi Muto ]
+ * Japanese program translation update (Closes: 763033)
+
+ [ Trần Ngọc Quân ]
+ * Set STRIP_FROM_PATH for doxygen
+
+ [ Mert Dirik ]
+ * Turkish program translation update (Closes: 763379)
+
+ [ Guillem Jover ]
+ * apt-get: Create the temporary downloaded changelog inside tmpdir
+
+ [ Miroslav Kure ]
+ * [l10n] Updated Czech translation of apt (Closes: #764055)
+
+ -- Michael Vogt <mvo@ubuntu.com> Wed, 08 Oct 2014 09:37:35 +0200
+
+apt (1.1~exp3) experimental; urgency=medium
+
+ [ Michael Vogt ]
+ * merged changes from debian/sid up to 1.0.9.1
+ * Make /var/lib/apt/lists and /var/cache/apt/archives owned
+ by the new _apt user
+ * Drop Privileges in the following acquire methods:
+ copy, http, https, ftp, gpgv, gzip/bzip2/lzma/xz
+ * DropPrivs: Improvements based on feedback from error@debian.org
+
+ [ Julian Andres Klode ]
+ * DropPriv: Really call seteuid and not setuid, and add more checks
+ * Use _apt as our unprivileged user name
+ * DropPrivs: Also check for saved set-user-ID and set-group-ID
+ * methods: Fail if we cannot drop privileges
+ * DropPrivs: Also check for saved set-user-ID and set-group-ID
+
+ -- Michael Vogt <mvo@debian.org> Wed, 24 Sep 2014 22:30:09 +0200
+
+apt (1.1~exp2) experimental; urgency=medium
+
+ [ Guillem Jover ]
+ * Add new Base256ToNum long long overload function
+ * Fix ar and tar code to be LFS-safe (Closes: #742882)
+
+ [ Michael Vogt ]
+ * increase libapt-inst to version 1.6
+ * Only allow "apt-get build-dep path" when path starts with ./ or /
+ * Allow passing a full path to apt-get install /foo/bar.deb (Closes: #752327)
+ * merge changes from the 1.0.6 upload
+
+ -- Michael Vogt <mvo@debian.org> Thu, 10 Jul 2014 13:18:08 +0200
+
+apt (1.1~exp1) experimental; urgency=low
+
+ [ David Kalnischkies ]
+ * [API Break] change "std::string pkgAcquire::Item::DescURI()" to
+ "std::string pkgAcquire::Item::DescURI() const"
+ * [ABI-Break] increase hashtable size for packages/groups by factor 5
+ * [ABI-Break] cleanup datatypes mix used in binary cache
+ * [internal API-Break] remove the Section member from package struct
+ * use 'best' hash for source authentication (LP: 1098738)
+ * use HashStringList in the acquire system
+ * deal with hashes in ftparchive more dynamic as well
+ * reenable pipelining via hashsum reordering support
+ * parse and retrieve multiple Descriptions in one record
+ * improve pkgTagSection scanning and parsing
+ * invalid cache if architecture set doesn't match (Closes: 745036)
+
+ [ Michael Vogt ]
+ * add support for "apt-get build-dep foo.dsc"
+ * add support for "apt-get build-dep unpacked-source-dir"
+ * add support for "apt-get install foo_1.0_all.deb"
+ * make "apt-get update" progress much more accurate by loading the
+ sizes of the targets into the fetcher early
+ * Implement simple by-hash for apt update to improve reliability of
+ the update. Apt will try to fetch the Packages file via
+ /by-hash/$hash_type/$hash_value if the repo supports that.
+ - add APT::Acquire::$(host)::By-Hash=1 knob
+ - add Acquire-By-Hash=1 to Release file
+ * add Debug::Acquire::Progress debug option
+ * [ABI-Break] lp:~mvo/apt/source-hashes:
+ - use sha{512,256,1} for deb-src when available LP: #1098738
+ * [ABI-Break] stop exporting the accidentally exported parsenetrc() symbol
+ * [ABI-Break] remove the PACKAGE_MATCHER_ABI_COMPAT defines
+ * [ABI BREAK] apt-pkg/pkgcache.h:
+ - adjust pkgCache::State::VerPriority enum, to match reality
+ * test/integration/test-debsrc-hashes:
+ - add integration test, thanks to Daniel Hartwig
+ * [ABI-Break] remove the PACKAGE_MATCHER_ABI_COMPAT defines
+ * [ABI-Break] Pass struct IndexTarget/indexRecords to
+ pkgAcqIndex{,Merge}Diffs
+ * [internal API-Break] rename pkgCache::Package::NextPackage to
+ pkgCache::Package::Next
+ * Calculate Percent as part of pkgAcquireStatus to provide a weighted
+ percent for both items and bytes
+ * apt-pkg/contrib/macros.h: bump library version to 4.13
+ * apt-private/acqprogress.cc: do not show file size on IMSHit, it wasn't
+ fetched
+ * Fix warnings from clang -Wall/clang -fsanitize=address
+ * add DropPrivs() and drop privileges to nobody when running
+ the builtin apt and dump solvers
+ * lp:~mvo/apt/webserver-simulate-broken-with-fix346386:
+ - fix invalid InRelease file download checking and add regression
+ test to serve broken files to the builtin test webserver
+ - add regression test for LP: #34638
+
+ -- Michael Vogt <mvo@debian.org> Thu, 19 Jun 2014 12:01:48 +0200
+
+ apt (1.0.9.9) unstable; urgency=medium
+
+ [ David Kalnischkies ]
+ * parse specific-arch dependencies correctly on single-arch systems
+ (Closes: 777760)
+ * remove "first package seen is native package" assumption.
+ Thanks to Axel Beckert for testing (Closes: 782777)
+
+ -- David Kalnischkies <david@kalnischkies.de> Tue, 28 Apr 2015 16:11:27 +0200
+
apt (1.0.9.8) unstable; urgency=medium
[ David Kalnischkies ]
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
-CircleBuf::CircleBuf(unsigned long long Size) : Size(Size), Hash(0)
+CircleBuf::CircleBuf(unsigned long long Size)
+ : Size(Size), Hash(NULL), TotalWriten(0)
{
Buf = new unsigned char[Size];
Reset();
InP = 0;
OutP = 0;
StrPos = 0;
+ TotalWriten = 0;
MaxGet = (unsigned long long)-1;
OutQueue = string();
- if (Hash != 0)
+ if (Hash != NULL)
{
delete Hash;
- Hash = new Hashes;
+ Hash = NULL;
}
}
/*}}}*/
return false;
}
+
+ TotalWriten += Res;
- if (Hash != 0)
+ if (Hash != NULL)
Hash->Add(Buf + (OutP%Size),Res);
OutP += Res;
{
/* Closes encoding is used when the server did not specify a size, the
loss of the connection means we are done */
- if (Encoding == Closes)
+ if (Persistent == false)
In.Limit(-1);
else if (JunkSize != 0)
In.Limit(JunkSize);
else
- In.Limit(Size - StartPos);
+ In.Limit(DownloadSize);
// Just transfer the whole block.
do
return (ServerFd != -1);
}
/*}}}*/
-bool HttpServerState::InitHashes(FileFd &File) /*{{{*/
+bool HttpServerState::InitHashes(HashStringList const &ExpectedHashes) /*{{{*/
{
delete In.Hash;
- In.Hash = new Hashes;
-
- // Set the expected size and read file for the hashes
- File.Truncate(StartPos);
- return In.Hash->AddFD(File, StartPos);
+ In.Hash = new Hashes(ExpectedHashes);
+ return true;
}
/*}}}*/
+
APT_PURE Hashes * HttpServerState::GetHashes() /*{{{*/
{
return In.Hash;
// See if this is because the server finished the data stream
if (In.IsLimit() == false && State != HttpServerState::Header &&
- Encoding != HttpServerState::Closes)
+ Persistent == true)
{
Close();
if (LErrno == 0)
return true;
}
- if (In.IsLimit() == true || Encoding == ServerState::Closes)
+ if (In.IsLimit() == true || Persistent == false)
return true;
}
return false;
return _error->Errno("write",_("Error writing to output file"));
}
+ if (MaximumSize > 0 && File && File->Tell() > MaximumSize)
+ {
+ Owner->SetFailReason("MaximumSizeExceeded");
+ return _error->Error("Writing more data than expected (%llu > %llu)",
+ File->Tell(), MaximumSize);
+ }
+
// Handle commands from APT
if (FD_ISSET(STDIN_FILENO,&rfds))
{
/*}}}*/
using namespace std;
+struct APT_HIDDEN CURLUserPointer {
+ HttpsMethod * const https;
+ HttpsMethod::FetchResult * const Res;
+ HttpsMethod::FetchItem const * const Itm;
+ CURLUserPointer(HttpsMethod * const https, HttpsMethod::FetchResult * const Res,
+ HttpsMethod::FetchItem const * const Itm) : https(https), Res(Res), Itm(Itm) {}
+};
+
size_t
HttpsMethod::parse_header(void *buffer, size_t size, size_t nmemb, void *userp)
{
size_t len = size * nmemb;
- HttpsMethod *me = (HttpsMethod *)userp;
+ CURLUserPointer *me = (CURLUserPointer *)userp;
std::string line((char*) buffer, len);
for (--len; len > 0; --len)
if (isspace(line[len]) == 0)
if (line.empty() == true)
{
- if (me->Server->Result != 416 && me->Server->StartPos != 0)
+ if (me->https->Server->Result != 416 && me->https->Server->StartPos != 0)
;
- else if (me->Server->Result == 416 && me->Server->TotalFileSize == me->File->FileSize())
+ else if (me->https->Server->Result == 416)
{
- me->Server->Result = 200;
- me->Server->StartPos = me->Server->TotalFileSize;
- // the actual size is not important for https as curl will deal with it
- // by itself and e.g. doesn't bother us with transport-encoding…
- me->Server->JunkSize = std::numeric_limits<unsigned long long>::max();
+ bool partialHit = false;
+ if (me->Itm->ExpectedHashes.usable() == true)
+ {
+ Hashes resultHashes(me->Itm->ExpectedHashes);
+ FileFd file(me->Itm->DestFile, FileFd::ReadOnly);
- me->https->Server->Size = file.FileSize();
++ me->https->Server->TotalFileSize = file.FileSize();
+ me->https->Server->Date = file.ModificationTime();
+ resultHashes.AddFD(file);
+ HashStringList const hashList = resultHashes.GetHashStringList();
+ partialHit = (me->Itm->ExpectedHashes == hashList);
+ }
- else if (me->https->Server->Result == 416 && me->https->Server->Size == me->https->File->FileSize())
++ else if (me->https->Server->Result == 416 && me->https->Server->TotalFileSize == me->https->File->FileSize())
+ partialHit = true;
+
+ if (partialHit == true)
+ {
+ me->https->Server->Result = 200;
- me->https->Server->StartPos = me->https->Server->Size;
++ me->https->Server->StartPos = me->https->Server->TotalFileSize;
+ // the actual size is not important for https as curl will deal with it
+ // by itself and e.g. doesn't bother us with transport-encoding…
+ me->https->Server->JunkSize = std::numeric_limits<unsigned long long>::max();
+ }
+ else
+ me->https->Server->StartPos = 0;
}
else
- me->Server->StartPos = 0;
+ me->https->Server->StartPos = 0;
- me->File->Truncate(me->Server->StartPos);
- me->File->Seek(me->Server->StartPos);
+ me->Res->LastModified = me->https->Server->Date;
- me->Res->Size = me->https->Server->Size;
++ me->Res->Size = me->https->Server->TotalFileSize;
+ me->Res->ResumePoint = me->https->Server->StartPos;
- me->Res.Size = me->Server->TotalFileSize;
+ // we expect valid data, so tell our caller we get the file now
+ if (me->https->Server->Result >= 200 && me->https->Server->Result < 300)
+ {
+ if (me->https->Server->JunkSize == 0 && me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint)
+ me->https->URIStart(*me->Res);
+ if (me->https->Server->AddPartialFileToHashes(*(me->https->File)) == false)
+ return 0;
+ }
}
- else if (me->Server->HeaderLine(line) == false)
+ else if (me->https->Server->HeaderLine(line) == false)
return 0;
return size*nmemb;
if (me->Server->JunkSize != 0)
return buffer_size;
- if (me->ReceivedData == false)
+ if(me->File->Write(buffer, buffer_size) != true)
+ return 0;
+
+ if(me->Queue->MaximumSize > 0)
{
- me->URIStart(me->Res);
- me->ReceivedData = true;
+ unsigned long long const TotalWritten = me->File->Tell();
+ if (TotalWritten > me->Queue->MaximumSize)
+ {
+ me->SetFailReason("MaximumSizeExceeded");
+ _error->Error("Writing more data than expected (%llu > %llu)",
+ TotalWritten, me->Queue->MaximumSize);
+ return 0;
+ }
}
- if(me->File->Write(buffer, buffer_size) != true)
- return false;
+ if (me->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false)
+ return 0;
return buffer_size;
}
// HttpsServerState::HttpsServerState - Constructor /*{{{*/
-HttpsServerState::HttpsServerState(URI Srv,HttpsMethod * /*Owner*/) : ServerState(Srv, NULL)
+HttpsServerState::HttpsServerState(URI Srv,HttpsMethod * Owner) : ServerState(Srv, Owner), Hash(NULL)
{
TimeOut = _config->FindI("Acquire::https::Timeout",TimeOut);
Reset();
}
/*}}}*/
+bool HttpsServerState::InitHashes(HashStringList const &ExpectedHashes) /*{{{*/
+{
+ delete Hash;
+ Hash = new Hashes(ExpectedHashes);
+ return true;
+}
+ /*}}}*/
+APT_PURE Hashes * HttpsServerState::GetHashes() /*{{{*/
+{
+ return Hash;
+}
+ /*}}}*/
-void HttpsMethod::SetupProxy() /*{{{*/
+void HttpsMethod::SetupProxy() /*{{{*/
{
URI ServerName = Queue->Uri;
bool HttpsMethod::Fetch(FetchItem *Itm)
{
struct stat SBuf;
- struct curl_slist *headers=NULL;
+ struct curl_slist *headers=NULL;
char curl_errorstr[CURL_ERROR_SIZE];
URI Uri = Itm->Uri;
string remotehost = Uri.Host;
- ReceivedData = false;
// TODO:
// - http::Pipeline-Depth
maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
+ FetchResult Res;
+ CURLUserPointer userp(this, &Res, Itm);
// callbacks
curl_easy_setopt(curl, CURLOPT_URL, static_cast<string>(Uri).c_str());
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header);
- curl_easy_setopt(curl, CURLOPT_WRITEHEADER, this);
+ curl_easy_setopt(curl, CURLOPT_WRITEHEADER, &userp);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, this);
// options
curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, timeout);
// set redirect options and default to 10 redirects
- bool const AllowRedirect = _config->FindB("Acquire::https::AllowRedirect",
- _config->FindB("Acquire::http::AllowRedirect",true));
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, AllowRedirect);
curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 10);
// debug
- if(_config->FindB("Debug::Acquire::https", false))
+ if (Debug == true)
curl_easy_setopt(curl, CURLOPT_VERBOSE, true);
// error handling
// go for it - if the file exists, append on it
File = new FileFd(Itm->DestFile, FileFd::WriteAny);
- Server = new HttpsServerState(Itm->Uri, this);
- Res = FetchResult();
+ Server = CreateServerState(Itm->Uri);
+ if (Server->InitHashes(Itm->ExpectedHashes) == false)
+ return false;
// keep apt updated
Res.Filename = Itm->DestFile;
if (success != 0)
{
_error->Error("%s", curl_errorstr);
- unlink(File->Name().c_str());
return false;
}
char err[255];
snprintf(err, sizeof(err) - 1, "HttpError%i", Server->Result);
SetFailReason(err);
- _error->Error("%s", err);
+ _error->Error("%i %s", Server->Result, Server->Code);
// unlink, no need keep 401/404 page content in partial/
unlink(File->Name().c_str());
return false;
}
- struct stat resultStat;
- if (unlikely(stat(File->Name().c_str(), &resultStat) != 0))
- {
- _error->Errno("stat", "Unable to access file %s", File->Name().c_str());
- return false;
- }
- Res.Size = resultStat.st_size;
-
// invalid range-request
if (Server->Result == 416)
{
unlink(File->Name().c_str());
- Res.Size = 0;
delete File;
Redirect(Itm->Uri);
return true;
}
+ struct stat resultStat;
+ if (unlikely(stat(File->Name().c_str(), &resultStat) != 0))
+ {
+ _error->Errno("stat", "Unable to access file %s", File->Name().c_str());
+ return false;
+ }
+ Res.Size = resultStat.st_size;
+
// Timestamp
curl_easy_getinfo(curl, CURLINFO_FILETIME, &Res.LastModified);
if (Res.LastModified != -1)
Res.LastModified = resultStat.st_mtime;
// take hashes
- Hashes Hash;
- FileFd Fd(Res.Filename, FileFd::ReadOnly);
- Hash.AddFD(Fd);
- Res.TakeHashes(Hash);
+ Res.TakeHashes(*(Server->GetHashes()));
// keep apt updated
URIDone(Res);
// cleanup
- Res.Size = 0;
delete File;
return true;
}
+ /*}}}*/
+// HttpsMethod::Configuration - Handle a configuration message /*{{{*/
+bool HttpsMethod::Configuration(string Message)
+{
+ if (ServerMethod::Configuration(Message) == false)
+ return false;
+
+ AllowRedirect = _config->FindB("Acquire::https::AllowRedirect",
+ _config->FindB("Acquire::http::AllowRedirect", true));
+ Debug = _config->FindB("Debug::Acquire::https",false);
+
+ return true;
+}
+ /*}}}*/
+ServerState * HttpsMethod::CreateServerState(URI uri) /*{{{*/
+{
+ return new HttpsServerState(uri, this);
+}
+ /*}}}*/
int main()
{
Major = 0;
Minor = 0;
Result = 0;
- Size = 0;
+ TotalFileSize = 0;
JunkSize = 0;
StartPos = 0;
Encoding = Closes;
Encoding = Stream;
HaveContent = true;
- unsigned long long * SizePtr = &Size;
+ unsigned long long * DownloadSizePtr = &DownloadSize;
if (Result == 416)
- SizePtr = &JunkSize;
+ DownloadSizePtr = &JunkSize;
- *SizePtr = strtoull(Val.c_str(), NULL, 10);
- if (*SizePtr >= std::numeric_limits<unsigned long long>::max())
+ *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10);
+ if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
- else if (*SizePtr == 0)
+ else if (*DownloadSizePtr == 0)
HaveContent = false;
+
+ // On partial content (206) the Content-Length is less than the real
+ // size, so do not set it here but leave that to the Content-Range
+ // header instead
+ if(Result != 206 && TotalFileSize == 0)
+ TotalFileSize = DownloadSize;
+
return true;
}
HaveContent = true;
// §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
- if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&Size) == 1)
+ if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
; // we got the expected filesize which is all we wanted
- else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2)
+ else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
- if ((unsigned long long)StartPos > Size)
+ if ((unsigned long long)StartPos > TotalFileSize)
return _error->Error(_("This HTTP server has broken range support"));
+
+ // figure out what we will download
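+ // e.g. "Content-Range: bytes 500-999/1000" (illustrative values) gives
+ // StartPos=500 and TotalFileSize=1000, so DownloadSize becomes 500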
+ DownloadSize = TotalFileSize - StartPos;
return true;
}
Reset();
}
/*}}}*/
+bool ServerState::AddPartialFileToHashes(FileFd &File) /*{{{*/
+{
+ File.Truncate(StartPos);
+ return GetHashes()->AddFD(File, StartPos);
+}
+ /*}}}*/
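+// Resume note: the bytes already on disk (up to StartPos) are hashed here,
+// while the remainder is hashed via GetHashes()->Add() as it is received.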
bool ServerMethod::Configuration(string Message) /*{{{*/
{
- return pkgAcqMethod::Configuration(Message);
+ if (pkgAcqMethod::Configuration(Message) == false)
+ return false;
+
+ DropPrivsOrDie();
+
+ return true;
}
/*}}}*/
Res.LastModified = Queue->LastModified;
return IMS_HIT;
}
-
+
/* Redirect
*
* Note that it is only OK for us to treat all redirection the same
struct stat SBuf;
if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
{
- if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
+ bool partialHit = false;
+ if (Queue->ExpectedHashes.usable() == true)
+ {
+ Hashes resultHashes(Queue->ExpectedHashes);
+ FileFd file(Queue->DestFile, FileFd::ReadOnly);
- Server->Size = file.FileSize();
++ Server->TotalFileSize = file.FileSize();
+ Server->Date = file.ModificationTime();
+ resultHashes.AddFD(file);
+ HashStringList const hashList = resultHashes.GetHashStringList();
+ partialHit = (Queue->ExpectedHashes == hashList);
+ }
- else if ((unsigned long long)SBuf.st_size == Server->Size)
++ else if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
+ partialHit = true;
+ if (partialHit == true)
{
// the file is completely downloaded, but was not moved
if (Server->HaveContent == true)
Server->RunData(&DevNull);
}
Server->HaveContent = false;
- Server->StartPos = Server->Size;
+ Server->StartPos = Server->TotalFileSize;
Server->Result = 200;
}
else if (unlink(Queue->DestFile.c_str()) == 0)
failure */
if (Server->Result < 200 || Server->Result >= 300)
{
- char err[255];
- snprintf(err,sizeof(err)-1,"HttpError%i",Server->Result);
+ std::string err;
+ strprintf(err, "HttpError%u", Server->Result);
SetFailReason(err);
- _error->Error("%u %s",Server->Result,Server->Code);
+ _error->Error("%u %s", Server->Result, Server->Code);
if (Server->HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
// This is some sort of 2xx 'data follows' reply
Res.LastModified = Server->Date;
- Res.Size = Server->Size;
-
+ Res.Size = Server->TotalFileSize;
+
// Open the file
delete File;
File = new FileFd(Queue->DestFile,FileFd::WriteAny);
FailFd = File->Fd();
FailTime = Server->Date;
- if (Server->InitHashes(*File) == false)
+ if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
{
_error->Errno("read",_("Problem hashing file"));
return ERROR_NOT_FROM_SERVER;
for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
I = I->Next, Depth++)
{
- // If pipelining is disabled, we only queue 1 request
- if (Server->Pipeline == false && Depth >= 0)
- break;
+ if (Depth >= 0)
+ {
+ // If pipelining is disabled, we only queue 1 request
+ if (Server->Pipeline == false)
+ break;
+ // if we have no hashes, do at most one such request
+ // as we can't fix up pipelining misbehaviors otherwise
+ else if (I->ExpectedHashes.usable() == false)
+ break;
+ }
// Make sure we stick with the same server
if (Server->Comp(I->Uri) == false)
// Run the data
bool Result = true;
+
+ // ensure we don't fetch too much
+ // we could do "Server->MaximumSize = Queue->MaximumSize" here
+ // but that would break the clever pipeline mess-up detection
+ // so instead we use the size of the biggest item in the queue
+ Server->MaximumSize = FindMaximumObjectSizeInQueue();
+
if (Server->HaveContent)
Result = Server->RunData(File);
// Send status to APT
if (Result == true)
{
- Res.TakeHashes(*Server->GetHashes());
+ Hashes * const resultHashes = Server->GetHashes();
+ HashStringList const hashList = resultHashes->GetHashStringList();
+ if (PipelineDepth != 0 && Queue->ExpectedHashes.usable() == true && Queue->ExpectedHashes != hashList)
+ {
+ // we did not get the expected hash… mhhh:
+ // could it be that server/proxy messed up pipelining?
+ FetchItem * BeforeI = Queue;
+ for (FetchItem *I = Queue->Next; I != 0 && I != QueueBack; I = I->Next)
+ {
+ if (I->ExpectedHashes.usable() == true && I->ExpectedHashes == hashList)
+ {
+ // yes, he did! Disable pipelining and rewrite queue
+ if (Server->Pipeline == true)
+ {
+ // FIXME: fake a warning message as we have no proper way of communicating here
+ std::string out;
+ strprintf(out, _("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::PipelineDepth");
+ std::cerr << "W: " << out << std::endl;
+ Server->Pipeline = false;
+ // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
+ }
+ Rename(Res.Filename, I->DestFile);
+ Res.Filename = I->DestFile;
+ BeforeI->Next = I->Next;
+ I->Next = Queue;
+ Queue = I;
+ break;
+ }
+ BeforeI = I;
+ }
+ }
+ Res.TakeHashes(*resultHashes);
URIDone(Res);
}
else
QueueBack = Queue;
}
else
+ {
+ Server->Close();
Fail(true);
+ }
}
break;
}
}
return 0;
+}
+ /*}}}*/
+ /*{{{*/
+unsigned long long
+ServerMethod::FindMaximumObjectSizeInQueue() const
+{
+ unsigned long long MaxSizeInQueue = 0;
+ for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
+ MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
+ return MaxSizeInQueue;
}
/*}}}*/
char Code[360];
// These are some statistics from the last parsed header lines
- unsigned long long Size; // size of the usable content (aka: the file)
- unsigned long long JunkSize; // size of junk content (aka: server error pages)
+
+ // total size of the usable content (aka: the file)
+ unsigned long long TotalFileSize;
+ // size we actually download (can be smaller than TotalFileSize if we have partial content)
+ unsigned long long DownloadSize;
+ // size of junk content (aka: server error pages)
+ unsigned long long JunkSize;
+ // The start of the data (for partial content)
unsigned long long StartPos;
+
time_t Date;
bool HaveContent;
enum {Chunked,Stream,Closes} Encoding;
URI Proxy;
unsigned long TimeOut;
+ unsigned long long MaximumSize;
+
protected:
ServerMethod *Owner;
};
/** \brief Get the headers before the data */
RunHeadersResult RunHeaders(FileFd * const File, const std::string &Uri);
+ bool AddPartialFileToHashes(FileFd &File);
bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
- virtual void Reset() {Major = 0; Minor = 0; Result = 0; Code[0] = '\0'; Size = 0; JunkSize = 0;
+ virtual void Reset() {Major = 0; Minor = 0; Result = 0; Code[0] = '\0'; TotalFileSize = 0; JunkSize = 0;
StartPos = 0; Encoding = Closes; time(&Date); HaveContent = false;
- State = Header; Persistent = false; Pipeline = true;};
+ State = Header; Persistent = false; Pipeline = true; MaximumSize = 0;};
virtual bool WriteResponse(std::string const &Data) = 0;
/** \brief Transfer the data from the socket */
virtual bool Open() = 0;
virtual bool IsOpen() = 0;
virtual bool Close() = 0;
- virtual bool InitHashes(FileFd &File) = 0;
+ virtual bool InitHashes(HashStringList const &ExpectedHashes) = 0;
virtual Hashes * GetHashes() = 0;
virtual bool Die(FileFd &File) = 0;
virtual bool Flush(FileFd * const File) = 0;
unsigned long PipelineDepth;
bool AllowRedirect;
+ // Find the biggest item in the fetch queue for checking the maximum size
+ unsigned long long FindMaximumObjectSizeInQueue() const APT_PURE;
+
public:
bool Debug;
virtual ServerState * CreateServerState(URI uri) = 0;
virtual void RotateDNS() = 0;
- ServerMethod(const char *Ver,unsigned long Flags = 0) : pkgAcqMethod(Ver, Flags), Server(NULL), File(NULL), PipelineDepth(0), AllowRedirect(false), Debug(false) {};
+ ServerMethod(const char *Ver,unsigned long Flags = 0) : pkgAcqMethod(Ver, Flags), Server(NULL), File(NULL), PipelineDepth(10), AllowRedirect(false), Debug(false) {};
virtual ~ServerMethod() {};
};
assertprogress() {
T="$1"
testsuccess grep "dlstatus:1:0:Retrieving file 1 of 1" "$T"
- if ! egrep -q "dlstatus:1:[0-9]{1,2}\.(.*):Retrieving file 1 of 1" "$T"; then
+ if ! egrep -q "dlstatus:1:[1-9][0-9](\..*)?:Retrieving file 1 of 1" "$T"; then
cat "$T"
msgfail "Failed to detect download progress"
fi
testsuccess grep "dlstatus:1:100:Retrieving file 1 of 1" "$T"
- #cat $T
}
# we need to ensure the file is reasonably big so that apt has a chance to
TESTFILE=testfile.big
testsuccess dd if=/dev/zero of=./aptarchive/$TESTFILE bs=800k count=1
+OPT='-o APT::Status-Fd=3 -o Debug::pkgAcquire::Worker=1 -o Debug::Acquire::http=1 -o Debug::Acquire::https=1'
+
msgtest 'download progress works via' 'http'
-printf '\n'
exec 3> apt-progress.log
-testsuccess apthelper download-file "http://localhost:8080/$TESTFILE" http-$TESTFILE -o APT::Status-Fd=3 -o Acquire::http::Dl-Limit=600
+testsuccess --nomsg apthelper download-file "http://localhost:8080/$TESTFILE" http-$TESTFILE $OPT -o Acquire::http::Dl-Limit=800
assertprogress apt-progress.log
msgtest 'download progress works via' 'https'
-printf '\n'
exec 3> apt-progress.log
-testsuccess apthelper download-file "https://localhost:4433/$TESTFILE" https-$TESTFILE -o APT::Status-Fd=3 -o Acquire::https::Dl-Limit=600
-assertprogress apt-progress.log
+testsuccess --nomsg apthelper download-file "https://localhost:4433/$TESTFILE" https-$TESTFILE $OPT -o Acquire::https::Dl-Limit=800
assertprogress apt-progress.log
# cleanup
rm -f apt-progress*.log
--- /dev/null
-TARGET=testfile-downloaded
+ #!/bin/sh
+ #
+ # this is a regression test for LP: #1445239 where a partial download can
+ # trigger an endless hang of the download method
+ #
+
+ set -e
+
+ TESTDIR=$(readlink -f $(dirname $0))
+ . $TESTDIR/framework
+ setupenvironment
+ configarchitecture 'amd64'
+
+ changetowebserver
+ webserverconfig 'aptwebserver::support::range' 'true'
+
+ TESTFILE='aptarchive/testfile'
+ dd if=/dev/zero of=$TESTFILE bs=100k count=1 2>/dev/null
+
+ DOWNLOADLOG='rootdir/tmp/testdownloadfile.log'
+
++TARGET=./downloaded/testfile-downloaded
+ dd if=/dev/zero of=$TARGET bs=99k count=1 2>/dev/null
+ if ! downloadfile http://localhost:8080/testfile "$TARGET" > "$DOWNLOADLOG"; then
+ cat >&2 "$DOWNLOADLOG"
+ msgfail
+ else
+ msgpass
+ fi
#include <string>
#include <vector>
-static char const * httpcodeToStr(int const httpcode) /*{{{*/
+static std::string httpcodeToStr(int const httpcode) /*{{{*/
{
switch (httpcode)
{
// Informational 1xx
- case 100: return "100 Continue";
- case 101: return "101 Switching Protocols";
+ case 100: return _config->Find("aptwebserver::httpcode::100", "100 Continue");
+ case 101: return _config->Find("aptwebserver::httpcode::101", "101 Switching Protocols");
// Successful 2xx
- case 200: return "200 OK";
- case 201: return "201 Created";
- case 202: return "202 Accepted";
- case 203: return "203 Non-Authoritative Information";
- case 204: return "204 No Content";
- case 205: return "205 Reset Content";
- case 206: return "206 Partial Content";
+ case 200: return _config->Find("aptwebserver::httpcode::200", "200 OK");
+ case 201: return _config->Find("aptwebserver::httpcode::201", "201 Created");
+ case 202: return _config->Find("aptwebserver::httpcode::202", "202 Accepted");
+ case 203: return _config->Find("aptwebserver::httpcode::203", "203 Non-Authoritative Information");
+ case 204: return _config->Find("aptwebserver::httpcode::204", "204 No Content");
+ case 205: return _config->Find("aptwebserver::httpcode::205", "205 Reset Content");
+ case 206: return _config->Find("aptwebserver::httpcode::206", "206 Partial Content");
// Redirections 3xx
- case 300: return "300 Multiple Choices";
- case 301: return "301 Moved Permanently";
- case 302: return "302 Found";
- case 303: return "303 See Other";
- case 304: return "304 Not Modified";
- case 305: return "304 Use Proxy";
- case 307: return "307 Temporary Redirect";
+ case 300: return _config->Find("aptwebserver::httpcode::300", "300 Multiple Choices");
+ case 301: return _config->Find("aptwebserver::httpcode::301", "301 Moved Permanently");
+ case 302: return _config->Find("aptwebserver::httpcode::302", "302 Found");
+ case 303: return _config->Find("aptwebserver::httpcode::303", "303 See Other");
+ case 304: return _config->Find("aptwebserver::httpcode::304", "304 Not Modified");
+ case 305: return _config->Find("aptwebserver::httpcode::305", "305 Use Proxy");
+ case 307: return _config->Find("aptwebserver::httpcode::307", "307 Temporary Redirect");
// Client errors 4xx
- case 400: return "400 Bad Request";
- case 401: return "401 Unauthorized";
- case 402: return "402 Payment Required";
- case 403: return "403 Forbidden";
- case 404: return "404 Not Found";
- case 405: return "405 Method Not Allowed";
- case 406: return "406 Not Acceptable";
- case 407: return "407 Proxy Authentication Required";
- case 408: return "408 Request Time-out";
- case 409: return "409 Conflict";
- case 410: return "410 Gone";
- case 411: return "411 Length Required";
- case 412: return "412 Precondition Failed";
- case 413: return "413 Request Entity Too Large";
- case 414: return "414 Request-URI Too Large";
- case 415: return "415 Unsupported Media Type";
- case 416: return "416 Requested range not satisfiable";
- case 417: return "417 Expectation Failed";
- case 418: return "418 I'm a teapot";
+ case 400: return _config->Find("aptwebserver::httpcode::400", "400 Bad Request");
+ case 401: return _config->Find("aptwebserver::httpcode::401", "401 Unauthorized");
+ case 402: return _config->Find("aptwebserver::httpcode::402", "402 Payment Required");
+ case 403: return _config->Find("aptwebserver::httpcode::403", "403 Forbidden");
+ case 404: return _config->Find("aptwebserver::httpcode::404", "404 Not Found");
+ case 405: return _config->Find("aptwebserver::httpcode::405", "405 Method Not Allowed");
+ case 406: return _config->Find("aptwebserver::httpcode::406", "406 Not Acceptable");
+ case 407: return _config->Find("aptwebserver::httpcode::407", "407 Proxy Authentication Required");
+ case 408: return _config->Find("aptwebserver::httpcode::408", "408 Request Time-out");
+ case 409: return _config->Find("aptwebserver::httpcode::409", "409 Conflict");
+ case 410: return _config->Find("aptwebserver::httpcode::410", "410 Gone");
+ case 411: return _config->Find("aptwebserver::httpcode::411", "411 Length Required");
+ case 412: return _config->Find("aptwebserver::httpcode::412", "412 Precondition Failed");
+ case 413: return _config->Find("aptwebserver::httpcode::413", "413 Request Entity Too Large");
+ case 414: return _config->Find("aptwebserver::httpcode::414", "414 Request-URI Too Large");
+ case 415: return _config->Find("aptwebserver::httpcode::415", "415 Unsupported Media Type");
+ case 416: return _config->Find("aptwebserver::httpcode::416", "416 Requested range not satisfiable");
+ case 417: return _config->Find("aptwebserver::httpcode::417", "417 Expectation Failed");
+ case 418: return _config->Find("aptwebserver::httpcode::418", "418 I'm a teapot");
// Server error 5xx
- case 500: return "500 Internal Server Error";
- case 501: return "501 Not Implemented";
- case 502: return "502 Bad Gateway";
- case 503: return "503 Service Unavailable";
- case 504: return "504 Gateway Time-out";
- case 505: return "505 HTTP Version not supported";
- }
- return NULL;
+ case 500: return _config->Find("aptwebserver::httpcode::500", "500 Internal Server Error");
+ case 501: return _config->Find("aptwebserver::httpcode::501", "501 Not Implemented");
+ case 502: return _config->Find("aptwebserver::httpcode::502", "502 Bad Gateway");
+ case 503: return _config->Find("aptwebserver::httpcode::503", "503 Service Unavailable");
+ case 504: return _config->Find("aptwebserver::httpcode::504", "504 Gateway Time-out");
+ case 505: return _config->Find("aptwebserver::httpcode::505", "505 HTTP Version not supported");
+ }
+ return "";
}
/*}}}*/
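+// Override sketch (test usage, illustrative value): any status line can be
+// replaced through the config space, e.g.
+//    aptwebserver -o aptwebserver::httpcode::200="200 Fine"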
static bool chunkedTransferEncoding(std::list<std::string> const &headers) {
contentlength << "Content-Length: " << data.FileSize();
headers.push_back(contentlength.str());
}
- std::string lastmodified("Last-Modified: ");
- lastmodified.append(TimeRFC1123(data.ModificationTime()));
- headers.push_back(lastmodified);
+ if (_config->FindB("aptwebserver::support::last-modified", true) == true)
+ {
+ std::string lastmodified("Last-Modified: ");
+ lastmodified.append(TimeRFC1123(data.ModificationTime()));
+ headers.push_back(lastmodified);
+ }
}
/*}}}*/
static void addDataHeaders(std::list<std::string> &headers, std::string &data)/*{{{*/
// Proxies require absolute uris, so this is a simple proxy-fake option
std::string const absolute = _config->Find("aptwebserver::request::absolute", "uri,path");
- if (strncmp(host.c_str(), filename.c_str(), host.length()) == 0)
+ if (strncmp(host.c_str(), filename.c_str(), host.length()) == 0 && APT::String::Startswith(filename, "/_config/") == false)
{
if (absolute.find("uri") == std::string::npos)
{
sendError(client, 400, request, sendContent, "Request is absoluteURI, but configured to not accept that", headers);
return false;
}
+
// strip the host from the request to make it an absolute path
filename.erase(0, host.length());
+
+ std::string const authConf = _config->Find("aptwebserver::proxy-authorization", "");
+ std::string auth = LookupTag(request, "Proxy-Authorization", "");
+ if (authConf.empty() != auth.empty())
+ {
+ if (auth.empty())
+ sendError(client, 407, request, sendContent, "Proxy requires authentication", headers);
+ else
+ sendError(client, 407, request, sendContent, "Client wants to authenticate to proxy, but proxy doesn't need it", headers);
+ return false;
+ }
+ if (authConf.empty() == false)
+ {
+ char const * const basic = "Basic ";
+ if (strncmp(auth.c_str(), basic, strlen(basic)) == 0)
+ {
+ auth.erase(0, strlen(basic));
+ if (auth != authConf)
+ {
+ sendError(client, 407, request, sendContent, "Proxy-Authentication doesn't match", headers);
+ return false;
+ }
+ }
+ else
+ {
+ std::list<std::string> headers;
+ headers.push_back("Proxy-Authenticate: Basic");
+ sendError(client, 407, request, sendContent, "Unsupported Proxy-Authentication Scheme", headers);
+ return false;
+ }
+ }
}
- else if (absolute.find("path") == std::string::npos)
+ else if (absolute.find("path") == std::string::npos && APT::String::Startswith(filename, "/_config/") == false)
{
sendError(client, 400, request, sendContent, "Request is absolutePath, but configured to not accept that", headers);
return false;
}
+ if (APT::String::Startswith(filename, "/_config/") == false)
+ {
+ std::string const authConf = _config->Find("aptwebserver::authorization", "");
+ std::string auth = LookupTag(request, "Authorization", "");
+ if (authConf.empty() != auth.empty())
+ {
+ if (auth.empty())
+ sendError(client, 401, request, sendContent, "Server requires authentication", headers);
+ else
+ sendError(client, 401, request, sendContent, "Client wants to authenticate to server, but server doesn't need it", headers);
+ return false;
+ }
+ if (authConf.empty() == false)
+ {
+ char const * const basic = "Basic ";
+ if (strncmp(auth.c_str(), basic, strlen(basic)) == 0)
+ {
+ auth.erase(0, strlen(basic));
+ if (auth != authConf)
+ {
+ sendError(client, 401, request, sendContent, "Authentication doesn't match", headers);
+ return false;
+ }
+ }
+ else
+ {
+ headers.push_back("WWW-Authenticate: Basic");
+ sendError(client, 401, request, sendContent, "Unsupported Authentication Scheme", headers);
+ return false;
+ }
+ }
+ }
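+ // e.g. started as: aptwebserver --authorization dXNlcjpwYXNz
+ // (base64 of "user:pass", an illustrative credential); clients must then
+ // send a matching "Authorization: Basic dXNlcjpwYXNz" header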
+
size_t paramspos = filename.find('?');
if (paramspos != std::string::npos)
{
std::vector<std::string> parts, std::list<std::string> &headers)
{
size_t const pcount = parts.size();
+ for (size_t i = 0; i < pcount; ++i)
+ parts[i] = DeQuoteString(parts[i]);
if (pcount == 4 && parts[1] == "set")
{
_config->Set(parts[2], parts[3]);
{
int client = *((int*)(voidclient));
std::clog << "ACCEPT client " << client << std::endl;
- std::vector<std::string> messages;
bool closeConnection = false;
- std::list<std::string> headers;
- while (closeConnection == false && ReadMessages(client, messages))
+ while (closeConnection == false)
{
- // if we announced a closing, do the close
- if (std::find(headers.begin(), headers.end(), std::string("Connection: close")) != headers.end())
+ std::vector<std::string> messages;
+ if (ReadMessages(client, messages) == false)
break;
- headers.clear();
+
+ std::list<std::string> headers;
for (std::vector<std::string>::const_iterator m = messages.begin();
m != messages.end() && closeConnection == false; ++m) {
+ // if we announced a closing in a previous response, do the close now
+ if (std::find(headers.begin(), headers.end(), std::string("Connection: close")) != headers.end())
+ {
+ closeConnection = true;
+ break;
+ }
+ headers.clear();
+
std::clog << ">>> REQUEST from " << client << " >>>" << std::endl << *m
<< std::endl << "<<<<<<<<<<<<<<<<" << std::endl;
std::string filename;
if (filesize > filestart)
{
data.Skip(filestart);
- std::ostringstream contentlength;
- contentlength << "Content-Length: " << (filesize - filestart);
- headers.push_back(contentlength.str());
+ // make sure to send Content-Range before Content-Length
+ // as a regression test for LP: #1445239
std::ostringstream contentrange;
contentrange << "Content-Range: bytes " << filestart << "-"
<< filesize - 1 << "/" << filesize;
headers.push_back(contentrange.str());
+ std::ostringstream contentlength;
+ contentlength << "Content-Length: " << (filesize - filestart);
+ headers.push_back(contentlength.str());
sendHead(client, 206, headers);
if (sendContent == true)
sendFile(client, headers, data);
}
else
{
- std::ostringstream contentrange;
- contentrange << "Content-Range: bytes */" << filesize;
- headers.push_back(contentrange.str());
+ if (_config->FindB("aptwebserver::support::content-range", true) == true)
+ {
+ std::ostringstream contentrange;
+ contentrange << "Content-Range: bytes */" << filesize;
+ headers.push_back(contentrange.str());
+ }
sendError(client, 416, *m, sendContent, "", headers);
break;
}
else
sendError(client, 404, *m, sendContent, "", headers);
}
+
+ // if we announced a closing in the last response, do the close now
+ if (std::find(headers.begin(), headers.end(), std::string("Connection: close")) != headers.end())
+ closeConnection = true;
+
+ if (_error->PendingError() == true)
+ break;
_error->DumpErrors(std::cerr);
- messages.clear();
}
+ _error->DumpErrors(std::cerr);
close(client);
std::clog << "CLOSE client " << client << std::endl;
return NULL;
CommandLine::Args Args[] = {
{0, "port", "aptwebserver::port", CommandLine::HasArg},
{0, "request-absolute", "aptwebserver::request::absolute", CommandLine::HasArg},
+ {0, "authorization", "aptwebserver::authorization", CommandLine::HasArg},
+ {0, "proxy-authorization", "aptwebserver::proxy-authorization", CommandLine::HasArg},
{'c',"config-file",0,CommandLine::ConfigFile},
{'o',"option",0,CommandLine::ArbItem},
{0,0,0,0}
std::clog << "Serving ANY file on port: " << port << std::endl;
- int const slaves = _config->FindB("aptwebserver::slaves", SOMAXCONN);
+ int const slaves = _config->FindI("aptwebserver::slaves", SOMAXCONN);
+ std::cerr << "SLAVES: " << slaves << std::endl;
listen(sock, slaves);
/*}}}*/