#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
+#include <apt-pkg/macros.h>
#include <apt-pkg/tagfile.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
-#include <system.h>
/*}}}*/
typedef vector<pkgIndexFile *>::iterator FileIterator;
+template <typename Iter> std::vector<Iter*> pkgCacheGenerator::Dynamic<Iter>::toReMap;
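+// Every Dynamic<Iterator> instance registers a pointer to its iterator in
+// this list so that pkgCacheGenerator::ReMap can fix the iterators up
+// whenever the underlying mmap is relocated.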
// CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
// ---------------------------------------------------------------------
// Starting header
*Cache.HeaderP = pkgCache::Header();
- Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
- Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
+ map_ptrloc const idxVerSysName = WriteStringInMap(_system->VS->Label);
+ Cache.HeaderP->VerSysName = idxVerSysName;
+ map_ptrloc const idxArchitecture = WriteStringInMap(_config->Find("APT::Architecture"));
+ Cache.HeaderP->Architecture = idxArchitecture;
+ if (unlikely(idxVerSysName == 0 || idxArchitecture == 0))
+ return;
Cache.ReMap();
}
else
Map.Sync(0,sizeof(pkgCache::Header));
}
/*}}}*/
+void pkgCacheGenerator::ReMap(void const * const oldMap, void const * const newMap) {/*{{{*/
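+   // The DynamicMMap may have been relocated while growing: rebase the raw
+   // pointers we keep and every registered dynamic iterator from the old
+   // base address to the new one.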
+ if (oldMap == newMap)
+ return;
+
+ Cache.ReMap(false);
+
+ CurrentFile += (pkgCache::PackageFile*) newMap - (pkgCache::PackageFile*) oldMap;
+
+ for (size_t i = 0; i < _count(UniqHash); ++i)
+ if (UniqHash[i] != 0)
+ UniqHash[i] += (pkgCache::StringItem*) newMap - (pkgCache::StringItem*) oldMap;
+
+ for (std::vector<pkgCache::GrpIterator*>::const_iterator i = Dynamic<pkgCache::GrpIterator>::toReMap.begin();
+ i != Dynamic<pkgCache::GrpIterator>::toReMap.end(); ++i)
+ (*i)->ReMap(oldMap, newMap);
+ for (std::vector<pkgCache::PkgIterator*>::const_iterator i = Dynamic<pkgCache::PkgIterator>::toReMap.begin();
+ i != Dynamic<pkgCache::PkgIterator>::toReMap.end(); ++i)
+ (*i)->ReMap(oldMap, newMap);
+ for (std::vector<pkgCache::VerIterator*>::const_iterator i = Dynamic<pkgCache::VerIterator>::toReMap.begin();
+ i != Dynamic<pkgCache::VerIterator>::toReMap.end(); ++i)
+ (*i)->ReMap(oldMap, newMap);
+ for (std::vector<pkgCache::DepIterator*>::const_iterator i = Dynamic<pkgCache::DepIterator>::toReMap.begin();
+ i != Dynamic<pkgCache::DepIterator>::toReMap.end(); ++i)
+ (*i)->ReMap(oldMap, newMap);
+ for (std::vector<pkgCache::DescIterator*>::const_iterator i = Dynamic<pkgCache::DescIterator>::toReMap.begin();
+ i != Dynamic<pkgCache::DescIterator>::toReMap.end(); ++i)
+ (*i)->ReMap(oldMap, newMap);
+ for (std::vector<pkgCache::PrvIterator*>::const_iterator i = Dynamic<pkgCache::PrvIterator>::toReMap.begin();
+ i != Dynamic<pkgCache::PrvIterator>::toReMap.end(); ++i)
+ (*i)->ReMap(oldMap, newMap);
+ for (std::vector<pkgCache::PkgFileIterator*>::const_iterator i = Dynamic<pkgCache::PkgFileIterator>::toReMap.begin();
+ i != Dynamic<pkgCache::PkgFileIterator>::toReMap.end(); ++i)
+ (*i)->ReMap(oldMap, newMap);
+} /*}}}*/
+// CacheGenerator::WriteStringInMap /*{{{*/
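+// ---------------------------------------------------------------------
+/* Strings are written through this wrapper because WriteString can grow
+   and thereby relocate the map; all registered pointers are rebased via
+   ReMap() before the new string offset is returned. */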
+map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String,
+ const unsigned long &Len) {
+ void const * const oldMap = Map.Data();
+ map_ptrloc const index = Map.WriteString(String, Len);
+ if (index != 0)
+ ReMap(oldMap, Map.Data());
+ return index;
+}
+ /*}}}*/
+// CacheGenerator::WriteStringInMap /*{{{*/
+map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String) {
+ void const * const oldMap = Map.Data();
+ map_ptrloc const index = Map.WriteString(String);
+ if (index != 0)
+ ReMap(oldMap, Map.Data());
+ return index;
+}
+ /*}}}*/
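+// CacheGenerator::AllocateInMap
+// ---------------------------------------------------------------------
+/* Allocations can likewise grow and relocate the map, so registered
+   pointers are rebased via ReMap() before the offset is returned. */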
+map_ptrloc pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
+ void const * const oldMap = Map.Data();
+ map_ptrloc const index = Map.Allocate(size);
+ if (index != 0)
+ ReMap(oldMap, Map.Data());
+ return index;
+}
+ /*}}}*/
// CacheGenerator::MergeList - Merge the package list /*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
std::vector<string> genArch;
if (List.ArchitectureAll() == true) {
genArch = APT::Configuration::getArchitectures();
- genArch.push_back("all");
+ if (genArch.size() != 1)
+ genArch.push_back("all");
} else
genArch.push_back(List.Architecture());
{
// Get a pointer to the package structure
pkgCache::PkgIterator Pkg;
+ Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
if (NewPackage(Pkg, PackageName, *arch) == false)
return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
Counter++;
// we first process the package, then the descriptions
// (this has the bonus that we get MMap error when we run out
// of MMap space)
- if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
+ pkgCache::VerIterator Ver(Cache);
+ Dynamic<pkgCache::VerIterator> DynVer(Ver);
+ if (List.UsePackage(Pkg, Ver) == false)
return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
PackageName.c_str());
// Find the right version to write the description
MD5SumValue CurMd5 = List.Description_md5();
- pkgCache::VerIterator Ver = Pkg.VersionList();
- map_ptrloc *LastVer = &Pkg->VersionList;
+ Ver = Pkg.VersionList();
- for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
+ for (; Ver.end() == false; ++Ver)
{
pkgCache::DescIterator Desc = Ver.DescriptionList();
+ Dynamic<pkgCache::DescIterator> DynDesc(Desc);
map_ptrloc *LastDesc = &Ver->DescriptionList;
bool duplicate=false;
if (MD5SumValue(Desc.md5()) == CurMd5)
{
// Add new description
- *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
+ void const * const oldMap = Map.Data();
+ map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
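+	      // NewDescription may have grown and relocated the map; rebase
+	      // LastDesc so it points into the new mapping before writing.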
+ if (oldMap != Map.Data())
+ LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
+ *LastDesc = descindex;
Desc->ParentPkg = Pkg.Index();
if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
}
pkgCache::VerIterator Ver = Pkg.VersionList();
+ Dynamic<pkgCache::VerIterator> DynVer(Ver);
map_ptrloc *LastVer = &Pkg->VersionList;
+ void const * oldMap = Map.Data();
int Res = 1;
+ unsigned long const Hash = List.VersionHash();
for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
{
Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
- if (Res >= 0)
+	 // Version is higher than the current one - insert here
+	 if (Res > 0)
+	    break;
+	 // Version strings are equal - is the hash also equal?
+	 if (Res == 0 && Ver->Hash == Hash)
	    break;
+	 // proceed with the next version until we have either found the
+	 // right one or a lower one (and with it the insertion point)
}
-
- /* We already have a version for this item, record that we
- saw it */
- unsigned long Hash = List.VersionHash();
- if (Res == 0 && Ver->Hash == Hash)
+
+ /* We already have a version for this item, record that we saw it */
+ if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
{
if (List.UsePackage(Pkg,Ver) == false)
return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
}
continue;
- }
-
- // Skip to the end of the same version set.
- if (Res == 0)
- {
- for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
- {
- Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
- if (Res != 0)
- break;
- }
}
// Add a new version
- *LastVer = NewVersion(Ver,Version,*LastVer);
+ map_ptrloc const verindex = NewVersion(Ver,Version,*LastVer);
+ if (verindex == 0 && _error->PendingError())
+ return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
+ PackageName.c_str(), 1);
+
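+      // NewVersion may have relocated the map; rebase LastVer before
+      // writing the new index through it.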
+ if (oldMap != Map.Data())
+ LastVer += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
+ *LastVer = verindex;
Ver->ParentPkg = Pkg.Index();
Ver->Hash = Hash;
- if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
- return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
- PackageName.c_str());
+ if (List.NewVersion(Ver) == false)
+ return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
+ PackageName.c_str(), 2);
if (List.UsePackage(Pkg,Ver) == false)
return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
PackageName.c_str());
if (NewFileVer(Ver,List) == false)
- return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
- PackageName.c_str());
+ return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
+ PackageName.c_str(), 3);
// Read only a single record and return
if (OutVer != 0)
/* Record the Description data. Description data always exist in
Packages and Translation-* files. */
pkgCache::DescIterator Desc = Ver.DescriptionList();
+ Dynamic<pkgCache::DescIterator> DynDesc(Desc);
map_ptrloc *LastDesc = &Ver->DescriptionList;
-
+
// Skip to the end of description set
for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
// Add new description
- *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
+ oldMap = Map.Data();
+ map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
+ if (oldMap != Map.Data())
+ LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
+ *LastDesc = descindex;
Desc->ParentPkg = Pkg.Index();
if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
continue;
pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
+ Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
if (Pkg.end() == true)
return _error->Error(_("Error occurred while processing %s (FindPkg)"),
PackageName.c_str());
unsigned long Hash = List.VersionHash();
pkgCache::VerIterator Ver = Pkg.VersionList();
+ Dynamic<pkgCache::VerIterator> DynVer(Ver);
for (; Ver.end() == false; Ver++)
{
if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
// CacheGenerator::NewGroup - Add a new group /*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
-bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
- Grp = Cache.FindGrp(Name);
- if (Grp.end() == false)
- return true;
-
- // Get a structure
- unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
- if (unlikely(Group == 0))
- return false;
+bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
+{
+ Grp = Cache.FindGrp(Name);
+ if (Grp.end() == false)
+ return true;
- Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
- Grp->Name = Map.WriteString(Name);
- if (unlikely(Grp->Name == 0))
- return false;
+ // Get a structure
+ map_ptrloc const Group = AllocateInMap(sizeof(pkgCache::Group));
+ if (unlikely(Group == 0))
+ return false;
- // Insert it into the hash table
- unsigned long const Hash = Cache.Hash(Name);
- Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
- Cache.HeaderP->GrpHashTable[Hash] = Group;
+ Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
+ map_ptrloc const idxName = WriteStringInMap(Name);
+ if (unlikely(idxName == 0))
+ return false;
+ Grp->Name = idxName;
- Cache.HeaderP->GroupCount++;
+ // Insert it into the hash table
+ unsigned long const Hash = Cache.Hash(Name);
+ Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
+ Cache.HeaderP->GrpHashTable[Hash] = Group;
- return true;
+ Grp->ID = Cache.HeaderP->GroupCount++;
+ return true;
}
/*}}}*/
// CacheGenerator::NewPackage - Add a new package /*{{{*/
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
const string &Arch) {
pkgCache::GrpIterator Grp;
+ Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
if (unlikely(NewGroup(Grp, Name) == false))
return false;
return true;
// Get a structure
- unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
+ map_ptrloc const Package = AllocateInMap(sizeof(pkgCache::Package));
if (unlikely(Package == 0))
return false;
Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
- // Insert it into the hash table
- unsigned long const Hash = Cache.Hash(Name);
- Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
- Cache.HeaderP->PkgHashTable[Hash] = Package;
-
- // remember the packages in the group
- Grp->FirstPackage = Package;
- if (Grp->LastPackage == 0)
- Grp->LastPackage = Package;
+ // Insert the package into our package list
+ if (Grp->FirstPackage == 0) // the group is new
+ {
+ // Insert it into the hash table
+ unsigned long const Hash = Cache.Hash(Name);
+ Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
+ Cache.HeaderP->PkgHashTable[Hash] = Package;
+ Grp->FirstPackage = Package;
+ }
+ else // Group the Packages together
+ {
+ // this package is the new last package
+ pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
+ Pkg->NextPackage = LastPkg->NextPackage;
+ LastPkg->NextPackage = Package;
+ }
+ Grp->LastPackage = Package;
// Set the name, arch and the ID
Pkg->Name = Grp->Name;
Pkg->Group = Grp.Index();
- Pkg->Arch = WriteUniqString(Arch.c_str());
- if (unlikely(Pkg->Arch == 0))
+ map_ptrloc const idxArch = WriteUniqString(Arch.c_str());
+ if (unlikely(idxArch == 0))
return false;
+ Pkg->Arch = idxArch;
Pkg->ID = Cache.HeaderP->PackageCount++;
return true;
return true;
// Get a structure
- unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
+ map_ptrloc const VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
if (VerFile == 0)
return 0;
unsigned long Next)
{
// Get a structure
- unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
+ map_ptrloc const Version = AllocateInMap(sizeof(pkgCache::Version));
if (Version == 0)
return 0;
Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
Ver->NextVer = Next;
Ver->ID = Cache.HeaderP->VersionCount++;
- Ver->VerStr = Map.WriteString(VerStr);
- if (Ver->VerStr == 0)
+ map_ptrloc const idxVerStr = WriteStringInMap(VerStr);
+ if (unlikely(idxVerStr == 0))
return 0;
+ Ver->VerStr = idxVerStr;
return Version;
}
return true;
// Get a structure
- unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
+ map_ptrloc const DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
if (DescFile == 0)
return false;
map_ptrloc Next)
{
// Get a structure
- map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
+ map_ptrloc const Description = AllocateInMap(sizeof(pkgCache::Description));
if (Description == 0)
return 0;
Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
Desc->NextDesc = Next;
Desc->ID = Cache.HeaderP->DescriptionCount++;
- Desc->language_code = Map.WriteString(Lang);
- Desc->md5sum = Map.WriteString(md5sum.Value());
- if (Desc->language_code == 0 || Desc->md5sum == 0)
+ map_ptrloc const idxlanguage_code = WriteStringInMap(Lang);
+ map_ptrloc const idxmd5sum = WriteStringInMap(md5sum.Value());
+ if (unlikely(idxlanguage_code == 0 || idxmd5sum == 0))
return 0;
+ Desc->language_code = idxlanguage_code;
+ Desc->md5sum = idxmd5sum;
return Description;
}
// CacheGenerator::FinishCache - do various finish operations /*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
-bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
- // FIXME: add progress reporting for this operation
- // Do we have different architectures in your groups ?
- vector<string> archs = APT::Configuration::getArchitectures();
- if (archs.size() > 1) {
- // Create Conflicts in between the group
- for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
- string const PkgName = G.Name();
- for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
- if (strcmp(P.Arch(),"all") == 0)
- continue;
- pkgCache::PkgIterator allPkg;
- for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
- string const Arch = V.Arch(true);
- map_ptrloc *OldDepLast = NULL;
- /* MultiArch handling introduces a lot of implicit Dependencies:
- - MultiArch: same → Co-Installable if they have the same version
- - Architecture: all → Need to be Co-Installable for internal reasons
- - All others conflict with all other group members */
- bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
- V->MultiArch == pkgCache::Version::Same);
- if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
- allPkg = G.FindPkg("all");
- for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
- if (*A == Arch)
- continue;
- /* We allow only one installed arch at the time
- per group, therefore each group member conflicts
- with all other group members */
- pkgCache::PkgIterator D = G.FindPkg(*A);
- if (D.end() == true)
- continue;
- if (coInstall == true) {
- // Replaces: ${self}:other ( << ${binary:Version})
- NewDepends(D, V, V.VerStr(),
- pkgCache::Dep::Less, pkgCache::Dep::Replaces,
- OldDepLast);
- // Breaks: ${self}:other (!= ${binary:Version})
- NewDepends(D, V, V.VerStr(),
- pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
- OldDepLast);
- NewDepends(D, V, V.VerStr(),
- pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
- OldDepLast);
- if (V->MultiArch == pkgCache::Version::All) {
- // Depend on ${self}:all which does depend on nothing
- NewDepends(allPkg, V, V.VerStr(),
- pkgCache::Dep::Equals, pkgCache::Dep::Depends,
- OldDepLast);
- }
- } else {
- // Conflicts: ${self}:other
- NewDepends(D, V, "",
- pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
- OldDepLast);
- }
- }
- }
+bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
+{
+ // FIXME: add progress reporting for this operation
+   // Do we have different architectures in our groups?
+ vector<string> archs = APT::Configuration::getArchitectures();
+ if (archs.size() > 1)
+ {
+ // Create Conflicts in between the group
+ pkgCache::GrpIterator G = GetCache().GrpBegin();
+ Dynamic<pkgCache::GrpIterator> DynG(G);
+ for (; G.end() != true; G++)
+ {
+ string const PkgName = G.Name();
+ pkgCache::PkgIterator P = G.PackageList();
+ Dynamic<pkgCache::PkgIterator> DynP(P);
+ for (; P.end() != true; P = G.NextPkg(P))
+ {
+ pkgCache::PkgIterator allPkg;
+ Dynamic<pkgCache::PkgIterator> DynallPkg(allPkg);
+ pkgCache::VerIterator V = P.VersionList();
+ Dynamic<pkgCache::VerIterator> DynV(V);
+ for (; V.end() != true; V++)
+ {
+ char const * const Arch = P.Arch();
+ map_ptrloc *OldDepLast = NULL;
+ /* MultiArch handling introduces a lot of implicit Dependencies:
+ - MultiArch: same → Co-Installable if they have the same version
+ - Architecture: all → Need to be Co-Installable for internal reasons
+ - All others conflict with all other group members */
+ bool const coInstall = ((V->MultiArch == pkgCache::Version::All && strcmp(Arch, "all") != 0) ||
+ V->MultiArch == pkgCache::Version::Same);
+ if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
+ allPkg = G.FindPkg("all");
+ for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
+ {
+ if (*A == Arch)
+ continue;
+	       /* We allow only one installed arch at a time
+		  per group, therefore each group member conflicts
+		  with all other group members */
+ pkgCache::PkgIterator D = G.FindPkg(*A);
+ Dynamic<pkgCache::PkgIterator> DynD(D);
+ if (D.end() == true)
+ continue;
+ if (coInstall == true)
+ {
+ // Replaces: ${self}:other ( << ${binary:Version})
+ NewDepends(D, V, V.VerStr(),
+ pkgCache::Dep::Less, pkgCache::Dep::Replaces,
+ OldDepLast);
+ // Breaks: ${self}:other (!= ${binary:Version})
+ NewDepends(D, V, V.VerStr(),
+ pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
+ OldDepLast);
+ if (V->MultiArch == pkgCache::Version::All)
+ {
+ // Depend on ${self}:all which does depend on nothing
+ NewDepends(allPkg, V, V.VerStr(),
+ pkgCache::Dep::Equals, pkgCache::Dep::Depends,
+ OldDepLast);
+ }
+ } else {
+ // Conflicts: ${self}:other
+ if (strcmp(Arch, "all") == 0) {
+ NewDepends(D, V, V.VerStr(),
+ pkgCache::Dep::NotEquals, pkgCache::Dep::Conflicts,
+ OldDepLast);
+ } else {
+ NewDepends(D, V, "",
+ pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
+ OldDepLast);
}
- }
- }
- return true;
+ }
+ }
+ }
+ }
+ }
+ }
+ return true;
}
/*}}}*/
// CacheGenerator::NewDepends - Create a dependency element /*{{{*/
unsigned int const &Type,
map_ptrloc *OldDepLast)
{
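+   // Remember the current map base: the allocation below may relocate the
+   // map, in which case a caller-supplied OldDepLast still points into the
+   // old mapping and is rebased further down.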
+ void const * const oldMap = Map.Data();
// Get a structure
- unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
+ map_ptrloc const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
if (unlikely(Dependency == 0))
return false;
// Fill it in
pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
+ Dynamic<pkgCache::DepIterator> DynDep(Dep);
Dep->ParentVer = Ver.Index();
Dep->Type = Type;
Dep->CompareOp = Op;
/* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
if (I->Version != 0 && I.TargetVer() == Version)
Dep->Version = I->Version;*/
- if (Dep->Version == 0)
- if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
+ if (Dep->Version == 0) {
+ map_ptrloc const index = WriteStringInMap(Version);
+ if (unlikely(index == 0))
return false;
+ Dep->Version = index;
+ }
}
// Link it to the package
OldDepLast = &Ver->DependsList;
for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
OldDepLast = &D->NextDepends;
- }
+ } else if (oldMap != Map.Data())
+ OldDepLast += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
Dep->NextDepends = *OldDepLast;
*OldDepLast = Dep.Index();
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
needed and handles also the caching of the old endpoint */
-bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
+bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator &Ver,
const string &PackageName,
const string &Arch,
const string &Version,
unsigned int Type)
{
pkgCache::GrpIterator Grp;
+ Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
return false;
// Locate the target package
pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
+ Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
if (Pkg.end() == true) {
if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
return false;
// ListParser::NewProvides - Create a Provides element /*{{{*/
// ---------------------------------------------------------------------
/* */
-bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
+bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator &Ver,
const string &PkgName,
const string &PkgArch,
const string &Version)
return true;
// Get a structure
- unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
+ map_ptrloc const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
if (unlikely(Provides == 0))
return false;
Cache.HeaderP->ProvidesCount++;
// Fill it in
pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
+ Dynamic<pkgCache::PrvIterator> DynPrv(Prv);
Prv->Version = Ver.Index();
Prv->NextPkgProv = Ver->ProvidesList;
Ver->ProvidesList = Prv.Index();
// Locate the target package
pkgCache::PkgIterator Pkg;
+ Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
return false;
unsigned long Flags)
{
// Get some space for the structure
- CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
- if (CurrentFile == Cache.PkgFileP)
+ map_ptrloc const idxFile = AllocateInMap(sizeof(*CurrentFile));
+ if (unlikely(idxFile == 0))
return false;
-
+ CurrentFile = Cache.PkgFileP + idxFile;
+
// Fill it in
- CurrentFile->FileName = Map.WriteString(File);
- CurrentFile->Site = WriteUniqString(Site);
+ map_ptrloc const idxFileName = WriteStringInMap(File);
+ map_ptrloc const idxSite = WriteUniqString(Site);
+ if (unlikely(idxFileName == 0 || idxSite == 0))
+ return false;
+ CurrentFile->FileName = idxFileName;
+ CurrentFile->Site = idxSite;
CurrentFile->NextFile = Cache.HeaderP->FileList;
CurrentFile->Flags = Flags;
CurrentFile->ID = Cache.HeaderP->PackageFileCount;
- CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
+ map_ptrloc const idxIndexType = WriteUniqString(Index.GetType()->Label);
+ if (unlikely(idxIndexType == 0))
+ return false;
+ CurrentFile->IndexType = idxIndexType;
PkgFileName = File;
Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
Cache.HeaderP->PackageFileCount++;
- if (CurrentFile->FileName == 0)
- return false;
-
if (Progress != 0)
Progress->SubProgress(Index.Size());
return true;
}
// Get a structure
- unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
+ void const * const oldMap = Map.Data();
+ map_ptrloc const Item = AllocateInMap(sizeof(pkgCache::StringItem));
if (Item == 0)
return 0;
+ map_ptrloc const idxString = WriteStringInMap(S,Size);
+ if (unlikely(idxString == 0))
+ return 0;
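+   // Both the allocation and the string write may have relocated the map,
+   // so rebase the hash bucket slot (Last) and the chain iterator (I).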
+ if (oldMap != Map.Data()) {
+ Last += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
+ I += (pkgCache::StringItem*) Map.Data() - (pkgCache::StringItem*) oldMap;
+ }
+ *Last = Item;
+
// Fill in the structure
pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
ItemP->NextItem = I - Cache.StringItemP;
- *Last = Item;
- ItemP->String = Map.WriteString(S,Size);
- if (ItemP->String == 0)
- return 0;
-
+ ItemP->String = idxString;
+
Bucket = ItemP;
return ItemP->String;
}
static bool CheckValidity(const string &CacheFile, FileIterator Start,
FileIterator End,MMap **OutMap = 0)
{
+ bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
// No file, certainly invalid
if (CacheFile.empty() == true || FileExists(CacheFile) == false)
+ {
+ if (Debug == true)
+ std::clog << "CacheFile doesn't exist" << std::endl;
return false;
-
+ }
+
// Map it
FileFd CacheF(CacheFile,FileFd::ReadOnly);
SPtr<MMap> Map = new MMap(CacheF,0);
pkgCache Cache(Map);
if (_error->PendingError() == true || Map->Size() == 0)
{
+ if (Debug == true)
+	 std::clog << "Errors are pending or Map is empty" << std::endl;
_error->Discard();
return false;
}
SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
for (; Start != End; Start++)
- {
+ {
+ if (Debug == true)
+ std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
if ((*Start)->HasPackages() == false)
+ {
+ if (Debug == true)
+ std::clog << "Has NO packages" << std::endl;
continue;
+ }
if ((*Start)->Exists() == false)
{
_error->WarningE("stat",_("Couldn't stat source package list %s"),
(*Start)->Describe().c_str());
#endif
+ if (Debug == true)
+ std::clog << "file doesn't exist" << std::endl;
continue;
}
// FindInCache is also expected to do an IMS check.
pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
if (File.end() == true)
+ {
+ if (Debug == true)
+ std::clog << "FindInCache returned end-Pointer" << std::endl;
return false;
+ }
Visited[File->ID] = true;
+ if (Debug == true)
+ std::clog << "with ID " << File->ID << " is valid" << std::endl;
}
for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
if (Visited[I] == false)
+ {
+ if (Debug == true)
+	 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
return false;
+ }
if (_error->PendingError() == true)
{
+ if (Debug == true)
+ {
+ std::clog << "Validity failed because of pending errors:" << std::endl;
+ _error->DumpErrors();
+ }
_error->Discard();
return false;
}
// ---------------------------------------------------------------------
/* */
static bool BuildCache(pkgCacheGenerator &Gen,
- OpProgress &Progress,
+ OpProgress *Progress,
unsigned long &CurrentSize,unsigned long TotalSize,
FileIterator Start, FileIterator End)
{
}
unsigned long Size = (*I)->Size();
- Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
+ if (Progress != NULL)
+ Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
CurrentSize += Size;
if ((*I)->Merge(Gen,Progress) == false)
if (Gen.HasFileDeps() == true)
{
- Progress.Done();
+ if (Progress != NULL)
+ Progress->Done();
TotalSize = ComputeSize(Start, End);
CurrentSize = 0;
for (I = Start; I != End; I++)
{
unsigned long Size = (*I)->Size();
- Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
+ if (Progress != NULL)
+ Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
CurrentSize += Size;
if ((*I)->MergeFileProvides(Gen,Progress) == false)
return false;
return true;
}
/*}}}*/
-// MakeStatusCache - Construct the status cache /*{{{*/
+// CacheGenerator::CreateDynamicMMap - load an mmap with configuration options /*{{{*/
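+// ---------------------------------------------------------------------
+/* Size and growth of the cache mmap are configurable through
+   APT::Cache-Start, APT::Cache-Grow and APT::Cache-Limit; the Moveable
+   flag allows the map to be relocated while it grows. */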
+DynamicMMap* pkgCacheGenerator::CreateDynamicMMap(FileFd *CacheF, unsigned long Flags) {
+ unsigned long const MapStart = _config->FindI("APT::Cache-Start", 24*1024*1024);
+ unsigned long const MapGrow = _config->FindI("APT::Cache-Grow", 1*1024*1024);
+ unsigned long const MapLimit = _config->FindI("APT::Cache-Limit", 0);
+ Flags |= MMap::Moveable;
+ if (_config->FindB("APT::Cache-Fallback", false) == true)
+ Flags |= MMap::Fallback;
+ if (CacheF != NULL)
+ return new DynamicMMap(*CacheF, Flags, MapStart, MapGrow, MapLimit);
+ else
+ return new DynamicMMap(Flags, MapStart, MapGrow, MapLimit);
+}
+ /*}}}*/
+// CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
index files from the sources list and all local ones) is ready
   the cache will be stored there. This is pretty much mandatory if you
are using AllowMem. AllowMem lets the function be run as non-root
where it builds the cache 'fast' into a memory buffer. */
-bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
+__deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
+ MMap **OutMap, bool AllowMem)
+ { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
+bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
MMap **OutMap,bool AllowMem)
{
- unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
+ bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
vector<pkgIndexFile *> Files;
for (vector<metaIndex *>::const_iterator i = List.begin();
Files.push_back (*j);
}
- unsigned long EndOfSource = Files.size();
+ unsigned long const EndOfSource = Files.size();
if (_system->AddStatusFiles(Files) == false)
return false;
// Decide if we can write to the files..
- string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
- string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
-
+ string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
+ string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
+
+ // ensure the cache directory exists
+ if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
+ {
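+      // Dir::Cache normally ends in "apt/": strip that last component so
+      // CreateDirectory() creates it along with the cache file directories.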
+ string dir = _config->FindDir("Dir::Cache");
+ size_t const len = dir.size();
+ if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
+ dir = dir.substr(0, len - 5);
+ if (CacheFile.empty() == false)
+ CreateDirectory(dir, flNotFile(CacheFile));
+ if (SrcCacheFile.empty() == false)
+ CreateDirectory(dir, flNotFile(SrcCacheFile));
+ }
+
// Decide if we can write to the cache
bool Writeable = false;
if (CacheFile.empty() == false)
else
if (SrcCacheFile.empty() == false)
Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
-
+ if (Debug == true)
+ std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
+
if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
-
- Progress.OverallProgress(0,1,1,_("Reading package lists"));
-
+
+ if (Progress != NULL)
+ Progress->OverallProgress(0,1,1,_("Reading package lists"));
+
// Cache is OK, Fin.
if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
{
- Progress.OverallProgress(1,1,1,_("Reading package lists"));
+ if (Progress != NULL)
+ Progress->OverallProgress(1,1,1,_("Reading package lists"));
+ if (Debug == true)
+ std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
return true;
}
+ else if (Debug == true)
+ std::clog << "pkgcache.bin is NOT valid" << std::endl;
/* At this point we know we need to reconstruct the package cache,
begin. */
if (Writeable == true && CacheFile.empty() == false)
{
unlink(CacheFile.c_str());
- CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
+ CacheF = new FileFd(CacheFile,FileFd::WriteAtomic);
fchmod(CacheF->Fd(),0644);
- Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
+ Map = CreateDynamicMMap(CacheF, MMap::Public);
if (_error->PendingError() == true)
return false;
+ if (Debug == true)
+ std::clog << "Open filebased MMap" << std::endl;
}
else
{
// Just build it in memory..
- Map = new DynamicMMap(0,MapSize);
+ Map = CreateDynamicMMap(NULL);
+ if (Debug == true)
+ std::clog << "Open memory Map (not filebased)" << std::endl;
}
// Lets try the source cache.
if (CheckValidity(SrcCacheFile,Files.begin(),
Files.begin()+EndOfSource) == true)
{
+ if (Debug == true)
+ std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
// Preload the map with the source cache
FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
- unsigned long alloc = Map->RawAllocate(SCacheF.Size());
+ unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
if ((alloc == 0 && _error->PendingError())
|| SCacheF.Read((unsigned char *)Map->Data() + alloc,
SCacheF.Size()) == false)
return false;
TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
-
+
// Build the status cache
- pkgCacheGenerator Gen(Map.Get(),&Progress);
+ pkgCacheGenerator Gen(Map.Get(),Progress);
if (_error->PendingError() == true)
return false;
if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
}
else
{
+ if (Debug == true)
+ std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
TotalSize = ComputeSize(Files.begin(),Files.end());
// Build the source cache
- pkgCacheGenerator Gen(Map.Get(),&Progress);
+ pkgCacheGenerator Gen(Map.Get(),Progress);
if (_error->PendingError() == true)
return false;
if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
// Write it back
if (Writeable == true && SrcCacheFile.empty() == false)
{
- FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
+ FileFd SCacheF(SrcCacheFile,FileFd::WriteAtomic);
if (_error->PendingError() == true)
return false;
// FIXME: move me to a better place
Gen.FinishCache(Progress);
}
+ if (Debug == true)
+ std::clog << "Caches are ready for shipping" << std::endl;
if (_error->PendingError() == true)
return false;
return true;
}
/*}}}*/
-// MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
+// CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
// ---------------------------------------------------------------------
/* */
-bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
+__deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
+ { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
+bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
{
- unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
vector<pkgIndexFile *> Files;
unsigned long EndOfSource = Files.size();
if (_system->AddStatusFiles(Files) == false)
return false;
-
- SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
+
+ SPtr<DynamicMMap> Map = CreateDynamicMMap(NULL);
unsigned long CurrentSize = 0;
unsigned long TotalSize = 0;
TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
// Build the status cache
- Progress.OverallProgress(0,1,1,_("Reading package lists"));
- pkgCacheGenerator Gen(Map.Get(),&Progress);
+ if (Progress != NULL)
+ Progress->OverallProgress(0,1,1,_("Reading package lists"));
+ pkgCacheGenerator Gen(Map.Get(),Progress);
if (_error->PendingError() == true)
return false;
if (BuildCache(Gen,Progress,CurrentSize,TotalSize,