// -*- mode: cpp; mode: fold -*-
// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
/* ######################################################################

   Package Cache Generator - Generator for the cache structure.

   This builds the cache structure from the abstract package list parser.

   ##################################################################### */
// Include Files                                                        /*{{{*/
#define APT_COMPATIBILITY 986

#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/error.h>
#include <apt-pkg/version.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/tagfile.h>

#include <apti18n.h>

#include <vector>

#include <sys/stat.h>
#include <unistd.h>
                                                                        /*}}}*/
typedef vector<pkgIndexFile *>::iterator FileIterator;
// CacheGenerator::pkgCacheGenerator - Constructor                     /*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that is written to the disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
                    Map(*pMap), Cache(pMap,false), Progress(Prog),
                    FoundFileDeps(0)
{
   CurrentFile = 0;
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
         return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = WriteStringInMap(_system->VS->Label);
      Cache.HeaderP->Architecture = WriteStringInMap(_config->Find("APT::Architecture"));
   }
   else
   {
      // Map directly from the existing file
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
         _error->Error(_("Cache has an incompatible versioning system"));
         return;
      }
   }

   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
                                                                        /*}}}*/
// CacheGenerator::~pkgCacheGenerator - Destructor                     /*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   avoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()
{
   if (_error->PendingError() == true)
      return;
   if (Map.Sync() == false)
      return;

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
                                                                        /*}}}*/
// CacheGenerator::WriteStringInMap                                    /*{{{*/
unsigned long pkgCacheGenerator::WriteStringInMap(const char *String,
                                        const unsigned long &Len) {
   return Map.WriteString(String, Len);
}
                                                                        /*}}}*/
// CacheGenerator::WriteStringInMap                                    /*{{{*/
unsigned long pkgCacheGenerator::WriteStringInMap(const char *String) {
   return Map.WriteString(String);
}
                                                                        /*}}}*/
unsigned long pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
   return Map.Allocate(size);
}
                                                                        /*}}}*/
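// Illustrative sketch: these helpers hand back offsets into the growing cache
// MMap rather than pointers, and callers store the returned offset in a header
// or struct field.  A minimal, hypothetical use in the style of the
// constructor above (0 signals an allocation/write failure):
//
//    unsigned long const Label = WriteStringInMap(_system->VS->Label);
//    if (unlikely(Label == 0))
//       return false;
//    Cache.HeaderP->VerSysName = Label;
//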
// CacheGenerator::MergeList - Merge the package list                  /*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine. */
bool pkgCacheGenerator::MergeList(ListParser &List,
                                  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
         genArch = APT::Configuration::getArchitectures();
         if (genArch.size() != 1)
            genArch.push_back("all");
      } else
         genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
           arch != genArch.end(); ++arch)
      {
      // Get a pointer to the package structure
      pkgCache::PkgIterator Pkg;
      if (NewPackage(Pkg, PackageName, *arch) == false)
         return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      /* Get a pointer to the version structure. We know the list is sorted
         so we use that fact in the search. Insertion of new versions is
         done with correct sorting */
      string Version = List.Version();
      if (Version.empty() == true)
      {
         // we first process the package, then the descriptions
         // (this has the bonus that we get MMap error when we run out
         //  of MMap space)
         if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
                                 PackageName.c_str());

         // Find the right version to write the description
         MD5SumValue CurMd5 = List.Description_md5();
         pkgCache::VerIterator Ver = Pkg.VersionList();
         map_ptrloc *LastVer = &Pkg->VersionList;

         for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
         {
            pkgCache::DescIterator Desc = Ver.DescriptionList();
            map_ptrloc *LastDesc = &Ver->DescriptionList;
            bool duplicate=false;

            // don't add a new description if we have one for the given
            // md5 && language
            for ( ; Desc.end() == false; Desc++)
               if (MD5SumValue(Desc.md5()) == CurMd5 &&
                   Desc.LanguageCode() == List.DescriptionLanguage())
                  duplicate=true;
            if (duplicate == true)
               continue;

            for (Desc = Ver.DescriptionList();
                 Desc.end() == false;
                 LastDesc = &Desc->NextDesc, Desc++)
            {
               if (MD5SumValue(Desc.md5()) == CurMd5)
               {
                  // Add new description
                  *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
                  Desc->ParentPkg = Pkg.Index();

                  if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
                     return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
                  break;
               }
            }
         }

         continue;
      }

      pkgCache::VerIterator Ver = Pkg.VersionList();
      map_ptrloc *LastVer = &Pkg->VersionList;
      int Res = 1;
      unsigned long const Hash = List.VersionHash();
      for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
      {
         Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
         // Version is higher as current version - insert here
         if (Res > 0)
            break;
         // Versionstrings are equal - is hash also equal?
         if (Res == 0 && Ver->Hash == Hash)
            break;
         // proceed with the next till we have either the right
         // or we found another version (which will be lower)
      }

      /* We already have a version for this item, record that we saw it */
      if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
      {
         if (List.UsePackage(Pkg,Ver) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
                                 PackageName.c_str());

         if (NewFileVer(Ver,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
                                 PackageName.c_str());

         // Read only a single record and return
         if (OutVer != 0)
         {
            *OutVer = Ver;
            FoundFileDeps |= List.HasFileDeps();
            return true;
         }

         continue;
      }

      // Add a new version
      *LastVer = NewVersion(Ver,Version,*LastVer);
      Ver->ParentPkg = Pkg.Index();
      Ver->Hash = Hash;

      if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
         return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
                              PackageName.c_str());

      if (List.UsePackage(Pkg,Ver) == false)
         return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
                              PackageName.c_str());

      if (NewFileVer(Ver,List) == false)
         return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
                              PackageName.c_str());

      // Read only a single record and return
      if (OutVer != 0)
      {
         *OutVer = Ver;
         FoundFileDeps |= List.HasFileDeps();
         return true;
      }

      /* Record the Description data. Description data always exist in
         Packages and Translation-* files. */
      pkgCache::DescIterator Desc = Ver.DescriptionList();
      map_ptrloc *LastDesc = &Ver->DescriptionList;

      // Skip to the end of description set
      for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

      // Add new description
      *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
      Desc->ParentPkg = Pkg.Index();

      if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
         return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
                             "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
                             "this APT is capable of."));
   return true;
}
                                                                        /*}}}*/
// CacheGenerator::MergeFileProvides - Merge file provides             /*{{{*/
// ---------------------------------------------------------------------
/* If we found any file depends while parsing the main list we need to
   resolve them. Since it is undesired to load the entire list of files
   into the cache as virtual packages we do a two stage effort. MergeList
   identifies the file depends and this creates Provides for them by
   re-parsing all the indexes. */
bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;
      string Version = List.Version();
      if (Version.empty() == true)
         continue;

      pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
      if (Pkg.end() == true)
         return _error->Error(_("Error occurred while processing %s (FindPkg)"),
                              PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      unsigned long Hash = List.VersionHash();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      for (; Ver.end() == false; Ver++)
      {
         if (Ver->Hash == Hash && Version == Ver.VerStr())
         {
            if (List.CollectFileProvides(Cache,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
            break;
         }
      }

      if (Ver.end() == true)
         _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
   }

   return true;
}
                                                                        /*}}}*/
// CacheGenerator::NewGroup - Add a new group                          /*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
{
   Grp = Cache.FindGrp(Name);
   if (Grp.end() == false)
      return true;

   // Get a structure
   unsigned long const Group = AllocateInMap(sizeof(pkgCache::Group));
   if (unlikely(Group == 0))
      return false;

   Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
   Grp->Name = WriteStringInMap(Name);
   if (unlikely(Grp->Name == 0))
      return false;

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
   Cache.HeaderP->GrpHashTable[Hash] = Group;

   Grp->ID = Cache.HeaderP->GroupCount++;
   return true;
}
                                                                        /*}}}*/
// CacheGenerator::NewPackage - Add a new package                      /*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
                                   const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = AllocateInMap(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert the package into our package list
   if (Grp->FirstPackage == 0) // the group is new
   {
      // Insert it into the hash table
      unsigned long const Hash = Cache.Hash(Name);
      Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
      Cache.HeaderP->PkgHashTable[Hash] = Package;
      Grp->FirstPackage = Package;
   }
   else // Group the Packages together
   {
      // this package is the new last package
      pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
      Pkg->NextPackage = LastPkg->NextPackage;
      LastPkg->NextPackage = Package;
   }
   Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
                                                                        /*}}}*/
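// Illustrative note: packages of the same name but different architectures
// share one Group; NewPackage() chains them through Pkg->NextPackage so that
// Grp.FindPkg() and friends can walk the arch-specific entries, e.g. for a
// hypothetical group "libc6":
//
//    pkgCache::GrpIterator Grp = Cache.FindGrp("libc6");
//    for (pkgCache::PkgIterator P = Grp.PackageList();
//         P.end() == false; P = Grp.NextPkg(P))
//       { /* one entry per architecture */ }
//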
// CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
                                   ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
   if (VerFile == 0)
      return false;

   pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
   VF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Ver->FileList;
   for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
      Last = &V->NextFile;
   VF->NextFile = *Last;
   *Last = VF.Index();

   VF->Offset = List.Offset();
   VF->Size = List.Size();
   if (Cache.HeaderP->MaxVerFileSize < VF->Size)
      Cache.HeaderP->MaxVerFileSize = VF->Size;
   Cache.HeaderP->VerFileCount++;

   return true;
}
                                                                        /*}}}*/
// CacheGenerator::NewVersion - Create a new Version                   /*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
                                            const string &VerStr,
                                            unsigned long Next)
{
   // Get a structure
   unsigned long Version = AllocateInMap(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;

   // Fill it in
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = WriteStringInMap(VerStr);
   if (Ver->VerStr == 0)
      return 0;

   return Version;
}
                                                                        /*}}}*/
// CacheGenerator::NewFileDesc - Create a new File<->Desc association  /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
                                    ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
   if (DescFile == 0)
      return false;

   pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
   DF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Desc->FileList;
   for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
      Last = &D->NextFile;
   DF->NextFile = *Last;
   *Last = DF.Index();

   DF->Offset = List.Offset();
   DF->Size = List.Size();
   if (Cache.HeaderP->MaxDescFileSize < DF->Size)
      Cache.HeaderP->MaxDescFileSize = DF->Size;
   Cache.HeaderP->DescFileCount++;

   return true;
}
                                                                        /*}}}*/
// CacheGenerator::NewDescription - Create a new Description           /*{{{*/
// ---------------------------------------------------------------------
/* This puts a description structure in the linked list */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
                                             const string &Lang,
                                             const MD5SumValue &md5sum,
                                             map_ptrloc Next)
{
   // Get a structure
   map_ptrloc Description = AllocateInMap(sizeof(pkgCache::Description));
   if (Description == 0)
      return 0;

   // Fill it in
   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = WriteStringInMap(Lang);
   Desc->md5sum = WriteStringInMap(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
      return 0;

   return Description;
}
                                                                        /*}}}*/
// CacheGenerator::FinishCache - do various finish operations          /*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
{
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in your groups ?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1)
   {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++)
      {
         string const PkgName = G.Name();
         for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
         {
            if (strcmp(P.Arch(),"all") == 0)
               continue;

            pkgCache::PkgIterator allPkg;
            for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++)
            {
               string const Arch = V.Arch(true);
               map_ptrloc *OldDepLast = NULL;
               /* MultiArch handling introduces a lot of implicit Dependencies:
                  - MultiArch: same → Co-Installable if they have the same version
                  - Architecture: all → Need to be Co-Installable for internal reasons
                  - All others conflict with all other group members */
               bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
                                       V->MultiArch == pkgCache::Version::Same);
               if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
                  allPkg = G.FindPkg("all");
               for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
               {
                  if (*A == Arch)
                     continue;
                  /* We allow only one installed arch at the time
                     per group, therefore each group member conflicts
                     with all other group members */
                  pkgCache::PkgIterator D = G.FindPkg(*A);
                  if (D.end() == true)
                     continue;
                  if (coInstall == true)
                  {
                     // Replaces: ${self}:other ( << ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::Replaces,
                                OldDepLast);
                     // Breaks: ${self}:other (!= ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     if (V->MultiArch == pkgCache::Version::All)
                     {
                        // Depend on ${self}:all which does depend on nothing
                        NewDepends(allPkg, V, V.VerStr(),
                                   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
                                   OldDepLast);
                     }
                  } else {
                     // Conflicts: ${self}:other
                     NewDepends(D, V, "",
                                pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
                                OldDepLast);
                  }
               }
            }
         }
      }
   }
   return true;
}
                                                                        /*}}}*/
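// Illustrative note: for two configured architectures the loop above turns a
// non-coinstallable package "foo" into, roughly, the following implicit
// relations (a sketch of the generated dependencies, not literal control-file
// output; "V" stands for the binary version):
//
//    foo:amd64  Conflicts: foo:i386                         (MultiArch: none)
//    foo:amd64  Breaks: foo:i386 (!= V), Replaces: foo:i386 (<< V)
//                                                  (MultiArch: same or all)
//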
// CacheGenerator::NewDepends - Create a dependency element            /*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
                                   pkgCache::VerIterator &Ver,
                                   string const &Version,
                                   unsigned int const &Op,
                                   unsigned int const &Type,
                                   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*    for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
         if (I->Version != 0 && I.TargetVer() == Version)
            Dep->Version = I->Version;*/
      if (Dep->Version == 0)
         if (unlikely((Dep->Version = WriteStringInMap(Version)) == 0))
            return false;
   }

   // Link it to the package
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
         OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;

   return true;
}
                                                                        /*}}}*/
// ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
   needed and handles also the caching of the old endpoint */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
                                               const string &PackageName,
                                               const string &Arch,
                                               const string &Version,
                                               unsigned int Op,
                                               unsigned int Type)
{
   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
         return false;
   }

   // Is it a file dependency?
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {
      OldDepLast = NULL;
      OldDepVer = Ver;
   }

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
}
                                                                        /*}}}*/
// ListParser::NewProvides - Create a Provides element                 /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
                                                const string &PkgName,
                                                const string &PkgArch,
                                                const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
                                                                        /*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed   /*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
                                   const pkgIndexFile &Index,
                                   unsigned long Flags)
{
   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + AllocateInMap(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in
   CurrentFile->FileName = WriteStringInMap(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);

   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
                                                                        /*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string          /*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
                                                 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
         break;
   }

   // Match
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = AllocateInMap(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = WriteStringInMap(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
                                                                        /*}}}*/
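// Illustrative note: WriteUniqString interns a string in the cache, so
// repeated calls with the same text hand back the same offset; NewPackage()
// and SelectFile() rely on this for fields like the architecture label, e.g.
//
//    unsigned long a = WriteUniqString("amd64");
//    unsigned long b = WriteUniqString("amd64");
//    // a == b - both name the single "amd64" entry in the string list
//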
// CheckValidity - Check that a cache is up-to-date                    /*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
         std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
         std::clog << "Errors are pending or Map is empty()" << std::endl;
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
         std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
         if (Debug == true)
            std::clog << "Has NO packages" << std::endl;
         continue;
      }

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
         _error->WarningE("stat",_("Couldn't stat source package list %s"),
                          (*Start)->Describe().c_str());
#endif
         if (Debug == true)
            std::clog << "file doesn't exist" << std::endl;
         continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
         if (Debug == true)
            std::clog << "FindInCache returned end-Pointer" << std::endl;
         return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
         std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
         if (Debug == true)
            std::clog << "File with ID " << I << " wasn't visited" << std::endl;
         return false;
      }

   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
         std::clog << "Validity failed because of pending errors:" << std::endl;
         _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }

   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
                                                                        /*}}}*/
// ComputeSize - Compute the total size of a bunch of files            /*{{{*/
// ---------------------------------------------------------------------
/* Size is kind of an abstract notion that is only used for the progress
   meter */
static unsigned long ComputeSize(FileIterator Start,FileIterator End)
{
   unsigned long TotalSize = 0;
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
         continue;
      TotalSize += (*Start)->Size();
   }
   return TotalSize;
}
                                                                        /*}}}*/
// BuildCache - Merge the list of index files into the cache           /*{{{*/
// ---------------------------------------------------------------------
/* */
static bool BuildCache(pkgCacheGenerator &Gen,
                       OpProgress *Progress,
                       unsigned long &CurrentSize,unsigned long TotalSize,
                       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
         continue;

      if ((*I)->Exists() == false)
         continue;

      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
         _error->Warning("Duplicate sources.list entry %s",
                         (*I)->Describe().c_str());
         continue;
      }

      unsigned long Size = (*I)->Size();
      if (Progress != NULL)
         Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
         return false;
   }

   if (Gen.HasFileDeps() == true)
   {
      if (Progress != NULL)
         Progress->Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
         unsigned long Size = (*I)->Size();
         if (Progress != NULL)
            Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
         CurrentSize += Size;
         if ((*I)->MergeFileProvides(Gen,Progress) == false)
            return false;
      }
   }

   return true;
}
                                                                        /*}}}*/
// CacheGenerator::MakeStatusCache - Construct the status cache        /*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
   index files from the sources list and all local ones) is ready
   to be mmaped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
__deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
                        MMap **OutMap, bool AllowMem)
   { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
                        MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
           j != Indexes->end();
           j++)
         Files.push_back (*j);
   }

   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // ensure the cache directory exists
   if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
   {
      string dir = _config->FindDir("Dir::Cache");
      size_t const len = dir.size();
      if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
         dir = dir.substr(0, len - 5);
      if (CacheFile.empty() == false)
         CreateDirectory(dir, flNotFile(CacheFile));
      if (SrcCacheFile.empty() == false)
         CreateDirectory(dir, flNotFile(SrcCacheFile));
   }

   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
         Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   if (Progress != NULL)
      Progress->OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      if (Progress != NULL)
         Progress->OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
         std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
      std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
         return false;
      if (Debug == true)
         std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
         std::clog << "Open memory Map (not filebased)" << std::endl;
   }

   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
                     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
          || SCacheF.Read((unsigned char *)Map->Data() + alloc,
                          SCacheF.Size()) == false)
         return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin(),Files.begin()+EndOfSource) == false)
         return false;

      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
         FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
         if (_error->PendingError() == true)
            return false;

         fchmod(SCacheF.Fd(),0644);

         // Write out the main data
         if (SCacheF.Write(Map->Data(),Map->Size()) == false)
            return _error->Error(_("IO Error saving source cache"));
         SCacheF.Sync();

         // Write out the proper header
         Gen.GetCache().HeaderP->Dirty = false;
         if (SCacheF.Seek(0) == false ||
             SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
            return _error->Error(_("IO Error saving source cache"));
         Gen.GetCache().HeaderP->Dirty = true;
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
         delete Map.UnGuard();
         *OutMap = new MMap(*CacheF,0);
      }
      else
      {
         *OutMap = Map.UnGuard();
      }
   }

   return true;
}
                                                                        /*}}}*/
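// Illustrative sketch: a front-end typically obtains its cache through this
// entry point rather than driving the generator itself; assuming a parsed
// pkgSourceList, a hypothetical caller looks roughly like
//
//    pkgSourceList SrcList;
//    OpTextProgress Prog;
//    MMap *OutMap = 0;
//    if (SrcList.ReadMainList() == false ||
//        pkgCacheGenerator::MakeStatusCache(SrcList,&Prog,&OutMap,true) == false)
//       return false;                     // details are in _error
//    pkgCache Cache(OutMap);              // ready to use
//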
// CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
// ---------------------------------------------------------------------
/* */
__deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
   { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
{
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
   vector<pkgIndexFile *> Files;
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;

   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

   // Build the status cache
   if (Progress != NULL)
      Progress->OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),Progress);
   if (_error->PendingError() == true)
      return false;
   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                  Files.begin()+EndOfSource,Files.end()) == false)
      return false;

   // FIXME: move me to a better place
   Gen.FinishCache(Progress);

   if (_error->PendingError() == true)
      return false;
   *OutMap = Map.UnGuard();

   return true;
}
                                                                        /*}}}*/