// -*- mode: cpp; mode: fold -*-
// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
/* ######################################################################

   Package Cache Generator - Generator for the cache structure.

   This builds the cache structure from the abstract package list parser.

   ##################################################################### */
// Include Files							/*{{{*/
#define APT_COMPATIBILITY 986

#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/error.h>
#include <apt-pkg/version.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/macros.h>

#include <apt-pkg/tagfile.h>

#include <apti18n.h>

#include <vector>

#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

typedef vector<pkgIndexFile *>::iterator FileIterator;
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that it is written to the disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
                    Map(*pMap), Cache(pMap,false), Progress(Prog),
                    FoundFileDeps(0)
{
   CurrentFile = 0;
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
         return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
   }
   else
   {
      // Map directly from the existing file
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
         _error->Error(_("Cache has an incompatible versioning system"));
         return;
      }
   }

   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
// CacheGenerator::~pkgCacheGenerator - Destructor			/*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   avoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()
{
   if (_error->PendingError() == true)
      return;
   if (Map.Sync() == false)
      return;

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
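// Illustrative sketch (added in this edit, not part of the original file):
// how a caller typically drives the generator. The constructor above marks
// the header dirty and syncs it; the destructor clears the flag again, so a
// crash in between leaves a cache that is recognisably incomplete. The
// function name and parameters below are hypothetical.
#if 0
static bool BuildFromOneIndex(DynamicMMap &Map, OpProgress &Prog,
                              pkgCacheGenerator::ListParser &Parser)
{
   pkgCacheGenerator Gen(&Map,&Prog);          // sets HeaderP->Dirty = true
   if (_error->PendingError() == true)
      return false;
   return Gen.MergeList(Parser);               // ~pkgCacheGenerator syncs and clears Dirty
}
#endif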
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine. */
bool pkgCacheGenerator::MergeList(ListParser &List,
                                  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
         genArch = APT::Configuration::getArchitectures();
         if (genArch.size() != 1)
            genArch.push_back("all");
      } else
         genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
           arch != genArch.end(); ++arch)
      {
      // Get a pointer to the package structure
      pkgCache::PkgIterator Pkg;
      if (NewPackage(Pkg, PackageName, *arch) == false)
         return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      /* Get a pointer to the version structure. We know the list is sorted
         so we use that fact in the search. Insertion of new versions is
         done with correct sorting */
      string Version = List.Version();
      if (Version.empty() == true)
      {
         // we first process the package, then the descriptions
         // (this has the bonus that we get MMap error when we run out
         //  of MMap space)
         if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
                                 PackageName.c_str());

         // Find the right version to write the description
         MD5SumValue CurMd5 = List.Description_md5();
         pkgCache::VerIterator Ver = Pkg.VersionList();
         map_ptrloc *LastVer = &Pkg->VersionList;

         for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
         {
            pkgCache::DescIterator Desc = Ver.DescriptionList();
            map_ptrloc *LastDesc = &Ver->DescriptionList;
            bool duplicate=false;

            // don't add a new description if we have one for the given
            // md5 && language
            for ( ; Desc.end() == false; Desc++)
               if (MD5SumValue(Desc.md5()) == CurMd5 &&
                   Desc.LanguageCode() == List.DescriptionLanguage())
                  duplicate=true;
            if (duplicate == true)
               continue;

            for (Desc = Ver.DescriptionList();
                 Desc.end() == false;
                 LastDesc = &Desc->NextDesc, Desc++)
            {
               if (MD5SumValue(Desc.md5()) == CurMd5)
               {
                  // Add new description
                  *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
                  Desc->ParentPkg = Pkg.Index();

                  if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
                     return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
                  break;
               }
            }
         }

         continue;
      }

      pkgCache::VerIterator Ver = Pkg.VersionList();
      map_ptrloc *LastVer = &Pkg->VersionList;
      int Res = 1;
      unsigned long const Hash = List.VersionHash();
      for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
      {
         Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
         // Version is higher than current version - insert here
         if (Res > 0)
            break;
         // Versionstrings are equal - is hash also equal?
         if (Res == 0 && Ver->Hash == Hash)
            break;
         // proceed with the next till we have either the right
         // or we found another version (which will be lower)
      }

      /* We already have a version for this item, record that we saw it */
      if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
      {
         if (List.UsePackage(Pkg,Ver) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
                                 PackageName.c_str());

         if (NewFileVer(Ver,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
                                 PackageName.c_str());

         // Read only a single record and return
         if (OutVer != 0)
         {
            *OutVer = Ver;
            FoundFileDeps |= List.HasFileDeps();
            return true;
         }

         continue;
      }

      // Add a new version
      *LastVer = NewVersion(Ver,Version,*LastVer);
      Ver->ParentPkg = Pkg.Index();
      Ver->Hash = Hash;

      if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
         return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
                              PackageName.c_str());

      if (List.UsePackage(Pkg,Ver) == false)
         return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
                              PackageName.c_str());

      if (NewFileVer(Ver,List) == false)
         return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
                              PackageName.c_str());

      // Read only a single record and return
      if (OutVer != 0)
      {
         *OutVer = Ver;
         FoundFileDeps |= List.HasFileDeps();
         return true;
      }

      /* Record the Description data. Description data always exist in
         Packages and Translation-* files. */
      pkgCache::DescIterator Desc = Ver.DescriptionList();
      map_ptrloc *LastDesc = &Ver->DescriptionList;

      // Skip to the end of description set
      for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

      // Add new description
      *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
      Desc->ParentPkg = Pkg.Index();

      if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
         return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      } // end of the per-architecture loop
   }

   FoundFileDeps |= List.HasFileDeps();

   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
                             "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
                             "this APT is capable of."));

   return true;
}
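// Illustrative sketch (added in this edit, not part of the original file):
// the OutVer parameter turns MergeList() into a single-record merge -- as the
// "Read only a single record and return" branches above show, the first
// version that is found or added is handed back and parsing stops. The
// wrapper name below is hypothetical.
#if 0
static bool MergeSingleRecord(pkgCacheGenerator &Gen,
                              pkgCacheGenerator::ListParser &Parser,
                              pkgCache::VerIterator &Ver)
{
   return Gen.MergeList(Parser, &Ver);
}
#endif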
// CacheGenerator::MergeFileProvides - Merge file provides		/*{{{*/
// ---------------------------------------------------------------------
/* If we found any file depends while parsing the main list we need to
   resolve them. Since it is undesired to load the entire list of files
   into the cache as virtual packages we do a two stage effort. MergeList
   identifies the file depends and this creates Provides for them by
   re-parsing all the indexes. */
bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string PackageName = List.Package();
      if (PackageName.empty() == true)
         continue;
      string Version = List.Version();
      if (Version.empty() == true)
         continue;

      pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
      if (Pkg.end() == true)
         return _error->Error(_("Error occurred while processing %s (FindPkg)"),
                                PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      unsigned long Hash = List.VersionHash();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      for (; Ver.end() == false; Ver++)
      {
         // compare the version string itself, not the pointers
         if (Ver->Hash == Hash && Version == Ver.VerStr())
         {
            if (List.CollectFileProvides(Cache,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
            break;
         }
      }

      if (Ver.end() == true)
         _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
   }

   return true;
}
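// Illustrative sketch (added in this edit, not part of the original file):
// the two-stage effort described above, as seen by a caller. Stage one
// (MergeList) records whether file dependencies were encountered; only then
// is the same index parsed a second time with a fresh parser so that
// MergeFileProvides() can attach the matching Provides. In the real flow
// BuildCache() further below does this across all index files; the function
// name here is hypothetical.
#if 0
static bool TwoStageMerge(pkgCacheGenerator &Gen,
                          pkgCacheGenerator::ListParser &FirstPass,
                          pkgCacheGenerator::ListParser &SecondPass)
{
   if (Gen.MergeList(FirstPass) == false)        // stage 1: packages, versions, deps
      return false;
   if (Gen.HasFileDeps() == true)
      return Gen.MergeFileProvides(SecondPass);  // stage 2: provides for file deps
   return true;
}
#endif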
// CacheGenerator::NewGroup - Add a new group				/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
   Grp = Cache.FindGrp(Name);
   if (Grp.end() == false)
      return true;

   // Get a structure
   unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
   if (unlikely(Group == 0))
      return false;

   Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
   Grp->Name = Map.WriteString(Name);
   if (unlikely(Grp->Name == 0))
      return false;

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
   Cache.HeaderP->GrpHashTable[Hash] = Group;

   Cache.HeaderP->GroupCount++;

   return true;
}
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
                                   const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
   Cache.HeaderP->PkgHashTable[Hash] = Package;

   // remember the packages in the group
   Grp->FirstPackage = Package;
   if (Grp->LastPackage == 0)
      Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
// CacheGenerator::NewFileVer - Create a new File<->Version association	/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
                                   ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
   if (VerFile == 0)
      return false;

   pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
   VF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Ver->FileList;
   for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
      Last = &V->NextFile;
   VF->NextFile = *Last;
   *Last = VF.Index();

   VF->Offset = List.Offset();
   VF->Size = List.Size();
   if (Cache.HeaderP->MaxVerFileSize < VF->Size)
      Cache.HeaderP->MaxVerFileSize = VF->Size;
   Cache.HeaderP->VerFileCount++;

   return true;
}
// CacheGenerator::NewVersion - Create a new Version			/*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
                                            const string &VerStr,
                                            unsigned long Next)
{
   // Get a structure
   unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;

   // Fill it in
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = Map.WriteString(VerStr);
   if (Ver->VerStr == 0)
      return 0;

   return Version;
}
// CacheGenerator::NewFileDesc - Create a new File<->Desc association	/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
                                    ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
   if (DescFile == 0)
      return false;

   pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
   DF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Desc->FileList;
   for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
      Last = &D->NextFile;

   DF->NextFile = *Last;
   *Last = DF.Index();

   DF->Offset = List.Offset();
   DF->Size = List.Size();
   if (Cache.HeaderP->MaxDescFileSize < DF->Size)
      Cache.HeaderP->MaxDescFileSize = DF->Size;
   Cache.HeaderP->DescFileCount++;

   return true;
}
// CacheGenerator::NewDescription - Create a new Description		/*{{{*/
// ---------------------------------------------------------------------
/* This puts a description structure in the linked list */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
                                             const string &Lang,
                                             const MD5SumValue &md5sum,
                                             map_ptrloc Next)
{
   // Get a structure
   map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
   if (Description == 0)
      return 0;

   // Fill it in
   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = Map.WriteString(Lang);
   Desc->md5sum = Map.WriteString(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
      return 0;

   return Description;
}
// CacheGenerator::FinishCache - do various finish operations		/*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in our groups?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
         string const PkgName = G.Name();
         for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
            if (strcmp(P.Arch(),"all") == 0)
               continue;
            pkgCache::PkgIterator allPkg;
            for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
               string const Arch = V.Arch(true);
               map_ptrloc *OldDepLast = NULL;
               /* MultiArch handling introduces a lot of implicit Dependencies:
                  - MultiArch: same → Co-Installable if they have the same version
                  - Architecture: all → Need to be Co-Installable for internal reasons
                  - All others conflict with all other group members */
               bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
                                       V->MultiArch == pkgCache::Version::Same);
               if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
                  allPkg = G.FindPkg("all");
               for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
                  if (*A == Arch)
                     continue;
                  /* We allow only one installed arch at the time
                     per group, therefore each group member conflicts
                     with all other group members */
                  pkgCache::PkgIterator D = G.FindPkg(*A);
                  if (D.end() == true)
                     continue;
                  if (coInstall == true) {
                     // Replaces: ${self}:other ( << ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::Replaces,
                                OldDepLast);
                     // Breaks: ${self}:other (!= ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     if (V->MultiArch == pkgCache::Version::All) {
                        // Depend on ${self}:all which does depend on nothing
                        NewDepends(allPkg, V, V.VerStr(),
                                   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
                                   OldDepLast);
                     }
                  } else {
                     // Conflicts: ${self}:other
                     NewDepends(D, V, "",
                                pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
                                OldDepLast);
                  }
               }
            }
         }
      }
   }
   return true;
}
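// Worked example (added in this edit, not part of the original file): with
// archs = {amd64, i386} and a package "libfoo" at version 1.0 marked
// "MultiArch: same", the loop above records for libfoo:amd64 the implicit
// relations
//    Replaces: libfoo:i386 (<< 1.0)
//    Breaks:   libfoo:i386 (<< 1.0)
//    Breaks:   libfoo:i386 (>> 1.0)
// so both architectures can only be co-installed at exactly the same
// version. A version that is neither "MultiArch: same" nor
// "Architecture: all" instead gets a plain "Conflicts: libfoo:i386".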
// CacheGenerator::NewDepends - Create a dependency element		/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
                                   pkgCache::VerIterator &Ver,
                                   string const &Version,
                                   unsigned int const &Op,
                                   unsigned int const &Type,
                                   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*    for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
         if (I->Version != 0 && I.TargetVer() == Version)
            Dep->Version = I->Version;*/
      if (Dep->Version == 0)
         if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
            return false;
   }

   // Link it to the package
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
         OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;

   return true;
}
// ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
   needed and handles also the caching of the old endpoint */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
                                               const string &PackageName,
                                               const string &Arch,
                                               const string &Version,
                                               unsigned int Op,
                                               unsigned int Type)
{
   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
         return false;
   }

   // Is it a file dependency?
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {
      OldDepLast = NULL;
      OldDepVer = Ver;
   }

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
}
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
                                                const string &PkgName,
                                                const string &PkgArch,
                                                const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
                                   const pkgIndexFile &Index,
                                   unsigned long Flags)
{
   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);

   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
                                                 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
         break;
   }

   // Match
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
// CheckValidity - Check that a cache is up-to-date			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
         std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
         std::clog << "Errors are pending or Map is empty()" << std::endl;
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
         std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
         if (Debug == true)
            std::clog << "Has NO packages" << std::endl;
         continue;
      }

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
         _error->WarningE("stat",_("Couldn't stat source package list %s"),
                          (*Start)->Describe().c_str());
#endif
         if (Debug == true)
            std::clog << "file doesn't exist" << std::endl;
         continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
         if (Debug == true)
            std::clog << "FindInCache returned end-Pointer" << std::endl;
         return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
         std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
         if (Debug == true)
            std::clog << "File with ID " << I << " wasn't visited" << std::endl;
         return false;
      }

   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
         std::clog << "Validity failed because of pending errors:" << std::endl;
         _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }

   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
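// Note added in this edit (not part of the original file): the
// Debug::pkgCacheGen option read above can be switched on from the command
// line to see why a cache was considered invalid, e.g.
//    apt-get update -o Debug::pkgCacheGen=1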
// ComputeSize - Compute the total size of a bunch of files		/*{{{*/
// ---------------------------------------------------------------------
/* Size is kind of an abstract notion that is only used for the progress
   meter */
static unsigned long ComputeSize(FileIterator Start,FileIterator End)
{
   unsigned long TotalSize = 0;
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
         continue;
      TotalSize += (*Start)->Size();
   }
   return TotalSize;
}
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
/* */
static bool BuildCache(pkgCacheGenerator &Gen,
                       OpProgress &Progress,
                       unsigned long &CurrentSize,unsigned long TotalSize,
                       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
         continue;

      if ((*I)->Exists() == false)
         continue;

      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
         _error->Warning("Duplicate sources.list entry %s",
                         (*I)->Describe().c_str());
         continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
         return false;
   }

   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
         unsigned long Size = (*I)->Size();
         Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
         CurrentSize += Size;
         if ((*I)->MergeFileProvides(Gen,Progress) == false)
            return false;
      }
   }

   return true;
}
// MakeStatusCache - Construct the status cache			/*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
   index files from the sources list and all local ones) is ready
   to be mmaped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
                        MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector<pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
           j != Indexes->end();
           j++)
         Files.push_back (*j);
   }

   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
         Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   Progress.OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
         std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
      std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
         return false;
      if (Debug == true)
         std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
         std::clog << "Open memory Map (not filebased)" << std::endl;
   }

   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
                     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
          || SCacheF.Read((unsigned char *)Map->Data() + alloc,
                          SCacheF.Size()) == false)
         return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin(),Files.begin()+EndOfSource) == false)
         return false;

      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
         FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
         if (_error->PendingError() == true)
            return false;

         fchmod(SCacheF.Fd(),0644);

         // Write out the main data
         if (SCacheF.Write(Map->Data(),Map->Size()) == false)
            return _error->Error(_("IO Error saving source cache"));

         // Write out the proper header
         Gen.GetCache().HeaderP->Dirty = false;
         if (SCacheF.Seek(0) == false ||
             SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
            return _error->Error(_("IO Error saving source cache"));
         Gen.GetCache().HeaderP->Dirty = true;
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
         delete Map.UnGuard();
         *OutMap = new MMap(*CacheF,0);
      }
      else
      {
         *OutMap = Map.UnGuard();
      }
   }

   return true;
}
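// Illustrative sketch (added in this edit, not part of the original file):
// a typical caller builds the status cache and then maps it as a pkgCache.
// With AllowMem == true a non-root user falls back to the in-memory build
// when the cache files are not writable. The function name is hypothetical.
#if 0
static pkgCache *OpenStatusCache(pkgSourceList &List, OpProgress &Prog)
{
   MMap *OutMap = 0;
   if (pkgMakeStatusCache(List,Prog,&OutMap,true) == false)
      return 0;
   return new pkgCache(OutMap);   // caller owns both the cache and the map
}
#endif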
// MakeOnlyStatusCache - Build a cache with just the status files	/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
{
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
   vector<pkgIndexFile *> Files;
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;

   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

   // Build the status cache
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),&Progress);
   if (_error->PendingError() == true)
      return false;
   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                  Files.begin()+EndOfSource,Files.end()) == false)
      return false;

   // FIXME: move me to a better place
   Gen.FinishCache(Progress);

   if (_error->PendingError() == true)
      return false;
   *OutMap = Map.UnGuard();

   return true;
}