// git.saurik.com Git - apt.git/blob - apt-pkg/pkgcachegen.cc
// commit 577e2f1d41d93406d991613e78eebc2d8d121f1d
// -*- mode: cpp; mode: fold -*-
// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
/* ######################################################################

   Package Cache Generator - Generator for the cache structure.

   This builds the cache structure from the abstract package list parser.

   ##################################################################### */
// Include Files							/*{{{*/
#define APT_COMPATIBILITY 986

#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/error.h>
#include <apt-pkg/version.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/macros.h>

#include <apt-pkg/tagfile.h>

typedef vector<pkgIndexFile *>::iterator FileIterator;
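
// FileIterator simply walks a caller-supplied vector of index files. Usage
// sketch, assuming a populated vector plus a generator Gen and progress meter
// Progress as used by BuildCache further down:
//   vector<pkgIndexFile *> Files;
//   for (FileIterator I = Files.begin(); I != Files.end(); ++I)
//      if ((*I)->HasPackages() == true)
//         (*I)->Merge(Gen,Progress);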
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that is written to the disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
		    Map(*pMap), Cache(pMap,false), Progress(Prog),

   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)

   // Setup the map interface..
   Cache.HeaderP = (pkgCache::Header *)Map.Data();
   if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)

   Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

   *Cache.HeaderP = pkgCache::Header();
   Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
   Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));

   // Map directly from the existing file

   Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
   if (Cache.VS != _system->VS)

      _error->Error(_("Cache has an incompatible versioning system"));

   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
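
// Construction sketch: pkgMakeOnlyStatusCache below builds the generator on top
// of an in-memory DynamicMMap in exactly this way (the error check is assumed):
//   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
//   pkgCacheGenerator Gen(Map.Get(),&Progress);
//   if (_error->PendingError() == true)
//      return false;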
// CacheGenerator::~pkgCacheGenerator - Destructor			/*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   avoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()

   if (_error->PendingError() == true)

   if (Map.Sync() == false)

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine. */
bool pkgCacheGenerator::MergeList(ListParser &List,
				  pkgCache::VerIterator *OutVer)

   unsigned int Counter = 0;
   while (List.Step() == true)

      string const PackageName = List.Package();
      if (PackageName.empty() == true)

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
	 genArch = APT::Configuration::getArchitectures();
	 if (genArch.size() != 1)
	    genArch.push_back("all");

	 genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
	   arch != genArch.end(); ++arch)

      // Get a pointer to the package structure
      pkgCache::PkgIterator Pkg;
      if (NewPackage(Pkg, PackageName, *arch) == false)
	 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());

      if (Counter % 100 == 0 && Progress != 0)
	 Progress->Progress(List.Offset());
      /* Get a pointer to the version structure. We know the list is sorted
         so we use that fact in the search. Insertion of new versions is
         done with correct sorting */
      string Version = List.Version();
      if (Version.empty() == true)

      // we first process the package, then the descriptions
      // (this has the bonus that we get MMap error when we run out

      if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
	 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
			      PackageName.c_str());

      // Find the right version to write the description
      MD5SumValue CurMd5 = List.Description_md5();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      map_ptrloc *LastVer = &Pkg->VersionList;

      for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)

	 pkgCache::DescIterator Desc = Ver.DescriptionList();
	 map_ptrloc *LastDesc = &Ver->DescriptionList;
	 bool duplicate=false;

	 // don't add a new description if we have one for the given

	 for ( ; Desc.end() == false; Desc++)
	    if (MD5SumValue(Desc.md5()) == CurMd5 &&
	        Desc.LanguageCode() == List.DescriptionLanguage())

	 for (Desc = Ver.DescriptionList();
	      LastDesc = &Desc->NextDesc, Desc++)

	    if (MD5SumValue(Desc.md5()) == CurMd5)

	 // Add new description
	 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
	 Desc->ParentPkg = Pkg.Index();

	 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
      pkgCache::VerIterator Ver = Pkg.VersionList();
      map_ptrloc *LastVer = &Pkg->VersionList;

      unsigned long const Hash = List.VersionHash();
      for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)

	 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	 // Version is higher as current version - insert here

	 // Versionstrings are equal - is hash also equal?
	 if (Res == 0 && Ver->Hash == Hash)

	 // proceed with the next till we have either the right
	 // or we found another version (which will be lower)

      /* We already have a version for this item, record that we saw it */
      if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)

	 if (List.UsePackage(Pkg,Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
				 PackageName.c_str());

	 if (NewFileVer(Ver,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
				 PackageName.c_str());

	 // Read only a single record and return

	 FoundFileDeps |= List.HasFileDeps();

      *LastVer = NewVersion(Ver,Version,*LastVer);
      Ver->ParentPkg = Pkg.Index();

      if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
	 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
			      PackageName.c_str());

      if (List.UsePackage(Pkg,Ver) == false)
	 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
			      PackageName.c_str());

      if (NewFileVer(Ver,List) == false)
	 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
			      PackageName.c_str());

      // Read only a single record and return

      FoundFileDeps |= List.HasFileDeps();

      /* Record the Description data. Description data always exist in
	 Packages and Translation-* files. */
      pkgCache::DescIterator Desc = Ver.DescriptionList();
      map_ptrloc *LastDesc = &Ver->DescriptionList;

      // Skip to the end of description set
      for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

      // Add new description
      *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
      Desc->ParentPkg = Pkg.Index();

      if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
	 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());

   FoundFileDeps |= List.HasFileDeps();
   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
			     "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
			     "this APT is capable of."));
// CacheGenerator::MergeFileProvides - Merge file provides		/*{{{*/
// ---------------------------------------------------------------------
/* If we found any file depends while parsing the main list we need to
   resolve them. Since it is undesired to load the entire list of files
   into the cache as virtual packages we do a two stage effort. MergeList
   identifies the file depends and this creates Provides for them by
   re-parsing all the indexes. */
bool pkgCacheGenerator::MergeFileProvides(ListParser &List)

   unsigned int Counter = 0;
   while (List.Step() == true)

      string PackageName = List.Package();
      if (PackageName.empty() == true)

      string Version = List.Version();
      if (Version.empty() == true)

      pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
      if (Pkg.end() == true)
	 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
			      PackageName.c_str());

      if (Counter % 100 == 0 && Progress != 0)
	 Progress->Progress(List.Offset());

      unsigned long Hash = List.VersionHash();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      for (; Ver.end() == false; Ver++)

	 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())

	    if (List.CollectFileProvides(Cache,Ver) == false)
	       return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());

      if (Ver.end() == true)
	 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
// CacheGenerator::NewGroup - Add a new group				/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
   Grp = Cache.FindGrp(Name);
   if (Grp.end() == false)

   unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
   if (unlikely(Group == 0))

   Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
   Grp->Name = Map.WriteString(Name);
   if (unlikely(Grp->Name == 0))

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
   Cache.HeaderP->GrpHashTable[Hash] = Group;

   Cache.HeaderP->GroupCount++;
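
// The group hash table is a chained hash: NewGroup pushes the fresh Group onto
// the front of its bucket. A lookup along the same chain, as a rough sketch of
// what Cache.FindGrp has to do under that assumption:
//   unsigned long const Hash = Cache.Hash(Name);
//   for (map_ptrloc G = Cache.HeaderP->GrpHashTable[Hash]; G != 0;
//        G = (Cache.GrpP + G)->Next)
//      if (Name == Cache.StrP + (Cache.GrpP + G)->Name)
//         return pkgCache::GrpIterator(Cache, Cache.GrpP + G);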
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
				   const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)

   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))

   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert the package into our package list
   if (Grp->FirstPackage == 0) // the group is new

      // Insert it into the hash table
      unsigned long const Hash = Cache.Hash(Name);
      Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
      Cache.HeaderP->PkgHashTable[Hash] = Package;
      Grp->FirstPackage = Package;

   else // Group the Packages together

      // this package is the new last package
      pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
      Pkg->NextPackage = LastPkg->NextPackage;
      LastPkg->NextPackage = Package;

   Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))

   Pkg->ID = Cache.HeaderP->PackageCount++;
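
// Layout note: a Group chains every architecture variant of one package name
// through Pkg->NextPackage, delimited by Grp->FirstPackage and Grp->LastPackage.
// Walking them, roughly as FinishCache does further down:
//   for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
//      std::clog << P.Name() << " (" << P.Arch() << ")" << std::endl;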
// CacheGenerator::NewFileVer - Create a new File<->Version association	/*{{{*/
// ---------------------------------------------------------------------
bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
				   ListParser &List)

   if (CurrentFile == 0)

   unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));

   pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
   VF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Ver->FileList;
   for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)

   VF->NextFile = *Last;

   VF->Offset = List.Offset();
   VF->Size = List.Size();
   if (Cache.HeaderP->MaxVerFileSize < VF->Size)
      Cache.HeaderP->MaxVerFileSize = VF->Size;
   Cache.HeaderP->VerFileCount++;
// CacheGenerator::NewVersion - Create a new Version			/*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
					    const string &VerStr,

   unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));

   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);

   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = Map.WriteString(VerStr);
   if (Ver->VerStr == 0)
// CacheGenerator::NewFileDesc - Create a new File<->Desc association	/*{{{*/
// ---------------------------------------------------------------------
bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
				    ListParser &List)

   if (CurrentFile == 0)

   unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));

   pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
   DF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Desc->FileList;
   for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)

   DF->NextFile = *Last;

   DF->Offset = List.Offset();
   DF->Size = List.Size();
   if (Cache.HeaderP->MaxDescFileSize < DF->Size)
      Cache.HeaderP->MaxDescFileSize = DF->Size;
   Cache.HeaderP->DescFileCount++;
// CacheGenerator::NewDescription - Create a new Description		/*{{{*/
// ---------------------------------------------------------------------
/* This puts a description structure in the linked list */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
					     const MD5SumValue &md5sum,

   map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
   if (Description == 0)

   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = Map.WriteString(Lang);
   Desc->md5sum = Map.WriteString(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
// CacheGenerator::FinishCache - do various finish operations		/*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in your groups ?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
	 string const PkgName = G.Name();
	 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
	    if (strcmp(P.Arch(),"all") == 0)

	    pkgCache::PkgIterator allPkg;
	    for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
	       string const Arch = V.Arch(true);
	       map_ptrloc *OldDepLast = NULL;
	       /* MultiArch handling introduces a lot of implicit Dependencies:
		  - MultiArch: same → Co-Installable if they have the same version
		  - Architecture: all → Need to be Co-Installable for internal reasons
		  - All others conflict with all other group members */
	       bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
				       V->MultiArch == pkgCache::Version::Same);
	       if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
		  allPkg = G.FindPkg("all");
	       for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {

		  /* We allow only one installed arch at the time
		     per group, therefore each group member conflicts
		     with all other group members */
		  pkgCache::PkgIterator D = G.FindPkg(*A);

		  if (coInstall == true) {
		     // Replaces: ${self}:other ( << ${binary:Version})
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::Replaces,

		     // Breaks: ${self}:other (!= ${binary:Version})
		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,

		     NewDepends(D, V, V.VerStr(),
				pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,

		     if (V->MultiArch == pkgCache::Version::All) {
			// Depend on ${self}:all which does depend on nothing
			NewDepends(allPkg, V, V.VerStr(),
				   pkgCache::Dep::Equals, pkgCache::Dep::Depends,

		  // Conflicts: ${self}:other

		  pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
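
// Summary of the implicit dependencies injected above for a hypothetical group
// member foo:amd64 at version 1.0 against its sibling foo:i386 (assumed example):
//   co-installable (Multi-Arch: same / Architecture: all):
//     Replaces: foo:i386 (<< 1.0), Breaks: foo:i386 (<< 1.0), Breaks: foo:i386 (>> 1.0)
//   everything else:
//     Conflicts: foo:i386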
// CacheGenerator::NewDepends - Create a dependency element		/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
				   pkgCache::VerIterator &Ver,
				   string const &Version,
				   unsigned int const &Op,
				   unsigned int const &Type,
				   map_ptrloc *OldDepLast)

   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))

   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();

   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)

/*    for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      if (Dep->Version == 0)
	 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))

   // Link it to the package
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)

      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;
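
// If OldDepLast is NULL the dependency list of Ver is walked to find its tail;
// otherwise the supplied slot is used directly. That is why ListParser::NewDepends
// below caches the old end point across calls ("speeds up generation substantially").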
// ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
   needed and also handles the caching of the old endpoint */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
					       const string &PackageName,
					       const string &Version,

   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))

   // Locate the target package
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))

   // Is it a file dependency?
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
						const string &PkgName,
						const string &PkgArch,
						const string &Version)

   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))

   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))

   Cache.HeaderP->ProvidesCount++;

   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))

   // Locate the target package
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
				   const pkgIndexFile &Index,

   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)

   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);

   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)

   Progress->SubProgress(Index.Size());
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,

   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];

       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;

   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)

      Res = stringcmp(S,S+Size,Cache.StrP + I->String);

   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));

   // Fill in the structure
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;

   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)

   return ItemP->String;
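
// Fast-path sketch, assuming UniqHash caches one previously written StringItem
// per bucket:
//   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
//   if (Bucket != 0 && stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
//      return Bucket->String;   // same text always yields the same handle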
// CheckValidity - Check that a cache is up-to-date			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have

static bool CheckValidity(const string &CacheFile, FileIterator Start,
			  FileIterator End,MMap **OutMap = 0)

   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)

      std::clog << "CacheFile doesn't exist" << std::endl;

   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);

   if (_error->PendingError() == true || Map->Size() == 0)

      std::clog << "Errors are pending or Map is empty()" << std::endl;

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)

      std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)

	 std::clog << "Has NO packages" << std::endl;

      if ((*Start)->Exists() == false)

#if 0 // mvo: we no longer give a message here (Default Sources spec)
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());

	 std::clog << "file doesn't exist" << std::endl;

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)

	 std::clog << "FindInCache returned end-Pointer" << std::endl;

      Visited[File->ID] = true;

      std::clog << "with ID " << File->ID << " is valid" << std::endl;

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)

	 std::clog << "File with ID" << I << " wasn't visited" << std::endl;

   if (_error->PendingError() == true)

      std::clog << "Validity failed because of pending errors:" << std::endl;
      _error->DumpErrors();

   *OutMap = Map.UnGuard();
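
// The checks above log their reasoning to std::clog when Debug::pkgCacheGen is
// enabled, e.g. (assumed invocation): apt-get update -o Debug::pkgCacheGen=true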
// ComputeSize - Compute the total size of a bunch of files		/*{{{*/
// ---------------------------------------------------------------------
/* Size is kind of an abstract notion that is only used for the progress
   meter */
static unsigned long ComputeSize(FileIterator Start,FileIterator End)

   unsigned long TotalSize = 0;
   for (; Start != End; Start++)

      if ((*Start)->HasPackages() == false)

      TotalSize += (*Start)->Size();
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)

   for (I = Start; I != End; I++)

      if ((*I)->HasPackages() == false)

      if ((*I)->Exists() == false)

      if ((*I)->FindInCache(Gen.GetCache()).end() == false)

	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));

      if ((*I)->Merge(Gen,Progress) == false)

   if (Gen.HasFileDeps() == true)

      TotalSize = ComputeSize(Start, End);

      for (I = Start; I != End; I++)

	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));

	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
// MakeStatusCache - Construct the status cache				/*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
   index files from the sources list and all local ones) is ready
   to be mmaped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)

   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();

      vector<pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();

	 Files.push_back (*j);

   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;

   if (SrcCacheFile.empty() == false)
      Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;

   std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   Progress.OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)

      Progress.OverallProgress(1,1,1,_("Reading package lists"));

      std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;

   else if (Debug == true)
      std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,

   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)

      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)

      std::clog << "Open filebased MMap" << std::endl;

      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);

      std::clog << "Open memory Map (not filebased)" << std::endl;

   // Let's try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)

      std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;

      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
	  || SCacheF.Read((unsigned char *)Map->Data() + alloc,
			  SCacheF.Size()) == false)

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)

      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);

      std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;

      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)

      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)

      if (Writeable == true && SrcCacheFile.empty() == false)

	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)

	 fchmod(SCacheF.Fd(),0644);

	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));

	 // Write out the proper header
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);

   std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)

      delete Map.UnGuard();
      *OutMap = new MMap(*CacheF,0);

   *OutMap = Map.UnGuard();
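
// Plausible caller sketch (assumed, not taken from this file): build the status
// cache and map it as a pkgCache.
//   OpTextProgress Progress(*_config);
//   MMap *OutMap = 0;
//   if (pkgMakeStatusCache(List,Progress,&OutMap,true) == false)
//      return false;
//   pkgCache Cache(OutMap);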
// MakeOnlyStatusCache - Build a cache with just the status files	/*{{{*/
// ---------------------------------------------------------------------
bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)

   unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
   vector<pkgIndexFile *> Files;
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)

   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;

   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

   // Build the status cache
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),&Progress);
   if (_error->PendingError() == true)

   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		  Files.begin()+EndOfSource,Files.end()) == false)

   // FIXME: move me to a better place
   Gen.FinishCache(Progress);

   if (_error->PendingError() == true)

   *OutMap = Map.UnGuard();