// -*- mode: cpp; mode: fold -*-
// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
/* ######################################################################

   Package Cache Generator - Generator for the cache structure.

   This builds the cache structure from the abstract package list parser.

   ##################################################################### */
// Include Files							/*{{{*/
#define APT_COMPATIBILITY 986

#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/error.h>
#include <apt-pkg/version.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/macros.h>

#include <apt-pkg/tagfile.h>

typedef vector<pkgIndexFile *>::iterator FileIterator;
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that it is written to the disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
		    Map(*pMap), Cache(pMap,false), Progress(Prog),
		    FoundFileDeps(0)
{
   CurrentFile = 0;
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
         return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
   }
   else
   {
      // Map directly from the existing file
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
         _error->Error(_("Cache has an incompatible versioning system"));
         return;
      }
   }

   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
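/* Illustrative usage sketch (not part of the original file): a caller hands the
   generator a DynamicMMap and then lets each index file's Merge() drive
   SelectFile()/MergeList(), which is what BuildCache() and pkgMakeStatusCache()
   further down in this file do.  Error handling is elided and the Files vector
   is assumed to be populated already:

      OpProgress Prog;                           // or OpTextProgress for output
      DynamicMMap Map(0, 24*1024*1024);          // anonymous, in-memory map
      pkgCacheGenerator Gen(&Map, &Prog);        // writes a fresh header, sets Dirty
      for (vector<pkgIndexFile *>::iterator I = Files.begin(); I != Files.end(); ++I)
         if ((*I)->Merge(Gen, Prog) == false)    // parser calls SelectFile/MergeList
            return false;
      // ~pkgCacheGenerator() syncs the map and clears the Dirty flag again.
*/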
// CacheGenerator::~pkgCacheGenerator - Destructor			/*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   avoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()
{
   if (_error->PendingError() == true)
      return;
   if (Map.Sync() == false)
      return;

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine. */
bool pkgCacheGenerator::MergeList(ListParser &List,
				  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;

      /* As we handle Arch all packages as architecture bound
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
         genArch = APT::Configuration::getArchitectures();
         if (genArch.size() != 1)
            genArch.push_back("all");
      } else
         genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
           arch != genArch.end(); ++arch)
      {
      // Get a pointer to the package structure
      pkgCache::PkgIterator Pkg;
      if (NewPackage(Pkg, PackageName, *arch) == false)
         return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      /* Get a pointer to the version structure. We know the list is sorted
         so we use that fact in the search. Insertion of new versions is
         done with correct sorting */
      string Version = List.Version();
      if (Version.empty() == true)
      {
         // we first process the package, then the descriptions
         // (this has the bonus that we get an MMap error when we run out
         //  of MMap space)
         if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
                                 PackageName.c_str());

         // Find the right version to write the description
         MD5SumValue CurMd5 = List.Description_md5();
         pkgCache::VerIterator Ver = Pkg.VersionList();
         map_ptrloc *LastVer = &Pkg->VersionList;

         for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
         {
            pkgCache::DescIterator Desc = Ver.DescriptionList();
            map_ptrloc *LastDesc = &Ver->DescriptionList;
            bool duplicate=false;

            // don't add a new description if we have one for the given
            // md5 && language
            for ( ; Desc.end() == false; Desc++)
               if (MD5SumValue(Desc.md5()) == CurMd5 &&
                   Desc.LanguageCode() == List.DescriptionLanguage())
                  duplicate=true;
            if (duplicate == true)
               continue;

            for (Desc = Ver.DescriptionList();
                 Desc.end() == false;
                 LastDesc = &Desc->NextDesc, Desc++)
            {
               if (MD5SumValue(Desc.md5()) == CurMd5)
               {
                  // Add new description
                  *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
                  Desc->ParentPkg = Pkg.Index();

                  if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
                     return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
                  break;
               }
            }
         }

         continue;
      }

      pkgCache::VerIterator Ver = Pkg.VersionList();
      map_ptrloc *LastVer = &Pkg->VersionList;
      int Res = 1;
      for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
      {
         Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
         if (Res >= 0)
            break;
      }

      /* We already have a version for this item, record that we
         saw it */
      unsigned long Hash = List.VersionHash();
      if (Res == 0 && Ver->Hash == Hash)
      {
         if (List.UsePackage(Pkg,Ver) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
                                 PackageName.c_str());

         if (NewFileVer(Ver,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
                                 PackageName.c_str());

         // Read only a single record and return
         if (OutVer != 0)
         {
            *OutVer = Ver;
            FoundFileDeps |= List.HasFileDeps();
            return true;
         }

         continue;
      }

      // Skip to the end of the same version set.
      if (Res == 0)
      {
         for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
         {
            Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
            if (Res != 0)
               break;
         }
      }

      // Add a new version
      *LastVer = NewVersion(Ver,Version,*LastVer);
      Ver->ParentPkg = Pkg.Index();
      Ver->Hash = Hash;

      if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
         return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
                              PackageName.c_str());

      if (List.UsePackage(Pkg,Ver) == false)
         return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
                              PackageName.c_str());

      if (NewFileVer(Ver,List) == false)
         return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
                              PackageName.c_str());

      // Read only a single record and return
      if (OutVer != 0)
      {
         *OutVer = Ver;
         FoundFileDeps |= List.HasFileDeps();
         return true;
      }

      /* Record the Description data. Description data always exist in
         Packages and Translation-* files. */
      pkgCache::DescIterator Desc = Ver.DescriptionList();
      map_ptrloc *LastDesc = &Ver->DescriptionList;

      // Skip to the end of description set
      for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

      // Add new description
      *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
      Desc->ParentPkg = Pkg.Index();

      if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
         return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
			     "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
			     "this APT is capable of."));

   return true;
}
// CacheGenerator::MergeFileProvides - Merge file provides		/*{{{*/
// ---------------------------------------------------------------------
/* If we found any file depends while parsing the main list we need to
   resolve them. Since it is undesirable to load the entire list of files
   into the cache as virtual packages we do a two-stage effort. MergeList
   identifies the file depends and this creates Provides for them by
   re-parsing all the indexes. */
bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;
      string Version = List.Version();
      if (Version.empty() == true)
         continue;

      pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
      if (Pkg.end() == true)
         return _error->Error(_("Error occurred while processing %s (FindPkg)"),
                              PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      unsigned long Hash = List.VersionHash();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      for (; Ver.end() == false; Ver++)
      {
         if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
         {
            if (List.CollectFileProvides(Cache,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
            break;
         }
      }

      if (Ver.end() == true)
         _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
   }

   return true;
}
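/* Worked example (illustrative, not part of the original source): suppose a
   Packages record contains

      Package: foo
      Depends: /bin/sh

   During MergeList() the leading '/' of the dependency only sets FoundFileDeps.
   BuildCache() below then re-runs every index through MergeFileProvides(); for
   each known version it calls List.CollectFileProvides(Cache,Ver), which adds a
   Provides entry for /bin/sh to the versions that actually ship that file, so
   the file dependency can be resolved without loading all file lists as
   virtual packages. */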
// CacheGenerator::NewGroup - Add a new group				/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
   Grp = Cache.FindGrp(Name);
   if (Grp.end() == false)
      return true;

   // Get a structure
   unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
   if (unlikely(Group == 0))
      return false;

   Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
   Grp->Name = Map.WriteString(Name);
   if (unlikely(Grp->Name == 0))
      return false;

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
   Cache.HeaderP->GrpHashTable[Hash] = Group;

   Cache.HeaderP->GroupCount++;

   return true;
}
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
					const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
   Cache.HeaderP->PkgHashTable[Hash] = Package;

   // remember the packages in the group
   Grp->FirstPackage = Package;
   if (Grp->LastPackage == 0)
      Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
// CacheGenerator::NewFileVer - Create a new File<->Version association	/*{{{*/
// ---------------------------------------------------------------------
bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
				   ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
   if (VerFile == 0)
      return false;

   pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
   VF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Ver->FileList;
   for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
      Last = &V->NextFile;
   VF->NextFile = *Last;
   *Last = VF.Index();

   VF->Offset = List.Offset();
   VF->Size = List.Size();
   if (Cache.HeaderP->MaxVerFileSize < VF->Size)
      Cache.HeaderP->MaxVerFileSize = VF->Size;
   Cache.HeaderP->VerFileCount++;

   return true;
}
// CacheGenerator::NewVersion - Create a new Version			/*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
					    const string &VerStr,
					    unsigned long Next)
{
   // Get a structure
   unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;

   // Fill it in
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = Map.WriteString(VerStr);
   if (Ver->VerStr == 0)
      return 0;

   return Version;
}
// CacheGenerator::NewFileDesc - Create a new File<->Desc association	/*{{{*/
// ---------------------------------------------------------------------
bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
				    ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
   if (DescFile == 0)
      return false;

   pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
   DF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Desc->FileList;
   for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
      Last = &D->NextFile;

   DF->NextFile = *Last;
   *Last = DF.Index();

   DF->Offset = List.Offset();
   DF->Size = List.Size();
   if (Cache.HeaderP->MaxDescFileSize < DF->Size)
      Cache.HeaderP->MaxDescFileSize = DF->Size;
   Cache.HeaderP->DescFileCount++;

   return true;
}
// CacheGenerator::NewDescription - Create a new Description		/*{{{*/
// ---------------------------------------------------------------------
/* This puts a description structure in the linked list */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
					     const string &Lang,
					     const MD5SumValue &md5sum,
					     map_ptrloc Next)
{
   // Get a structure
   map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
   if (Description == 0)
      return 0;

   // Fill it in
   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = Map.WriteString(Lang);
   Desc->md5sum = Map.WriteString(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
      return 0;

   return Description;
}
// CacheGenerator::FinishCache - do various finish operations		/*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in the groups?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create Conflicts between the group members
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
         string const PkgName = G.Name();
         for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
            if (strcmp(P.Arch(),"all") == 0)
               continue;
            pkgCache::PkgIterator allPkg;
            for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
               string const Arch = V.Arch(true);
               map_ptrloc *OldDepLast = NULL;
               /* MultiArch handling introduces a lot of implicit Dependencies:
                  - MultiArch: same → Co-Installable if they have the same version
                  - Architecture: all → Need to be Co-Installable for internal reasons
                  - All others conflict with all other group members */
               bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
                                       V->MultiArch == pkgCache::Version::Same);
               if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
                  allPkg = G.FindPkg("all");
               for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
                  if (*A == Arch)
                     continue;
                  /* We allow only one installed arch at a time
                     per group, therefore each group member conflicts
                     with all other group members */
                  pkgCache::PkgIterator D = G.FindPkg(*A);
                  if (D.end() == true)
                     continue;
                  if (coInstall == true) {
                     // Replaces: ${self}:other ( << ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::Replaces,
                                OldDepLast);
                     // Breaks: ${self}:other (!= ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     if (V->MultiArch == pkgCache::Version::All) {
                        // Depend on ${self}:all which does depend on nothing
                        NewDepends(allPkg, V, V.VerStr(),
                                   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
                                   OldDepLast);
                     }
                  } else {
                     // Conflicts: ${self}:other
                     NewDepends(D, V, "",
                                pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
                                OldDepLast);
                  }
               }
            }
         }
      }
   }
   return true;
}
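/* Worked example (illustrative, not part of the original source): with
   archs = {amd64, i386} and a group "libfoo" whose versions carry
   "MultiArch: same", FinishCache() gives libfoo:amd64 1.0 roughly

      Replaces: libfoo:i386 (<< 1.0)
      Breaks:   libfoo:i386 (<< 1.0), libfoo:i386 (>> 1.0)

   so the two architectures stay co-installable only at identical versions.
   A group without any MultiArch flag instead gets an unversioned

      Conflicts: libfoo:i386

   so only one architecture of it can be installed at a time. */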
// CacheGenerator::NewDepends - Create a dependency element		/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
				   pkgCache::VerIterator &Ver,
				   string const &Version,
				   unsigned int const &Op,
				   unsigned int const &Type,
				   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*    for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      if (Dep->Version == 0)
	 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
	    return false;
   }

   // Link it to the package
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;

   return true;
}
// ListParser::NewDepends - Create the environment for a new dependency	/*{{{*/
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
   needed and also handles the caching of the old endpoint */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
					       const string &PackageName,
					       const string &Arch,
					       const string &Version,
					       unsigned int Op,
					       unsigned int Type)
{
   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
	 return false;
   }

   // Is it a file dependency?
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {
      OldDepLast = NULL;
      OldDepVer = Ver;
   }

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
}
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
						const string &PkgName,
						const string &PkgArch,
						const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
				   const pkgIndexFile &Index,
				   unsigned long Flags)
{
   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
						 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
	 break;
   }

   // Match
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
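/* Illustrative sketch (not part of the original source): the point of the
   function is that equal text always maps to the same string offset, which is
   how repeated strings such as architectures and sites are stored only once.
   Assuming we are inside the generator (the calls are member calls):

      unsigned long a = WriteUniqString("amd64", 5);
      unsigned long b = WriteUniqString("amd64", 5);
      // a == b; the second call is answered from the transient bucket
      // UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)] without walking the
      // sorted StringItem list again.
*/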
// CheckValidity - Check that a cache is up-to-date			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
	 std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
	 std::clog << "Errors are pending or Map is empty()" << std::endl;
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
	 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
	 if (Debug == true)
	    std::clog << "Has NO packages" << std::endl;
	 continue;
      }

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());
#endif
	 if (Debug == true)
	    std::clog << "file doesn't exist" << std::endl;
	 continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
	 if (Debug == true)
	    std::clog << "FindInCache returned end-Pointer" << std::endl;
	 return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
	 std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
	 if (Debug == true)
	    std::clog << "File with ID " << I << " wasn't visited" << std::endl;
	 return false;
      }

   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
	 std::clog << "Validity failed because of pending errors:" << std::endl;
	 _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }

   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
// ComputeSize - Compute the total size of a bunch of files		/*{{{*/
// ---------------------------------------------------------------------
/* Size is kind of an abstract notion that is only used for the progress
   meter */
static unsigned long ComputeSize(FileIterator Start,FileIterator End)
{
   unsigned long TotalSize = 0;
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
	 continue;
      TotalSize += (*Start)->Size();
   }
   return TotalSize;
}
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
	 continue;

      if ((*I)->Exists() == false)
	 continue;

      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());
	 continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
	 return false;
   }

   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
	 CurrentSize += Size;
	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
	    return false;
      }
   }

   return true;
}
// MakeStatusCache - Construct the status cache				/*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
   index files from the sources list and all local ones) is ready
   to be mmaped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector<pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
         Files.push_back (*j);
   }

   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   Progress.OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
	 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
      std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
      if (Debug == true)
	 std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
	 std::clog << "Open memory Map (not filebased)" << std::endl;
   }

   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
	  || SCacheF.Read((unsigned char *)Map->Data() + alloc,
			  SCacheF.Size()) == false)
	 return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;

      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;

	 fchmod(SCacheF.Fd(),0644);

	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));

	 // Write out the proper header
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,0);
      }
      else
      {
	 *OutMap = Map.UnGuard();
      }
   }

   return true;
}
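/* Illustrative usage sketch (not part of the original source): a front end
   would typically build and then open the cache roughly like this (error
   handling elided; pkgCacheFile wraps a similar sequence):

      pkgSourceList List;
      List.ReadMainList();
      OpTextProgress Prog(*_config);
      MMap *Map = 0;
      if (pkgMakeStatusCache(List, Prog, &Map, true) == false)   // AllowMem
         return false;
      pkgCache Cache(Map);   // mmap-backed cache, ready for iteration
*/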
// MakeOnlyStatusCache - Build a cache with just the status files	/*{{{*/
// ---------------------------------------------------------------------
bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
{
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
   vector<pkgIndexFile *> Files;
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;

   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

   // Build the status cache
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),&Progress);
   if (_error->PendingError() == true)
      return false;
   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		  Files.begin()+EndOfSource,Files.end()) == false)
      return false;

   // FIXME: move me to a better place
   Gen.FinishCache(Progress);

   if (_error->PendingError() == true)
      return false;
   *OutMap = Map.UnGuard();

   return true;
}