// -*- mode: cpp; mode: fold -*-
// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
/* ######################################################################

   Package Cache Generator - Generator for the cache structure.

   This builds the cache structure from the abstract package list parser.

   ##################################################################### */
									/*}}}*/
// Include Files							/*{{{*/
#define APT_COMPATIBILITY 986

#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/error.h>
#include <apt-pkg/version.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/tagfile.h>

#include <apti18n.h>
#include <vector>
#include <sys/stat.h>
#include <unistd.h>
typedef vector<pkgIndexFile *>::iterator FileIterator;
									/*}}}*/
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that it is written to the disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
		    Map(*pMap), Cache(pMap,false), Progress(Prog)
{
   CurrentFile = 0;
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
	 return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
	 _error->Error(_("Cache has an incompatible versioning system"));
	 return;
      }
   }

   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
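#if 0 // Illustrative sketch (not part of the original file): how a caller
      // might construct a generator over an anonymous in-memory map, as the
      // cache builders further down in this file do. The map size here is an
      // arbitrary example value.
   DynamicMMap *Map = new DynamicMMap(0,24*1024*1024);
   OpProgress Progress;
   pkgCacheGenerator Gen(Map,&Progress);   // writes a fresh header, sets Dirty
   // ... feed index files to Gen via SelectFile()/MergeList() ...
#endif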
// CacheGenerator::~pkgCacheGenerator - Destructor			/*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   avoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()
{
   if (_error->PendingError() == true)
      return;
   if (Map.Sync() == false)
      return;

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
// CacheGenerator::MergeList - Merge the package list			/*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine. */
bool pkgCacheGenerator::MergeList(ListParser &List,
				  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
	 return false;

      /* As we handle Arch all packages as architecture bound
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true)
	 genArch = APT::Configuration::getArchitectures();
      else
	 genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
	   arch != genArch.end(); ++arch)
      {
	 // Get a pointer to the package structure
	 pkgCache::PkgIterator Pkg;
	 if (NewPackage(Pkg, PackageName, *arch) == false)
	    return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
	 Counter++;
	 if (Counter % 100 == 0 && Progress != 0)
	    Progress->Progress(List.Offset());

	 /* Get a pointer to the version structure. We know the list is sorted
	    so we use that fact in the search. Insertion of new versions is
	    done with correct sorting */
	 string Version = List.Version();
	 if (Version.empty() == true)
	 {
	    // we first process the package, then the descriptions
	    // (this has the bonus that we get a MMap error when we run out
	    //  of MMap space)
	    if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
				    PackageName.c_str());

	    // Find the right version to write the description
	    MD5SumValue CurMd5 = List.Description_md5();
	    pkgCache::VerIterator Ver = Pkg.VersionList();
	    map_ptrloc *LastVer = &Pkg->VersionList;

	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       pkgCache::DescIterator Desc = Ver.DescriptionList();
	       map_ptrloc *LastDesc = &Ver->DescriptionList;
	       bool duplicate = false;

	       // don't add a new description if we have one for the given
	       // md5 && language
	       for ( ; Desc.end() == false; Desc++)
		  if (MD5SumValue(Desc.md5()) == CurMd5 &&
		      Desc.LanguageCode() == List.DescriptionLanguage())
		     duplicate = true;
	       if (duplicate == true)
		  continue;

	       for (Desc = Ver.DescriptionList();
		    Desc.end() == false;
		    LastDesc = &Desc->NextDesc, Desc++)
	       {
		  if (MD5SumValue(Desc.md5()) == CurMd5)
		  {
		     // Add new description
		     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
		     Desc->ParentPkg = Pkg.Index();

		     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
			return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
		     break;
		  }
	       }
	    }

	    continue;
	 }

	 pkgCache::VerIterator Ver = Pkg.VersionList();
	 map_ptrloc *LastVer = &Pkg->VersionList;
	 int Res = 1;
	 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	 {
	    Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	    if (Res >= 0)
	       break;
	 }

	 /* We already have a version for this item, record that we
	    saw it */
	 unsigned long Hash = List.VersionHash();
	 if (Res == 0 && Ver->Hash == Hash)
	 {
	    if (List.UsePackage(Pkg,Ver) == false)
	       return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
				    PackageName.c_str());

	    if (NewFileVer(Ver,List) == false)
	       return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
				    PackageName.c_str());

	    // Read only a single record and return
	    if (OutVer != 0)
	    {
	       *OutVer = Ver;
	       FoundFileDeps |= List.HasFileDeps();
	       return true;
	    }

	    continue;
	 }

	 // Skip to the end of the same version set.
	 if (Res == 0)
	 {
	    for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
	    {
	       Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
	       if (Res != 0)
		  break;
	    }
	 }

	 // Add a new version
	 *LastVer = NewVersion(Ver,Version,*LastVer);
	 Ver->ParentPkg = Pkg.Index();
	 Ver->Hash = Hash;

	 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
				 PackageName.c_str());

	 if (List.UsePackage(Pkg,Ver) == false)
	    return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
				 PackageName.c_str());

	 if (NewFileVer(Ver,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
				 PackageName.c_str());

	 // Read only a single record and return
	 if (OutVer != 0)
	 {
	    *OutVer = Ver;
	    FoundFileDeps |= List.HasFileDeps();
	    return true;
	 }

	 /* Record the Description data. Description data always exist in
	    Packages and Translation-* files. */
	 pkgCache::DescIterator Desc = Ver.DescriptionList();
	 map_ptrloc *LastDesc = &Ver->DescriptionList;

	 // Skip to the end of description set
	 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

	 // Add new description
	 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
	 Desc->ParentPkg = Pkg.Index();

	 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
	    return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
			     "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
			     "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
			     "this APT is capable of."));
   return true;
}
									/*}}}*/
// CacheGenerator::MergeFileProvides - Merge file provides		/*{{{*/
// ---------------------------------------------------------------------
/* If we found any file dependencies while parsing the main list we need to
   resolve them. Since it is undesired to load the entire list of files
   into the cache as virtual packages we do a two stage effort. MergeList
   identifies the file dependencies and this creates Provides for them by
   re-parsing all the indexes. */
bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string PackageName = List.Package();
      if (PackageName.empty() == true)
	 return false;
      string Version = List.Version();
      if (Version.empty() == true)
	 continue;

      pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
      if (Pkg.end() == true)
	 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
			      PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
	 Progress->Progress(List.Offset());

      unsigned long Hash = List.VersionHash();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      for (; Ver.end() == false; Ver++)
      {
	 if (Ver->Hash == Hash && Version == Ver.VerStr())
	 {
	    if (List.CollectFileProvides(Cache,Ver) == false)
	       return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
	    break;
	 }
      }

      if (Ver.end() == true)
	 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
   }

   return true;
}
									/*}}}*/
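#if 0 // Illustrative sketch (not part of the original file): the two stage
      // effort described above, reduced to its core. BuildCache() further
      // down in this file is the real driver; "Index" stands here for any
      // pkgIndexFile that has packages.
   Index.Merge(Gen,Progress);                // stage 1: MergeList, notes file deps
   if (Gen.HasFileDeps() == true)
      Index.MergeFileProvides(Gen,Progress); // stage 2: re-parse, add Provides
#endif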
// CacheGenerator::NewGroup - Add a new group				/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
   Grp = Cache.FindGrp(Name);
   if (Grp.end() == false)
      return true;

   // Get a structure
   unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
   if (unlikely(Group == 0))
      return false;

   Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
   Grp->Name = Map.WriteString(Name);
   if (unlikely(Grp->Name == 0))
      return false;

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
   Cache.HeaderP->GrpHashTable[Hash] = Group;

   Cache.HeaderP->GroupCount++;

   return true;
}
									/*}}}*/
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
				   const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
   Cache.HeaderP->PkgHashTable[Hash] = Package;

   // remember the packages in the group
   Grp->FirstPackage = Package;
   if (Grp->LastPackage == 0)
      Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
									/*}}}*/
// CacheGenerator::NewFileVer - Create a new File<->Version association	/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
				   ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
   if (VerFile == 0)
      return false;

   pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
   VF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Ver->FileList;
   for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
      Last = &V->NextFile;
   VF->NextFile = *Last;
   *Last = VF.Index();

   VF->Offset = List.Offset();
   VF->Size = List.Size();
   if (Cache.HeaderP->MaxVerFileSize < VF->Size)
      Cache.HeaderP->MaxVerFileSize = VF->Size;
   Cache.HeaderP->VerFileCount++;

   return true;
}
									/*}}}*/
// CacheGenerator::NewVersion - Create a new Version			/*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
					    const string &VerStr,
					    unsigned long Next)
{
   // Get a structure
   unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;

   // Fill it in
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = Map.WriteString(VerStr);
   if (Ver->VerStr == 0)
      return 0;

   return Version;
}
									/*}}}*/
// CacheGenerator::NewFileDesc - Create a new File<->Desc association	/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
				    ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
   if (DescFile == 0)
      return false;

   pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
   DF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Desc->FileList;
   for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
      Last = &D->NextFile;

   DF->NextFile = *Last;
   *Last = DF.Index();

   DF->Offset = List.Offset();
   DF->Size = List.Size();
   if (Cache.HeaderP->MaxDescFileSize < DF->Size)
      Cache.HeaderP->MaxDescFileSize = DF->Size;
   Cache.HeaderP->DescFileCount++;

   return true;
}
									/*}}}*/
// CacheGenerator::NewDescription - Create a new Description		/*{{{*/
// ---------------------------------------------------------------------
/* This puts a description structure in the linked list */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
					     const string &Lang,
					     const MD5SumValue &md5sum,
					     map_ptrloc Next)
{
   // Get a structure
   map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
   if (Description == 0)
      return 0;

   // Fill it in
   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = Map.WriteString(Lang);
   Desc->md5sum = Map.WriteString(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
      return 0;

   return Description;
}
									/*}}}*/
// CacheGenerator::FinishCache - do various finish operations		/*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in our groups ?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
	 string const PkgName = G.Name();
	 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
	    for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
	       // Arch all packages are "co-installable"
	       if (V->MultiArch == pkgCache::Version::All)
		  continue;

	       string const Arch = V.Arch();
	       map_ptrloc *OldDepLast = NULL;
	       for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
		  if (*A == Arch)
		     continue;
		  /* We allow only one installed arch at the time
		     per group, therefore each group member conflicts
		     with all other group members */
		  pkgCache::PkgIterator D = G.FindPkg(*A);
		  if (D.end() == true)
		     continue;
		  // Conflicts: ${self}:other
		  NewDepends(D, V, "",
			     pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
			     OldDepLast);
	       }
	    }
	 }
      }
   }
   return true;
}
									/*}}}*/
// CacheGenerator::NewDepends - Create a dependency element		/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
				   pkgCache::VerIterator &Ver,
				   string const &Version,
				   unsigned int const &Op,
				   unsigned int const &Type,
				   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*      for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      if (Dep->Version == 0)
	 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
	    return false;
   }

   // Link it to the package
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;

   return true;
}
									/*}}}*/
// ListParser::NewDepends - Create the environment for a new dependency	/*{{{*/
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
   needed and also handles the caching of the old endpoint */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
					       const string &PackageName,
					       const string &Arch,
					       const string &Version,
					       unsigned int Op,
					       unsigned int Type)
{
   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
	 return false;
   }

   // Is it a file dependency?
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {
      OldDepLast = NULL;
      OldDepVer = Ver;
   }

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
}
									/*}}}*/
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
						const string &PackageName,
						const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (unlikely(Ver.ParentPkg().Name() == PackageName))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PackageName,string(Ver.Arch())) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
									/*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
				   const pkgIndexFile &Index,
				   unsigned long Flags)
{
   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
									/*}}}*/
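#if 0 // Illustrative sketch (not part of the original file): the expected
      // calling convention for SelectFile()/MergeList() inside an index
      // file's Merge() method. "Parser" stands for any
      // pkgCacheGenerator::ListParser implementation (e.g. debListParser);
      // the surrounding names are hypothetical.
static bool ExampleMerge(pkgCacheGenerator &Gen,
			 pkgCacheGenerator::ListParser &Parser,
			 const string &File,const string &Site,
			 const pkgIndexFile &Index)
{
   if (Gen.SelectFile(File,Site,Index) == false)
      return false;
   // Per the comment above, it is the caller's job to fill in the IMS
   // fields (size, mtime) of the selected package file afterwards.
   return Gen.MergeList(Parser);
}
#endif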
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
						 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
	I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
	 break;
   }

   // Match
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
									/*}}}*/
// CheckValidity - Check that a cache is up-to-date			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
			  FileIterator End,MMap **OutMap = 0)
{
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
      return false;

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
	 continue;

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());
#endif
	 continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
	 return false;

      Visited[File->ID] = true;
   }

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
	 return false;

   if (_error->PendingError() == true)
   {
      _error->Discard();
      return false;
   }

   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
									/*}}}*/
// ComputeSize - Compute the total size of a bunch of files		/*{{{*/
// ---------------------------------------------------------------------
/* Size is kind of an abstract notion that is only used for the progress
   meter */
static unsigned long ComputeSize(FileIterator Start,FileIterator End)
{
   unsigned long TotalSize = 0;
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
	 continue;
      TotalSize += (*Start)->Size();
   }
   return TotalSize;
}
									/*}}}*/
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
/* */
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
	 continue;

      if ((*I)->Exists() == false)
	 continue;

      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());
	 continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
	 return false;
   }

   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
	 CurrentSize += Size;
	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
	    return false;
      }
   }

   return true;
}
									/*}}}*/
// MakeStatusCache - Construct the status cache			/*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
   index files from the sources list and all local ones) is ready
   to be mmaped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
	i != List.end();
	i++)
   {
      vector<pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
	 Files.push_back (*j);
   }

   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;

   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   Progress.OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      return true;
   }

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
   }

   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
	  || SCacheF.Read((unsigned char *)Map->Data() + alloc,
			  SCacheF.Size()) == false)
	 return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;

      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;

	 fchmod(SCacheF.Fd(),0644);

	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));

	 // Write out the proper header
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,0);
      }
      else
      {
	 *OutMap = Map.UnGuard();
      }
   }

   return true;
}
									/*}}}*/
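#if 0 // Illustrative sketch (not part of the original file): typical front
      // end use of pkgMakeStatusCache(). AllowMem=true lets it run without
      // write access to the cache files by falling back to an in-memory map.
   pkgSourceList List;
   if (List.ReadMainList() == false)
      return false;
   OpTextProgress Progress(*_config);
   MMap *Map = 0;
   if (pkgMakeStatusCache(List,Progress,&Map,true) == false)
      return false;
   pkgCache Cache(Map);   // the mmaped cache, ready for use
#endif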
// MakeOnlyStatusCache - Build a cache with just the status files	/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
{
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
   vector<pkgIndexFile *> Files;
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;

   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

   // Build the status cache
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),&Progress);
   if (_error->PendingError() == true)
      return false;
   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		  Files.begin()+EndOfSource,Files.end()) == false)
      return false;

   // FIXME: move me to a better place
   Gen.FinishCache(Progress);

   if (_error->PendingError() == true)
      return false;
   *OutMap = Map.UnGuard();

   return true;
}
									/*}}}*/
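#if 0 // Illustrative sketch (not part of the original file): building a cache
      // from the status files alone, e.g. for a tool that never looks at
      // remote indexes.
   OpProgress Progress;        // silent progress; use OpTextProgress for output
   DynamicMMap *Map = 0;
   if (pkgMakeOnlyStatusCache(Progress,&Map) == false)
      return false;
   pkgCache Cache(Map);
   // ... use Cache; the caller owns and eventually deletes Map.
#endif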