]> git.saurik.com Git - apt.git/blame_incremental - apt-pkg/pkgcachegen.cc
* remove all the remaining #pragma implementation
[apt.git] / apt-pkg / pkgcachegen.cc
... / ...
CommitLineData
1// -*- mode: cpp; mode: fold -*-
2// Description /*{{{*/
3// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4/* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12// Include Files /*{{{*/
13#define APT_COMPATIBILITY 986
14
15#include <apt-pkg/pkgcachegen.h>
16#include <apt-pkg/error.h>
17#include <apt-pkg/version.h>
18#include <apt-pkg/progress.h>
19#include <apt-pkg/sourcelist.h>
20#include <apt-pkg/configuration.h>
21#include <apt-pkg/strutl.h>
22#include <apt-pkg/sptr.h>
23#include <apt-pkg/pkgsystem.h>
24
25#include <apti18n.h>
26
27#include <vector>
28
29#include <sys/stat.h>
30#include <unistd.h>
31#include <errno.h>
32#include <stdio.h>
33#include <system.h>
34 /*}}}*/
35typedef vector<pkgIndexFile *>::iterator FileIterator;
36
// CacheGenerator::pkgCacheGenerator - Constructor			/*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that it is written to the disk.
   The generator either initializes a brand new map (Map.Size() == 0) or
   attaches to an existing cache file to extend it. */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
		    Map(*pMap), Cache(pMap,false), Progress(Prog),
		    FoundFileDeps(0)
{
   CurrentFile = 0;
   // Clear the transient string-uniquing hash used by WriteUniqString
   memset(UniqHash,0,sizeof(UniqHash));
   
   if (_error->PendingError() == true)
      return;
   
   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      Map.RawAllocate(sizeof(pkgCache::Header));
      // Carve the map into fixed-size pools for the main record types
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      
      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      // Refuse to extend a cache built with a different version comparator
      if (Cache.VS != _system->VS)
      {
	 _error->Error(_("Cache has an incompatible versioning system"));
	 return;
      }
   }
   
   /* Tag the cache dirty and push the header to disk; the destructor
      clears the flag only after a successful full sync, so a crash
      leaves a detectably incomplete cache. */
   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
									/*}}}*/
79// CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
80// ---------------------------------------------------------------------
81/* We sync the data then unset the dirty flag in two steps so as to
82 advoid a problem during a crash */
83pkgCacheGenerator::~pkgCacheGenerator()
84{
85 if (_error->PendingError() == true)
86 return;
87 if (Map.Sync() == false)
88 return;
89
90 Cache.HeaderP->Dirty = false;
91 Map.Sync(0,sizeof(pkgCache::Header));
92}
93 /*}}}*/
94// CacheGenerator::MergeList - Merge the package list /*{{{*/
95// ---------------------------------------------------------------------
96/* This provides the generation of the entries in the cache. Each loop
97 goes through a single package record from the underlying parse engine. */
98bool pkgCacheGenerator::MergeList(ListParser &List,
99 pkgCache::VerIterator *OutVer)
100{
101 List.Owner = this;
102
103 unsigned int Counter = 0;
104 while (List.Step() == true)
105 {
106 // Get a pointer to the package structure
107 string PackageName = List.Package();
108 if (PackageName.empty() == true)
109 return false;
110
111 pkgCache::PkgIterator Pkg;
112 if (NewPackage(Pkg,PackageName) == false)
113 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
114 Counter++;
115 if (Counter % 100 == 0 && Progress != 0)
116 Progress->Progress(List.Offset());
117
118 /* Get a pointer to the version structure. We know the list is sorted
119 so we use that fact in the search. Insertion of new versions is
120 done with correct sorting */
121 string Version = List.Version();
122 if (Version.empty() == true)
123 {
124 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
125 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
126 PackageName.c_str());
127 continue;
128 }
129
130 pkgCache::VerIterator Ver = Pkg.VersionList();
131 map_ptrloc *Last = &Pkg->VersionList;
132 int Res = 1;
133 for (; Ver.end() == false; Last = &Ver->NextVer, Ver++)
134 {
135 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
136 if (Res >= 0)
137 break;
138 }
139
140 /* We already have a version for this item, record that we
141 saw it */
142 unsigned long Hash = List.VersionHash();
143 if (Res == 0 && Ver->Hash == Hash)
144 {
145 if (List.UsePackage(Pkg,Ver) == false)
146 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
147 PackageName.c_str());
148
149 if (NewFileVer(Ver,List) == false)
150 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
151 PackageName.c_str());
152
153 // Read only a single record and return
154 if (OutVer != 0)
155 {
156 *OutVer = Ver;
157 FoundFileDeps |= List.HasFileDeps();
158 return true;
159 }
160
161 continue;
162 }
163
164 // Skip to the end of the same version set.
165 if (Res == 0)
166 {
167 for (; Ver.end() == false; Last = &Ver->NextVer, Ver++)
168 {
169 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
170 if (Res != 0)
171 break;
172 }
173 }
174
175 // Add a new version
176 *Last = NewVersion(Ver,Version,*Last);
177 Ver->ParentPkg = Pkg.Index();
178 Ver->Hash = Hash;
179 if (List.NewVersion(Ver) == false)
180 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
181 PackageName.c_str());
182
183 if (List.UsePackage(Pkg,Ver) == false)
184 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
185 PackageName.c_str());
186
187 if (NewFileVer(Ver,List) == false)
188 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
189 PackageName.c_str());
190
191 // Read only a single record and return
192 if (OutVer != 0)
193 {
194 *OutVer = Ver;
195 FoundFileDeps |= List.HasFileDeps();
196 return true;
197 }
198 }
199
200 FoundFileDeps |= List.HasFileDeps();
201
202 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
203 return _error->Error(_("Wow, you exceeded the number of package "
204 "names this APT is capable of."));
205 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
206 return _error->Error(_("Wow, you exceeded the number of versions "
207 "this APT is capable of."));
208 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
209 return _error->Error(_("Wow, you exceeded the number of dependencies "
210 "this APT is capable of."));
211 return true;
212}
213 /*}}}*/
214// CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
215// ---------------------------------------------------------------------
216/* If we found any file depends while parsing the main list we need to
217 resolve them. Since it is undesired to load the entire list of files
218 into the cache as virtual packages we do a two stage effort. MergeList
219 identifies the file depends and this creates Provdies for them by
220 re-parsing all the indexs. */
221bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
222{
223 List.Owner = this;
224
225 unsigned int Counter = 0;
226 while (List.Step() == true)
227 {
228 string PackageName = List.Package();
229 if (PackageName.empty() == true)
230 return false;
231 string Version = List.Version();
232 if (Version.empty() == true)
233 continue;
234
235 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
236 if (Pkg.end() == true)
237 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
238 PackageName.c_str());
239 Counter++;
240 if (Counter % 100 == 0 && Progress != 0)
241 Progress->Progress(List.Offset());
242
243 unsigned long Hash = List.VersionHash();
244 pkgCache::VerIterator Ver = Pkg.VersionList();
245 for (; Ver.end() == false; Ver++)
246 {
247 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
248 {
249 if (List.CollectFileProvides(Cache,Ver) == false)
250 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
251 break;
252 }
253 }
254
255 if (Ver.end() == true)
256 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
257 }
258
259 return true;
260}
261 /*}}}*/
// CacheGenerator::NewPackage - Add a new package			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table.
   If an entry for Name already exists it is returned unchanged, so the
   call is idempotent. Returns false on allocation failure. */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name)
{
   // Reuse an existing entry if one is already in the cache
   Pkg = Cache.FindPkg(Name);
   if (Pkg.end() == false)
      return true;
       
   // Get a structure
   unsigned long Package = Map.Allocate(sizeof(pkgCache::Package));
   if (Package == 0)
      return false;
   
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
   
   // Insert it into the hash table (new entry chained at the bucket head)
   unsigned long Hash = Cache.Hash(Name);
   Pkg->NextPackage = Cache.HeaderP->HashTable[Hash];
   Cache.HeaderP->HashTable[Hash] = Package;
   
   // Set the name and the ID
   Pkg->Name = Map.WriteString(Name);
   if (Pkg->Name == 0)
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;
   
   return true;
}
									/*}}}*/
292// CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
293// ---------------------------------------------------------------------
294/* */
295bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
296 ListParser &List)
297{
298 if (CurrentFile == 0)
299 return true;
300
301 // Get a structure
302 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
303 if (VerFile == 0)
304 return 0;
305
306 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
307 VF->File = CurrentFile - Cache.PkgFileP;
308
309 // Link it to the end of the list
310 map_ptrloc *Last = &Ver->FileList;
311 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
312 Last = &V->NextFile;
313 VF->NextFile = *Last;
314 *Last = VF.Index();
315
316 VF->Offset = List.Offset();
317 VF->Size = List.Size();
318 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
319 Cache.HeaderP->MaxVerFileSize = VF->Size;
320 Cache.HeaderP->VerFileCount++;
321
322 return true;
323}
324 /*}}}*/
// CacheGenerator::NewVersion - Create a new Version 			/*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list. Returns the map
   offset of the new record, or 0 on allocation failure; the caller is
   responsible for storing that offset into the package's chain and for
   setting ParentPkg/Hash. */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
					    const string &VerStr,
					    unsigned long Next)
{
   // Get a structure
   unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;
   
   // Fill it in; Next preserves the caller's chosen chain position
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = Map.WriteString(VerStr);
   if (Ver->VerStr == 0)
      return 0;
   
   return Version;
}
									/*}}}*/
// ListParser::NewDepends - Create a dependency element			/*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. Op and Type are
   pkgCache::Dep values describing the comparison and dependency kind. */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
					       const string &PackageName,
					       const string &Version,
					       unsigned int Op,
					       unsigned int Type)
{
   pkgCache &Cache = Owner->Cache;
   
   // Get a structure
   unsigned long Dependency = Owner->Map.Allocate(sizeof(pkgCache::Dependency));
   if (Dependency == 0)
      return false;
   
   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;
   
   // Locate the target package; creates a (possibly virtual) entry if absent
   pkgCache::PkgIterator Pkg;
   if (Owner->NewPackage(Pkg,PackageName) == false)
      return false;
   
   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*      for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
	 if (I->Version != 0 && I.TargetVer() == Version)
	    Dep->Version = I->Version;*/
      // NOTE(review): the string-sharing probe above is disabled, so this
      // check relies on Allocate() handing back zeroed memory — confirm.
      if (Dep->Version == 0)
	 if ((Dep->Version = WriteString(Version)) == 0)
	    return false;
   }
      
   // Link it to the package (new entry at the head of the rev-depends list)
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();
   
   /* Link it to the version (at the end of the list)
      Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
	 OldDepLast = &D->NextDepends;
      OldDepVer = Ver;
   }

   // Is it a file dependency? These are resolved by MergeFileProvides later
   if (PackageName[0] == '/')
      FoundFileDeps = true;
   
   // Append to the cached tail and advance the tail pointer
   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;

   return true;
}
									/*}}}*/
// ListParser::NewProvides - Create a Provides element			/*{{{*/
// ---------------------------------------------------------------------
/* Records that Ver provides PackageName (optionally at a specific
   Version) and links the new record into both the version's provides
   list and the provided package's list of providers. */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
						const string &PackageName,
						const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PackageName)
      return true;
   
   // Get a structure
   unsigned long Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (Provides == 0)
      return false;
   Cache.HeaderP->ProvidesCount++;
   
   // Fill it in, chaining at the head of the version's provides list
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && (Prv->ProvideVersion = WriteString(Version)) == 0)
      return false;
   
   // Locate the target package; creates a virtual package entry if needed
   pkgCache::PkgIterator Pkg;
   if (Owner->NewPackage(Pkg,PackageName) == false)
      return false;
   
   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();
   
   return true;
}
									/*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed	/*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
				   const pkgIndexFile &Index,
				   unsigned long Flags)
{
   // Get some space for the structure; Allocate returns 0 on failure,
   // which leaves CurrentFile pointing at the pool base
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;
   
   // Fill it in
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   PkgFileName = File;
   // Chain at the head of the global package file list
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;
   
   // NOTE(review): when this fails the entry is already linked into the
   // file list, leaving a partial record behind — callers are expected
   // to abort the whole generation on a false return; confirm.
   if (CurrentFile->FileName == 0)
      return false;
   
   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
									/*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string		/*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number. Strings are kept in a sorted singly
   linked list inside the cache; returns the string's map offset, or 0
   on allocation failure. */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
						 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   // Bucket key uses the first two bytes; for a one-character string S[1]
   // is presumably the NUL terminator — TODO confirm all callers pass
   // NUL-terminated data.
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 && 
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;
   
   // Search for an insertion point in the sorted list
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem, 
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
	 break;
   }
   
   // Match
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }
   
   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure, splicing the new item in front of I
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;
   
   // Remember the hit in the transient hash table for next time
   Bucket = ItemP;
   return ItemP->String;
}
									/*}}}*/
536
// CheckValidity - Check that a cache is up-to-date			/*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has matching attributes with the cache and the cache does not have
   any extra files. Returns true only if the cache is fully usable; on
   success the open map is handed back through OutMap if requested. */
static bool CheckValidity(const string &CacheFile, FileIterator Start, 
                          FileIterator End,MMap **OutMap = 0)
{
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
      return false;
   
   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,MMap::Public | MMap::ReadOnly);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      // A damaged/empty cache is simply treated as out of date
      _error->Discard();
      return false;
   }
   
   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {      
      if ((*Start)->HasPackages() == false)
	 continue;
    
      if ((*Start)->Exists() == false)
      {
	 // A missing index is only warned about; the cache can still match
	 _error->WarningE("stat",_("Couldn't stat source package list %s"),
			  (*Start)->Describe().c_str());
	 continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
	 return false;
      
      Visited[File->ID] = true;
   }
   
   // Any cache entry not visited belongs to a removed source: cache is stale
   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
	 return false;
   
   if (_error->PendingError() == true)
   {
      _error->Discard();
      return false;
   }
   
   // Transfer ownership of the still-open map to the caller
   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
									/*}}}*/
598// ComputeSize - Compute the total size of a bunch of files /*{{{*/
599// ---------------------------------------------------------------------
600/* Size is kind of an abstract notion that is only used for the progress
601 meter */
602static unsigned long ComputeSize(FileIterator Start,FileIterator End)
603{
604 unsigned long TotalSize = 0;
605 for (; Start != End; Start++)
606 {
607 if ((*Start)->HasPackages() == false)
608 continue;
609 TotalSize += (*Start)->Size();
610 }
611 return TotalSize;
612}
613 /*}}}*/
// BuildCache - Merge the list of index files into the cache		/*{{{*/
// ---------------------------------------------------------------------
/* Runs every index file in [Start,End) through the generator, updating
   the shared progress counters, then performs a second pass to collect
   file provides if any file dependencies were encountered. */
static bool BuildCache(pkgCacheGenerator &Gen,
		       OpProgress &Progress,
		       unsigned long &CurrentSize,unsigned long TotalSize,
		       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
	 continue;
      
      if ((*I)->Exists() == false)
	 continue;

      // Refuse to merge the same index twice
      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
	 _error->Warning("Duplicate sources.list entry %s",
			 (*I)->Describe().c_str());
	 continue;
      }
      
      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;
      
      if ((*I)->Merge(Gen,Progress) == false)
	 return false;
   }   

   // Second pass: resolve file dependencies found during the merge
   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
	 unsigned long Size = (*I)->Size();
	 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
	 CurrentSize += Size;
	 if ((*I)->MergeFileProvides(Gen,Progress) == false)
	    return false;
      }
   }
   
   return true;
}
									/*}}}*/
// MakeStatusCache - Construct the status cache				/*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all 
   index files from the sources list and all local ones) is ready
   to be mmaped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   // Upper bound for the generated cache, controlled by APT::Cache-Limit
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",12*1024*1024);
   
   // Gather every index file from every source in a flat list
   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
         Files.push_back (*j);
   }
   
   // Files before EndOfSource come from sources.list; after it, status files
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;
   
   // Decide if we can write to the files..
   string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
   
   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   
   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
   
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   
   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      return true;
   }
   
   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      // Build directly into the on-disk cache file
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(MMap::Public,MapSize);
   }
   
   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      // Preload the map with the source cache and only merge status files
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      if (SCacheF.Read((unsigned char *)Map->Data() + Map->RawAllocate(SCacheF.Size()),
		       SCacheF.Size()) == false)
	 return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
      
      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;
   }
   else
   {
      // Source cache is stale; rebuild everything from scratch
      TotalSize = ComputeSize(Files.begin(),Files.end());
      
      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;
      
      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;
	 
	 fchmod(SCacheF.Fd(),0644);
	 
	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 SCacheF.Sync();
	 
	 /* Write out the proper header: the dirty flag is cleared only
	    for the on-disk copy, then restored for the live map which
	    continues to be extended below. */
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
	 SCacheF.Sync();
      }
      
      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;
   }

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 // Re-open the finished cache file read-only for the caller
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,MMap::Public | MMap::ReadOnly);
      }
      else
      {
	 *OutMap = Map.UnGuard();
      }      
   }
   
   return true;
}
									/*}}}*/
// MakeOnlyStatusCache - Build a cache with just the status files	/*{{{*/
// ---------------------------------------------------------------------
/* Builds an in-memory cache containing only the system status files
   (no sources.list indexes); nothing is written to disk. */
bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
{
   // NOTE(review): the default limit is 8MB here but 12MB in
   // pkgMakeStatusCache above — confirm whether that is intentional.
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",8*1024*1024);
   vector<pkgIndexFile *> Files;
   // Files is empty, so EndOfSource is always 0: everything merged below
   // comes from the status files alone.
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;
   
   SPtr<DynamicMMap> Map;   
   Map = new DynamicMMap(MMap::Public,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   
   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
   
   // Build the status cache
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),&Progress);
   if (_error->PendingError() == true)
      return false;
   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		  Files.begin()+EndOfSource,Files.end()) == false)
      return false;

   if (_error->PendingError() == true)
      return false;
   // Hand ownership of the in-memory map to the caller
   *OutMap = Map.UnGuard();
   
   return true;
}
									/*}}}*/