[apt.git] / apt-pkg / pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/strutl.h>
22 #include <apt-pkg/sptr.h>
23 #include <apt-pkg/pkgsystem.h>
24
25 #include <apt-pkg/tagfile.h>
26
27 #include <apti18n.h>
28
29 #include <vector>
30
31 #include <sys/stat.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <stdio.h>
35 #include <system.h>
36 /*}}}*/
37 typedef vector<pkgIndexFile *>::iterator FileIterator;
38
39 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
40 // ---------------------------------------------------------------------
41 /* We set the dirty flag and make sure that it is written to the disk */
42 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
43 Map(*pMap), Cache(pMap,false), Progress(Prog),
44 FoundFileDeps(0)
45 {
46 CurrentFile = 0;
47 memset(UniqHash,0,sizeof(UniqHash));
48
49 if (_error->PendingError() == true)
50 return;
51
52 if (Map.Size() == 0)
53 {
54 // Setup the map interface..
55 Cache.HeaderP = (pkgCache::Header *)Map.Data();
56 Map.RawAllocate(sizeof(pkgCache::Header));
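// The allocation pools recorded in the header group same-sized cache structures together in the map.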
57 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
58
59 // Starting header
60 *Cache.HeaderP = pkgCache::Header();
61 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
62 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
63 Cache.ReMap();
64 }
65 else
66 {
67 // Map directly from the existing file
68 Cache.ReMap();
69 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
70 if (Cache.VS != _system->VS)
71 {
72 _error->Error(_("Cache has an incompatible versioning system"));
73 return;
74 }
75 }
76
77 Cache.HeaderP->Dirty = true;
78 Map.Sync(0,sizeof(pkgCache::Header));
79 }
80 /*}}}*/
81 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
82 // ---------------------------------------------------------------------
83 /* We sync the data then unset the dirty flag in two steps so as to
84 avoid a problem during a crash */
85 pkgCacheGenerator::~pkgCacheGenerator()
86 {
87 if (_error->PendingError() == true)
88 return;
89 if (Map.Sync() == false)
90 return;
91
92 Cache.HeaderP->Dirty = false;
93 Map.Sync(0,sizeof(pkgCache::Header));
94 }
95 /*}}}*/
96 // CacheGenerator::MergeList - Merge the package list /*{{{*/
97 // ---------------------------------------------------------------------
98 /* This provides the generation of the entries in the cache. Each loop
99 goes through a single package record from the underlying parse engine. */
100 bool pkgCacheGenerator::MergeList(ListParser &List,
101 pkgCache::VerIterator *OutVer)
102 {
103 List.Owner = this;
104
105 unsigned int Counter = 0;
106 while (List.Step() == true)
107 {
108 // Get a pointer to the package structure
109 string PackageName = List.Package();
110 if (PackageName.empty() == true)
111 return false;
112
113 pkgCache::PkgIterator Pkg;
114 if (NewPackage(Pkg,PackageName) == false)
115 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
116 Counter++;
117 if (Counter % 100 == 0 && Progress != 0)
118 Progress->Progress(List.Offset());
119
120 /* Get a pointer to the version structure. We know the list is sorted
121 so we use that fact in the search. Insertion of new versions is
122 done with correct sorting */
123 string Version = List.Version();
124 if (Version.empty() == true)
125 {
126 // we first process the package, then the descriptions
127 // (this has the bonus that we get an MMap error when we run out
128 // of MMap space)
129 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
130 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
131 PackageName.c_str());
132
133 // Find the right version to write the description
134 MD5SumValue CurMd5 = List.Description_md5();
135 pkgCache::VerIterator Ver = Pkg.VersionList();
136 map_ptrloc *LastVer = &Pkg->VersionList;
137
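// This record carries no version (a Translation-* entry): attach a new description in this
// language to every version whose description list already contains the same md5 sum.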
138 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
139 {
140 pkgCache::DescIterator Desc = Ver.DescriptionList();
141 map_ptrloc *LastDesc = &Ver->DescriptionList;
142
143 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++)
144 {
145
146 if (MD5SumValue(Desc.md5()) == CurMd5)
147 {
148 // Add new description
149 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
150 Desc->ParentPkg = Pkg.Index();
151
152 if (NewFileDesc(Desc,List) == false)
153 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
154 break;
155 }
156 }
157 }
158
159 continue;
160 }
161
162 pkgCache::VerIterator Ver = Pkg.VersionList();
163 map_ptrloc *LastVer = &Pkg->VersionList;
164 int Res = 1;
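// Walk the sorted version list to the first entry that does not compare newer than the
// incoming version; that is the insertion point, and Res == 0 means the version already exists.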
165 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
166 {
167 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
168 if (Res >= 0)
169 break;
170 }
171
172 /* We already have a version for this item; record that we
173 saw it */
174 unsigned long Hash = List.VersionHash();
175 if (Res == 0 && Ver->Hash == Hash)
176 {
177 if (List.UsePackage(Pkg,Ver) == false)
178 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
179 PackageName.c_str());
180
181 if (NewFileVer(Ver,List) == false)
182 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
183 PackageName.c_str());
184
185 // Read only a single record and return
186 if (OutVer != 0)
187 {
188 *OutVer = Ver;
189 FoundFileDeps |= List.HasFileDeps();
190 return true;
191 }
192
193 continue;
194 }
195
196 // Skip to the end of the same version set.
197 if (Res == 0)
198 {
199 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
200 {
201 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
202 if (Res != 0)
203 break;
204 }
205 }
206
207 // Add a new version
208 *LastVer = NewVersion(Ver,Version,*LastVer);
209 Ver->ParentPkg = Pkg.Index();
210 Ver->Hash = Hash;
211
212 if (List.NewVersion(Ver) == false)
213 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
214 PackageName.c_str());
215
216 if (List.UsePackage(Pkg,Ver) == false)
217 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
218 PackageName.c_str());
219
220 if (NewFileVer(Ver,List) == false)
221 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
222 PackageName.c_str());
223
224 // Read only a single record and return
225 if (OutVer != 0)
226 {
227 *OutVer = Ver;
228 FoundFileDeps |= List.HasFileDeps();
229 return true;
230 }
231
232 /* Record the Description data. Description data always exist in
233 Packages and Translation-* files. */
234 pkgCache::DescIterator Desc = Ver.DescriptionList();
235 map_ptrloc *LastDesc = &Ver->DescriptionList;
236
237 // Skip to the end of description set
238 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
239
240 // Add new description
241 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
242 Desc->ParentPkg = Pkg.Index();
243
244 if (NewFileDesc(Desc,List) == false)
245 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
246 }
247
248 FoundFileDeps |= List.HasFileDeps();
249
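// The ID fields in the cache structures are of fixed width; refuse to continue if any counter would no longer fit.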
250 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
251 return _error->Error(_("Wow, you exceeded the number of package "
252 "names this APT is capable of."));
253 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
254 return _error->Error(_("Wow, you exceeded the number of versions "
255 "this APT is capable of."));
256 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
257 return _error->Error(_("Wow, you exceeded the number of descriptions "
258 "this APT is capable of."));
259 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
260 return _error->Error(_("Wow, you exceeded the number of dependencies "
261 "this APT is capable of."));
262 return true;
263 }
264 /*}}}*/
265 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
266 // ---------------------------------------------------------------------
267 /* If we found any file depends while parsing the main list we need to
268 resolve them. Since it is undesirable to load the entire list of files
269 into the cache as virtual packages we do a two-stage effort. MergeList
270 identifies the file depends and this creates Provides for them by
271 re-parsing all the indexes. */
272 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
273 {
274 List.Owner = this;
275
276 unsigned int Counter = 0;
277 while (List.Step() == true)
278 {
279 string PackageName = List.Package();
280 if (PackageName.empty() == true)
281 return false;
282 string Version = List.Version();
283 if (Version.empty() == true)
284 continue;
285
286 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
287 if (Pkg.end() == true)
288 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
289 PackageName.c_str());
290 Counter++;
291 if (Counter % 100 == 0 && Progress != 0)
292 Progress->Progress(List.Offset());
293
294 unsigned long Hash = List.VersionHash();
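// Find the exact version this record describes; the collected file provides are attached to that version only.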
295 pkgCache::VerIterator Ver = Pkg.VersionList();
296 for (; Ver.end() == false; Ver++)
297 {
298 if (Ver->Hash == Hash && Version == Ver.VerStr())
299 {
300 if (List.CollectFileProvides(Cache,Ver) == false)
301 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
302 break;
303 }
304 }
305
306 if (Ver.end() == true)
307 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
308 }
309
310 return true;
311 }
312 /*}}}*/
313 // CacheGenerator::NewPackage - Add a new package /*{{{*/
314 // ---------------------------------------------------------------------
315 /* This creates a new package structure and adds it to the hash table */
316 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name)
317 {
318 Pkg = Cache.FindPkg(Name);
319 if (Pkg.end() == false)
320 return true;
321
322 // Get a structure
323 unsigned long Package = Map.Allocate(sizeof(pkgCache::Package));
324 if (Package == 0)
325 return false;
326
327 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
328
329 // Insert it into the hash table
330 unsigned long Hash = Cache.Hash(Name);
331 Pkg->NextPackage = Cache.HeaderP->HashTable[Hash];
332 Cache.HeaderP->HashTable[Hash] = Package;
333
334 // Set the name and the ID
335 Pkg->Name = Map.WriteString(Name);
336 if (Pkg->Name == 0)
337 return false;
338 Pkg->ID = Cache.HeaderP->PackageCount++;
339
340 return true;
341 }
342 /*}}}*/
343 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
344 // ---------------------------------------------------------------------
345 /* This links the version being built to the package file currently being
346 parsed, recording the offset and size of its record. */
346 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
347 ListParser &List)
348 {
349 if (CurrentFile == 0)
350 return true;
351
352 // Get a structure
353 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
354 if (VerFile == 0)
355 return false;
356
357 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
358 VF->File = CurrentFile - Cache.PkgFileP;
359
360 // Link it to the end of the list
361 map_ptrloc *Last = &Ver->FileList;
362 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
363 Last = &V->NextFile;
364 VF->NextFile = *Last;
365 *Last = VF.Index();
366
367 VF->Offset = List.Offset();
368 VF->Size = List.Size();
369 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
370 Cache.HeaderP->MaxVerFileSize = VF->Size;
371 Cache.HeaderP->VerFileCount++;
372
373 return true;
374 }
375 /*}}}*/
376 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
377 // ---------------------------------------------------------------------
378 /* This puts a version structure in the linked list */
379 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
380 const string &VerStr,
381 unsigned long Next)
382 {
383 // Get a structure
384 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
385 if (Version == 0)
386 return 0;
387
388 // Fill it in
389 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
390 Ver->NextVer = Next;
391 Ver->ID = Cache.HeaderP->VersionCount++;
392 Ver->VerStr = Map.WriteString(VerStr);
393 if (Ver->VerStr == 0)
394 return 0;
395
396 return Version;
397 }
398 /*}}}*/
399 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
400 // ---------------------------------------------------------------------
401 /* This links the description to the package file currently being parsed,
402 recording the offset and size of its record. */
402 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
403 ListParser &List)
404 {
405 if (CurrentFile == 0)
406 return true;
407
408 // Get a structure
409 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
410 if (DescFile == 0)
411 return false;
412
413 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
414 DF->File = CurrentFile - Cache.PkgFileP;
415
416 // Link it to the end of the list
417 map_ptrloc *Last = &Desc->FileList;
418 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
419 Last = &D->NextFile;
420
421 DF->NextFile = *Last;
422 *Last = DF.Index();
423
424 DF->Offset = List.Offset();
425 DF->Size = List.Size();
426 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
427 Cache.HeaderP->MaxDescFileSize = DF->Size;
428 Cache.HeaderP->DescFileCount++;
429
430 return true;
431 }
432 /*}}}*/
433 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
434 // ---------------------------------------------------------------------
435 /* This puts a description structure in the linked list */
436 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
437 const string &Lang, const MD5SumValue &md5sum,
438 map_ptrloc Next)
439 {
440 // Get a structure
441 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
442 if (Description == 0)
443 return 0;
444
445 // Fill it in
446 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
447 Desc->NextDesc = Next;
448 Desc->ID = Cache.HeaderP->DescriptionCount++;
449 Desc->language_code = Map.WriteString(Lang);
450 Desc->md5sum = Map.WriteString(md5sum.Value());
451
452 return Description;
453 }
454 /*}}}*/
455 // ListParser::NewDepends - Create a dependency element /*{{{*/
456 // ---------------------------------------------------------------------
457 /* This creates a dependency element in the tree. It is linked to the
458 version and to the package that it is pointing to. */
459 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
460 const string &PackageName,
461 const string &Version,
462 unsigned int Op,
463 unsigned int Type)
464 {
465 pkgCache &Cache = Owner->Cache;
466
467 // Get a structure
468 unsigned long Dependency = Owner->Map.Allocate(sizeof(pkgCache::Dependency));
469 if (Dependency == 0)
470 return false;
471
472 // Fill it in
473 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
474 Dep->ParentVer = Ver.Index();
475 Dep->Type = Type;
476 Dep->CompareOp = Op;
477 Dep->ID = Cache.HeaderP->DependsCount++;
478
479 // Locate the target package
480 pkgCache::PkgIterator Pkg;
481 if (Owner->NewPackage(Pkg,PackageName) == false)
482 return false;
483
484 // Probe the reverse dependency list for a version string that matches
485 if (Version.empty() == false)
486 {
487 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
488 if (I->Version != 0 && I.TargetVer() == Version)
489 Dep->Version = I->Version;*/
490 if (Dep->Version == 0)
491 if ((Dep->Version = WriteString(Version)) == 0)
492 return false;
493 }
494
495 // Link it to the package
496 Dep->Package = Pkg.Index();
497 Dep->NextRevDepends = Pkg->RevDepends;
498 Pkg->RevDepends = Dep.Index();
499
500 /* Link it to the version (at the end of the list)
501 Caching the old end point speeds up generation substantially */
502 if (OldDepVer != Ver)
503 {
504 OldDepLast = &Ver->DependsList;
505 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
506 OldDepLast = &D->NextDepends;
507 OldDepVer = Ver;
508 }
509
510 // Is it a file dependency?
511 if (PackageName[0] == '/')
512 FoundFileDeps = true;
513
514 Dep->NextDepends = *OldDepLast;
515 *OldDepLast = Dep.Index();
516 OldDepLast = &Dep->NextDepends;
517
518 return true;
519 }
520 /*}}}*/
521 // ListParser::NewProvides - Create a Provides element /*{{{*/
522 // ---------------------------------------------------------------------
523 /* This creates a provides entry and links it into both the providing
524 version and the provided (target) package. */
524 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
525 const string &PackageName,
526 const string &Version)
527 {
528 pkgCache &Cache = Owner->Cache;
529
530 // We do not add self-referencing provides
531 if (Ver.ParentPkg().Name() == PackageName)
532 return true;
533
534 // Get a structure
535 unsigned long Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
536 if (Provides == 0)
537 return false;
538 Cache.HeaderP->ProvidesCount++;
539
540 // Fill it in
541 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
542 Prv->Version = Ver.Index();
543 Prv->NextPkgProv = Ver->ProvidesList;
544 Ver->ProvidesList = Prv.Index();
545 if (Version.empty() == false && (Prv->ProvideVersion = WriteString(Version)) == 0)
546 return false;
547
548 // Locate the target package
549 pkgCache::PkgIterator Pkg;
550 if (Owner->NewPackage(Pkg,PackageName) == false)
551 return false;
552
553 // Link it to the package
554 Prv->ParentPkg = Pkg.Index();
555 Prv->NextProvides = Pkg->ProvidesList;
556 Pkg->ProvidesList = Prv.Index();
557
558 return true;
559 }
560 /*}}}*/
561 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
562 // ---------------------------------------------------------------------
563 /* This is used to select which file is to be associated with all newly
564 added versions. The caller is responsible for setting the IMS fields. */
565 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
566 const pkgIndexFile &Index,
567 unsigned long Flags)
568 {
569 // Get some space for the structure
570 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
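// Map.Allocate returns 0 on failure, which would leave CurrentFile pointing at the start of the file array (checked below).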
571 if (CurrentFile == Cache.PkgFileP)
572 return false;
573
574 // Fill it in
575 CurrentFile->FileName = Map.WriteString(File);
576 CurrentFile->Site = WriteUniqString(Site);
577 CurrentFile->NextFile = Cache.HeaderP->FileList;
578 CurrentFile->Flags = Flags;
579 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
580 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
581 PkgFileName = File;
582 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
583 Cache.HeaderP->PackageFileCount++;
584
585 if (CurrentFile->FileName == 0)
586 return false;
587
588 if (Progress != 0)
589 Progress->SubProgress(Index.Size());
590 return true;
591 }
592 /*}}}*/
593 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
594 // ---------------------------------------------------------------------
595 /* This is used to create handles to strings. Given the same text it
596 always returns the same number */
597 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
598 unsigned int Size)
599 {
600 /* We use a very small transient hash table here; this speeds up generation
601 by a fair amount on slower machines */
602 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
603 if (Bucket != 0 &&
604 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
605 return Bucket->String;
606
607 // Search for an insertion point
608 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
609 int Res = 1;
610 map_ptrloc *Last = &Cache.HeaderP->StringList;
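// The string list is kept sorted, so the scan can stop at the first entry that does not compare greater than S; Res == 0 is an exact match.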
611 for (; I != Cache.StringItemP; Last = &I->NextItem,
612 I = Cache.StringItemP + I->NextItem)
613 {
614 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
615 if (Res >= 0)
616 break;
617 }
618
619 // Match
620 if (Res == 0)
621 {
622 Bucket = I;
623 return I->String;
624 }
625
626 // Get a structure
627 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
628 if (Item == 0)
629 return 0;
630
631 // Fill in the structure
632 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
633 ItemP->NextItem = I - Cache.StringItemP;
634 *Last = Item;
635 ItemP->String = Map.WriteString(S,Size);
636 if (ItemP->String == 0)
637 return 0;
638
639 Bucket = ItemP;
640 return ItemP->String;
641 }
642 /*}}}*/
643
644 // CheckValidity - Check that a cache is up-to-date /*{{{*/
645 // ---------------------------------------------------------------------
646 /* This just verifies that each file in the list of index files exists,
647 has matching attributes with the cache and the cache does not have
648 any extra files. */
649 static bool CheckValidity(const string &CacheFile, FileIterator Start,
650 FileIterator End,MMap **OutMap = 0)
651 {
652 // No file, certainly invalid
653 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
654 return false;
655
656 // Map it
657 FileFd CacheF(CacheFile,FileFd::ReadOnly);
658 SPtr<MMap> Map = new MMap(CacheF,MMap::Public | MMap::ReadOnly);
659 pkgCache Cache(Map);
660 if (_error->PendingError() == true || Map->Size() == 0)
661 {
662 _error->Discard();
663 return false;
664 }
665
666 /* Now we check every index file, see if it is in the cache,
667 verify the IMS data and check that it is on the disk too.. */
668 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
669 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
670 for (; Start != End; Start++)
671 {
672 if ((*Start)->HasPackages() == false)
673 continue;
674
675 if ((*Start)->Exists() == false)
676 {
677 #if 0 // mvo: we no longer give a message here (Default Sources spec)
678 _error->WarningE("stat",_("Couldn't stat source package list %s"),
679 (*Start)->Describe().c_str());
680 #endif
681 continue;
682 }
683
684 // FindInCache is also expected to do an IMS check.
685 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
686 if (File.end() == true)
687 return false;
688
689 Visited[File->ID] = true;
690 }
691
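// Any package file recorded in the cache that was not visited above belongs to an index that is no longer configured, so the cache is stale.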
692 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
693 if (Visited[I] == false)
694 return false;
695
696 if (_error->PendingError() == true)
697 {
698 _error->Discard();
699 return false;
700 }
701
702 if (OutMap != 0)
703 *OutMap = Map.UnGuard();
704 return true;
705 }
706 /*}}}*/
707 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
708 // ---------------------------------------------------------------------
709 /* Size is kind of an abstract notion that is only used for the progress
710 meter */
711 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
712 {
713 unsigned long TotalSize = 0;
714 for (; Start != End; Start++)
715 {
716 if ((*Start)->HasPackages() == false)
717 continue;
718 TotalSize += (*Start)->Size();
719 }
720 return TotalSize;
721 }
722 /*}}}*/
723 // BuildCache - Merge the list of index files into the cache /*{{{*/
724 // ---------------------------------------------------------------------
725 /* Merge every index file that exists and is not already in the cache; if
726 file dependencies were seen, make a second pass to collect file provides. */
726 static bool BuildCache(pkgCacheGenerator &Gen,
727 OpProgress &Progress,
728 unsigned long &CurrentSize,unsigned long TotalSize,
729 FileIterator Start, FileIterator End)
730 {
731 FileIterator I;
732 for (I = Start; I != End; I++)
733 {
734 if ((*I)->HasPackages() == false)
735 continue;
736
737 if ((*I)->Exists() == false)
738 continue;
739
740 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
741 {
742 _error->Warning("Duplicate sources.list entry %s",
743 (*I)->Describe().c_str());
744 continue;
745 }
746
747 unsigned long Size = (*I)->Size();
748 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
749 CurrentSize += Size;
750
751 if ((*I)->Merge(Gen,Progress) == false)
752 return false;
753 }
754
755 if (Gen.HasFileDeps() == true)
756 {
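// Second pass: re-read the same indexes so that file dependencies found during the first pass get matching Provides entries (MergeFileProvides).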
757 Progress.Done();
758 TotalSize = ComputeSize(Start, End);
759 CurrentSize = 0;
760 for (I = Start; I != End; I++)
761 {
762 unsigned long Size = (*I)->Size();
763 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
764 CurrentSize += Size;
765 if ((*I)->MergeFileProvides(Gen,Progress) == false)
766 return false;
767 }
768 }
769
770 return true;
771 }
772 /*}}}*/
773 // MakeStatusCache - Construct the status cache /*{{{*/
774 // ---------------------------------------------------------------------
775 /* This makes sure that the status cache (the cache that has all
776 index files from the sources list and all local ones) is ready
777 to be mmapped. If OutMap is not zero then an MMap object representing
778 the cache will be stored there. This is pretty much mandatory if you
779 are using AllowMem. AllowMem lets the function be run as non-root
780 where it builds the cache 'fast' into a memory buffer. */
781 bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
782 MMap **OutMap,bool AllowMem)
783 {
784 unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
785
786 vector<pkgIndexFile *> Files;
787 for (vector<metaIndex *>::const_iterator i = List.begin();
788 i != List.end();
789 i++)
790 {
791 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
792 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
793 j != Indexes->end();
794 j++)
795 Files.push_back (*j);
796 }
797
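// Everything in Files before this point came from the sources list; the status files appended below are the part that is rebuilt even when the source cache is reused.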
798 unsigned long EndOfSource = Files.size();
799 if (_system->AddStatusFiles(Files) == false)
800 return false;
801
802 // Decide if we can write to the files..
803 string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
804 string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
805
806 // Decide if we can write to the cache
807 bool Writeable = false;
808 if (CacheFile.empty() == false)
809 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
810 else
811 if (SrcCacheFile.empty() == false)
812 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
813
814 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
815 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
816
817 Progress.OverallProgress(0,1,1,_("Reading package lists"));
818
819 // Cache is OK, Fin.
820 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
821 {
822 Progress.OverallProgress(1,1,1,_("Reading package lists"));
823 return true;
824 }
825
826 /* At this point we know we need to reconstruct the package cache,
827 begin. */
828 SPtr<FileFd> CacheF;
829 SPtr<DynamicMMap> Map;
830 if (Writeable == true && CacheFile.empty() == false)
831 {
832 unlink(CacheFile.c_str());
833 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
834 fchmod(CacheF->Fd(),0644);
835 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
836 if (_error->PendingError() == true)
837 return false;
838 }
839 else
840 {
841 // Just build it in memory..
842 Map = new DynamicMMap(MMap::Public,MapSize);
843 }
844
845 // Let's try the source cache.
846 unsigned long CurrentSize = 0;
847 unsigned long TotalSize = 0;
848 if (CheckValidity(SrcCacheFile,Files.begin(),
849 Files.begin()+EndOfSource) == true)
850 {
851 // Preload the map with the source cache
852 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
853 if (SCacheF.Read((unsigned char *)Map->Data() + Map->RawAllocate(SCacheF.Size()),
854 SCacheF.Size()) == false)
855 return false;
856
857 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
858
859 // Build the status cache
860 pkgCacheGenerator Gen(Map.Get(),&Progress);
861 if (_error->PendingError() == true)
862 return false;
863 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
864 Files.begin()+EndOfSource,Files.end()) == false)
865 return false;
866 }
867 else
868 {
869 TotalSize = ComputeSize(Files.begin(),Files.end());
870
871 // Build the source cache
872 pkgCacheGenerator Gen(Map.Get(),&Progress);
873 if (_error->PendingError() == true)
874 return false;
875 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
876 Files.begin(),Files.begin()+EndOfSource) == false)
877 return false;
878
879 // Write it back
880 if (Writeable == true && SrcCacheFile.empty() == false)
881 {
882 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
883 if (_error->PendingError() == true)
884 return false;
885
886 fchmod(SCacheF.Fd(),0644);
887
888 // Write out the main data
889 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
890 return _error->Error(_("IO Error saving source cache"));
891 SCacheF.Sync();
892
893 // Write out the proper header
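// The dirty flag is cleared only for the copy written to disk; the in-memory map is still being extended with the status files, so it is marked dirty again immediately afterwards.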
894 Gen.GetCache().HeaderP->Dirty = false;
895 if (SCacheF.Seek(0) == false ||
896 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
897 return _error->Error(_("IO Error saving source cache"));
898 Gen.GetCache().HeaderP->Dirty = true;
899 SCacheF.Sync();
900 }
901
902 // Build the status cache
903 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
904 Files.begin()+EndOfSource,Files.end()) == false)
905 return false;
906 }
907
908 if (_error->PendingError() == true)
909 return false;
910 if (OutMap != 0)
911 {
912 if (CacheF != 0)
913 {
914 delete Map.UnGuard();
915 *OutMap = new MMap(*CacheF,MMap::Public | MMap::ReadOnly);
916 }
917 else
918 {
919 *OutMap = Map.UnGuard();
920 }
921 }
922
923 return true;
924 }
925 /*}}}*/
926 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
927 // ---------------------------------------------------------------------
928 /* Build an in-memory cache containing only the status files, skipping the
929 sources.list indexes entirely. */
929 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
930 {
931 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
932 vector<pkgIndexFile *> Files;
933 unsigned long EndOfSource = Files.size();
934 if (_system->AddStatusFiles(Files) == false)
935 return false;
936
937 SPtr<DynamicMMap> Map;
938 Map = new DynamicMMap(MMap::Public,MapSize);
939 unsigned long CurrentSize = 0;
940 unsigned long TotalSize = 0;
941
942 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
943
944 // Build the status cache
945 Progress.OverallProgress(0,1,1,_("Reading package lists"));
946 pkgCacheGenerator Gen(Map.Get(),&Progress);
947 if (_error->PendingError() == true)
948 return false;
949 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
950 Files.begin()+EndOfSource,Files.end()) == false)
951 return false;
952
953 if (_error->PendingError() == true)
954 return false;
955 *OutMap = Map.UnGuard();
956
957 return true;
958 }
959 /*}}}*/