apt.git: apt-pkg/pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/strutl.h>
22 #include <apt-pkg/sptr.h>
23 #include <apt-pkg/pkgsystem.h>
24 #include <apt-pkg/macros.h>
25
26 #include <apt-pkg/tagfile.h>
27
28 #include <apti18n.h>
29
30 #include <vector>
31
32 #include <sys/stat.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <stdio.h>
36 /*}}}*/
37 typedef vector<pkgIndexFile *>::iterator FileIterator;
38
39 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
40 // ---------------------------------------------------------------------
41 /* We set the dirty flag and make sure that it is written to the disk */
42 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
43 Map(*pMap), Cache(pMap,false), Progress(Prog),
44 FoundFileDeps(0)
45 {
46 CurrentFile = 0;
47 memset(UniqHash,0,sizeof(UniqHash));
48
49 if (_error->PendingError() == true)
50 return;
51
52 if (Map.Size() == 0)
53 {
54 // Set up the map interface..
55 Cache.HeaderP = (pkgCache::Header *)Map.Data();
56 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
57 return;
58
59 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
60
61 // Starting header
62 *Cache.HeaderP = pkgCache::Header();
63 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
64 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
65 Cache.ReMap();
66 }
67 else
68 {
69 // Map directly from the existing file
70 Cache.ReMap();
71 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
72 if (Cache.VS != _system->VS)
73 {
74 _error->Error(_("Cache has an incompatible versioning system"));
75 return;
76 }
77 }
78
79 Cache.HeaderP->Dirty = true;
80 Map.Sync(0,sizeof(pkgCache::Header));
81 }
82 /*}}}*/
83 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
84 // ---------------------------------------------------------------------
85 /* We sync the data then unset the dirty flag in two steps so as to
86 avoid a problem during a crash */
87 pkgCacheGenerator::~pkgCacheGenerator()
88 {
89 if (_error->PendingError() == true)
90 return;
91 if (Map.Sync() == false)
92 return;
93
94 Cache.HeaderP->Dirty = false;
95 Map.Sync(0,sizeof(pkgCache::Header));
96 }
97 /*}}}*/
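// The constructor/destructor pair above forms a small crash-safety protocol
// around the Dirty flag.  The following sketch (illustrative only, not part
// of the original source) spells out the intended write ordering; the claim
// that readers reject a cache whose header still has Dirty set reflects how
// pkgCache is expected to behave, not code in this file.
#if 0
Cache.HeaderP->Dirty = true;              // 1) mark the cache as under construction
Map.Sync(0,sizeof(pkgCache::Header));     //    and push that marker to disk first
/* ... the generator fills in packages, versions, depends ... */
if (Map.Sync() == true)                   // 2) flush every record to disk
{
   Cache.HeaderP->Dirty = false;          // 3) only then clear the flag
   Map.Sync(0,sizeof(pkgCache::Header));  //    and persist the header again
}
// A crash anywhere between 1) and 3) leaves Dirty == true on disk, so the
// half-written file can be detected and rebuilt instead of being trusted.
#endif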
98 // CacheGenerator::MergeList - Merge the package list /*{{{*/
99 // ---------------------------------------------------------------------
100 /* This generates the entries in the cache. Each pass of the loop
101 processes a single package record from the underlying parse engine. */
102 bool pkgCacheGenerator::MergeList(ListParser &List,
103 pkgCache::VerIterator *OutVer)
104 {
105 List.Owner = this;
106
107 unsigned int Counter = 0;
108 while (List.Step() == true)
109 {
110 // Get a pointer to the package structure
111 string PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115 pkgCache::PkgIterator Pkg;
116 if (NewPackage(Pkg,PackageName) == false)
117 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
118 Counter++;
119 if (Counter % 100 == 0 && Progress != 0)
120 Progress->Progress(List.Offset());
121
122 /* Get a pointer to the version structure. We know the list is sorted
123 so we use that fact in the search. Insertion of new versions is
124 done with correct sorting */
125 string Version = List.Version();
126 if (Version.empty() == true)
127 {
128 // we first process the package, then the descriptions
129 // (this has the bonus that we get an MMap error when we run
130 // out of MMap space)
131 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
132 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
133 PackageName.c_str());
134
135 // Find the right version to write the description
136 MD5SumValue CurMd5 = List.Description_md5();
137 pkgCache::VerIterator Ver = Pkg.VersionList();
138 map_ptrloc *LastVer = &Pkg->VersionList;
139
140 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
141 {
142 pkgCache::DescIterator Desc = Ver.DescriptionList();
143 map_ptrloc *LastDesc = &Ver->DescriptionList;
144 bool duplicate=false;
145
146 // don't add a new description if we have one for the given
147 // md5 && language
148 for ( ; Desc.end() == false; Desc++)
149 if (MD5SumValue(Desc.md5()) == CurMd5 &&
150 Desc.LanguageCode() == List.DescriptionLanguage())
151 duplicate=true;
152 if(duplicate)
153 continue;
154
155 for (Desc = Ver.DescriptionList();
156 Desc.end() == false;
157 LastDesc = &Desc->NextDesc, Desc++)
158 {
159 if (MD5SumValue(Desc.md5()) == CurMd5)
160 {
161 // Add new description
162 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
163 Desc->ParentPkg = Pkg.Index();
164
165 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
166 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
167 break;
168 }
169 }
170 }
171
172 continue;
173 }
174
175 pkgCache::VerIterator Ver = Pkg.VersionList();
176 map_ptrloc *LastVer = &Pkg->VersionList;
177 int Res = 1;
178 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
179 {
180 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
181 if (Res >= 0)
182 break;
183 }
184
185 /* We already have a version for this item, record that we
186 saw it */
187 unsigned long Hash = List.VersionHash();
188 if (Res == 0 && Ver->Hash == Hash)
189 {
190 if (List.UsePackage(Pkg,Ver) == false)
191 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
192 PackageName.c_str());
193
194 if (NewFileVer(Ver,List) == false)
195 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
196 PackageName.c_str());
197
198 // Read only a single record and return
199 if (OutVer != 0)
200 {
201 *OutVer = Ver;
202 FoundFileDeps |= List.HasFileDeps();
203 return true;
204 }
205
206 continue;
207 }
208
209 // Skip to the end of the same version set.
210 if (Res == 0)
211 {
212 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
213 {
214 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
215 if (Res != 0)
216 break;
217 }
218 }
219
220 // Add a new version
221 *LastVer = NewVersion(Ver,Version,*LastVer);
222 Ver->ParentPkg = Pkg.Index();
223 Ver->Hash = Hash;
224
225 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
226 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
227 PackageName.c_str());
228
229 if (List.UsePackage(Pkg,Ver) == false)
230 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
231 PackageName.c_str());
232
233 if (NewFileVer(Ver,List) == false)
234 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
235 PackageName.c_str());
236
237 // Read only a single record and return
238 if (OutVer != 0)
239 {
240 *OutVer = Ver;
241 FoundFileDeps |= List.HasFileDeps();
242 return true;
243 }
244
245 /* Record the Description data. Description data always exists in
246 Packages and Translation-* files. */
247 pkgCache::DescIterator Desc = Ver.DescriptionList();
248 map_ptrloc *LastDesc = &Ver->DescriptionList;
249
250 // Skip to the end of description set
251 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
252
253 // Add new description
254 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
255 Desc->ParentPkg = Pkg.Index();
256
257 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
258 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
259 }
260
261 FoundFileDeps |= List.HasFileDeps();
262
263 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
264 return _error->Error(_("Wow, you exceeded the number of package "
265 "names this APT is capable of."));
266 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
267 return _error->Error(_("Wow, you exceeded the number of versions "
268 "this APT is capable of."));
269 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
270 return _error->Error(_("Wow, you exceeded the number of descriptions "
271 "this APT is capable of."));
272 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
273 return _error->Error(_("Wow, you exceeded the number of dependencies "
274 "this APT is capable of."));
275 return true;
276 }
277 /*}}}*/
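// Rough usage sketch (not part of this file): MergeList() is driven by the
// individual index files.  A typical pkgIndexFile::Merge() implementation,
// shown here with an assumed debListParser and hypothetical variable names,
// registers the file with SelectFile() and then hands the generator a
// ListParser so it can walk every record:
#if 0
FileFd PkgF(PackagesFileName,FileFd::ReadOnly);              // PackagesFileName: hypothetical
debListParser Parser(&PkgF);
if (_error->PendingError() == true)
   return _error->Error("Problem opening %s",PackagesFileName.c_str());

if (Gen.SelectFile(PackagesFileName,Site,*this) == false)    // associate new versions
   return _error->Error("Problem with SelectFile");           // with this index file
if (Gen.MergeList(Parser) == false)                           // one pass over all records
   return _error->Error("Problem with MergeList");
#endif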
278 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
279 // ---------------------------------------------------------------------
280 /* If we found any file depends while parsing the main list we need to
281 resolve them. Since it is undesirable to load the entire list of files
282 into the cache as virtual packages, we do this in two stages: MergeList
283 identifies the file depends and this function creates Provides entries
284 for them by re-parsing all the indexes. */
285 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
286 {
287 List.Owner = this;
288
289 unsigned int Counter = 0;
290 while (List.Step() == true)
291 {
292 string PackageName = List.Package();
293 if (PackageName.empty() == true)
294 return false;
295 string Version = List.Version();
296 if (Version.empty() == true)
297 continue;
298
299 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
300 if (Pkg.end() == true)
301 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
302 PackageName.c_str());
303 Counter++;
304 if (Counter % 100 == 0 && Progress != 0)
305 Progress->Progress(List.Offset());
306
307 unsigned long Hash = List.VersionHash();
308 pkgCache::VerIterator Ver = Pkg.VersionList();
309 for (; Ver.end() == false; Ver++)
310 {
311 if (Ver->Hash == Hash && Version == Ver.VerStr()) // compare contents, not pointers
312 {
313 if (List.CollectFileProvides(Cache,Ver) == false)
314 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
315 break;
316 }
317 }
318
319 if (Ver.end() == true)
320 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
321 }
322
323 return true;
324 }
325 /*}}}*/
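// Illustrative sketch only (not part of the build): once MergeFileProvides()
// has run, a file dependency target such as "/bin/sh" resolves like any other
// virtual package, so its providers can be enumerated from the finished cache:
#if 0
pkgCache::PkgIterator P = Cache.FindPkg("/bin/sh");
if (P.end() == false)
   for (pkgCache::PrvIterator Prv = P.ProvidesList(); Prv.end() == false; Prv++)
      std::clog << Prv.OwnerPkg().Name() << " provides " << P.Name() << std::endl;
#endif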
326 // CacheGenerator::NewPackage - Add a new package /*{{{*/
327 // ---------------------------------------------------------------------
328 /* This creates a new package structure and adds it to the hash table */
329 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name)
330 {
331 Pkg = Cache.FindPkg(Name);
332 if (Pkg.end() == false)
333 return true;
334
335 // Get a structure
336 unsigned long Package = Map.Allocate(sizeof(pkgCache::Package));
337 if (Package == 0)
338 return false;
339
340 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
341
342 // Insert it into the hash table
343 unsigned long Hash = Cache.Hash(Name);
344 Pkg->NextPackage = Cache.HeaderP->HashTable[Hash];
345 Cache.HeaderP->HashTable[Hash] = Package;
346
347 // Set the name and the ID
348 Pkg->Name = Map.WriteString(Name);
349 if (Pkg->Name == 0)
350 return false;
351 Pkg->ID = Cache.HeaderP->PackageCount++;
352
353 return true;
354 }
355 /*}}}*/
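// The chain built above is what pkgCache::FindPkg() walks on lookup.  A rough
// sketch of that matching side (the real implementation lives in pkgcache.cc
// and may differ in detail; shown here only to illustrate the hash chain):
#if 0
pkgCache::Package *P = Cache.PkgP + Cache.HeaderP->HashTable[Cache.Hash(Name)];
for (; P != Cache.PkgP; P = Cache.PkgP + P->NextPackage)
   if (P->Name != 0 && stringcasecmp(Name,Cache.StrP + P->Name) == 0)
      return pkgCache::PkgIterator(Cache,P);
return pkgCache::PkgIterator(Cache,0);     // offset 0 terminates every chain
#endif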
356 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
357 // ---------------------------------------------------------------------
358 /* */
359 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
360 ListParser &List)
361 {
362 if (CurrentFile == 0)
363 return true;
364
365 // Get a structure
366 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
367 if (VerFile == 0)
368 return false;
369
370 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
371 VF->File = CurrentFile - Cache.PkgFileP;
372
373 // Link it to the end of the list
374 map_ptrloc *Last = &Ver->FileList;
375 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
376 Last = &V->NextFile;
377 VF->NextFile = *Last;
378 *Last = VF.Index();
379
380 VF->Offset = List.Offset();
381 VF->Size = List.Size();
382 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
383 Cache.HeaderP->MaxVerFileSize = VF->Size;
384 Cache.HeaderP->VerFileCount++;
385
386 return true;
387 }
388 /*}}}*/
389 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
390 // ---------------------------------------------------------------------
391 /* This puts a version structure in the linked list */
392 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
393 const string &VerStr,
394 unsigned long Next)
395 {
396 // Get a structure
397 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
398 if (Version == 0)
399 return 0;
400
401 // Fill it in
402 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
403 Ver->NextVer = Next;
404 Ver->ID = Cache.HeaderP->VersionCount++;
405 Ver->VerStr = Map.WriteString(VerStr);
406 if (Ver->VerStr == 0)
407 return 0;
408
409 return Version;
410 }
411 /*}}}*/
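// Every record NewVersion() creates lives inside the MMap, so list links are
// map_ptrloc offsets relative to the corresponding base pointer rather than
// raw pointers, and an offset of 0 marks the end of a list.  A minimal sketch
// (illustrative only) of walking a package's version list by hand:
#if 0
for (map_ptrloc V = Pkg->VersionList; V != 0; V = Cache.VerP[V].NextVer)
   std::clog << Cache.StrP + Cache.VerP[V].VerStr << std::endl;
// pkgCache::VerIterator wraps essentially this pattern.
#endif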
412 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
413 // ---------------------------------------------------------------------
414 /* */
415 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
416 ListParser &List)
417 {
418 if (CurrentFile == 0)
419 return true;
420
421 // Get a structure
422 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
423 if (DescFile == 0)
424 return false;
425
426 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
427 DF->File = CurrentFile - Cache.PkgFileP;
428
429 // Link it to the end of the list
430 map_ptrloc *Last = &Desc->FileList;
431 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
432 Last = &D->NextFile;
433
434 DF->NextFile = *Last;
435 *Last = DF.Index();
436
437 DF->Offset = List.Offset();
438 DF->Size = List.Size();
439 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
440 Cache.HeaderP->MaxDescFileSize = DF->Size;
441 Cache.HeaderP->DescFileCount++;
442
443 return true;
444 }
445 /*}}}*/
446 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
447 // ---------------------------------------------------------------------
448 /* This puts a description structure in the linked list */
449 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
450 const string &Lang,
451 const MD5SumValue &md5sum,
452 map_ptrloc Next)
453 {
454 // Get a structure
455 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
456 if (Description == 0)
457 return 0;
458
459 // Fill it in
460 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
461 Desc->NextDesc = Next;
462 Desc->ID = Cache.HeaderP->DescriptionCount++;
463 Desc->language_code = Map.WriteString(Lang);
464 Desc->md5sum = Map.WriteString(md5sum.Value());
465 if (Desc->language_code == 0 || Desc->md5sum == 0)
466 return 0;
467
468 return Description;
469 }
470 /*}}}*/
471 // ListParser::NewDepends - Create a dependency element /*{{{*/
472 // ---------------------------------------------------------------------
473 /* This creates a dependency element in the tree. It is linked to the
474 version and to the package that it is pointing to. */
475 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
476 const string &PackageName,
477 const string &Version,
478 unsigned int Op,
479 unsigned int Type)
480 {
481 pkgCache &Cache = Owner->Cache;
482
483 // Get a structure
484 unsigned long Dependency = Owner->Map.Allocate(sizeof(pkgCache::Dependency));
485 if (Dependency == 0)
486 return false;
487
488 // Fill it in
489 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
490 Dep->ParentVer = Ver.Index();
491 Dep->Type = Type;
492 Dep->CompareOp = Op;
493 Dep->ID = Cache.HeaderP->DependsCount++;
494
495 // Locate the target package
496 pkgCache::PkgIterator Pkg;
497 if (Owner->NewPackage(Pkg,PackageName) == false)
498 return false;
499
500 // Probe the reverse dependency list for a version string that matches
501 if (Version.empty() == false)
502 {
503 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
504 if (I->Version != 0 && I.TargetVer() == Version)
505 Dep->Version = I->Version;*/
506 if (Dep->Version == 0)
507 if ((Dep->Version = WriteString(Version)) == 0)
508 return false;
509 }
510
511 // Link it to the package
512 Dep->Package = Pkg.Index();
513 Dep->NextRevDepends = Pkg->RevDepends;
514 Pkg->RevDepends = Dep.Index();
515
516 /* Link it to the version (at the end of the list)
517 Caching the old end point speeds up generation substantially */
518 if (OldDepVer != Ver)
519 {
520 OldDepLast = &Ver->DependsList;
521 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
522 OldDepLast = &D->NextDepends;
523 OldDepVer = Ver;
524 }
525
526 // Is it a file dependency?
527 if (PackageName[0] == '/')
528 FoundFileDeps = true;
529
530 Dep->NextDepends = *OldDepLast;
531 *OldDepLast = Dep.Index();
532 OldDepLast = &Dep->NextDepends;
533
534 return true;
535 }
536 /*}}}*/
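// Usage sketch (hypothetical parser code, not part of this file): a
// ListParser subclass calls NewDepends() once per dependency it extracts for
// a version.  Because those calls arrive back to back for the same Ver, the
// OldDepVer/OldDepLast cache above turns each list append into O(1) work
// instead of rescanning Ver.DependsList() every time.
#if 0
// For each target extracted from a Depends field the parser does roughly:
//   TargetName = "libc6"; TargetVersion = "2.7"; Op = pkgCache::Dep::GreaterEq;
if (NewDepends(Ver,TargetName,TargetVersion,Op,pkgCache::Dep::Depends) == false)
   return false;
#endif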
537 // ListParser::NewProvides - Create a Provides element /*{{{*/
538 // ---------------------------------------------------------------------
539 /* */
540 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
541 const string &PackageName,
542 const string &Version)
543 {
544 pkgCache &Cache = Owner->Cache;
545
546 // We do not add self-referencing provides
547 if (Ver.ParentPkg().Name() == PackageName)
548 return true;
549
550 // Get a structure
551 unsigned long Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
552 if (Provides == 0)
553 return false;
554 Cache.HeaderP->ProvidesCount++;
555
556 // Fill it in
557 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
558 Prv->Version = Ver.Index();
559 Prv->NextPkgProv = Ver->ProvidesList;
560 Ver->ProvidesList = Prv.Index();
561 if (Version.empty() == false && (Prv->ProvideVersion = WriteString(Version)) == 0)
562 return false;
563
564 // Locate the target package
565 pkgCache::PkgIterator Pkg;
566 if (Owner->NewPackage(Pkg,PackageName) == false)
567 return false;
568
569 // Link it to the package
570 Prv->ParentPkg = Pkg.Index();
571 Prv->NextProvides = Pkg->ProvidesList;
572 Pkg->ProvidesList = Prv.Index();
573
574 return true;
575 }
576 /*}}}*/
577 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
578 // ---------------------------------------------------------------------
579 /* This is used to select which file is to be associated with all newly
580 added versions. The caller is responsible for setting the IMS fields. */
581 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
582 const pkgIndexFile &Index,
583 unsigned long Flags)
584 {
585 // Get some space for the structure
586 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
587 if (CurrentFile == Cache.PkgFileP)
588 return false;
589
590 // Fill it in
591 CurrentFile->FileName = Map.WriteString(File);
592 CurrentFile->Site = WriteUniqString(Site);
593 CurrentFile->NextFile = Cache.HeaderP->FileList;
594 CurrentFile->Flags = Flags;
595 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
596 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
597 PkgFileName = File;
598 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
599 Cache.HeaderP->PackageFileCount++;
600
601 if (CurrentFile->FileName == 0)
602 return false;
603
604 if (Progress != 0)
605 Progress->SubProgress(Index.Size());
606 return true;
607 }
608 /*}}}*/
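// Sketch of the caller side of the IMS contract mentioned above (assumes the
// GetCurFile() accessor from pkgcachegen.h and hypothetical variable names):
// after SelectFile() succeeds the index records the size and mtime of the
// file it is about to parse, which is what FindInCache()/CheckValidity()
// later compare against the on-disk file.
#if 0
if (Gen.SelectFile(FileName,Site,*this) == false)
   return _error->Error("Problem with SelectFile %s",FileName.c_str());

pkgCache::PkgFileIterator CFile = Gen.GetCurFile();
struct stat St;
if (fstat(PkgF.Fd(),&St) != 0)                  // PkgF: the FileFd being parsed
   return _error->Errno("fstat","Failed to stat");
CFile->Size = St.st_size;
CFile->mtime = St.st_mtime;
#endif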
609 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
610 // ---------------------------------------------------------------------
611 /* This is used to create handles to strings. Given the same text it
612 always returns the same number */
613 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
614 unsigned int Size)
615 {
616 /* We use a very small transient hash table here; this speeds up generation
617 by a fair amount on slower machines */
618 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
619 if (Bucket != 0 &&
620 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
621 return Bucket->String;
622
623 // Search for an insertion point
624 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
625 int Res = 1;
626 map_ptrloc *Last = &Cache.HeaderP->StringList;
627 for (; I != Cache.StringItemP; Last = &I->NextItem,
628 I = Cache.StringItemP + I->NextItem)
629 {
630 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
631 if (Res >= 0)
632 break;
633 }
634
635 // Match
636 if (Res == 0)
637 {
638 Bucket = I;
639 return I->String;
640 }
641
642 // Get a structure
643 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
644 if (Item == 0)
645 return 0;
646
647 // Fill in the structure
648 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
649 ItemP->NextItem = I - Cache.StringItemP;
650 *Last = Item;
651 ItemP->String = Map.WriteString(S,Size);
652 if (ItemP->String == 0)
653 return 0;
654
655 Bucket = ItemP;
656 return ItemP->String;
657 }
658 /*}}}*/
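// Illustrative property check (not part of the build): because of the lookup
// above, handing WriteUniqString() the same text twice yields the same
// offset, so repeated values such as sites and index type labels are stored
// only once in the cache.
#if 0
unsigned long A = Gen.WriteUniqString("http.us.debian.org");   // hypothetical call site
unsigned long B = Gen.WriteUniqString("http.us.debian.org");
// A == B holds: same text, same handle into the string pool.
#endif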
659 // CheckValidity - Check that a cache is up-to-date /*{{{*/
660 // ---------------------------------------------------------------------
661 /* This just verifies that each file in the list of index files exists,
662 that its attributes match those recorded in the cache, and that the
663 cache does not reference any extra files. */
664 static bool CheckValidity(const string &CacheFile, FileIterator Start,
665 FileIterator End,MMap **OutMap = 0)
666 {
667 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
668 // No file, certainly invalid
669 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
670 {
671 if (Debug == true)
672 std::clog << "CacheFile doesn't exist" << std::endl;
673 return false;
674 }
675
676 // Map it
677 FileFd CacheF(CacheFile,FileFd::ReadOnly);
678 SPtr<MMap> Map = new MMap(CacheF,0);
679 pkgCache Cache(Map);
680 if (_error->PendingError() == true || Map->Size() == 0)
681 {
682 if (Debug == true)
683 std::clog << "Errors are pending or Map is empty()" << std::endl;
684 _error->Discard();
685 return false;
686 }
687
688 /* Now we check every index file, see if it is in the cache,
689 verify the IMS data and check that it is on the disk too.. */
690 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
691 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
692 for (; Start != End; Start++)
693 {
694 if (Debug == true)
695 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
696 if ((*Start)->HasPackages() == false)
697 {
698 if (Debug == true)
699 std::clog << "Has NO packages" << std::endl;
700 continue;
701 }
702
703 if ((*Start)->Exists() == false)
704 {
705 #if 0 // mvo: we no longer give a message here (Default Sources spec)
706 _error->WarningE("stat",_("Couldn't stat source package list %s"),
707 (*Start)->Describe().c_str());
708 #endif
709 if (Debug == true)
710 std::clog << "file doesn't exist" << std::endl;
711 continue;
712 }
713
714 // FindInCache is also expected to do an IMS check.
715 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
716 if (File.end() == true)
717 {
718 if (Debug == true)
719 std::clog << "FindInCache returned end-Pointer" << std::endl;
720 return false;
721 }
722
723 Visited[File->ID] = true;
724 if (Debug == true)
725 std::clog << "with ID " << File->ID << " is valid" << std::endl;
726 }
727
728 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
729 if (Visited[I] == false)
730 {
731 if (Debug == true)
732 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
733 return false;
734 }
735
736 if (_error->PendingError() == true)
737 {
738 if (Debug == true)
739 {
740 std::clog << "Validity failed because of pending errors:" << std::endl;
741 _error->DumpErrors();
742 }
743 _error->Discard();
744 return false;
745 }
746
747 if (OutMap != 0)
748 *OutMap = Map.UnGuard();
749 return true;
750 }
751 /*}}}*/
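// Rough sketch of the IMS check that FindInCache() is expected to perform
// (the real implementations live in the index classes, e.g. debindexfile.cc;
// the details below are assumptions for illustration): locate the matching
// PkgFile record and compare its recorded size/mtime with the file on disk,
// returning an end iterator on any mismatch so CheckValidity() rejects the
// cache.
#if 0
pkgCache::PkgFileIterator File = Cache.FileBegin();
for (; File.end() == false; File++)
{
   if (FileName != File.FileName())
      continue;
   struct stat St;
   if (stat(File.FileName(),&St) != 0 ||
       (unsigned)St.st_size != File->Size || St.st_mtime != File->mtime)
      return pkgCache::PkgFileIterator(Cache);
   return File;
}
return File;      // end iterator: not found
#endif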
752 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
753 // ---------------------------------------------------------------------
754 /* Size is kind of an abstract notion that is only used for the progress
755 meter */
756 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
757 {
758 unsigned long TotalSize = 0;
759 for (; Start != End; Start++)
760 {
761 if ((*Start)->HasPackages() == false)
762 continue;
763 TotalSize += (*Start)->Size();
764 }
765 return TotalSize;
766 }
767 /*}}}*/
768 // BuildCache - Merge the list of index files into the cache /*{{{*/
769 // ---------------------------------------------------------------------
770 /* */
771 static bool BuildCache(pkgCacheGenerator &Gen,
772 OpProgress &Progress,
773 unsigned long &CurrentSize,unsigned long TotalSize,
774 FileIterator Start, FileIterator End)
775 {
776 FileIterator I;
777 for (I = Start; I != End; I++)
778 {
779 if ((*I)->HasPackages() == false)
780 continue;
781
782 if ((*I)->Exists() == false)
783 continue;
784
785 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
786 {
787 _error->Warning("Duplicate sources.list entry %s",
788 (*I)->Describe().c_str());
789 continue;
790 }
791
792 unsigned long Size = (*I)->Size();
793 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
794 CurrentSize += Size;
795
796 if ((*I)->Merge(Gen,Progress) == false)
797 return false;
798 }
799
800 if (Gen.HasFileDeps() == true)
801 {
802 Progress.Done();
803 TotalSize = ComputeSize(Start, End);
804 CurrentSize = 0;
805 for (I = Start; I != End; I++)
806 {
807 unsigned long Size = (*I)->Size();
808 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
809 CurrentSize += Size;
810 if ((*I)->MergeFileProvides(Gen,Progress) == false)
811 return false;
812 }
813 }
814
815 return true;
816 }
817 /*}}}*/
818 // MakeStatusCache - Construct the status cache /*{{{*/
819 // ---------------------------------------------------------------------
820 /* This makes sure that the status cache (the cache that has all
821 index files from the sources list and all local ones) is ready
822 to be mmapped. If OutMap is not zero then an MMap object representing
823 the cache will be stored there. This is pretty much mandatory if you
824 are using AllowMem. AllowMem lets the function be run as non-root
825 where it builds the cache 'fast' into a memory buffer. */
826 bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
827 MMap **OutMap,bool AllowMem)
828 {
829 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
830 unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
831
832 vector<pkgIndexFile *> Files;
833 for (vector<metaIndex *>::const_iterator i = List.begin();
834 i != List.end();
835 i++)
836 {
837 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
838 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
839 j != Indexes->end();
840 j++)
841 Files.push_back (*j);
842 }
843
844 unsigned long const EndOfSource = Files.size();
845 if (_system->AddStatusFiles(Files) == false)
846 return false;
847
848 // Work out the cache file locations..
849 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
850 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
851
852 // Decide if we can write to the cache
853 bool Writeable = false;
854 if (CacheFile.empty() == false)
855 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
856 else
857 if (SrcCacheFile.empty() == false)
858 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
859 if (Debug == true)
860 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
861
862 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
863 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
864
865 Progress.OverallProgress(0,1,1,_("Reading package lists"));
866
867 // Cache is OK, Fin.
868 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
869 {
870 Progress.OverallProgress(1,1,1,_("Reading package lists"));
871 if (Debug == true)
872 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
873 return true;
874 }
875 else if (Debug == true)
876 std::clog << "pkgcache.bin is NOT valid" << std::endl;
877
878 /* At this point we know we need to reconstruct the package cache,
879 begin. */
880 SPtr<FileFd> CacheF;
881 SPtr<DynamicMMap> Map;
882 if (Writeable == true && CacheFile.empty() == false)
883 {
884 unlink(CacheFile.c_str());
885 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
886 fchmod(CacheF->Fd(),0644);
887 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
888 if (_error->PendingError() == true)
889 return false;
890 if (Debug == true)
891 std::clog << "Open filebased MMap" << std::endl;
892 }
893 else
894 {
895 // Just build it in memory..
896 Map = new DynamicMMap(0,MapSize);
897 if (Debug == true)
898 std::clog << "Open memory Map (not filebased)" << std::endl;
899 }
900
901 // Let's try the source cache.
902 unsigned long CurrentSize = 0;
903 unsigned long TotalSize = 0;
904 if (CheckValidity(SrcCacheFile,Files.begin(),
905 Files.begin()+EndOfSource) == true)
906 {
907 if (Debug == true)
908 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
909 // Preload the map with the source cache
910 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
911 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
912 if ((alloc == 0 && _error->PendingError())
913 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
914 SCacheF.Size()) == false)
915 return false;
916
917 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
918
919 // Build the status cache
920 pkgCacheGenerator Gen(Map.Get(),&Progress);
921 if (_error->PendingError() == true)
922 return false;
923 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
924 Files.begin()+EndOfSource,Files.end()) == false)
925 return false;
926 }
927 else
928 {
929 if (Debug == true)
930 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
931 TotalSize = ComputeSize(Files.begin(),Files.end());
932
933 // Build the source cache
934 pkgCacheGenerator Gen(Map.Get(),&Progress);
935 if (_error->PendingError() == true)
936 return false;
937 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
938 Files.begin(),Files.begin()+EndOfSource) == false)
939 return false;
940
941 // Write it back
942 if (Writeable == true && SrcCacheFile.empty() == false)
943 {
944 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
945 if (_error->PendingError() == true)
946 return false;
947
948 fchmod(SCacheF.Fd(),0644);
949
950 // Write out the main data
951 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
952 return _error->Error(_("IO Error saving source cache"));
953 SCacheF.Sync();
954
955 // Write out the proper header
956 Gen.GetCache().HeaderP->Dirty = false;
957 if (SCacheF.Seek(0) == false ||
958 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
959 return _error->Error(_("IO Error saving source cache"));
960 Gen.GetCache().HeaderP->Dirty = true;
961 SCacheF.Sync();
962 }
963
964 // Build the status cache
965 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
966 Files.begin()+EndOfSource,Files.end()) == false)
967 return false;
968 }
969 if (Debug == true)
970 std::clog << "Caches are ready for shipping" << std::endl;
971
972 if (_error->PendingError() == true)
973 return false;
974 if (OutMap != 0)
975 {
976 if (CacheF != 0)
977 {
978 delete Map.UnGuard();
979 *OutMap = new MMap(*CacheF,0);
980 }
981 else
982 {
983 *OutMap = Map.UnGuard();
984 }
985 }
986
987 return true;
988 }
989 /*}}}*/
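// Minimal usage sketch (how a front end typically obtains a cache through
// this entry point; variable names are illustrative and error handling is
// abbreviated):
#if 0
pkgSourceList List;
if (List.ReadMainList() == false)
   return false;

OpTextProgress Progress(*_config);
MMap *Map = 0;
if (pkgMakeStatusCache(List,Progress,&Map,true) == false)   // AllowMem == true
   return false;

pkgCache Cache(Map);        // mmap-backed cache, ready for iteration
/* ... use Cache ... */
delete Map;
#endif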
990 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
991 // ---------------------------------------------------------------------
992 /* */
993 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
994 {
995 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
996 vector<pkgIndexFile *> Files;
997 unsigned long EndOfSource = Files.size();
998 if (_system->AddStatusFiles(Files) == false)
999 return false;
1000
1001 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1002 unsigned long CurrentSize = 0;
1003 unsigned long TotalSize = 0;
1004
1005 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1006
1007 // Build the status cache
1008 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1009 pkgCacheGenerator Gen(Map.Get(),&Progress);
1010 if (_error->PendingError() == true)
1011 return false;
1012 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1013 Files.begin()+EndOfSource,Files.end()) == false)
1014 return false;
1015
1016 if (_error->PendingError() == true)
1017 return false;
1018 *OutMap = Map.UnGuard();
1019
1020 return true;
1021 }
1022 /*}}}*/