apt-pkg/pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/strutl.h>
22 #include <apt-pkg/sptr.h>
23 #include <apt-pkg/pkgsystem.h>
24 #include <apt-pkg/macros.h>
25
26 #include <apt-pkg/tagfile.h>
27
28 #include <apti18n.h>
29
30 #include <vector>
31
32 #include <sys/stat.h>
33 #include <unistd.h>
34 #include <errno.h>
35 #include <stdio.h>
36 /*}}}*/
37 typedef vector<pkgIndexFile *>::iterator FileIterator;
38
39 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
40 // ---------------------------------------------------------------------
41 /* We set the dirty flag and make sure that it is written to the disk */
42 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
43 Map(*pMap), Cache(pMap,false), Progress(Prog),
44 FoundFileDeps(0)
45 {
46 CurrentFile = 0;
47 memset(UniqHash,0,sizeof(UniqHash));
48
49 if (_error->PendingError() == true)
50 return;
51
52 if (Map.Size() == 0)
53 {
54 // Setup the map interface..
55 Cache.HeaderP = (pkgCache::Header *)Map.Data();
56 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
57 return;
58
59 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
60
61 // Starting header
62 *Cache.HeaderP = pkgCache::Header();
63 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
64 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
65 Cache.ReMap();
66 }
67 else
68 {
69 // Map directly from the existing file
70 Cache.ReMap();
71 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
72 if (Cache.VS != _system->VS)
73 {
74 _error->Error(_("Cache has an incompatible versioning system"));
75 return;
76 }
77 }
78
79 Cache.HeaderP->Dirty = true;
80 Map.Sync(0,sizeof(pkgCache::Header));
81 }
82 /*}}}*/
83 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
84 // ---------------------------------------------------------------------
85 /* We sync the data, then unset the dirty flag in two steps so as to
86    avoid a problem during a crash (a usage sketch follows this destructor) */
87 pkgCacheGenerator::~pkgCacheGenerator()
88 {
89 if (_error->PendingError() == true)
90 return;
91 if (Map.Sync() == false)
92 return;
93
94 Cache.HeaderP->Dirty = false;
95 Map.Sync(0,sizeof(pkgCache::Header));
96 }
97 /*}}}*/
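// Usage sketch: generator lifecycle /*{{{*/
// ---------------------------------------------------------------------
/* Illustrative only: a minimal sketch of how the constructor/destructor
   pair is meant to bracket a merge, mirroring what pkgMakeStatusCache()
   does further down. The memory-only DynamicMMap and its size are
   assumptions of the example, not requirements. */
#if 0
   {
      OpProgress Progress;
      DynamicMMap Map(0,4*1024*1024);         // memory-backed map, as in pkgMakeOnlyStatusCache()
      pkgCacheGenerator Gen(&Map,&Progress);  // constructor sets Header->Dirty and syncs it
      // ... SelectFile() + MergeList() for each index file ...
   }  // destructor syncs the map and only then clears the dirty flag
#endif
/*}}}*/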
98 // CacheGenerator::MergeList - Merge the package list /*{{{*/
99 // ---------------------------------------------------------------------
100 /* This generates the entries in the cache. Each iteration of the loop
101    processes a single package record from the underlying parse engine. */
102 bool pkgCacheGenerator::MergeList(ListParser &List,
103 pkgCache::VerIterator *OutVer)
104 {
105 List.Owner = this;
106
107 unsigned int Counter = 0;
108 while (List.Step() == true)
109 {
110 // Get a pointer to the package structure
111 string PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115 pkgCache::PkgIterator Pkg;
116 if (NewPackage(Pkg,PackageName) == false)
117 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
118 Counter++;
119 if (Counter % 100 == 0 && Progress != 0)
120 Progress->Progress(List.Offset());
121
122 /* Get a pointer to the version structure. We know the list is sorted
123 so we use that fact in the search. Insertion of new versions is
124 done with correct sorting */
125 string Version = List.Version();
126 if (Version.empty() == true)
127 {
128 // we first process the package, then the descriptions
129       // (this has the bonus that we get an MMap error when we run out
130 // of MMap space)
131 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
132 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
133 PackageName.c_str());
134
135 // Find the right version to write the description
136 MD5SumValue CurMd5 = List.Description_md5();
137 pkgCache::VerIterator Ver = Pkg.VersionList();
138 map_ptrloc *LastVer = &Pkg->VersionList;
139
140 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
141 {
142 pkgCache::DescIterator Desc = Ver.DescriptionList();
143 map_ptrloc *LastDesc = &Ver->DescriptionList;
144 bool duplicate=false;
145
146 // don't add a new description if we have one for the given
147 // md5 && language
148 for ( ; Desc.end() == false; Desc++)
149 if (MD5SumValue(Desc.md5()) == CurMd5 &&
150 Desc.LanguageCode() == List.DescriptionLanguage())
151 duplicate=true;
152 if(duplicate)
153 continue;
154
155 for (Desc = Ver.DescriptionList();
156 Desc.end() == false;
157 LastDesc = &Desc->NextDesc, Desc++)
158 {
159 if (MD5SumValue(Desc.md5()) == CurMd5)
160 {
161 // Add new description
162 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
163 Desc->ParentPkg = Pkg.Index();
164
165 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
166 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
167 break;
168 }
169 }
170 }
171
172 continue;
173 }
174
175 pkgCache::VerIterator Ver = Pkg.VersionList();
176 map_ptrloc *LastVer = &Pkg->VersionList;
177 int Res = 1;
178 unsigned long const Hash = List.VersionHash();
179 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
180 {
181 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
182 	 // Version is higher than the current version - insert here
183 if (Res > 0)
184 break;
185 	 // Version strings are equal - is the hash also equal?
186 if (Res == 0 && Ver->Hash == Hash)
187 break;
188 	 // otherwise move on to the next entry until we either find the right
189 	 // version or reach a lower one (which is where we insert)
190 }
191
192 /* We already have a version for this item, record that we saw it */
193 if (Res == 0)
194 {
195 if (List.UsePackage(Pkg,Ver) == false)
196 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
197 PackageName.c_str());
198
199 if (NewFileVer(Ver,List) == false)
200 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
201 PackageName.c_str());
202
203 // Read only a single record and return
204 if (OutVer != 0)
205 {
206 *OutVer = Ver;
207 FoundFileDeps |= List.HasFileDeps();
208 return true;
209 }
210
211 continue;
212 }
213
214 // Add a new version
215 *LastVer = NewVersion(Ver,Version,*LastVer);
216 Ver->ParentPkg = Pkg.Index();
217 Ver->Hash = Hash;
218
219 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
220 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
221 PackageName.c_str());
222
223 if (List.UsePackage(Pkg,Ver) == false)
224 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
225 PackageName.c_str());
226
227 if (NewFileVer(Ver,List) == false)
228 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
229 PackageName.c_str());
230
231 // Read only a single record and return
232 if (OutVer != 0)
233 {
234 *OutVer = Ver;
235 FoundFileDeps |= List.HasFileDeps();
236 return true;
237 }
238
239 /* Record the Description data. Description data always exist in
240 Packages and Translation-* files. */
241 pkgCache::DescIterator Desc = Ver.DescriptionList();
242 map_ptrloc *LastDesc = &Ver->DescriptionList;
243
244 // Skip to the end of description set
245 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
246
247 // Add new description
248 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
249 Desc->ParentPkg = Pkg.Index();
250
251 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
252 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
253 }
254
255 FoundFileDeps |= List.HasFileDeps();
256
257 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
258 return _error->Error(_("Wow, you exceeded the number of package "
259 "names this APT is capable of."));
260 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
261 return _error->Error(_("Wow, you exceeded the number of versions "
262 "this APT is capable of."));
263 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
264 return _error->Error(_("Wow, you exceeded the number of descriptions "
265 "this APT is capable of."));
266 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
267 return _error->Error(_("Wow, you exceeded the number of dependencies "
268 "this APT is capable of."));
269 return true;
270 }
271 /*}}}*/
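// Usage sketch: single-record merge via OutVer /*{{{*/
// ---------------------------------------------------------------------
/* Illustrative only: when OutVer is non-zero, MergeList() above stops after
   the first version record it reads and hands back an iterator to it, so a
   caller that only needs one record can avoid a full merge. "Parser" stands
   for any concrete ListParser and is an assumption of the sketch. */
#if 0
   pkgCache::VerIterator Ver(Gen.GetCache());
   if (Gen.MergeList(Parser,&Ver) == false)
      return false;                  // the real error is already queued in _error
   // Ver now points at the single version that was just merged.
#endif
/*}}}*/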
272 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
273 // ---------------------------------------------------------------------
274 /* If we found any file depends while parsing the main list we need to
275    resolve them. Since it is undesirable to load the entire list of files
276    into the cache as virtual packages we do a two-stage effort. MergeList
277    identifies the file depends and this creates Provides for them by
278    re-parsing all the indexes. */
279 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
280 {
281 List.Owner = this;
282
283 unsigned int Counter = 0;
284 while (List.Step() == true)
285 {
286 string PackageName = List.Package();
287 if (PackageName.empty() == true)
288 return false;
289 string Version = List.Version();
290 if (Version.empty() == true)
291 continue;
292
293 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
294 if (Pkg.end() == true)
295 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
296 PackageName.c_str());
297 Counter++;
298 if (Counter % 100 == 0 && Progress != 0)
299 Progress->Progress(List.Offset());
300
301 unsigned long Hash = List.VersionHash();
302 pkgCache::VerIterator Ver = Pkg.VersionList();
303 for (; Ver.end() == false; Ver++)
304 {
305          if (Ver->Hash == Hash && Version == Ver.VerStr())
306 {
307 if (List.CollectFileProvides(Cache,Ver) == false)
308 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
309 break;
310 }
311 }
312
313 if (Ver.end() == true)
314 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
315 }
316
317 return true;
318 }
319 /*}}}*/
320 // CacheGenerator::NewPackage - Add a new package /*{{{*/
321 // ---------------------------------------------------------------------
322 /* This creates a new package structure and adds it to the hash table */
323 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name)
324 {
325 Pkg = Cache.FindPkg(Name);
326 if (Pkg.end() == false)
327 return true;
328
329 // Get a structure
330 unsigned long Package = Map.Allocate(sizeof(pkgCache::Package));
331 if (Package == 0)
332 return false;
333
334 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
335
336 // Insert it into the hash table
337 unsigned long Hash = Cache.Hash(Name);
338 Pkg->NextPackage = Cache.HeaderP->HashTable[Hash];
339 Cache.HeaderP->HashTable[Hash] = Package;
340
341 // Set the name and the ID
342 Pkg->Name = Map.WriteString(Name);
343 if (Pkg->Name == 0)
344 return false;
345 Pkg->ID = Cache.HeaderP->PackageCount++;
346
347 return true;
348 }
349 /*}}}*/
350 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
351 // ---------------------------------------------------------------------
352 /* */
353 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
354 ListParser &List)
355 {
356 if (CurrentFile == 0)
357 return true;
358
359 // Get a structure
360 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
361 if (VerFile == 0)
362       return false;
363
364 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
365 VF->File = CurrentFile - Cache.PkgFileP;
366
367 // Link it to the end of the list
368 map_ptrloc *Last = &Ver->FileList;
369 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
370 Last = &V->NextFile;
371 VF->NextFile = *Last;
372 *Last = VF.Index();
373
374 VF->Offset = List.Offset();
375 VF->Size = List.Size();
376 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
377 Cache.HeaderP->MaxVerFileSize = VF->Size;
378 Cache.HeaderP->VerFileCount++;
379
380 return true;
381 }
382 /*}}}*/
383 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
384 // ---------------------------------------------------------------------
385 /* This puts a version structure in the linked list */
386 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
387 const string &VerStr,
388 unsigned long Next)
389 {
390 // Get a structure
391 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
392 if (Version == 0)
393 return 0;
394
395 // Fill it in
396 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
397 Ver->NextVer = Next;
398 Ver->ID = Cache.HeaderP->VersionCount++;
399 Ver->VerStr = Map.WriteString(VerStr);
400 if (Ver->VerStr == 0)
401 return 0;
402
403 return Version;
404 }
405 /*}}}*/
406 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
407 // ---------------------------------------------------------------------
408 /* */
409 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
410 ListParser &List)
411 {
412 if (CurrentFile == 0)
413 return true;
414
415 // Get a structure
416 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
417 if (DescFile == 0)
418 return false;
419
420 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
421 DF->File = CurrentFile - Cache.PkgFileP;
422
423 // Link it to the end of the list
424 map_ptrloc *Last = &Desc->FileList;
425 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
426 Last = &D->NextFile;
427
428 DF->NextFile = *Last;
429 *Last = DF.Index();
430
431 DF->Offset = List.Offset();
432 DF->Size = List.Size();
433 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
434 Cache.HeaderP->MaxDescFileSize = DF->Size;
435 Cache.HeaderP->DescFileCount++;
436
437 return true;
438 }
439 /*}}}*/
440 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
441 // ---------------------------------------------------------------------
442 /* This puts a description structure in the linked list */
443 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
444 const string &Lang,
445 const MD5SumValue &md5sum,
446 map_ptrloc Next)
447 {
448 // Get a structure
449 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
450 if (Description == 0)
451 return 0;
452
453 // Fill it in
454 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
455 Desc->NextDesc = Next;
456 Desc->ID = Cache.HeaderP->DescriptionCount++;
457 Desc->language_code = Map.WriteString(Lang);
458 Desc->md5sum = Map.WriteString(md5sum.Value());
459 if (Desc->language_code == 0 || Desc->md5sum == 0)
460 return 0;
461
462 return Description;
463 }
464 /*}}}*/
465 // ListParser::NewDepends - Create a dependency element /*{{{*/
466 // ---------------------------------------------------------------------
467 /* This creates a dependency element in the tree. It is linked to the
468 version and to the package that it is pointing to. */
469 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
470 const string &PackageName,
471 const string &Version,
472 unsigned int Op,
473 unsigned int Type)
474 {
475 pkgCache &Cache = Owner->Cache;
476
477 // Get a structure
478 unsigned long Dependency = Owner->Map.Allocate(sizeof(pkgCache::Dependency));
479 if (Dependency == 0)
480 return false;
481
482 // Fill it in
483 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
484 Dep->ParentVer = Ver.Index();
485 Dep->Type = Type;
486 Dep->CompareOp = Op;
487 Dep->ID = Cache.HeaderP->DependsCount++;
488
489 // Locate the target package
490 pkgCache::PkgIterator Pkg;
491 if (Owner->NewPackage(Pkg,PackageName) == false)
492 return false;
493
494 // Probe the reverse dependency list for a version string that matches
495 if (Version.empty() == false)
496 {
497 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
498 if (I->Version != 0 && I.TargetVer() == Version)
499 Dep->Version = I->Version;*/
500 if (Dep->Version == 0)
501 if ((Dep->Version = WriteString(Version)) == 0)
502 return false;
503 }
504
505 // Link it to the package
506 Dep->Package = Pkg.Index();
507 Dep->NextRevDepends = Pkg->RevDepends;
508 Pkg->RevDepends = Dep.Index();
509
510 /* Link it to the version (at the end of the list)
511 Caching the old end point speeds up generation substantially */
512 if (OldDepVer != Ver)
513 {
514 OldDepLast = &Ver->DependsList;
515 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
516 OldDepLast = &D->NextDepends;
517 OldDepVer = Ver;
518 }
519
520 // Is it a file dependency?
521 if (PackageName[0] == '/')
522 FoundFileDeps = true;
523
524 Dep->NextDepends = *OldDepLast;
525 *OldDepLast = Dep.Index();
526 OldDepLast = &Dep->NextDepends;
527
528 return true;
529 }
530 /*}}}*/
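// Note: dependency-list append and tail caching /*{{{*/
// ---------------------------------------------------------------------
/* Illustrative only: NewDepends() above appends to the tail of the version's
   dependency list, and OldDepVer/OldDepLast cache that tail between calls so
   that adding N dependencies for one version costs O(N) instead of O(N^2)
   list walks. The uncached equivalent of the append would look like this: */
#if 0
   map_ptrloc *Last = &Ver->DependsList;
   for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
      Last = &D->NextDepends;        // walk to the tail on every call (what the cache avoids)
   Dep->NextDepends = *Last;
   *Last = Dep.Index();
#endif
/*}}}*/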
531 // ListParser::NewProvides - Create a Provides element /*{{{*/
532 // ---------------------------------------------------------------------
533 /* */
534 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
535 const string &PackageName,
536 const string &Version)
537 {
538 pkgCache &Cache = Owner->Cache;
539
540    // We do not add self-referencing provides
541 if (Ver.ParentPkg().Name() == PackageName)
542 return true;
543
544 // Get a structure
545 unsigned long Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
546 if (Provides == 0)
547 return false;
548 Cache.HeaderP->ProvidesCount++;
549
550 // Fill it in
551 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
552 Prv->Version = Ver.Index();
553 Prv->NextPkgProv = Ver->ProvidesList;
554 Ver->ProvidesList = Prv.Index();
555 if (Version.empty() == false && (Prv->ProvideVersion = WriteString(Version)) == 0)
556 return false;
557
558 // Locate the target package
559 pkgCache::PkgIterator Pkg;
560 if (Owner->NewPackage(Pkg,PackageName) == false)
561 return false;
562
563 // Link it to the package
564 Prv->ParentPkg = Pkg.Index();
565 Prv->NextProvides = Pkg->ProvidesList;
566 Pkg->ProvidesList = Prv.Index();
567
568 return true;
569 }
570 /*}}}*/
571 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
572 // ---------------------------------------------------------------------
573 /* This is used to select which file is to be associated with all newly
574 added versions. The caller is responsible for setting the IMS fields. */
575 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
576 const pkgIndexFile &Index,
577 unsigned long Flags)
578 {
579 // Get some space for the structure
580 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
581 if (CurrentFile == Cache.PkgFileP)
582 return false;
583
584 // Fill it in
585 CurrentFile->FileName = Map.WriteString(File);
586 CurrentFile->Site = WriteUniqString(Site);
587 CurrentFile->NextFile = Cache.HeaderP->FileList;
588 CurrentFile->Flags = Flags;
589 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
590 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
591 PkgFileName = File;
592 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
593 Cache.HeaderP->PackageFileCount++;
594
595 if (CurrentFile->FileName == 0)
596 return false;
597
598 if (Progress != 0)
599 Progress->SubProgress(Index.Size());
600 return true;
601 }
602 /*}}}*/
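// Note: SelectFile ordering /*{{{*/
// ---------------------------------------------------------------------
/* NewFileVer() and NewFileDesc() above silently do nothing while CurrentFile
   is still 0, so an index is expected to call SelectFile() before feeding its
   records through MergeList(); otherwise its versions end up with no
   File<->Version associations at all. */
/*}}}*/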
603 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
604 // ---------------------------------------------------------------------
605 /* This is used to create handles to strings. Given the same text it
606 always returns the same number */
607 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
608 unsigned int Size)
609 {
610    /* We use a very small transient hash table here; it speeds up generation
611       by a fair amount on slower machines */
612 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
613 if (Bucket != 0 &&
614 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
615 return Bucket->String;
616
617 // Search for an insertion point
618 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
619 int Res = 1;
620 map_ptrloc *Last = &Cache.HeaderP->StringList;
621 for (; I != Cache.StringItemP; Last = &I->NextItem,
622 I = Cache.StringItemP + I->NextItem)
623 {
624 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
625 if (Res >= 0)
626 break;
627 }
628
629 // Match
630 if (Res == 0)
631 {
632 Bucket = I;
633 return I->String;
634 }
635
636 // Get a structure
637 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
638 if (Item == 0)
639 return 0;
640
641 // Fill in the structure
642 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
643 ItemP->NextItem = I - Cache.StringItemP;
644 *Last = Item;
645 ItemP->String = Map.WriteString(S,Size);
646 if (ItemP->String == 0)
647 return 0;
648
649 Bucket = ItemP;
650 return ItemP->String;
651 }
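// Note: the transient UniqHash bucket /*{{{*/
// ---------------------------------------------------------------------
/* The bucket index above is derived from only the first two characters of
   the string, so this is not a full hash table: it merely short-circuits the
   sorted-list walk for the strings that repeat constantly during generation
   (site names and index-type labels written by SelectFile()). A different
   string that happens to share the first two characters simply misses the
   bucket and falls through to the normal search. */
/*}}}*/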
652 /*}}}*/
653 // CheckValidity - Check that a cache is up-to-date /*{{{*/
654 // ---------------------------------------------------------------------
655 /* This just verifies that each file in the list of index files exists,
656    has attributes matching the cache, and that the cache does not contain
657    any extra files. */
658 static bool CheckValidity(const string &CacheFile, FileIterator Start,
659 FileIterator End,MMap **OutMap = 0)
660 {
661 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
662 // No file, certainly invalid
663 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
664 {
665 if (Debug == true)
666 std::clog << "CacheFile doesn't exist" << std::endl;
667 return false;
668 }
669
670 // Map it
671 FileFd CacheF(CacheFile,FileFd::ReadOnly);
672 SPtr<MMap> Map = new MMap(CacheF,0);
673 pkgCache Cache(Map);
674 if (_error->PendingError() == true || Map->Size() == 0)
675 {
676 if (Debug == true)
677 std::clog << "Errors are pending or Map is empty()" << std::endl;
678 _error->Discard();
679 return false;
680 }
681
682    /* Now we check every index file, see if it is in the cache,
683       verify the IMS data and check that it is on the disk too. */
684 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
685 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
686 for (; Start != End; Start++)
687 {
688 if (Debug == true)
689 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
690 if ((*Start)->HasPackages() == false)
691 {
692 if (Debug == true)
693 std::clog << "Has NO packages" << std::endl;
694 continue;
695 }
696
697 if ((*Start)->Exists() == false)
698 {
699 #if 0 // mvo: we no longer give a message here (Default Sources spec)
700 _error->WarningE("stat",_("Couldn't stat source package list %s"),
701 (*Start)->Describe().c_str());
702 #endif
703 if (Debug == true)
704 std::clog << "file doesn't exist" << std::endl;
705 continue;
706 }
707
708 // FindInCache is also expected to do an IMS check.
709 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
710 if (File.end() == true)
711 {
712 if (Debug == true)
713 std::clog << "FindInCache returned end-Pointer" << std::endl;
714 return false;
715 }
716
717 Visited[File->ID] = true;
718 if (Debug == true)
719 std::clog << "with ID " << File->ID << " is valid" << std::endl;
720 }
721
722 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
723 if (Visited[I] == false)
724 {
725 if (Debug == true)
726 	 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
727 return false;
728 }
729
730 if (_error->PendingError() == true)
731 {
732 if (Debug == true)
733 {
734 std::clog << "Validity failed because of pending errors:" << std::endl;
735 _error->DumpErrors();
736 }
737 _error->Discard();
738 return false;
739 }
740
741 if (OutMap != 0)
742 *OutMap = Map.UnGuard();
743 return true;
744 }
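// Note: debugging validity checks /*{{{*/
// ---------------------------------------------------------------------
/* The Debug::pkgCacheGen option checked above (and again in
   pkgMakeStatusCache() below) makes the validity logic log to std::clog why
   a cache was accepted or rejected; enabling it, e.g. with
   -o Debug::pkgCacheGen=1 on an apt command line, is the quickest way to see
   which index file invalidated pkgcache.bin. */
/*}}}*/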
745 /*}}}*/
746 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
747 // ---------------------------------------------------------------------
748 /* Size is kind of an abstract notion that is only used for the progress
749 meter */
750 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
751 {
752 unsigned long TotalSize = 0;
753 for (; Start != End; Start++)
754 {
755 if ((*Start)->HasPackages() == false)
756 continue;
757 TotalSize += (*Start)->Size();
758 }
759 return TotalSize;
760 }
761 /*}}}*/
762 // BuildCache - Merge the list of index files into the cache /*{{{*/
763 // ---------------------------------------------------------------------
764 /* */
765 static bool BuildCache(pkgCacheGenerator &Gen,
766 OpProgress &Progress,
767 unsigned long &CurrentSize,unsigned long TotalSize,
768 FileIterator Start, FileIterator End)
769 {
770 FileIterator I;
771 for (I = Start; I != End; I++)
772 {
773 if ((*I)->HasPackages() == false)
774 continue;
775
776 if ((*I)->Exists() == false)
777 continue;
778
779 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
780 {
781 _error->Warning("Duplicate sources.list entry %s",
782 (*I)->Describe().c_str());
783 continue;
784 }
785
786 unsigned long Size = (*I)->Size();
787 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
788 CurrentSize += Size;
789
790 if ((*I)->Merge(Gen,Progress) == false)
791 return false;
792 }
793
794 if (Gen.HasFileDeps() == true)
795 {
796 Progress.Done();
797 TotalSize = ComputeSize(Start, End);
798 CurrentSize = 0;
799 for (I = Start; I != End; I++)
800 {
801 unsigned long Size = (*I)->Size();
802 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
803 CurrentSize += Size;
804 if ((*I)->MergeFileProvides(Gen,Progress) == false)
805 return false;
806 }
807 }
808
809 return true;
810 }
811 /*}}}*/
812 // MakeStatusCache - Construct the status cache /*{{{*/
813 // ---------------------------------------------------------------------
814 /* This makes sure that the status cache (the cache that has all
815    index files from the sources list and all local ones) is ready
816    to be mmapped. If OutMap is not zero then an MMap object representing
817    the cache will be stored there. This is pretty much mandatory if you
818    are using AllowMem. AllowMem lets the function be run by a non-root
819    user, in which case it builds the cache 'fast' into a memory buffer. */
820 bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
821 MMap **OutMap,bool AllowMem)
822 {
823 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
824 unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
825
826 vector<pkgIndexFile *> Files;
827 for (vector<metaIndex *>::const_iterator i = List.begin();
828 i != List.end();
829 i++)
830 {
831 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
832 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
833 j != Indexes->end();
834 j++)
835 Files.push_back (*j);
836 }
837
838 unsigned long const EndOfSource = Files.size();
839 if (_system->AddStatusFiles(Files) == false)
840 return false;
841
842    // Work out the cache file names
843 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
844 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
845
846 // Decide if we can write to the cache
847 bool Writeable = false;
848 if (CacheFile.empty() == false)
849 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
850 else
851 if (SrcCacheFile.empty() == false)
852 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
853 if (Debug == true)
854 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
855
856 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
857 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
858
859 Progress.OverallProgress(0,1,1,_("Reading package lists"));
860
861 // Cache is OK, Fin.
862 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
863 {
864 Progress.OverallProgress(1,1,1,_("Reading package lists"));
865 if (Debug == true)
866 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
867 return true;
868 }
869 else if (Debug == true)
870 std::clog << "pkgcache.bin is NOT valid" << std::endl;
871
872 /* At this point we know we need to reconstruct the package cache,
873 begin. */
874 SPtr<FileFd> CacheF;
875 SPtr<DynamicMMap> Map;
876 if (Writeable == true && CacheFile.empty() == false)
877 {
878 unlink(CacheFile.c_str());
879 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
880 fchmod(CacheF->Fd(),0644);
881 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
882 if (_error->PendingError() == true)
883 return false;
884 if (Debug == true)
885 std::clog << "Open filebased MMap" << std::endl;
886 }
887 else
888 {
889 // Just build it in memory..
890 Map = new DynamicMMap(0,MapSize);
891 if (Debug == true)
892 std::clog << "Open memory Map (not filebased)" << std::endl;
893 }
894
895    // Let's try the source cache.
896 unsigned long CurrentSize = 0;
897 unsigned long TotalSize = 0;
898 if (CheckValidity(SrcCacheFile,Files.begin(),
899 Files.begin()+EndOfSource) == true)
900 {
901 if (Debug == true)
902 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
903 // Preload the map with the source cache
904 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
905 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
906 if ((alloc == 0 && _error->PendingError())
907 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
908 SCacheF.Size()) == false)
909 return false;
910
911 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
912
913 // Build the status cache
914 pkgCacheGenerator Gen(Map.Get(),&Progress);
915 if (_error->PendingError() == true)
916 return false;
917 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
918 Files.begin()+EndOfSource,Files.end()) == false)
919 return false;
920 }
921 else
922 {
923 if (Debug == true)
924 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
925 TotalSize = ComputeSize(Files.begin(),Files.end());
926
927 // Build the source cache
928 pkgCacheGenerator Gen(Map.Get(),&Progress);
929 if (_error->PendingError() == true)
930 return false;
931 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
932 Files.begin(),Files.begin()+EndOfSource) == false)
933 return false;
934
935 // Write it back
936 if (Writeable == true && SrcCacheFile.empty() == false)
937 {
938 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
939 if (_error->PendingError() == true)
940 return false;
941
942 fchmod(SCacheF.Fd(),0644);
943
944 // Write out the main data
945 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
946 return _error->Error(_("IO Error saving source cache"));
947 SCacheF.Sync();
948
949 // Write out the proper header
950 Gen.GetCache().HeaderP->Dirty = false;
951 if (SCacheF.Seek(0) == false ||
952 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
953 return _error->Error(_("IO Error saving source cache"));
954 Gen.GetCache().HeaderP->Dirty = true;
955 SCacheF.Sync();
956 }
957
958 // Build the status cache
959 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
960 Files.begin()+EndOfSource,Files.end()) == false)
961 return false;
962 }
963 if (Debug == true)
964 std::clog << "Caches are ready for shipping" << std::endl;
965
966 if (_error->PendingError() == true)
967 return false;
968 if (OutMap != 0)
969 {
970 if (CacheF != 0)
971 {
972 delete Map.UnGuard();
973 *OutMap = new MMap(*CacheF,0);
974 }
975 else
976 {
977 *OutMap = Map.UnGuard();
978 }
979 }
980
981 return true;
982 }
983 /*}}}*/
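// Usage sketch: building and opening the status cache /*{{{*/
// ---------------------------------------------------------------------
/* Illustrative only: a minimal sketch of driving pkgMakeStatusCache() and
   wrapping the returned map in a pkgCache, the same construction
   CheckValidity() uses above. The progress object, the pkgSourceList named
   "List" and the choice of AllowMem = true are assumptions of the sketch. */
#if 0
   OpProgress Progress;
   MMap *CacheMap = 0;
   if (pkgMakeStatusCache(List,Progress,&CacheMap,true) == false)
      return false;                  // the real error is already queued in _error
   pkgCache Cache(CacheMap);
#endif
/*}}}*/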
984 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
985 // ---------------------------------------------------------------------
986 /* */
987 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
988 {
989 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
990 vector<pkgIndexFile *> Files;
991 unsigned long EndOfSource = Files.size();
992 if (_system->AddStatusFiles(Files) == false)
993 return false;
994
995 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
996 unsigned long CurrentSize = 0;
997 unsigned long TotalSize = 0;
998
999 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1000
1001 // Build the status cache
1002 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1003 pkgCacheGenerator Gen(Map.Get(),&Progress);
1004 if (_error->PendingError() == true)
1005 return false;
1006 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1007 Files.begin()+EndOfSource,Files.end()) == false)
1008 return false;
1009
1010 if (_error->PendingError() == true)
1011 return false;
1012 *OutMap = Map.UnGuard();
1013
1014 return true;
1015 }
1016 /*}}}*/