1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/strutl.h>
22 #include <apt-pkg/sptr.h>
23 #include <apt-pkg/pkgsystem.h>
24
25 #include <apt-pkg/tagfile.h>
26
27 #include <apti18n.h>
28
29 #include <vector>
30
31 #include <sys/stat.h>
32 #include <unistd.h>
33 #include <errno.h>
34 #include <stdio.h>
35 #include <system.h>
36 /*}}}*/
37 typedef vector<pkgIndexFile *>::iterator FileIterator;
38
39 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
40 // ---------------------------------------------------------------------
41 /* We set the dirty flag and make sure that it is written to the disk */
42 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
43 Map(*pMap), Cache(pMap,false), Progress(Prog),
44 FoundFileDeps(0)
45 {
46 CurrentFile = 0;
47 memset(UniqHash,0,sizeof(UniqHash));
48
49 if (_error->PendingError() == true)
50 return;
51
52 if (Map.Size() == 0)
53 {
54 // Set up the map interface..
55 Cache.HeaderP = (pkgCache::Header *)Map.Data();
56 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
57 return;
58
59 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
60
61 // Starting header
62 *Cache.HeaderP = pkgCache::Header();
63 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
64 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
65 Cache.ReMap();
66 }
67 else
68 {
69 // Map directly from the existing file
70 Cache.ReMap();
71 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
72 if (Cache.VS != _system->VS)
73 {
74 _error->Error(_("Cache has an incompatible versioning system"));
75 return;
76 }
77 }
78
79 Cache.HeaderP->Dirty = true;
80 Map.Sync(0,sizeof(pkgCache::Header));
81 }
82 /*}}}*/
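// A minimal usage sketch (hypothetical, for illustration only): the generator
// is layered over a DynamicMMap, either file-backed or purely in memory.
//
//    DynamicMMap Map(0,4*1024*1024);           // flags = 0, arbitrary workspace
//    OpProgress Progress;
//    pkgCacheGenerator Gen(&Map,&Progress);    // sets Dirty = true in the header
//    if (_error->PendingError() == true)
//       return false;
//
// pkgMakeOnlyStatusCache() at the end of this file builds the real cache this way.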
83 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
84 // ---------------------------------------------------------------------
85 /* We sync the data then unset the dirty flag in two steps so as to
86 avoid a problem during a crash */
87 pkgCacheGenerator::~pkgCacheGenerator()
88 {
89 if (_error->PendingError() == true)
90 return;
91 if (Map.Sync() == false)
92 return;
93
94 Cache.HeaderP->Dirty = false;
95 Map.Sync(0,sizeof(pkgCache::Header));
96 }
97 /*}}}*/
98 // CacheGenerator::MergeList - Merge the package list /*{{{*/
99 // ---------------------------------------------------------------------
100 /* This generates the entries in the cache. Each loop iteration
101 processes a single package record from the underlying parse engine. */
102 bool pkgCacheGenerator::MergeList(ListParser &List,
103 pkgCache::VerIterator *OutVer)
104 {
105 List.Owner = this;
106
107 unsigned int Counter = 0;
108 while (List.Step() == true)
109 {
110 // Get a pointer to the package structure
111 string const PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115 pkgCache::PkgIterator Pkg;
116 if (NewPackage(Pkg, PackageName, List.Architecture()) == false)
117 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
118 Counter++;
119 if (Counter % 100 == 0 && Progress != 0)
120 Progress->Progress(List.Offset());
121
122 /* Get a pointer to the version structure. We know the list is sorted
123 so we use that fact in the search. Insertion of new versions is
124 done with correct sorting */
125 string Version = List.Version();
126 if (Version.empty() == true)
127 {
128 // we first process the package, then the descriptions
129 // (this has the bonus that we get an MMap error when we run out
130 // of MMap space)
131 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
132 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
133 PackageName.c_str());
134
135 // Find the right version to write the description
136 MD5SumValue CurMd5 = List.Description_md5();
137 pkgCache::VerIterator Ver = Pkg.VersionList();
138 map_ptrloc *LastVer = &Pkg->VersionList;
139
140 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
141 {
142 pkgCache::DescIterator Desc = Ver.DescriptionList();
143 map_ptrloc *LastDesc = &Ver->DescriptionList;
144 bool duplicate=false;
145
146 // don't add a new description if we have one for the given
147 // md5 and language
148 for ( ; Desc.end() == false; Desc++)
149 if (MD5SumValue(Desc.md5()) == CurMd5 &&
150 Desc.LanguageCode() == List.DescriptionLanguage())
151 duplicate=true;
152 if(duplicate)
153 continue;
154
155 for (Desc = Ver.DescriptionList();
156 Desc.end() == false;
157 LastDesc = &Desc->NextDesc, Desc++)
158 {
159 if (MD5SumValue(Desc.md5()) == CurMd5)
160 {
161 // Add new description
162 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
163 Desc->ParentPkg = Pkg.Index();
164
165 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
166 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
167 break;
168 }
169 }
170 }
171
172 continue;
173 }
174
175 pkgCache::VerIterator Ver = Pkg.VersionList();
176 map_ptrloc *LastVer = &Pkg->VersionList;
177 int Res = 1;
178 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
179 {
180 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
181 if (Res >= 0)
182 break;
183 }
184
185 /* We already have a version for this item, record that we
186 saw it */
187 unsigned long Hash = List.VersionHash();
188 if (Res == 0 && Ver->Hash == Hash)
189 {
190 if (List.UsePackage(Pkg,Ver) == false)
191 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
192 PackageName.c_str());
193
194 if (NewFileVer(Ver,List) == false)
195 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
196 PackageName.c_str());
197
198 // Read only a single record and return
199 if (OutVer != 0)
200 {
201 *OutVer = Ver;
202 FoundFileDeps |= List.HasFileDeps();
203 return true;
204 }
205
206 continue;
207 }
208
209 // Skip to the end of the same version set.
210 if (Res == 0)
211 {
212 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
213 {
214 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
215 if (Res != 0)
216 break;
217 }
218 }
219
220 // Add a new version
221 *LastVer = NewVersion(Ver,Version,*LastVer);
222 Ver->ParentPkg = Pkg.Index();
223 Ver->Hash = Hash;
224
225 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
226 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
227 PackageName.c_str());
228
229 if (List.UsePackage(Pkg,Ver) == false)
230 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
231 PackageName.c_str());
232
233 if (NewFileVer(Ver,List) == false)
234 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
235 PackageName.c_str());
236
237 // Read only a single record and return
238 if (OutVer != 0)
239 {
240 *OutVer = Ver;
241 FoundFileDeps |= List.HasFileDeps();
242 return true;
243 }
244
245 /* Record the Description data. Description data always exists in
246 Packages and Translation-* files. */
247 pkgCache::DescIterator Desc = Ver.DescriptionList();
248 map_ptrloc *LastDesc = &Ver->DescriptionList;
249
250 // Skip to the end of description set
251 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
252
253 // Add new description
254 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
255 Desc->ParentPkg = Pkg.Index();
256
257 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
258 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
259 }
260
261 FoundFileDeps |= List.HasFileDeps();
262
263 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
264 return _error->Error(_("Wow, you exceeded the number of package "
265 "names this APT is capable of."));
266 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
267 return _error->Error(_("Wow, you exceeded the number of versions "
268 "this APT is capable of."));
269 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
270 return _error->Error(_("Wow, you exceeded the number of descriptions "
271 "this APT is capable of."));
272 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
273 return _error->Error(_("Wow, you exceeded the number of dependencies "
274 "this APT is capable of."));
275 return true;
276 }
277 /*}}}*/
278 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
279 // ---------------------------------------------------------------------
280 /* If we found any file depends while parsing the main list we need to
281 resolve them. Since it is undesirable to load the entire list of files
282 into the cache as virtual packages, we do a two-stage effort. MergeList
283 identifies the file depends and this creates Provides for them by
284 re-parsing all the indexes. */
285 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
286 {
287 List.Owner = this;
288
289 unsigned int Counter = 0;
290 while (List.Step() == true)
291 {
292 string PackageName = List.Package();
293 if (PackageName.empty() == true)
294 return false;
295 string Version = List.Version();
296 if (Version.empty() == true)
297 continue;
298
299 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
300 if (Pkg.end() == true)
301 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
302 PackageName.c_str());
303 Counter++;
304 if (Counter % 100 == 0 && Progress != 0)
305 Progress->Progress(List.Offset());
306
307 unsigned long Hash = List.VersionHash();
308 pkgCache::VerIterator Ver = Pkg.VersionList();
309 for (; Ver.end() == false; Ver++)
310 {
311 if (Ver->Hash == Hash && Version == Ver.VerStr()) // compare contents, not pointers
312 {
313 if (List.CollectFileProvides(Cache,Ver) == false)
314 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
315 break;
316 }
317 }
318
319 if (Ver.end() == true)
320 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
321 }
322
323 return true;
324 }
325 /*}}}*/
326 // CacheGenerator::NewGroup - Add a new group /*{{{*/
327 // ---------------------------------------------------------------------
328 /* This creates a new group structure and adds it to the hash table */
329 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
330 Grp = Cache.FindGrp(Name);
331 if (Grp.end() == false)
332 return true;
333
334 // Get a structure
335 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
336 if (unlikely(Group == 0))
337 return false;
338
339 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
340 Grp->Name = Map.WriteString(Name);
341 if (unlikely(Grp->Name == 0))
342 return false;
343
344 // Insert it into the hash table
345 unsigned long const Hash = Cache.Hash(Name);
346 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
347 Cache.HeaderP->GrpHashTable[Hash] = Group;
348
349 Cache.HeaderP->GroupCount++;
350
351 return true;
352 }
353 /*}}}*/
354 // CacheGenerator::NewPackage - Add a new package /*{{{*/
355 // ---------------------------------------------------------------------
356 /* This creates a new package structure and adds it to the hash table */
357 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
358 const string &Arch) {
359 pkgCache::GrpIterator Grp;
360 if (unlikely(NewGroup(Grp, Name) == false))
361 return false;
362
363 Pkg = Grp.FindPkg(Arch);
364 if (Pkg.end() == false)
365 return true;
366
367 // Get a structure
368 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
369 if (unlikely(Package == 0))
370 return false;
371 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
372
373 // Insert it into the hash table
374 unsigned long const Hash = Cache.Hash(Name);
375 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
376 Cache.HeaderP->PkgHashTable[Hash] = Package;
377
378 // remember the packages in the group
379 Grp->FirstPackage = Package;
380 if (Grp->LastPackage == 0)
381 Grp->LastPackage = Package;
382
383 // Set the name, arch and the ID
384 Pkg->Name = Grp->Name;
385 Pkg->Group = Grp.Index();
386 Pkg->Arch = WriteUniqString(Arch.c_str());
387 if (unlikely(Pkg->Arch == 0))
388 return false;
389 Pkg->ID = Cache.HeaderP->PackageCount++;
390
391 return true;
392 }
393 /*}}}*/
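// Sketch of how a package created above is found again (hypothetical names,
// for illustration only): lookup goes through the group and then the
// per-architecture package entry.
//
//    pkgCache::GrpIterator Grp = Cache.FindGrp("libc6");   // group by name
//    pkgCache::PkgIterator Pkg = Grp.FindPkg("amd64");     // package by arch
//    if (Pkg.end() == false)
//       ; // Pkg->Name, Pkg->Arch and Pkg->Group are set as above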
394 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
395 // ---------------------------------------------------------------------
396 /* */
397 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
398 ListParser &List)
399 {
400 if (CurrentFile == 0)
401 return true;
402
403 // Get a structure
404 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
405 if (VerFile == 0)
406 return false;
407
408 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
409 VF->File = CurrentFile - Cache.PkgFileP;
410
411 // Link it to the end of the list
412 map_ptrloc *Last = &Ver->FileList;
413 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
414 Last = &V->NextFile;
415 VF->NextFile = *Last;
416 *Last = VF.Index();
417
418 VF->Offset = List.Offset();
419 VF->Size = List.Size();
420 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
421 Cache.HeaderP->MaxVerFileSize = VF->Size;
422 Cache.HeaderP->VerFileCount++;
423
424 return true;
425 }
426 /*}}}*/
427 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
428 // ---------------------------------------------------------------------
429 /* This puts a version structure in the linked list */
430 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
431 const string &VerStr,
432 unsigned long Next)
433 {
434 // Get a structure
435 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
436 if (Version == 0)
437 return 0;
438
439 // Fill it in
440 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
441 Ver->NextVer = Next;
442 Ver->ID = Cache.HeaderP->VersionCount++;
443 Ver->VerStr = Map.WriteString(VerStr);
444 if (Ver->VerStr == 0)
445 return 0;
446
447 return Version;
448 }
449 /*}}}*/
450 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
451 // ---------------------------------------------------------------------
452 /* */
453 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
454 ListParser &List)
455 {
456 if (CurrentFile == 0)
457 return true;
458
459 // Get a structure
460 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
461 if (DescFile == 0)
462 return false;
463
464 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
465 DF->File = CurrentFile - Cache.PkgFileP;
466
467 // Link it to the end of the list
468 map_ptrloc *Last = &Desc->FileList;
469 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
470 Last = &D->NextFile;
471
472 DF->NextFile = *Last;
473 *Last = DF.Index();
474
475 DF->Offset = List.Offset();
476 DF->Size = List.Size();
477 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
478 Cache.HeaderP->MaxDescFileSize = DF->Size;
479 Cache.HeaderP->DescFileCount++;
480
481 return true;
482 }
483 /*}}}*/
484 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
485 // ---------------------------------------------------------------------
486 /* This puts a description structure in the linked list */
487 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
488 const string &Lang,
489 const MD5SumValue &md5sum,
490 map_ptrloc Next)
491 {
492 // Get a structure
493 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
494 if (Description == 0)
495 return 0;
496
497 // Fill it in
498 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
499 Desc->NextDesc = Next;
500 Desc->ID = Cache.HeaderP->DescriptionCount++;
501 Desc->language_code = Map.WriteString(Lang);
502 Desc->md5sum = Map.WriteString(md5sum.Value());
503 if (Desc->language_code == 0 || Desc->md5sum == 0)
504 return 0;
505
506 return Description;
507 }
508 /*}}}*/
509 // ListParser::NewDepends - Create a dependency element /*{{{*/
510 // ---------------------------------------------------------------------
511 /* This creates a dependency element in the tree. It is linked to the
512 version and to the package that it is pointing to. */
513 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
514 const string &PackageName,
515 const string &Arch,
516 const string &Version,
517 unsigned int Op,
518 unsigned int Type)
519 {
520 pkgCache &Cache = Owner->Cache;
521
522 // Get a structure
523 unsigned long const Dependency = Owner->Map.Allocate(sizeof(pkgCache::Dependency));
524 if (unlikely(Dependency == 0))
525 return false;
526
527 // Fill it in
528 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
529 Dep->ParentVer = Ver.Index();
530 Dep->Type = Type;
531 Dep->CompareOp = Op;
532 Dep->ID = Cache.HeaderP->DependsCount++;
533
534 pkgCache::GrpIterator Grp;
535 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
536 return false;
537
538 // Locate the target package
539 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
540 if (Pkg.end() == true) {
541 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
542 return false;
543 }
544
545 // Probe the reverse dependency list for a version string that matches
546 if (Version.empty() == false)
547 {
548 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
549 if (I->Version != 0 && I.TargetVer() == Version)
550 Dep->Version = I->Version;*/
551 if (Dep->Version == 0)
552 if (unlikely((Dep->Version = WriteString(Version)) == 0))
553 return false;
554 }
555
556 // Link it to the package
557 Dep->Package = Pkg.Index();
558 Dep->NextRevDepends = Pkg->RevDepends;
559 Pkg->RevDepends = Dep.Index();
560
561 /* Link it to the version (at the end of the list)
562 Caching the old end point speeds up generation substantially */
563 if (OldDepVer != Ver)
564 {
565 OldDepLast = &Ver->DependsList;
566 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
567 OldDepLast = &D->NextDepends;
568 OldDepVer = Ver;
569 }
570
571 // Is it a file dependency?
572 if (unlikely(PackageName[0] == '/'))
573 FoundFileDeps = true;
574
575 Dep->NextDepends = *OldDepLast;
576 *OldDepLast = Dep.Index();
577 OldDepLast = &Dep->NextDepends;
578
579 return true;
580 }
581 /*}}}*/
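// A hedged sketch of how a list parser might emit a dependency (the Dep
// flag values are assumed from pkgcache.h, the other names are made up):
//
//    if (NewDepends(Ver, "libfoo", Arch, "1.2-1",
//                   pkgCache::Dep::GreaterEq, pkgCache::Dep::Depends) == false)
//       return false;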
582 // ListParser::NewProvides - Create a Provides element /*{{{*/
583 // ---------------------------------------------------------------------
584 /* */
585 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
586 const string &PackageName,
587 const string &Version)
588 {
589 pkgCache &Cache = Owner->Cache;
590
591 // We do not add self referencing provides
592 if (unlikely(Ver.ParentPkg().Name() == PackageName))
593 return true;
594
595 // Get a structure
596 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
597 if (unlikely(Provides == 0))
598 return false;
599 Cache.HeaderP->ProvidesCount++;
600
601 // Fill it in
602 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
603 Prv->Version = Ver.Index();
604 Prv->NextPkgProv = Ver->ProvidesList;
605 Ver->ProvidesList = Prv.Index();
606 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
607 return false;
608
609 // Locate the target package
610 pkgCache::PkgIterator Pkg;
611 if (unlikely(Owner->NewPackage(Pkg,PackageName,string(Ver.Arch())) == false))
612 return false;
613
614 // Link it to the package
615 Prv->ParentPkg = Pkg.Index();
616 Prv->NextProvides = Pkg->ProvidesList;
617 Pkg->ProvidesList = Prv.Index();
618
619 return true;
620 }
621 /*}}}*/
622 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
623 // ---------------------------------------------------------------------
624 /* This is used to select which file is to be associated with all newly
625 added versions. The caller is responsible for setting the IMS fields. */
626 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
627 const pkgIndexFile &Index,
628 unsigned long Flags)
629 {
630 // Get some space for the structure
631 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
632 if (CurrentFile == Cache.PkgFileP)
633 return false;
634
635 // Fill it in
636 CurrentFile->FileName = Map.WriteString(File);
637 CurrentFile->Site = WriteUniqString(Site);
638 CurrentFile->NextFile = Cache.HeaderP->FileList;
639 CurrentFile->Flags = Flags;
640 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
641 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
642 PkgFileName = File;
643 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
644 Cache.HeaderP->PackageFileCount++;
645
646 if (CurrentFile->FileName == 0)
647 return false;
648
649 if (Progress != 0)
650 Progress->SubProgress(Index.Size());
651 return true;
652 }
653 /*}}}*/
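// A rough sketch of the expected calling order (hypothetical caller, e.g. an
// index file's Merge() implementation): SelectFile() is invoked before
// MergeList() so the VerFile/DescFile records created above point at the
// right package file.
//
//    if (Gen.SelectFile(PackagesFile,Site,*this,Flags) == false)
//       return false;
//    if (Gen.MergeList(Parser) == false)
//       return false;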
654 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
655 // ---------------------------------------------------------------------
656 /* This is used to create handles to strings. Given the same text it
657 always returns the same number */
658 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
659 unsigned int Size)
660 {
661 /* We use a very small transient hash table here; it speeds up generation
662 by a fair amount on slower machines */
663 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
664 if (Bucket != 0 &&
665 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
666 return Bucket->String;
667
668 // Search for an insertion point
669 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
670 int Res = 1;
671 map_ptrloc *Last = &Cache.HeaderP->StringList;
672 for (; I != Cache.StringItemP; Last = &I->NextItem,
673 I = Cache.StringItemP + I->NextItem)
674 {
675 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
676 if (Res >= 0)
677 break;
678 }
679
680 // Match
681 if (Res == 0)
682 {
683 Bucket = I;
684 return I->String;
685 }
686
687 // Get a structure
688 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
689 if (Item == 0)
690 return 0;
691
692 // Fill in the structure
693 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
694 ItemP->NextItem = I - Cache.StringItemP;
695 *Last = Item;
696 ItemP->String = Map.WriteString(S,Size);
697 if (ItemP->String == 0)
698 return 0;
699
700 Bucket = ItemP;
701 return ItemP->String;
702 }
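// Illustration of the uniqueness guarantee (hypothetical values): identical
// text yields the identical offset, so the returned handles can be compared
// directly instead of the strings themselves.
//
//    unsigned long A = WriteUniqString("amd64",5);
//    unsigned long B = WriteUniqString("amd64",5);   // answered from UniqHash
//    // A == B holds here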
703 /*}}}*/
704 // CheckValidity - Check that a cache is up-to-date /*{{{*/
705 // ---------------------------------------------------------------------
706 /* This just verifies that each file in the list of index files exists
707 and has matching attributes in the cache, and that the cache does not
708 have any extra files. */
709 static bool CheckValidity(const string &CacheFile, FileIterator Start,
710 FileIterator End,MMap **OutMap = 0)
711 {
712 // No file, certainly invalid
713 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
714 return false;
715
716 // Map it
717 FileFd CacheF(CacheFile,FileFd::ReadOnly);
718 SPtr<MMap> Map = new MMap(CacheF,0);
719 pkgCache Cache(Map);
720 if (_error->PendingError() == true || Map->Size() == 0)
721 {
722 _error->Discard();
723 return false;
724 }
725
726 /* Now we check every index file, see if it is in the cache,
727 verify the IMS data and check that it is on the disk too.. */
728 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
729 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
730 for (; Start != End; Start++)
731 {
732 if ((*Start)->HasPackages() == false)
733 continue;
734
735 if ((*Start)->Exists() == false)
736 {
737 #if 0 // mvo: we no longer give a message here (Default Sources spec)
738 _error->WarningE("stat",_("Couldn't stat source package list %s"),
739 (*Start)->Describe().c_str());
740 #endif
741 continue;
742 }
743
744 // FindInCache is also expected to do an IMS check.
745 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
746 if (File.end() == true)
747 return false;
748
749 Visited[File->ID] = true;
750 }
751
752 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
753 if (Visited[I] == false)
754 return false;
755
756 if (_error->PendingError() == true)
757 {
758 _error->Discard();
759 return false;
760 }
761
762 if (OutMap != 0)
763 *OutMap = Map.UnGuard();
764 return true;
765 }
766 /*}}}*/
767 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
768 // ---------------------------------------------------------------------
769 /* Size is kind of an abstract notion that is only used for the progress
770 meter */
771 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
772 {
773 unsigned long TotalSize = 0;
774 for (; Start != End; Start++)
775 {
776 if ((*Start)->HasPackages() == false)
777 continue;
778 TotalSize += (*Start)->Size();
779 }
780 return TotalSize;
781 }
782 /*}}}*/
783 // BuildCache - Merge the list of index files into the cache /*{{{*/
784 // ---------------------------------------------------------------------
785 /* */
786 static bool BuildCache(pkgCacheGenerator &Gen,
787 OpProgress &Progress,
788 unsigned long &CurrentSize,unsigned long TotalSize,
789 FileIterator Start, FileIterator End)
790 {
791 FileIterator I;
792 for (I = Start; I != End; I++)
793 {
794 if ((*I)->HasPackages() == false)
795 continue;
796
797 if ((*I)->Exists() == false)
798 continue;
799
800 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
801 {
802 _error->Warning("Duplicate sources.list entry %s",
803 (*I)->Describe().c_str());
804 continue;
805 }
806
807 unsigned long Size = (*I)->Size();
808 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
809 CurrentSize += Size;
810
811 if ((*I)->Merge(Gen,Progress) == false)
812 return false;
813 }
814
815 if (Gen.HasFileDeps() == true)
816 {
817 Progress.Done();
818 TotalSize = ComputeSize(Start, End);
819 CurrentSize = 0;
820 for (I = Start; I != End; I++)
821 {
822 unsigned long Size = (*I)->Size();
823 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
824 CurrentSize += Size;
825 if ((*I)->MergeFileProvides(Gen,Progress) == false)
826 return false;
827 }
828 }
829
830 return true;
831 }
832 /*}}}*/
833 // MakeStatusCache - Construct the status cache /*{{{*/
834 // ---------------------------------------------------------------------
835 /* This makes sure that the status cache (the cache that has all
836 index files from the sources list and all local ones) is ready
837 to be mmapped. If OutMap is not zero then an MMap object representing
838 the cache will be stored there. This is pretty much mandatory if you
839 are using AllowMem. AllowMem lets the function be run as non-root,
840 where it builds the cache 'fast' into a memory buffer. */
841 bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
842 MMap **OutMap,bool AllowMem)
843 {
844 unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
845
846 vector<pkgIndexFile *> Files;
847 for (vector<metaIndex *>::const_iterator i = List.begin();
848 i != List.end();
849 i++)
850 {
851 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
852 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
853 j != Indexes->end();
854 j++)
855 Files.push_back (*j);
856 }
857
858 unsigned long EndOfSource = Files.size();
859 if (_system->AddStatusFiles(Files) == false)
860 return false;
861
862 // Work out the cache file locations
863 string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
864 string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
865
866 // Decide if we can write to the cache
867 bool Writeable = false;
868 if (CacheFile.empty() == false)
869 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
870 else
871 if (SrcCacheFile.empty() == false)
872 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
873
874 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
875 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
876
877 Progress.OverallProgress(0,1,1,_("Reading package lists"));
878
879 // Cache is OK, Fin.
880 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
881 {
882 Progress.OverallProgress(1,1,1,_("Reading package lists"));
883 return true;
884 }
885
886 /* At this point we know we need to reconstruct the package cache,
887 begin. */
888 SPtr<FileFd> CacheF;
889 SPtr<DynamicMMap> Map;
890 if (Writeable == true && CacheFile.empty() == false)
891 {
892 unlink(CacheFile.c_str());
893 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
894 fchmod(CacheF->Fd(),0644);
895 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
896 if (_error->PendingError() == true)
897 return false;
898 }
899 else
900 {
901 // Just build it in memory..
902 Map = new DynamicMMap(0,MapSize);
903 }
904
905 // Let's try the source cache.
906 unsigned long CurrentSize = 0;
907 unsigned long TotalSize = 0;
908 if (CheckValidity(SrcCacheFile,Files.begin(),
909 Files.begin()+EndOfSource) == true)
910 {
911 // Preload the map with the source cache
912 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
913 unsigned long alloc = Map->RawAllocate(SCacheF.Size());
914 if ((alloc == 0 && _error->PendingError())
915 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
916 SCacheF.Size()) == false)
917 return false;
918
919 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
920
921 // Build the status cache
922 pkgCacheGenerator Gen(Map.Get(),&Progress);
923 if (_error->PendingError() == true)
924 return false;
925 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
926 Files.begin()+EndOfSource,Files.end()) == false)
927 return false;
928 }
929 else
930 {
931 TotalSize = ComputeSize(Files.begin(),Files.end());
932
933 // Build the source cache
934 pkgCacheGenerator Gen(Map.Get(),&Progress);
935 if (_error->PendingError() == true)
936 return false;
937 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
938 Files.begin(),Files.begin()+EndOfSource) == false)
939 return false;
940
941 // Write it back
942 if (Writeable == true && SrcCacheFile.empty() == false)
943 {
944 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
945 if (_error->PendingError() == true)
946 return false;
947
948 fchmod(SCacheF.Fd(),0644);
949
950 // Write out the main data
951 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
952 return _error->Error(_("IO Error saving source cache"));
953 SCacheF.Sync();
954
955 // Write out the proper header
956 Gen.GetCache().HeaderP->Dirty = false;
957 if (SCacheF.Seek(0) == false ||
958 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
959 return _error->Error(_("IO Error saving source cache"));
960 Gen.GetCache().HeaderP->Dirty = true;
961 SCacheF.Sync();
962 }
963
964 // Build the status cache
965 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
966 Files.begin()+EndOfSource,Files.end()) == false)
967 return false;
968 }
969
970 if (_error->PendingError() == true)
971 return false;
972 if (OutMap != 0)
973 {
974 if (CacheF != 0)
975 {
976 delete Map.UnGuard();
977 *OutMap = new MMap(*CacheF,0);
978 }
979 else
980 {
981 *OutMap = Map.UnGuard();
982 }
983 }
984
985 return true;
986 }
987 /*}}}*/
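// A hedged sketch of a typical caller (assumes pkgSourceList::ReadMainList()
// from apt-pkg/sourcelist.h; error handling trimmed):
//
//    pkgSourceList List;
//    OpProgress Progress;
//    MMap *Map = 0;
//    if (List.ReadMainList() == false ||
//        pkgMakeStatusCache(List,Progress,&Map,true) == false)
//       return false;
//    pkgCache Cache(Map);     // map the finished cache, as CheckValidity() does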
988 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
989 // ---------------------------------------------------------------------
990 /* */
991 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
992 {
993 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
994 vector<pkgIndexFile *> Files;
995 unsigned long EndOfSource = Files.size();
996 if (_system->AddStatusFiles(Files) == false)
997 return false;
998
999 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1000 unsigned long CurrentSize = 0;
1001 unsigned long TotalSize = 0;
1002
1003 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1004
1005 // Build the status cache
1006 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1007 pkgCacheGenerator Gen(Map.Get(),&Progress);
1008 if (_error->PendingError() == true)
1009 return false;
1010 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1011 Files.begin()+EndOfSource,Files.end()) == false)
1012 return false;
1013
1014 if (_error->PendingError() == true)
1015 return false;
1016 *OutMap = Map.UnGuard();
1017
1018 return true;
1019 }
1020 /*}}}*/