// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
/* ######################################################################

   Package Cache Generator - Generator for the cache structure.

   This builds the cache structure from the abstract package list parser.

   ##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#define APT_COMPATIBILITY 986

#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/error.h>
#include <apt-pkg/version.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/macros.h>

#include <apt-pkg/tagfile.h>

#include <apti18n.h>

#include <vector>

#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
/*}}}*/
typedef vector<pkgIndexFile *>::iterator FileIterator;

// CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that it is written out to disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
                    Map(*pMap), Cache(pMap,false), Progress(Prog),
                    FoundFileDeps(0)
{
   CurrentFile = 0;
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Set up the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
         return;

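      // Use the pool descriptors stored in the cache header so that the
      // most frequently allocated structures come from fixed-size pools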
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
         _error->Error(_("Cache has an incompatible versioning system"));
         return;
      }
   }

   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
/*}}}*/
// CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   avoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()
{
   if (_error->PendingError() == true)
      return;
   if (Map.Sync() == false)
      return;

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
/*}}}*/
// CacheGenerator::MergeList - Merge the package list /*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine. */
bool pkgCacheGenerator::MergeList(ListParser &List,
                                  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;

      /* As we handle Arch: all packages as architecture-bound, we add
         the same information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
         genArch = APT::Configuration::getArchitectures();
         if (genArch.size() != 1)
            genArch.push_back("all");
      } else
         genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
           arch != genArch.end(); ++arch)
      {
         // Get a pointer to the package structure
         pkgCache::PkgIterator Pkg;
         if (NewPackage(Pkg, PackageName, *arch) == false)
            return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
         Counter++;
         if (Counter % 100 == 0 && Progress != 0)
            Progress->Progress(List.Offset());

         /* Get a pointer to the version structure. We know the list is sorted
            so we use that fact in the search. Insertion of new versions is
            done with correct sorting */
         string Version = List.Version();
         if (Version.empty() == true)
         {
            // we first process the package, then the descriptions
            // (this has the bonus that we get a MMap error when we run out
            // of MMap space)
            if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
               return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
                                    PackageName.c_str());

            // Find the right version to write the description
            MD5SumValue CurMd5 = List.Description_md5();
            pkgCache::VerIterator Ver = Pkg.VersionList();
            map_ptrloc *LastVer = &Pkg->VersionList;

            for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
            {
               pkgCache::DescIterator Desc = Ver.DescriptionList();
               map_ptrloc *LastDesc = &Ver->DescriptionList;
               bool duplicate=false;

               // don't add a new description if we have one for the given
               // md5 && language
               for ( ; Desc.end() == false; Desc++)
                  if (MD5SumValue(Desc.md5()) == CurMd5 &&
                      Desc.LanguageCode() == List.DescriptionLanguage())
                     duplicate=true;
               if(duplicate)
                  continue;

               for (Desc = Ver.DescriptionList();
                    Desc.end() == false;
                    LastDesc = &Desc->NextDesc, Desc++)
               {
                  if (MD5SumValue(Desc.md5()) == CurMd5)
                  {
                     // Add new description
                     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
                     Desc->ParentPkg = Pkg.Index();

                     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
                        return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
                     break;
                  }
               }
            }

            continue;
         }

         pkgCache::VerIterator Ver = Pkg.VersionList();
         map_ptrloc *LastVer = &Pkg->VersionList;
         int Res = 1;
         unsigned long const Hash = List.VersionHash();
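         // The hash summarises key fields of the record, so entries sharing a
         // version string but differing in substance are kept as distinct versions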
         for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
         {
            Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
            // Version is higher than the current one - insert here
            if (Res > 0)
               break;
            // Version strings are equal - is the hash also equal?
            if (Res == 0 && Ver->Hash == Hash)
               break;
            // proceed to the next version until we either find the right one
            // or hit another (lower) version
         }

         /* We already have a version for this item, record that we saw it */
         if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
         {
            if (List.UsePackage(Pkg,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
                                    PackageName.c_str());

            if (NewFileVer(Ver,List) == false)
               return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
                                    PackageName.c_str());

            // Read only a single record and return
            if (OutVer != 0)
            {
               *OutVer = Ver;
               FoundFileDeps |= List.HasFileDeps();
               return true;
            }

            continue;
         }

         // Add a new version
         *LastVer = NewVersion(Ver,Version,*LastVer);
         Ver->ParentPkg = Pkg.Index();
         Ver->Hash = Hash;

         if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
            return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
                                 PackageName.c_str());

         if (List.UsePackage(Pkg,Ver) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
                                 PackageName.c_str());

         if (NewFileVer(Ver,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
                                 PackageName.c_str());

         // Read only a single record and return
         if (OutVer != 0)
         {
            *OutVer = Ver;
            FoundFileDeps |= List.HasFileDeps();
            return true;
         }

         /* Record the Description data. Description data always exists in
            Packages and Translation-* files. */
         pkgCache::DescIterator Desc = Ver.DescriptionList();
         map_ptrloc *LastDesc = &Ver->DescriptionList;

         // Skip to the end of description set
         for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

         // Add new description
         *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
         Desc->ParentPkg = Pkg.Index();

         if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
                             "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
                             "this APT is capable of."));
   return true;
}
/*}}}*/
// CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
// ---------------------------------------------------------------------
/* If we found any file depends while parsing the main list we need to
   resolve them. Since it is undesirable to load the entire list of files
   into the cache as virtual packages, we do a two-stage effort. MergeList
   identifies the file depends and this creates Provides for them by
   re-parsing all the indexes. */
bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;
      string Version = List.Version();
      if (Version.empty() == true)
         continue;

      pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
      if (Pkg.end() == true)
         return _error->Error(_("Error occurred while processing %s (FindPkg)"),
                              PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      unsigned long Hash = List.VersionHash();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      for (; Ver.end() == false; Ver++)
      {
         if (Ver->Hash == Hash && Version == Ver.VerStr())
         {
            if (List.CollectFileProvides(Cache,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
            break;
         }
      }

      if (Ver.end() == true)
         _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
   }

   return true;
}
/*}}}*/
// CacheGenerator::NewGroup - Add a new group /*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
   Grp = Cache.FindGrp(Name);
   if (Grp.end() == false)
      return true;

   // Get a structure
   unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
   if (unlikely(Group == 0))
      return false;

   Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
   Grp->Name = Map.WriteString(Name);
   if (unlikely(Grp->Name == 0))
      return false;

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
   Cache.HeaderP->GrpHashTable[Hash] = Group;

   Cache.HeaderP->GroupCount++;

   return true;
}
/*}}}*/
// CacheGenerator::NewPackage - Add a new package /*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
                                   const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert the package into our package list
   if (Grp->FirstPackage == 0) // the group is new
   {
      // Insert it into the hash table
      unsigned long const Hash = Cache.Hash(Name);
      Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
      Cache.HeaderP->PkgHashTable[Hash] = Package;
      Grp->FirstPackage = Package;
   }
   else // Group the Packages together
   {
      // this package is the new last package
      pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
      Pkg->NextPackage = LastPkg->NextPackage;
      LastPkg->NextPackage = Package;
   }
   Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
/*}}}*/
// CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
// ---------------------------------------------------------------------
/* This links the given version to the package file that is currently
   being parsed, recording the offset and size of its record there. */
bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
                                   ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
   if (VerFile == 0)
      return false;

   pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
   VF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Ver->FileList;
   for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
      Last = &V->NextFile;
   VF->NextFile = *Last;
   *Last = VF.Index();

   VF->Offset = List.Offset();
   VF->Size = List.Size();
   if (Cache.HeaderP->MaxVerFileSize < VF->Size)
      Cache.HeaderP->MaxVerFileSize = VF->Size;
   Cache.HeaderP->VerFileCount++;

   return true;
}
/*}}}*/
// CacheGenerator::NewVersion - Create a new Version /*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
                                            const string &VerStr,
                                            unsigned long Next)
{
   // Get a structure
   unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;

   // Fill it in
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = Map.WriteString(VerStr);
   if (Ver->VerStr == 0)
      return 0;

   return Version;
}
/*}}}*/
// CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
// ---------------------------------------------------------------------
/* This links the given description to the package file that is currently
   being parsed, recording the offset and size of its record there. */
bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
                                    ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
   if (DescFile == 0)
      return false;

   pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
   DF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Desc->FileList;
   for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
      Last = &D->NextFile;

   DF->NextFile = *Last;
   *Last = DF.Index();

   DF->Offset = List.Offset();
   DF->Size = List.Size();
   if (Cache.HeaderP->MaxDescFileSize < DF->Size)
      Cache.HeaderP->MaxDescFileSize = DF->Size;
   Cache.HeaderP->DescFileCount++;

   return true;
}
/*}}}*/
// CacheGenerator::NewDescription - Create a new Description /*{{{*/
// ---------------------------------------------------------------------
/* This puts a description structure in the linked list */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
                                             const string &Lang,
                                             const MD5SumValue &md5sum,
                                             map_ptrloc Next)
{
   // Get a structure
   map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
   if (Description == 0)
      return 0;

   // Fill it in
   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = Map.WriteString(Lang);
   Desc->md5sum = Map.WriteString(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
      return 0;

   return Description;
}
/*}}}*/
// CacheGenerator::FinishCache - do various finish operations /*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in our groups?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create the implicit dependencies between the members of a group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
         string const PkgName = G.Name();
         for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
            if (strcmp(P.Arch(),"all") == 0)
               continue;
            pkgCache::PkgIterator allPkg;
            for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
               string const Arch = V.Arch(true);
               map_ptrloc *OldDepLast = NULL;
               /* MultiArch handling introduces a lot of implicit Dependencies:
                  - MultiArch: same → Co-Installable if they have the same version
                  - Architecture: all → Need to be Co-Installable for internal reasons
                  - All others conflict with all other group members */
               bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
                                       V->MultiArch == pkgCache::Version::Same);
               if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
                  allPkg = G.FindPkg("all");
               for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
                  if (*A == Arch)
                     continue;
                  /* We allow only one installed arch at a time
                     per group, therefore each group member conflicts
                     with all other group members */
                  pkgCache::PkgIterator D = G.FindPkg(*A);
                  if (D.end() == true)
                     continue;
                  if (coInstall == true) {
                     // Replaces: ${self}:other ( << ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::Replaces,
                                OldDepLast);
                     // Breaks: ${self}:other (!= ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     if (V->MultiArch == pkgCache::Version::All) {
                        // Depend on ${self}:all, which itself depends on nothing
                        NewDepends(allPkg, V, V.VerStr(),
                                   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
                                   OldDepLast);
                     }
                  } else {
                     // Conflicts: ${self}:other
                     NewDepends(D, V, "",
                                pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
                                OldDepLast);
                  }
               }
            }
         }
      }
   }
   return true;
}
/*}}}*/
// CacheGenerator::NewDepends - Create a dependency element /*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
                                   pkgCache::VerIterator &Ver,
                                   string const &Version,
                                   unsigned int const &Op,
                                   unsigned int const &Type,
                                   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*    for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
         if (I->Version != 0 && I.TargetVer() == Version)
            Dep->Version = I->Version;*/
      if (Dep->Version == 0)
         if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
            return false;
   }

   // Link it to the package
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
         OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;

   return true;
}
/*}}}*/
// ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
// ---------------------------------------------------------------------
/* This creates the Group and the Package to link this dependency to if
   needed and also handles caching of the old endpoint */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
                                               const string &PackageName,
                                               const string &Arch,
                                               const string &Version,
                                               unsigned int Op,
                                               unsigned int Type)
{
   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
         return false;
   }

   // Is it a file dependency?
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {
      OldDepLast = NULL;
      OldDepVer = Ver;
   }

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
}
/*}}}*/
// ListParser::NewProvides - Create a Provides element /*{{{*/
// ---------------------------------------------------------------------
/* This creates a Provides entry linking the given version to the target
   package, unless the package would merely provide itself. */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
                                                const string &PkgName,
                                                const string &PkgArch,
                                                const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self-referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
/*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
                                   const pkgIndexFile &Index,
                                   unsigned long Flags)
{
   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
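   // Allocate returns offset 0 on failure, which leaves CurrentFile
   // pointing at the base of the file array - catch that here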
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   PkgFileName = File;
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
/*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
                                                 unsigned int Size)
{
   /* We use a very small transient hash table here; it speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
         break;
   }

   // Match
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
/*}}}*/
// CheckValidity - Check that a cache is up-to-date /*{{{*/
// ---------------------------------------------------------------------
/* This verifies that each file in the list of index files exists and has
   attributes matching the cache, and that the cache does not contain any
   extra files. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
         std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
         std::clog << "Errors are pending or Map is empty" << std::endl;
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
         std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
         if (Debug == true)
            std::clog << "Has NO packages" << std::endl;
         continue;
      }

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
         _error->WarningE("stat",_("Couldn't stat source package list %s"),
                          (*Start)->Describe().c_str());
#endif
         if (Debug == true)
            std::clog << "file doesn't exist" << std::endl;
         continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
         if (Debug == true)
            std::clog << "FindInCache returned end-Pointer" << std::endl;
         return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
         std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
         if (Debug == true)
            std::clog << "File with ID " << I << " wasn't visited" << std::endl;
         return false;
      }

   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
         std::clog << "Validity failed because of pending errors:" << std::endl;
         _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }

   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
/*}}}*/
// ComputeSize - Compute the total size of a bunch of files /*{{{*/
// ---------------------------------------------------------------------
/* Size is kind of an abstract notion that is only used for the progress
   meter */
static unsigned long ComputeSize(FileIterator Start,FileIterator End)
{
   unsigned long TotalSize = 0;
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
         continue;
      TotalSize += (*Start)->Size();
   }
   return TotalSize;
}
/*}}}*/
// BuildCache - Merge the list of index files into the cache /*{{{*/
// ---------------------------------------------------------------------
/* This merges each index file in the given range into the cache and, if
   any file dependencies were found, makes a second pass to collect the
   file provides. */
static bool BuildCache(pkgCacheGenerator &Gen,
                       OpProgress &Progress,
                       unsigned long &CurrentSize,unsigned long TotalSize,
                       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
         continue;

      if ((*I)->Exists() == false)
         continue;

      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
         _error->Warning("Duplicate sources.list entry %s",
                         (*I)->Describe().c_str());
         continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
         return false;
   }

   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
         unsigned long Size = (*I)->Size();
         Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
         CurrentSize += Size;
         if ((*I)->MergeFileProvides(Gen,Progress) == false)
            return false;
      }
   }

   return true;
}
/*}}}*/
// MakeStatusCache - Construct the status cache /*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
   index files from the sources list and all local ones) is ready
   to be mmapped. If OutMap is not zero then a MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
                        MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
           j != Indexes->end();
           j++)
         Files.push_back (*j);
   }

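   // Everything before EndOfSource comes from the sources.list indexes;
   // the system status files are appended after this point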
   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
         Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   Progress.OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
         std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
      std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
         return false;
      if (Debug == true)
         std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
         std::clog << "Open memory Map (not filebased)" << std::endl;
   }

   // Let's try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
                     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
          || SCacheF.Read((unsigned char *)Map->Data() + alloc,
                          SCacheF.Size()) == false)
         return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin(),Files.begin()+EndOfSource) == false)
         return false;

      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
         FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
         if (_error->PendingError() == true)
            return false;

         fchmod(SCacheF.Fd(),0644);

         // Write out the main data
         if (SCacheF.Write(Map->Data(),Map->Size()) == false)
            return _error->Error(_("IO Error saving source cache"));
         SCacheF.Sync();

         // Write out the proper header
         Gen.GetCache().HeaderP->Dirty = false;
         if (SCacheF.Seek(0) == false ||
             SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
            return _error->Error(_("IO Error saving source cache"));
         Gen.GetCache().HeaderP->Dirty = true;
         SCacheF.Sync();
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
         delete Map.UnGuard();
         *OutMap = new MMap(*CacheF,0);
      }
      else
      {
         *OutMap = Map.UnGuard();
      }
   }

   return true;
}
/*}}}*/
// MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
// ---------------------------------------------------------------------
/* This builds an in-memory cache from the system status files only,
   without consulting any sources.list indexes. */
bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
{
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
   vector<pkgIndexFile *> Files;
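   // No sources.list indexes are collected here, so EndOfSource stays 0
   // and only the status files added below end up in the cache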
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;

   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

   // Build the status cache
   Progress.OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),&Progress);
   if (_error->PendingError() == true)
      return false;
   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                  Files.begin()+EndOfSource,Files.end()) == false)
      return false;

   // FIXME: move me to a better place
   Gen.FinishCache(Progress);

   if (_error->PendingError() == true)
      return false;
   *OutMap = Map.UnGuard();

   return true;
}
/*}}}*/