apt-pkg/pkgcachegen.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41 // ---------------------------------------------------------------------
42 /* We set the dirty flag and make sure that it is written to the disk */
43 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
44 Map(*pMap), Cache(pMap,false), Progress(Prog),
45 FoundFileDeps(0)
46 {
47 CurrentFile = 0;
48 memset(UniqHash,0,sizeof(UniqHash));
49
50 if (_error->PendingError() == true)
51 return;
52
53 if (Map.Size() == 0)
54 {
55 // Setup the map interface..
56 Cache.HeaderP = (pkgCache::Header *)Map.Data();
57 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
58 return;
59
60 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
61
62 // Starting header
63 *Cache.HeaderP = pkgCache::Header();
64 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
65 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
66 Cache.ReMap();
67 }
68 else
69 {
70 // Map directly from the existing file
71 Cache.ReMap();
72 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
73 if (Cache.VS != _system->VS)
74 {
75 _error->Error(_("Cache has an incompatible versioning system"));
76 return;
77 }
78 }
79
80 Cache.HeaderP->Dirty = true;
81 Map.Sync(0,sizeof(pkgCache::Header));
82 }
83 /*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87 avoid a problem during a crash */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
99 // CacheGenerator::MergeList - Merge the package list /*{{{*/
100 // ---------------------------------------------------------------------
101 /* This generates the entries in the cache. Each iteration of the loop
102 processes a single package record from the underlying parse engine. */
103 bool pkgCacheGenerator::MergeList(ListParser &List,
104 pkgCache::VerIterator *OutVer)
105 {
106 List.Owner = this;
107
108 unsigned int Counter = 0;
109 while (List.Step() == true)
110 {
111 string const PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115 /* As we handle Architecture: all packages as architecture-bound
116 we add their information to every (simulated) architecture package */
117 std::vector<string> genArch;
118 if (List.ArchitectureAll() == true) {
119 genArch = APT::Configuration::getArchitectures();
120 if (genArch.size() != 1)
121 genArch.push_back("all");
122 } else
123 genArch.push_back(List.Architecture());
124
125 for (std::vector<string>::const_iterator arch = genArch.begin();
126 arch != genArch.end(); ++arch)
127 {
128 // Get a pointer to the package structure
129 pkgCache::PkgIterator Pkg;
130 if (NewPackage(Pkg, PackageName, *arch) == false)
131 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
132 Counter++;
133 if (Counter % 100 == 0 && Progress != 0)
134 Progress->Progress(List.Offset());
135
136 /* Get a pointer to the version structure. We know the list is sorted
137 so we use that fact in the search. Insertion of new versions is
138 done with correct sorting */
139 string Version = List.Version();
140 if (Version.empty() == true)
141 {
142 // we first process the package, then the descriptions
143 // (this has the bonus that we get an MMap error when we run
144 // out of MMap space)
145 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
146 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
147 PackageName.c_str());
148
149 // Find the right version to write the description
150 MD5SumValue CurMd5 = List.Description_md5();
151 pkgCache::VerIterator Ver = Pkg.VersionList();
152 map_ptrloc *LastVer = &Pkg->VersionList;
153
154 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
155 {
156 pkgCache::DescIterator Desc = Ver.DescriptionList();
157 map_ptrloc *LastDesc = &Ver->DescriptionList;
158 bool duplicate=false;
159
160 // don't add a new description if we have one for the given
161 // md5 && language
162 for ( ; Desc.end() == false; Desc++)
163 if (MD5SumValue(Desc.md5()) == CurMd5 &&
164 Desc.LanguageCode() == List.DescriptionLanguage())
165 duplicate=true;
166 if(duplicate)
167 continue;
168
169 for (Desc = Ver.DescriptionList();
170 Desc.end() == false;
171 LastDesc = &Desc->NextDesc, Desc++)
172 {
173 if (MD5SumValue(Desc.md5()) == CurMd5)
174 {
175 // Add new description
176 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
177 Desc->ParentPkg = Pkg.Index();
178
179 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
180 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
181 break;
182 }
183 }
184 }
185
186 continue;
187 }
188
189 pkgCache::VerIterator Ver = Pkg.VersionList();
190 map_ptrloc *LastVer = &Pkg->VersionList;
191 int Res = 1;
192 unsigned long const Hash = List.VersionHash();
193 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
194 {
195 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
196 // Version is higher than the current version - insert here
197 if (Res > 0)
198 break;
199 // Version strings are equal - is the hash also equal?
200 if (Res == 0 && Ver->Hash == Hash)
201 break;
202 // proceed to the next one until we either find the right one
203 // or hit a version that sorts lower
204 }
205
206 /* We already have a version for this item, record that we saw it */
207 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
208 {
209 if (List.UsePackage(Pkg,Ver) == false)
210 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
211 PackageName.c_str());
212
213 if (NewFileVer(Ver,List) == false)
214 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
215 PackageName.c_str());
216
217 // Read only a single record and return
218 if (OutVer != 0)
219 {
220 *OutVer = Ver;
221 FoundFileDeps |= List.HasFileDeps();
222 return true;
223 }
224
225 continue;
226 }
227
228 // Add a new version
229 *LastVer = NewVersion(Ver,Version,*LastVer);
230 Ver->ParentPkg = Pkg.Index();
231 Ver->Hash = Hash;
232
233 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
234 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
235 PackageName.c_str());
236
237 if (List.UsePackage(Pkg,Ver) == false)
238 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
239 PackageName.c_str());
240
241 if (NewFileVer(Ver,List) == false)
242 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
243 PackageName.c_str());
244
245 // Read only a single record and return
246 if (OutVer != 0)
247 {
248 *OutVer = Ver;
249 FoundFileDeps |= List.HasFileDeps();
250 return true;
251 }
252
253 /* Record the Description data. Description data always exist in
254 Packages and Translation-* files. */
255 pkgCache::DescIterator Desc = Ver.DescriptionList();
256 map_ptrloc *LastDesc = &Ver->DescriptionList;
257
258 // Skip to the end of description set
259 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
260
261 // Add new description
262 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
263 Desc->ParentPkg = Pkg.Index();
264
265 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
266 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
267 }
268 }
269
270 FoundFileDeps |= List.HasFileDeps();
271
272 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
273 return _error->Error(_("Wow, you exceeded the number of package "
274 "names this APT is capable of."));
275 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
276 return _error->Error(_("Wow, you exceeded the number of versions "
277 "this APT is capable of."));
278 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
279 return _error->Error(_("Wow, you exceeded the number of descriptions "
280 "this APT is capable of."));
281 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
282 return _error->Error(_("Wow, you exceeded the number of dependencies "
283 "this APT is capable of."));
284 return true;
285 }
286 /*}}}*/
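// Added illustrative sketch (not part of the original source): a caller
// normally selects the index file first and then merges its records,
// roughly along these lines (Parser stands for some concrete ListParser
// subclass, File/Site/Index/Flags for index-specific values):
//
//    pkgCacheGenerator Gen(Map,&Progress);
//    if (Gen.SelectFile(File,Site,Index,Flags) == false ||
//        Gen.MergeList(Parser) == false)
//       return _error->Error("Unable to merge %s",File.c_str());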
287 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
288 // ---------------------------------------------------------------------
289 /* If we found any file depends while parsing the main list we need to
290 resolve them. Since it is undesirable to load the entire list of files
291 into the cache as virtual packages we make a two-stage effort. MergeList
292 identifies the file depends and this creates Provides for them by
293 re-parsing all the indexes. */
294 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
295 {
296 List.Owner = this;
297
298 unsigned int Counter = 0;
299 while (List.Step() == true)
300 {
301 string PackageName = List.Package();
302 if (PackageName.empty() == true)
303 return false;
304 string Version = List.Version();
305 if (Version.empty() == true)
306 continue;
307
308 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
309 if (Pkg.end() == true)
310 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
311 PackageName.c_str());
312 Counter++;
313 if (Counter % 100 == 0 && Progress != 0)
314 Progress->Progress(List.Offset());
315
316 unsigned long Hash = List.VersionHash();
317 pkgCache::VerIterator Ver = Pkg.VersionList();
318 for (; Ver.end() == false; Ver++)
319 {
320 if (Ver->Hash == Hash && Version == Ver.VerStr()) // compare string contents, not pointers
321 {
322 if (List.CollectFileProvides(Cache,Ver) == false)
323 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
324 break;
325 }
326 }
327
328 if (Ver.end() == true)
329 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
330 }
331
332 return true;
333 }
334 /*}}}*/
335 // CacheGenerator::NewGroup - Add a new group /*{{{*/
336 // ---------------------------------------------------------------------
337 /* This creates a new group structure and adds it to the hash table */
338 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
339 {
340 Grp = Cache.FindGrp(Name);
341 if (Grp.end() == false)
342 return true;
343
344 // Get a structure
345 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
346 if (unlikely(Group == 0))
347 return false;
348
349 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
350 Grp->Name = Map.WriteString(Name);
351 if (unlikely(Grp->Name == 0))
352 return false;
353
354 // Insert it into the hash table
355 unsigned long const Hash = Cache.Hash(Name);
356 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
357 Cache.HeaderP->GrpHashTable[Hash] = Group;
358
359 Cache.HeaderP->GroupCount++;
360
361 return true;
362 }
363 /*}}}*/
364 // CacheGenerator::NewPackage - Add a new package /*{{{*/
365 // ---------------------------------------------------------------------
366 /* This creates a new package structure and adds it to the hash table */
367 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
368 const string &Arch) {
369 pkgCache::GrpIterator Grp;
370 if (unlikely(NewGroup(Grp, Name) == false))
371 return false;
372
373 Pkg = Grp.FindPkg(Arch);
374 if (Pkg.end() == false)
375 return true;
376
377 // Get a structure
378 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
379 if (unlikely(Package == 0))
380 return false;
381 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
382
383 // Insert the package into our package list
384 if (Grp->FirstPackage == 0) // the group is new
385 {
386 // Insert it into the hash table
387 unsigned long const Hash = Cache.Hash(Name);
388 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
389 Cache.HeaderP->PkgHashTable[Hash] = Package;
390 Grp->FirstPackage = Package;
391 }
392 else // Group the Packages together
393 {
394 // this package is the new last package
395 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
396 Pkg->NextPackage = LastPkg->NextPackage;
397 LastPkg->NextPackage = Package;
398 }
399 Grp->LastPackage = Package;
400
401 // Set the name, arch and the ID
402 Pkg->Name = Grp->Name;
403 Pkg->Group = Grp.Index();
404 Pkg->Arch = WriteUniqString(Arch.c_str());
405 if (unlikely(Pkg->Arch == 0))
406 return false;
407 Pkg->ID = Cache.HeaderP->PackageCount++;
408
409 return true;
410 }
411 /*}}}*/
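// Added note: after NewGroup()/NewPackage() a group "foo" on a multi-arch
// system holds one Package structure per architecture; they all share
// Grp->Name and are chained through Pkg->NextPackage between
// Grp->FirstPackage and Grp->LastPackage.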
412 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
413 // ---------------------------------------------------------------------
414 /* This creates a VerFile entry associating the given version with the
415    package file that is currently being parsed. */
415 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
416 ListParser &List)
417 {
418 if (CurrentFile == 0)
419 return true;
420
421 // Get a structure
422 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
423 if (VerFile == 0)
424 return false;
425
426 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
427 VF->File = CurrentFile - Cache.PkgFileP;
428
429 // Link it to the end of the list
430 map_ptrloc *Last = &Ver->FileList;
431 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
432 Last = &V->NextFile;
433 VF->NextFile = *Last;
434 *Last = VF.Index();
435
436 VF->Offset = List.Offset();
437 VF->Size = List.Size();
438 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
439 Cache.HeaderP->MaxVerFileSize = VF->Size;
440 Cache.HeaderP->VerFileCount++;
441
442 return true;
443 }
444 /*}}}*/
445 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
446 // ---------------------------------------------------------------------
447 /* This puts a version structure in the linked list */
448 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
449 const string &VerStr,
450 unsigned long Next)
451 {
452 // Get a structure
453 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
454 if (Version == 0)
455 return 0;
456
457 // Fill it in
458 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
459 Ver->NextVer = Next;
460 Ver->ID = Cache.HeaderP->VersionCount++;
461 Ver->VerStr = Map.WriteString(VerStr);
462 if (Ver->VerStr == 0)
463 return 0;
464
465 return Version;
466 }
467 /*}}}*/
468 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
469 // ---------------------------------------------------------------------
470 /* This creates a DescFile entry associating the given description with
471    the package file that is currently being parsed. */
471 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
472 ListParser &List)
473 {
474 if (CurrentFile == 0)
475 return true;
476
477 // Get a structure
478 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
479 if (DescFile == 0)
480 return false;
481
482 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
483 DF->File = CurrentFile - Cache.PkgFileP;
484
485 // Link it to the end of the list
486 map_ptrloc *Last = &Desc->FileList;
487 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
488 Last = &D->NextFile;
489
490 DF->NextFile = *Last;
491 *Last = DF.Index();
492
493 DF->Offset = List.Offset();
494 DF->Size = List.Size();
495 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
496 Cache.HeaderP->MaxDescFileSize = DF->Size;
497 Cache.HeaderP->DescFileCount++;
498
499 return true;
500 }
501 /*}}}*/
502 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
503 // ---------------------------------------------------------------------
504 /* This puts a description structure in the linked list */
505 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
506 const string &Lang,
507 const MD5SumValue &md5sum,
508 map_ptrloc Next)
509 {
510 // Get a structure
511 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
512 if (Description == 0)
513 return 0;
514
515 // Fill it in
516 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
517 Desc->NextDesc = Next;
518 Desc->ID = Cache.HeaderP->DescriptionCount++;
519 Desc->language_code = Map.WriteString(Lang);
520 Desc->md5sum = Map.WriteString(md5sum.Value());
521 if (Desc->language_code == 0 || Desc->md5sum == 0)
522 return 0;
523
524 return Description;
525 }
526 /*}}}*/
527 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
528 // ---------------------------------------------------------------------
529 /* This prepares the Cache for delivery */
530 bool pkgCacheGenerator::FinishCache(OpProgress &Progress)
531 {
532 // FIXME: add progress reporting for this operation
533 // Do we have different architectures in our groups?
534 vector<string> archs = APT::Configuration::getArchitectures();
535 if (archs.size() > 1)
536 {
537 // Create Conflicts in between the group
538 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++)
539 {
540 string const PkgName = G.Name();
541 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
542 {
543 if (strcmp(P.Arch(),"all") == 0)
544 continue;
545 pkgCache::PkgIterator allPkg;
546 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++)
547 {
548 string const Arch = V.Arch(true);
549 map_ptrloc *OldDepLast = NULL;
550 /* MultiArch handling introduces a lot of implicit Dependencies:
551 - MultiArch: same → Co-Installable if they have the same version
552 - Architecture: all → Need to be Co-Installable for internal reasons
553 - All others conflict with all other group members */
554 bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
555 V->MultiArch == pkgCache::Version::Same);
556 if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
557 allPkg = G.FindPkg("all");
558 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
559 {
560 if (*A == Arch)
561 continue;
562 /* We allow only one installed arch at a time
563 per group, therefore each group member conflicts
564 with all other group members */
565 pkgCache::PkgIterator D = G.FindPkg(*A);
566 if (D.end() == true)
567 continue;
568 if (coInstall == true)
569 {
570 // Replaces: ${self}:other ( << ${binary:Version})
571 NewDepends(D, V, V.VerStr(),
572 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
573 OldDepLast);
574 // Breaks: ${self}:other (!= ${binary:Version})
575 NewDepends(D, V, V.VerStr(),
576 pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
577 OldDepLast);
578 NewDepends(D, V, V.VerStr(),
579 pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
580 OldDepLast);
581 if (V->MultiArch == pkgCache::Version::All)
582 {
583 // Depend on ${self}:all which does depend on nothing
584 NewDepends(allPkg, V, V.VerStr(),
585 pkgCache::Dep::Equals, pkgCache::Dep::Depends,
586 OldDepLast);
587 }
588 } else {
589 // Conflicts: ${self}:other
590 NewDepends(D, V, "",
591 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
592 OldDepLast);
593 }
594 }
595 }
596 }
597 }
598 }
599 return true;
600 }
601 /*}}}*/
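// Added illustrative example: for a group "foo" with the architectures
// amd64 and i386 and "MultiArch: same" versions, FinishCache() effectively
// injects for foo:amd64 (and symmetrically for foo:i386):
//    Replaces: foo:i386 (<< ${binary:Version})
//    Breaks:   foo:i386 (<< ${binary:Version}), foo:i386 (>> ${binary:Version})
// Architecture: all versions additionally get Depends: foo:all (= ${binary:Version}),
// while plain versions instead receive an unversioned Conflicts on every
// other group member.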
602 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
603 // ---------------------------------------------------------------------
604 /* This creates a dependency element in the tree. It is linked to the
605 version and to the package that it is pointing to. */
606 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
607 pkgCache::VerIterator &Ver,
608 string const &Version,
609 unsigned int const &Op,
610 unsigned int const &Type,
611 map_ptrloc *OldDepLast)
612 {
613 // Get a structure
614 unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
615 if (unlikely(Dependency == 0))
616 return false;
617
618 // Fill it in
619 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
620 Dep->ParentVer = Ver.Index();
621 Dep->Type = Type;
622 Dep->CompareOp = Op;
623 Dep->ID = Cache.HeaderP->DependsCount++;
624
625 // Probe the reverse dependency list for a version string that matches
626 if (Version.empty() == false)
627 {
628 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
629 if (I->Version != 0 && I.TargetVer() == Version)
630 Dep->Version = I->Version;*/
631 if (Dep->Version == 0)
632 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
633 return false;
634 }
635
636 // Link it to the package
637 Dep->Package = Pkg.Index();
638 Dep->NextRevDepends = Pkg->RevDepends;
639 Pkg->RevDepends = Dep.Index();
640
641 // Do we know where to link the Dependency to?
642 if (OldDepLast == NULL)
643 {
644 OldDepLast = &Ver->DependsList;
645 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
646 OldDepLast = &D->NextDepends;
647 }
648
649 Dep->NextDepends = *OldDepLast;
650 *OldDepLast = Dep.Index();
651 OldDepLast = &Dep->NextDepends;
652
653 return true;
654 }
655 /*}}}*/
656 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
657 // ---------------------------------------------------------------------
658 /* This creates a Group and the Package to link this dependency to if
659 needed and also handles the caching of the old endpoint */
660 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
661 const string &PackageName,
662 const string &Arch,
663 const string &Version,
664 unsigned int Op,
665 unsigned int Type)
666 {
667 pkgCache::GrpIterator Grp;
668 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
669 return false;
670
671 // Locate the target package
672 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
673 if (Pkg.end() == true) {
674 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
675 return false;
676 }
677
678 // Is it a file dependency?
679 if (unlikely(PackageName[0] == '/'))
680 FoundFileDeps = true;
681
682 /* Caching the old end point speeds up generation substantially */
683 if (OldDepVer != Ver) {
684 OldDepLast = NULL;
685 OldDepVer = Ver;
686 }
687
688 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
689 }
690 /*}}}*/
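// Added illustrative sketch: a concrete parser calls NewDepends() once per
// parsed dependency target; the name, version and operator below are made up:
//
//    if (NewDepends(Ver,"libfoo","amd64","1.2-3",
//                   pkgCache::Dep::GreaterEq,pkgCache::Dep::Depends) == false)
//       return false;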
691 // ListParser::NewProvides - Create a Provides element /*{{{*/
692 // ---------------------------------------------------------------------
693 /* This creates a Provides entry linking the providing version to the
694    provided package. */
694 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
695 const string &PkgName,
696 const string &PkgArch,
697 const string &Version)
698 {
699 pkgCache &Cache = Owner->Cache;
700
701 // We do not add self-referencing provides
702 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
703 return true;
704
705 // Get a structure
706 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
707 if (unlikely(Provides == 0))
708 return false;
709 Cache.HeaderP->ProvidesCount++;
710
711 // Fill it in
712 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
713 Prv->Version = Ver.Index();
714 Prv->NextPkgProv = Ver->ProvidesList;
715 Ver->ProvidesList = Prv.Index();
716 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
717 return false;
718
719 // Locate the target package
720 pkgCache::PkgIterator Pkg;
721 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
722 return false;
723
724 // Link it to the package
725 Prv->ParentPkg = Pkg.Index();
726 Prv->NextProvides = Pkg->ProvidesList;
727 Pkg->ProvidesList = Prv.Index();
728
729 return true;
730 }
731 /*}}}*/
732 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
733 // ---------------------------------------------------------------------
734 /* This is used to select which file is to be associated with all newly
735 added versions. The caller is responsible for setting the IMS fields. */
736 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
737 const pkgIndexFile &Index,
738 unsigned long Flags)
739 {
740 // Get some space for the structure
741 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
742 if (CurrentFile == Cache.PkgFileP)
743 return false;
744
745 // Fill it in
746 CurrentFile->FileName = Map.WriteString(File);
747 CurrentFile->Site = WriteUniqString(Site);
748 CurrentFile->NextFile = Cache.HeaderP->FileList;
749 CurrentFile->Flags = Flags;
750 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
751 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
752 PkgFileName = File;
753 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
754 Cache.HeaderP->PackageFileCount++;
755
756 if (CurrentFile->FileName == 0)
757 return false;
758
759 if (Progress != 0)
760 Progress->SubProgress(Index.Size());
761 return true;
762 }
763 /*}}}*/
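// Added note: SelectFile() has to be called before MergeList() for
// file-backed indexes - NewFileVer() and NewFileDesc() above silently do
// nothing as long as CurrentFile is still 0.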
764 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
765 // ---------------------------------------------------------------------
766 /* This is used to create handles to strings. Given the same text it
767 always returns the same number */
768 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
769 unsigned int Size)
770 {
771 /* We use a very small transient hash table here; this speeds up generation
772 by a fair amount on slower machines */
773 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
774 if (Bucket != 0 &&
775 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
776 return Bucket->String;
777
778 // Search for an insertion point
779 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
780 int Res = 1;
781 map_ptrloc *Last = &Cache.HeaderP->StringList;
782 for (; I != Cache.StringItemP; Last = &I->NextItem,
783 I = Cache.StringItemP + I->NextItem)
784 {
785 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
786 if (Res >= 0)
787 break;
788 }
789
790 // Match
791 if (Res == 0)
792 {
793 Bucket = I;
794 return I->String;
795 }
796
797 // Get a structure
798 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
799 if (Item == 0)
800 return 0;
801
802 // Fill in the structure
803 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
804 ItemP->NextItem = I - Cache.StringItemP;
805 *Last = Item;
806 ItemP->String = Map.WriteString(S,Size);
807 if (ItemP->String == 0)
808 return 0;
809
810 Bucket = ItemP;
811 return ItemP->String;
812 }
813 /*}}}*/
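// Added note: calling WriteUniqString("amd64",5) twice yields the same
// offset both times, so for example Pkg->Arch of every amd64 package points
// at a single shared copy of the string inside the cache.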
814 // CheckValidity - Check that a cache is up-to-date /*{{{*/
815 // ---------------------------------------------------------------------
816 /* This just verifies that each file in the list of index files exists,
817 that its attributes match those recorded in the cache, and that the
818 cache does not reference any extra files. */
819 static bool CheckValidity(const string &CacheFile, FileIterator Start,
820 FileIterator End,MMap **OutMap = 0)
821 {
822 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
823 // No file, certainly invalid
824 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
825 {
826 if (Debug == true)
827 std::clog << "CacheFile doesn't exist" << std::endl;
828 return false;
829 }
830
831 // Map it
832 FileFd CacheF(CacheFile,FileFd::ReadOnly);
833 SPtr<MMap> Map = new MMap(CacheF,0);
834 pkgCache Cache(Map);
835 if (_error->PendingError() == true || Map->Size() == 0)
836 {
837 if (Debug == true)
838 std::clog << "Errors are pending or Map is empty()" << std::endl;
839 _error->Discard();
840 return false;
841 }
842
843 /* Now we check every index file, see if it is in the cache,
844 verify the IMS data and check that it is on the disk too.. */
845 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
846 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
847 for (; Start != End; Start++)
848 {
849 if (Debug == true)
850 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
851 if ((*Start)->HasPackages() == false)
852 {
853 if (Debug == true)
854 std::clog << "Has NO packages" << std::endl;
855 continue;
856 }
857
858 if ((*Start)->Exists() == false)
859 {
860 #if 0 // mvo: we no longer give a message here (Default Sources spec)
861 _error->WarningE("stat",_("Couldn't stat source package list %s"),
862 (*Start)->Describe().c_str());
863 #endif
864 if (Debug == true)
865 std::clog << "file doesn't exist" << std::endl;
866 continue;
867 }
868
869 // FindInCache is also expected to do an IMS check.
870 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
871 if (File.end() == true)
872 {
873 if (Debug == true)
874 std::clog << "FindInCache returned end-Pointer" << std::endl;
875 return false;
876 }
877
878 Visited[File->ID] = true;
879 if (Debug == true)
880 std::clog << "with ID " << File->ID << " is valid" << std::endl;
881 }
882
883 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
884 if (Visited[I] == false)
885 {
886 if (Debug == true)
887 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
888 return false;
889 }
890
891 if (_error->PendingError() == true)
892 {
893 if (Debug == true)
894 {
895 std::clog << "Validity failed because of pending errors:" << std::endl;
896 _error->DumpErrors();
897 }
898 _error->Discard();
899 return false;
900 }
901
902 if (OutMap != 0)
903 *OutMap = Map.UnGuard();
904 return true;
905 }
906 /*}}}*/
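// Added usage note: the decisions made by CheckValidity() can be traced by
// enabling the debug option read above, e.g.
//
//    apt-get update -o Debug::pkgCacheGen=true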
907 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
908 // ---------------------------------------------------------------------
909 /* Size is kind of an abstract notion that is only used for the progress
910 meter */
911 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
912 {
913 unsigned long TotalSize = 0;
914 for (; Start != End; Start++)
915 {
916 if ((*Start)->HasPackages() == false)
917 continue;
918 TotalSize += (*Start)->Size();
919 }
920 return TotalSize;
921 }
922 /*}}}*/
923 // BuildCache - Merge the list of index files into the cache /*{{{*/
924 // ---------------------------------------------------------------------
925 /* This merges each index file that is not already present into the cache
926    and, if any file dependencies were found, re-walks the indexes to
927    collect their file provides. */
926 static bool BuildCache(pkgCacheGenerator &Gen,
927 OpProgress &Progress,
928 unsigned long &CurrentSize,unsigned long TotalSize,
929 FileIterator Start, FileIterator End)
930 {
931 FileIterator I;
932 for (I = Start; I != End; I++)
933 {
934 if ((*I)->HasPackages() == false)
935 continue;
936
937 if ((*I)->Exists() == false)
938 continue;
939
940 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
941 {
942 _error->Warning("Duplicate sources.list entry %s",
943 (*I)->Describe().c_str());
944 continue;
945 }
946
947 unsigned long Size = (*I)->Size();
948 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
949 CurrentSize += Size;
950
951 if ((*I)->Merge(Gen,Progress) == false)
952 return false;
953 }
954
955 if (Gen.HasFileDeps() == true)
956 {
957 Progress.Done();
958 TotalSize = ComputeSize(Start, End);
959 CurrentSize = 0;
960 for (I = Start; I != End; I++)
961 {
962 unsigned long Size = (*I)->Size();
963 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
964 CurrentSize += Size;
965 if ((*I)->MergeFileProvides(Gen,Progress) == false)
966 return false;
967 }
968 }
969
970 return true;
971 }
972 /*}}}*/
973 // MakeStatusCache - Construct the status cache /*{{{*/
974 // ---------------------------------------------------------------------
975 /* This makes sure that the status cache (the cache that has all
976 index files from the sources list and all local ones) is ready
977 to be mmapped. If OutMap is not zero then an MMap object representing
978 the cache will be stored there. This is pretty much mandatory if you
979 are using AllowMem. AllowMem lets the function be run as non-root
980 where it builds the cache 'fast' into a memory buffer. */
981 bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
982 MMap **OutMap,bool AllowMem)
983 {
984 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
985 unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
986
987 vector<pkgIndexFile *> Files;
988 for (vector<metaIndex *>::const_iterator i = List.begin();
989 i != List.end();
990 i++)
991 {
992 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
993 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
994 j != Indexes->end();
995 j++)
996 Files.push_back (*j);
997 }
998
999 unsigned long const EndOfSource = Files.size();
1000 if (_system->AddStatusFiles(Files) == false)
1001 return false;
1002
1003 // Decide if we can write to the files..
1004 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1005 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1006
1007 // ensure the cache directory exists
1008 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1009 {
1010 string dir = _config->FindDir("Dir::Cache");
1011 size_t const len = dir.size();
1012 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1013 dir = dir.substr(0, len - 5);
1014 if (CacheFile.empty() == false)
1015 CreateDirectory(dir, flNotFile(CacheFile));
1016 if (SrcCacheFile.empty() == false)
1017 CreateDirectory(dir, flNotFile(SrcCacheFile));
1018 }
1019
1020 // Decide if we can write to the cache
1021 bool Writeable = false;
1022 if (CacheFile.empty() == false)
1023 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1024 else
1025 if (SrcCacheFile.empty() == false)
1026 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1027 if (Debug == true)
1028 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1029
1030 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1031 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1032
1033 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1034
1035 // Cache is OK, Fin.
1036 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1037 {
1038 Progress.OverallProgress(1,1,1,_("Reading package lists"));
1039 if (Debug == true)
1040 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1041 return true;
1042 }
1043 else if (Debug == true)
1044 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1045
1046 /* At this point we know we need to reconstruct the package cache,
1047 begin. */
1048 SPtr<FileFd> CacheF;
1049 SPtr<DynamicMMap> Map;
1050 if (Writeable == true && CacheFile.empty() == false)
1051 {
1052 unlink(CacheFile.c_str());
1053 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
1054 fchmod(CacheF->Fd(),0644);
1055 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
1056 if (_error->PendingError() == true)
1057 return false;
1058 if (Debug == true)
1059 std::clog << "Open filebased MMap" << std::endl;
1060 }
1061 else
1062 {
1063 // Just build it in memory..
1064 Map = new DynamicMMap(0,MapSize);
1065 if (Debug == true)
1066 std::clog << "Open memory Map (not filebased)" << std::endl;
1067 }
1068
1069 // Let's try the source cache.
1070 unsigned long CurrentSize = 0;
1071 unsigned long TotalSize = 0;
1072 if (CheckValidity(SrcCacheFile,Files.begin(),
1073 Files.begin()+EndOfSource) == true)
1074 {
1075 if (Debug == true)
1076 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1077 // Preload the map with the source cache
1078 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1079 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1080 if ((alloc == 0 && _error->PendingError())
1081 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1082 SCacheF.Size()) == false)
1083 return false;
1084
1085 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1086
1087 // Build the status cache
1088 pkgCacheGenerator Gen(Map.Get(),&Progress);
1089 if (_error->PendingError() == true)
1090 return false;
1091 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1092 Files.begin()+EndOfSource,Files.end()) == false)
1093 return false;
1094
1095 // FIXME: move me to a better place
1096 Gen.FinishCache(Progress);
1097 }
1098 else
1099 {
1100 if (Debug == true)
1101 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1102 TotalSize = ComputeSize(Files.begin(),Files.end());
1103
1104 // Build the source cache
1105 pkgCacheGenerator Gen(Map.Get(),&Progress);
1106 if (_error->PendingError() == true)
1107 return false;
1108 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1109 Files.begin(),Files.begin()+EndOfSource) == false)
1110 return false;
1111
1112 // Write it back
1113 if (Writeable == true && SrcCacheFile.empty() == false)
1114 {
1115 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
1116 if (_error->PendingError() == true)
1117 return false;
1118
1119 fchmod(SCacheF.Fd(),0644);
1120
1121 // Write out the main data
1122 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1123 return _error->Error(_("IO Error saving source cache"));
1124 SCacheF.Sync();
1125
1126 // Write out the proper header
1127 Gen.GetCache().HeaderP->Dirty = false;
1128 if (SCacheF.Seek(0) == false ||
1129 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1130 return _error->Error(_("IO Error saving source cache"));
1131 Gen.GetCache().HeaderP->Dirty = true;
1132 SCacheF.Sync();
1133 }
1134
1135 // Build the status cache
1136 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1137 Files.begin()+EndOfSource,Files.end()) == false)
1138 return false;
1139
1140 // FIXME: move me to a better place
1141 Gen.FinishCache(Progress);
1142 }
1143 if (Debug == true)
1144 std::clog << "Caches are ready for shipping" << std::endl;
1145
1146 if (_error->PendingError() == true)
1147 return false;
1148 if (OutMap != 0)
1149 {
1150 if (CacheF != 0)
1151 {
1152 delete Map.UnGuard();
1153 *OutMap = new MMap(*CacheF,0);
1154 }
1155 else
1156 {
1157 *OutMap = Map.UnGuard();
1158 }
1159 }
1160
1161 return true;
1162 }
1163 /*}}}*/
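// Added illustrative sketch (assumed caller code; the in-tree caller is
// pkgCacheFile in apt-pkg/cachefile.cc): building the cache typically looks
// roughly like
//
//    pkgSourceList List;
//    OpTextProgress Progress(*_config);
//    MMap *Map = 0;
//    if (List.ReadMainList() == false ||
//        pkgMakeStatusCache(List,Progress,&Map,true) == false)
//       return false;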
1164 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1165 // ---------------------------------------------------------------------
1166 /* This builds an in-memory cache that contains only the status files,
1167    without consulting the sources list. */
1167 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1168 {
1169 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1170 vector<pkgIndexFile *> Files;
1171 unsigned long EndOfSource = Files.size();
1172 if (_system->AddStatusFiles(Files) == false)
1173 return false;
1174
1175 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1176 unsigned long CurrentSize = 0;
1177 unsigned long TotalSize = 0;
1178
1179 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1180
1181 // Build the status cache
1182 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1183 pkgCacheGenerator Gen(Map.Get(),&Progress);
1184 if (_error->PendingError() == true)
1185 return false;
1186 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1187 Files.begin()+EndOfSource,Files.end()) == false)
1188 return false;
1189
1190 // FIXME: move me to a better place
1191 Gen.FinishCache(Progress);
1192
1193 if (_error->PendingError() == true)
1194 return false;
1195 *OutMap = Map.UnGuard();
1196
1197 return true;
1198 }
1199 /*}}}*/
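// Added illustrative sketch (assumption: the caller needs only the status
// files and no writable cache directory):
//
//    DynamicMMap *Map = 0;
//    OpProgress Prog;   // silent base progress class from progress.h
//    if (pkgMakeOnlyStatusCache(Prog,&Map) == false)
//       return false;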