1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41 // ---------------------------------------------------------------------
42 /* We set the dirty flag and make sure that it is written to the disk */
43 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
44 Map(*pMap), Cache(pMap,false), Progress(Prog),
45 FoundFileDeps(0)
46 {
47 CurrentFile = 0;
48 memset(UniqHash,0,sizeof(UniqHash));
49
50 if (_error->PendingError() == true)
51 return;
52
53 if (Map.Size() == 0)
54 {
55 // Setup the map interface..
56 Cache.HeaderP = (pkgCache::Header *)Map.Data();
57 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
58 return;
59
60 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
61
62 // Starting header
63 *Cache.HeaderP = pkgCache::Header();
64 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
65 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
66 Cache.ReMap();
67 }
68 else
69 {
70 // Map directly from the existing file
71 Cache.ReMap();
72 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
73 if (Cache.VS != _system->VS)
74 {
75 _error->Error(_("Cache has an incompatible versioning system"));
76 return;
77 }
78 }
79
80 Cache.HeaderP->Dirty = true;
81 Map.Sync(0,sizeof(pkgCache::Header));
82 }
83 /*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data first and then unset the dirty flag in two steps so that
87 a crash in between leaves the cache still marked as dirty */
88 pkgCacheGenerator::~pkgCacheGenerator()
89 {
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97 }
98 /*}}}*/
99 // CacheGenerator::MergeList - Merge the package list /*{{{*/
100 // ---------------------------------------------------------------------
101 /* This generates the entries in the cache. Each pass of the loop below
102 processes a single package record from the underlying parse engine. */
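/* Rough sketch of one iteration of the loop below (summary only):
     NewPackage()                     - find or create the Group/Package for each target architecture
     NewVersion() + NewFileVer()      - insert the version at its sorted position and
                                        link it to the index file currently being parsed
     NewDescription() + NewFileDesc() - attach the description data in the same way */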
103 bool pkgCacheGenerator::MergeList(ListParser &List,
104 pkgCache::VerIterator *OutVer)
105 {
106 List.Owner = this;
107
108 unsigned int Counter = 0;
109 while (List.Step() == true)
110 {
111 string const PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115 /* As we treat 'Architecture: all' packages as if they were bound to a
116 real architecture, we add their information to every (simulated) arch package */
117 std::vector<string> genArch;
118 if (List.ArchitectureAll() == true) {
119 genArch = APT::Configuration::getArchitectures();
120 if (genArch.size() != 1)
121 genArch.push_back("all");
122 } else
123 genArch.push_back(List.Architecture());
124
125 for (std::vector<string>::const_iterator arch = genArch.begin();
126 arch != genArch.end(); ++arch)
127 {
128 // Get a pointer to the package structure
129 pkgCache::PkgIterator Pkg;
130 if (NewPackage(Pkg, PackageName, *arch) == false)
131 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
132 Counter++;
133 if (Counter % 100 == 0 && Progress != 0)
134 Progress->Progress(List.Offset());
135
136 /* Get a pointer to the version structure. We know the list is sorted
137 so we use that fact in the search. Insertion of new versions is
138 done with correct sorting */
139 string Version = List.Version();
140 if (Version.empty() == true)
141 {
142 // we first process the package, then the descriptions
143 // (this has the bonus that we get an MMap error when we run out
144 // of MMap space)
145 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
146 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
147 PackageName.c_str());
148
149 // Find the right version to write the description
150 MD5SumValue CurMd5 = List.Description_md5();
151 pkgCache::VerIterator Ver = Pkg.VersionList();
152 map_ptrloc *LastVer = &Pkg->VersionList;
153
154 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
155 {
156 pkgCache::DescIterator Desc = Ver.DescriptionList();
157 map_ptrloc *LastDesc = &Ver->DescriptionList;
158 bool duplicate=false;
159
160 // don't add a new description if we have one for the given
161 // md5 && language
162 for ( ; Desc.end() == false; Desc++)
163 if (MD5SumValue(Desc.md5()) == CurMd5 &&
164 Desc.LanguageCode() == List.DescriptionLanguage())
165 duplicate=true;
166 if(duplicate)
167 continue;
168
169 for (Desc = Ver.DescriptionList();
170 Desc.end() == false;
171 LastDesc = &Desc->NextDesc, Desc++)
172 {
173 if (MD5SumValue(Desc.md5()) == CurMd5)
174 {
175 // Add new description
176 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
177 Desc->ParentPkg = Pkg.Index();
178
179 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
180 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
181 break;
182 }
183 }
184 }
185
186 continue;
187 }
188
189 pkgCache::VerIterator Ver = Pkg.VersionList();
190 map_ptrloc *LastVer = &Pkg->VersionList;
191 int Res = 1;
192 unsigned long const Hash = List.VersionHash();
193 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
194 {
195 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
196 // Version is higher than the current version - insert here
197 if (Res > 0)
198 break;
199 // Version strings are equal - is the hash also equal?
200 if (Res == 0 && Ver->Hash == Hash)
201 break;
202 // proceed to the next one until we either find the right version
203 // or reach a lower one (which marks the insertion point)
204 }
205
206 /* We already have a version for this item, record that we saw it */
207 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
208 {
209 if (List.UsePackage(Pkg,Ver) == false)
210 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
211 PackageName.c_str());
212
213 if (NewFileVer(Ver,List) == false)
214 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
215 PackageName.c_str());
216
217 // Read only a single record and return
218 if (OutVer != 0)
219 {
220 *OutVer = Ver;
221 FoundFileDeps |= List.HasFileDeps();
222 return true;
223 }
224
225 continue;
226 }
227
228 // Add a new version
229 *LastVer = NewVersion(Ver,Version,*LastVer);
230 Ver->ParentPkg = Pkg.Index();
231 Ver->Hash = Hash;
232
233 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
234 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
235 PackageName.c_str());
236
237 if (List.UsePackage(Pkg,Ver) == false)
238 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
239 PackageName.c_str());
240
241 if (NewFileVer(Ver,List) == false)
242 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
243 PackageName.c_str());
244
245 // Read only a single record and return
246 if (OutVer != 0)
247 {
248 *OutVer = Ver;
249 FoundFileDeps |= List.HasFileDeps();
250 return true;
251 }
252
253 /* Record the Description data. Description data always exists in
254 Packages and Translation-* files. */
255 pkgCache::DescIterator Desc = Ver.DescriptionList();
256 map_ptrloc *LastDesc = &Ver->DescriptionList;
257
258 // Skip to the end of description set
259 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
260
261 // Add new description
262 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
263 Desc->ParentPkg = Pkg.Index();
264
265 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
266 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
267 }
268 }
269
270 FoundFileDeps |= List.HasFileDeps();
271
272 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
273 return _error->Error(_("Wow, you exceeded the number of package "
274 "names this APT is capable of."));
275 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
276 return _error->Error(_("Wow, you exceeded the number of versions "
277 "this APT is capable of."));
278 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
279 return _error->Error(_("Wow, you exceeded the number of descriptions "
280 "this APT is capable of."));
281 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
282 return _error->Error(_("Wow, you exceeded the number of dependencies "
283 "this APT is capable of."));
284 return true;
285 }
286 /*}}}*/
287 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
288 // ---------------------------------------------------------------------
289 /* If we found any file depends while parsing the main list we need to
290 resolve them. Since it is undesirable to load the entire list of files
291 into the cache as virtual packages we make a two-stage effort. MergeList
292 identifies the file depends and this creates Provides for them by
293 re-parsing all the indexes. */
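/* (Illustrative example, assuming the usual dpkg file lists are available: a
    dependency such as "Conflicts: /bin/sh" is a file depend, and the second
    pass then adds a Provides: /bin/sh entry to every known package shipping
    that file.) */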
294 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
295 {
296 List.Owner = this;
297
298 unsigned int Counter = 0;
299 while (List.Step() == true)
300 {
301 string PackageName = List.Package();
302 if (PackageName.empty() == true)
303 return false;
304 string Version = List.Version();
305 if (Version.empty() == true)
306 continue;
307
308 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
309 if (Pkg.end() == true)
310 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
311 PackageName.c_str());
312 Counter++;
313 if (Counter % 100 == 0 && Progress != 0)
314 Progress->Progress(List.Offset());
315
316 unsigned long Hash = List.VersionHash();
317 pkgCache::VerIterator Ver = Pkg.VersionList();
318 for (; Ver.end() == false; Ver++)
319 {
320 if (Ver->Hash == Hash && Version == Ver.VerStr())
321 {
322 if (List.CollectFileProvides(Cache,Ver) == false)
323 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
324 break;
325 }
326 }
327
328 if (Ver.end() == true)
329 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
330 }
331
332 return true;
333 }
334 /*}}}*/
335 // CacheGenerator::NewGroup - Add a new group /*{{{*/
336 // ---------------------------------------------------------------------
337 /* This creates a new group structure and adds it to the hash table */
338 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
339 {
340 Grp = Cache.FindGrp(Name);
341 if (Grp.end() == false)
342 return true;
343
344 // Get a structure
345 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
346 if (unlikely(Group == 0))
347 return false;
348
349 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
350 Grp->Name = Map.WriteString(Name);
351 if (unlikely(Grp->Name == 0))
352 return false;
353
354 // Insert it into the hash table
355 unsigned long const Hash = Cache.Hash(Name);
356 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
357 Cache.HeaderP->GrpHashTable[Hash] = Group;
358
359 Grp->ID = Cache.HeaderP->GroupCount++;
360 return true;
361 }
362 /*}}}*/
363 // CacheGenerator::NewPackage - Add a new package /*{{{*/
364 // ---------------------------------------------------------------------
365 /* This creates a new package structure and adds it to the hash table */
366 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
367 const string &Arch) {
368 pkgCache::GrpIterator Grp;
369 if (unlikely(NewGroup(Grp, Name) == false))
370 return false;
371
372 Pkg = Grp.FindPkg(Arch);
373 if (Pkg.end() == false)
374 return true;
375
376 // Get a structure
377 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
378 if (unlikely(Package == 0))
379 return false;
380 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
381
382 // Insert the package into our package list
383 if (Grp->FirstPackage == 0) // the group is new
384 {
385 // Insert it into the hash table
386 unsigned long const Hash = Cache.Hash(Name);
387 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
388 Cache.HeaderP->PkgHashTable[Hash] = Package;
389 Grp->FirstPackage = Package;
390 }
391 else // Group the Packages together
392 {
393 // this package is the new last package
394 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
395 Pkg->NextPackage = LastPkg->NextPackage;
396 LastPkg->NextPackage = Package;
397 }
398 Grp->LastPackage = Package;
399
400 // Set the name, arch and the ID
401 Pkg->Name = Grp->Name;
402 Pkg->Group = Grp.Index();
403 Pkg->Arch = WriteUniqString(Arch.c_str());
404 if (unlikely(Pkg->Arch == 0))
405 return false;
406 Pkg->ID = Cache.HeaderP->PackageCount++;
407
408 return true;
409 }
410 /*}}}*/
411 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
412 // ---------------------------------------------------------------------
413 /* */
414 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
415 ListParser &List)
416 {
417 if (CurrentFile == 0)
418 return true;
419
420 // Get a structure
421 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
422 if (VerFile == 0)
423 return false;
424
425 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
426 VF->File = CurrentFile - Cache.PkgFileP;
427
428 // Link it to the end of the list
429 map_ptrloc *Last = &Ver->FileList;
430 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
431 Last = &V->NextFile;
432 VF->NextFile = *Last;
433 *Last = VF.Index();
434
435 VF->Offset = List.Offset();
436 VF->Size = List.Size();
437 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
438 Cache.HeaderP->MaxVerFileSize = VF->Size;
439 Cache.HeaderP->VerFileCount++;
440
441 return true;
442 }
443 /*}}}*/
444 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
445 // ---------------------------------------------------------------------
446 /* This puts a version structure in the linked list */
447 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
448 const string &VerStr,
449 unsigned long Next)
450 {
451 // Get a structure
452 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
453 if (Version == 0)
454 return 0;
455
456 // Fill it in
457 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
458 Ver->NextVer = Next;
459 Ver->ID = Cache.HeaderP->VersionCount++;
460 Ver->VerStr = Map.WriteString(VerStr);
461 if (Ver->VerStr == 0)
462 return 0;
463
464 return Version;
465 }
466 /*}}}*/
467 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
468 // ---------------------------------------------------------------------
469 /* */
470 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
471 ListParser &List)
472 {
473 if (CurrentFile == 0)
474 return true;
475
476 // Get a structure
477 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
478 if (DescFile == 0)
479 return false;
480
481 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
482 DF->File = CurrentFile - Cache.PkgFileP;
483
484 // Link it to the end of the list
485 map_ptrloc *Last = &Desc->FileList;
486 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
487 Last = &D->NextFile;
488
489 DF->NextFile = *Last;
490 *Last = DF.Index();
491
492 DF->Offset = List.Offset();
493 DF->Size = List.Size();
494 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
495 Cache.HeaderP->MaxDescFileSize = DF->Size;
496 Cache.HeaderP->DescFileCount++;
497
498 return true;
499 }
500 /*}}}*/
501 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
502 // ---------------------------------------------------------------------
503 /* This puts a description structure in the linked list */
504 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
505 const string &Lang,
506 const MD5SumValue &md5sum,
507 map_ptrloc Next)
508 {
509 // Get a structure
510 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
511 if (Description == 0)
512 return 0;
513
514 // Fill it in
515 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
516 Desc->NextDesc = Next;
517 Desc->ID = Cache.HeaderP->DescriptionCount++;
518 Desc->language_code = Map.WriteString(Lang);
519 Desc->md5sum = Map.WriteString(md5sum.Value());
520 if (Desc->language_code == 0 || Desc->md5sum == 0)
521 return 0;
522
523 return Description;
524 }
525 /*}}}*/
526 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
527 // ---------------------------------------------------------------------
528 /* This prepares the Cache for delivery */
529 bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
530 {
531 // FIXME: add progress reporting for this operation
532 // Do we have different architectures in our groups?
533 vector<string> archs = APT::Configuration::getArchitectures();
534 if (archs.size() > 1)
535 {
536 // Create Conflicts between the members of the group
537 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++)
538 {
539 string const PkgName = G.Name();
540 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
541 {
542 if (strcmp(P.Arch(),"all") == 0)
543 continue;
544 pkgCache::PkgIterator allPkg;
545 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++)
546 {
547 string const Arch = V.Arch(true);
548 map_ptrloc *OldDepLast = NULL;
549 /* MultiArch handling introduces a lot of implicit Dependencies:
550 - MultiArch: same → Co-Installable if they have the same version
551 - Architecture: all → Need to be Co-Installable for internal reasons
552 - All others conflict with all other group members */
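/* (Illustrative example: if foo exists as foo:amd64 and foo:i386, a
    "MultiArch: same" version of foo:amd64 gets Replaces/Breaks against
    foo:i386 tied to its own ${binary:Version}, while a version without
    MultiArch simply gets Conflicts: foo:i386.) */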
553 bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
554 V->MultiArch == pkgCache::Version::Same);
555 if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
556 allPkg = G.FindPkg("all");
557 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
558 {
559 if (*A == Arch)
560 continue;
561 /* We allow only one installed arch at a time
562 per group, therefore each group member conflicts
563 with all other group members */
564 pkgCache::PkgIterator D = G.FindPkg(*A);
565 if (D.end() == true)
566 continue;
567 if (coInstall == true)
568 {
569 // Replaces: ${self}:other (<< ${binary:Version})
570 NewDepends(D, V, V.VerStr(),
571 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
572 OldDepLast);
573 // Breaks: ${self}:other (!= ${binary:Version})
574 NewDepends(D, V, V.VerStr(),
575 pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
576 OldDepLast);
577 NewDepends(D, V, V.VerStr(),
578 pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
579 OldDepLast);
580 if (V->MultiArch == pkgCache::Version::All)
581 {
582 // Depend on ${self}:all, which itself depends on nothing
583 NewDepends(allPkg, V, V.VerStr(),
584 pkgCache::Dep::Equals, pkgCache::Dep::Depends,
585 OldDepLast);
586 }
587 } else {
588 // Conflicts: ${self}:other
589 NewDepends(D, V, "",
590 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
591 OldDepLast);
592 }
593 }
594 }
595 }
596 }
597 }
598 return true;
599 }
600 /*}}}*/
601 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
602 // ---------------------------------------------------------------------
603 /* This creates a dependency element in the tree. It is linked to the
604 version and to the package that it is pointing to. */
605 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
606 pkgCache::VerIterator &Ver,
607 string const &Version,
608 unsigned int const &Op,
609 unsigned int const &Type,
610 map_ptrloc *OldDepLast)
611 {
612 // Get a structure
613 unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
614 if (unlikely(Dependency == 0))
615 return false;
616
617 // Fill it in
618 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
619 Dep->ParentVer = Ver.Index();
620 Dep->Type = Type;
621 Dep->CompareOp = Op;
622 Dep->ID = Cache.HeaderP->DependsCount++;
623
624 // Probe the reverse dependency list for a version string that matches
625 if (Version.empty() == false)
626 {
627 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
628 if (I->Version != 0 && I.TargetVer() == Version)
629 Dep->Version = I->Version;*/
630 if (Dep->Version == 0)
631 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
632 return false;
633 }
634
635 // Link it to the package
636 Dep->Package = Pkg.Index();
637 Dep->NextRevDepends = Pkg->RevDepends;
638 Pkg->RevDepends = Dep.Index();
639
640 // Do we know where to link the Dependency to?
641 if (OldDepLast == NULL)
642 {
643 OldDepLast = &Ver->DependsList;
644 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
645 OldDepLast = &D->NextDepends;
646 }
647
648 Dep->NextDepends = *OldDepLast;
649 *OldDepLast = Dep.Index();
650 OldDepLast = &Dep->NextDepends;
651
652 return true;
653 }
654 /*}}}*/
655 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
656 // ---------------------------------------------------------------------
657 /* This creates a Group and the Package to link this dependency to if
658 needed and also handles the caching of the old endpoint */
659 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
660 const string &PackageName,
661 const string &Arch,
662 const string &Version,
663 unsigned int Op,
664 unsigned int Type)
665 {
666 pkgCache::GrpIterator Grp;
667 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
668 return false;
669
670 // Locate the target package
671 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
672 if (Pkg.end() == true) {
673 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
674 return false;
675 }
676
677 // Is it a file dependency?
678 if (unlikely(PackageName[0] == '/'))
679 FoundFileDeps = true;
680
681 /* Caching the old end point speeds up generation substantially */
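/* (All dependencies of one version are parsed back-to-back, so we can remember
    the tail of that version's dependency list and reset it only when the version
    changes, instead of re-walking the list for every single dependency.) */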
682 if (OldDepVer != Ver) {
683 OldDepLast = NULL;
684 OldDepVer = Ver;
685 }
686
687 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
688 }
689 /*}}}*/
690 // ListParser::NewProvides - Create a Provides element /*{{{*/
691 // ---------------------------------------------------------------------
692 /* */
693 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
694 const string &PkgName,
695 const string &PkgArch,
696 const string &Version)
697 {
698 pkgCache &Cache = Owner->Cache;
699
700 // We do not add self-referencing provides
701 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
702 return true;
703
704 // Get a structure
705 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
706 if (unlikely(Provides == 0))
707 return false;
708 Cache.HeaderP->ProvidesCount++;
709
710 // Fill it in
711 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
712 Prv->Version = Ver.Index();
713 Prv->NextPkgProv = Ver->ProvidesList;
714 Ver->ProvidesList = Prv.Index();
715 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
716 return false;
717
718 // Locate the target package
719 pkgCache::PkgIterator Pkg;
720 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
721 return false;
722
723 // Link it to the package
724 Prv->ParentPkg = Pkg.Index();
725 Prv->NextProvides = Pkg->ProvidesList;
726 Pkg->ProvidesList = Prv.Index();
727
728 return true;
729 }
730 /*}}}*/
731 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
732 // ---------------------------------------------------------------------
733 /* This is used to select which file is to be associated with all newly
734 added versions. The caller is responsible for setting the IMS fields. */
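// (The IMS fields are the cached stat data of the index - its Size and mtime -
//  which FindInCache later compares against the file on disk, see CheckValidity.)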
735 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
736 const pkgIndexFile &Index,
737 unsigned long Flags)
738 {
739 // Get some space for the structure
740 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
741 if (CurrentFile == Cache.PkgFileP)
742 return false;
743
744 // Fill it in
745 CurrentFile->FileName = Map.WriteString(File);
746 CurrentFile->Site = WriteUniqString(Site);
747 CurrentFile->NextFile = Cache.HeaderP->FileList;
748 CurrentFile->Flags = Flags;
749 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
750 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
751 PkgFileName = File;
752 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
753 Cache.HeaderP->PackageFileCount++;
754
755 if (CurrentFile->FileName == 0)
756 return false;
757
758 if (Progress != 0)
759 Progress->SubProgress(Index.Size());
760 return true;
761 }
762 /*}}}*/
763 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
764 // ---------------------------------------------------------------------
765 /* This is used to create handles to strings. Given the same text it
766 always returns the same number */
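// (In this file it is used for short, heavily repeated strings such as the
//  package architecture, the index Site and the index type label, so identical
//  text ends up stored only once in the cache.)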
767 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
768 unsigned int Size)
769 {
770 /* We use a very small transient hash table here; this speeds up generation
771 by a fair amount on slower machines */
772 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
773 if (Bucket != 0 &&
774 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
775 return Bucket->String;
776
777 // Search for an insertion point
778 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
779 int Res = 1;
780 map_ptrloc *Last = &Cache.HeaderP->StringList;
781 for (; I != Cache.StringItemP; Last = &I->NextItem,
782 I = Cache.StringItemP + I->NextItem)
783 {
784 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
785 if (Res >= 0)
786 break;
787 }
788
789 // Match
790 if (Res == 0)
791 {
792 Bucket = I;
793 return I->String;
794 }
795
796 // Get a structure
797 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
798 if (Item == 0)
799 return 0;
800
801 // Fill in the structure
802 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
803 ItemP->NextItem = I - Cache.StringItemP;
804 *Last = Item;
805 ItemP->String = Map.WriteString(S,Size);
806 if (ItemP->String == 0)
807 return 0;
808
809 Bucket = ItemP;
810 return ItemP->String;
811 }
812 /*}}}*/
813 // CheckValidity - Check that a cache is up-to-date /*{{{*/
814 // ---------------------------------------------------------------------
815 /* This just verifies that each file in the list of index files exists,
816 that its attributes match those recorded in the cache, and that the cache
817 does not reference any extra files. */
818 static bool CheckValidity(const string &CacheFile, FileIterator Start,
819 FileIterator End,MMap **OutMap = 0)
820 {
821 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
822 // No file, certainly invalid
823 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
824 {
825 if (Debug == true)
826 std::clog << "CacheFile doesn't exist" << std::endl;
827 return false;
828 }
829
830 // Map it
831 FileFd CacheF(CacheFile,FileFd::ReadOnly);
832 SPtr<MMap> Map = new MMap(CacheF,0);
833 pkgCache Cache(Map);
834 if (_error->PendingError() == true || Map->Size() == 0)
835 {
836 if (Debug == true)
837 std::clog << "Errors are pending or Map is empty()" << std::endl;
838 _error->Discard();
839 return false;
840 }
841
842 /* Now we check every index file, see if it is in the cache,
843 verify the IMS data and check that it is on the disk too. */
844 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
845 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
846 for (; Start != End; Start++)
847 {
848 if (Debug == true)
849 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
850 if ((*Start)->HasPackages() == false)
851 {
852 if (Debug == true)
853 std::clog << "Has NO packages" << std::endl;
854 continue;
855 }
856
857 if ((*Start)->Exists() == false)
858 {
859 #if 0 // mvo: we no longer give a message here (Default Sources spec)
860 _error->WarningE("stat",_("Couldn't stat source package list %s"),
861 (*Start)->Describe().c_str());
862 #endif
863 if (Debug == true)
864 std::clog << "file doesn't exist" << std::endl;
865 continue;
866 }
867
868 // FindInCache is also expected to do an IMS check.
869 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
870 if (File.end() == true)
871 {
872 if (Debug == true)
873 std::clog << "FindInCache returned end-Pointer" << std::endl;
874 return false;
875 }
876
877 Visited[File->ID] = true;
878 if (Debug == true)
879 std::clog << "with ID " << File->ID << " is valid" << std::endl;
880 }
881
882 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
883 if (Visited[I] == false)
884 {
885 if (Debug == true)
886 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
887 return false;
888 }
889
890 if (_error->PendingError() == true)
891 {
892 if (Debug == true)
893 {
894 std::clog << "Validity failed because of pending errors:" << std::endl;
895 _error->DumpErrors();
896 }
897 _error->Discard();
898 return false;
899 }
900
901 if (OutMap != 0)
902 *OutMap = Map.UnGuard();
903 return true;
904 }
905 /*}}}*/
906 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
907 // ---------------------------------------------------------------------
908 /* Size is kind of an abstract notion that is only used for the progress
909 meter */
910 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
911 {
912 unsigned long TotalSize = 0;
913 for (; Start != End; Start++)
914 {
915 if ((*Start)->HasPackages() == false)
916 continue;
917 TotalSize += (*Start)->Size();
918 }
919 return TotalSize;
920 }
921 /*}}}*/
922 // BuildCache - Merge the list of index files into the cache /*{{{*/
923 // ---------------------------------------------------------------------
924 /* */
925 static bool BuildCache(pkgCacheGenerator &Gen,
926 OpProgress *Progress,
927 unsigned long &CurrentSize,unsigned long TotalSize,
928 FileIterator Start, FileIterator End)
929 {
930 FileIterator I;
931 for (I = Start; I != End; I++)
932 {
933 if ((*I)->HasPackages() == false)
934 continue;
935
936 if ((*I)->Exists() == false)
937 continue;
938
939 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
940 {
941 _error->Warning("Duplicate sources.list entry %s",
942 (*I)->Describe().c_str());
943 continue;
944 }
945
946 unsigned long Size = (*I)->Size();
947 if (Progress != NULL)
948 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
949 CurrentSize += Size;
950
951 if ((*I)->Merge(Gen,Progress) == false)
952 return false;
953 }
954
955 if (Gen.HasFileDeps() == true)
956 {
957 if (Progress != NULL)
958 Progress->Done();
959 TotalSize = ComputeSize(Start, End);
960 CurrentSize = 0;
961 for (I = Start; I != End; I++)
962 {
963 unsigned long Size = (*I)->Size();
964 if (Progress != NULL)
965 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
966 CurrentSize += Size;
967 if ((*I)->MergeFileProvides(Gen,Progress) == false)
968 return false;
969 }
970 }
971
972 return true;
973 }
974 /*}}}*/
975 // CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
976 // ---------------------------------------------------------------------
977 /* This makes sure that the status cache (the cache that has all
978 index files from the sources list and all local ones) is ready
979 to be mmapped. If OutMap is not zero then a MMap object representing
980 the cache will be stored there. This is pretty much mandatory if you
981 are using AllowMem. AllowMem lets the function be run as non-root
982 where it builds the cache 'fast' into a memory buffer. */
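/* A minimal caller sketch (assumption: typical front-end usage, roughly what a
   wrapper like pkgCacheFile would do; everything below lives outside this file):

      pkgSourceList List;
      OpProgress Prog;
      MMap *Map = 0;
      if (List.ReadMainList() == false ||
          pkgCacheGenerator::MakeStatusCache(List, &Prog, &Map, true) == false)
         return false;              // AllowMem == true: an in-memory cache is acceptable
      pkgCache Cache(Map);          // the returned MMap backs a ready-to-use pkgCache
*/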
983 __deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
984 MMap **OutMap, bool AllowMem)
985 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
986 bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
987 MMap **OutMap,bool AllowMem)
988 {
989 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
990 unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
991
992 vector<pkgIndexFile *> Files;
993 for (vector<metaIndex *>::const_iterator i = List.begin();
994 i != List.end();
995 i++)
996 {
997 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
998 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
999 j != Indexes->end();
1000 j++)
1001 Files.push_back (*j);
1002 }
1003
1004 unsigned long const EndOfSource = Files.size();
1005 if (_system->AddStatusFiles(Files) == false)
1006 return false;
1007
1008 // Decide if we can write to the files..
1009 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1010 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1011
1012 // ensure the cache directory exists
1013 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1014 {
1015 string dir = _config->FindDir("Dir::Cache");
1016 size_t const len = dir.size();
1017 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1018 dir = dir.substr(0, len - 5);
1019 if (CacheFile.empty() == false)
1020 CreateDirectory(dir, flNotFile(CacheFile));
1021 if (SrcCacheFile.empty() == false)
1022 CreateDirectory(dir, flNotFile(SrcCacheFile));
1023 }
1024
1025 // Decide if we can write to the cache
1026 bool Writeable = false;
1027 if (CacheFile.empty() == false)
1028 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1029 else
1030 if (SrcCacheFile.empty() == false)
1031 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1032 if (Debug == true)
1033 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1034
1035 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1036 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1037
1038 if (Progress != NULL)
1039 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1040
1041 // Cache is OK, Fin.
1042 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1043 {
1044 if (Progress != NULL)
1045 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1046 if (Debug == true)
1047 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1048 return true;
1049 }
1050 else if (Debug == true)
1051 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1052
1053 /* At this point we know we need to reconstruct the package cache,
1054 begin. */
1055 SPtr<FileFd> CacheF;
1056 SPtr<DynamicMMap> Map;
1057 if (Writeable == true && CacheFile.empty() == false)
1058 {
1059 unlink(CacheFile.c_str());
1060 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
1061 fchmod(CacheF->Fd(),0644);
1062 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
1063 if (_error->PendingError() == true)
1064 return false;
1065 if (Debug == true)
1066 std::clog << "Open filebased MMap" << std::endl;
1067 }
1068 else
1069 {
1070 // Just build it in memory..
1071 Map = new DynamicMMap(0,MapSize);
1072 if (Debug == true)
1073 std::clog << "Open memory Map (not filebased)" << std::endl;
1074 }
1075
1076 // Let's try the source cache.
1077 unsigned long CurrentSize = 0;
1078 unsigned long TotalSize = 0;
1079 if (CheckValidity(SrcCacheFile,Files.begin(),
1080 Files.begin()+EndOfSource) == true)
1081 {
1082 if (Debug == true)
1083 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1084 // Preload the map with the source cache
1085 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1086 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1087 if ((alloc == 0 && _error->PendingError())
1088 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1089 SCacheF.Size()) == false)
1090 return false;
1091
1092 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1093
1094 // Build the status cache
1095 pkgCacheGenerator Gen(Map.Get(),Progress);
1096 if (_error->PendingError() == true)
1097 return false;
1098 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1099 Files.begin()+EndOfSource,Files.end()) == false)
1100 return false;
1101
1102 // FIXME: move me to a better place
1103 Gen.FinishCache(Progress);
1104 }
1105 else
1106 {
1107 if (Debug == true)
1108 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1109 TotalSize = ComputeSize(Files.begin(),Files.end());
1110
1111 // Build the source cache
1112 pkgCacheGenerator Gen(Map.Get(),Progress);
1113 if (_error->PendingError() == true)
1114 return false;
1115 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1116 Files.begin(),Files.begin()+EndOfSource) == false)
1117 return false;
1118
1119 // Write it back
1120 if (Writeable == true && SrcCacheFile.empty() == false)
1121 {
1122 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
1123 if (_error->PendingError() == true)
1124 return false;
1125
1126 fchmod(SCacheF.Fd(),0644);
1127
1128 // Write out the main data
1129 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1130 return _error->Error(_("IO Error saving source cache"));
1131 SCacheF.Sync();
1132
1133 // Write out the proper header
1134 Gen.GetCache().HeaderP->Dirty = false;
1135 if (SCacheF.Seek(0) == false ||
1136 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1137 return _error->Error(_("IO Error saving source cache"));
1138 Gen.GetCache().HeaderP->Dirty = true;
1139 SCacheF.Sync();
1140 }
1141
1142 // Build the status cache
1143 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1144 Files.begin()+EndOfSource,Files.end()) == false)
1145 return false;
1146
1147 // FIXME: move me to a better place
1148 Gen.FinishCache(Progress);
1149 }
1150 if (Debug == true)
1151 std::clog << "Caches are ready for shipping" << std::endl;
1152
1153 if (_error->PendingError() == true)
1154 return false;
1155 if (OutMap != 0)
1156 {
1157 if (CacheF != 0)
1158 {
1159 delete Map.UnGuard();
1160 *OutMap = new MMap(*CacheF,0);
1161 }
1162 else
1163 {
1164 *OutMap = Map.UnGuard();
1165 }
1166 }
1167
1168 return true;
1169 }
1170 /*}}}*/
1171 // CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1172 // ---------------------------------------------------------------------
1173 /* */
1174 __deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1175 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1176 bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1177 {
1178 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1179 vector<pkgIndexFile *> Files;
1180 unsigned long EndOfSource = Files.size();
1181 if (_system->AddStatusFiles(Files) == false)
1182 return false;
1183
1184 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1185 unsigned long CurrentSize = 0;
1186 unsigned long TotalSize = 0;
1187
1188 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1189
1190 // Build the status cache
1191 if (Progress != NULL)
1192 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1193 pkgCacheGenerator Gen(Map.Get(),Progress);
1194 if (_error->PendingError() == true)
1195 return false;
1196 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1197 Files.begin()+EndOfSource,Files.end()) == false)
1198 return false;
1199
1200 // FIXME: move me to a better place
1201 Gen.FinishCache(Progress);
1202
1203 if (_error->PendingError() == true)
1204 return false;
1205 *OutMap = Map.UnGuard();
1206
1207 return true;
1208 }
1209 /*}}}*/