// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
/* ######################################################################

   Package Cache Generator - Generator for the cache structure.

   This builds the cache structure from the abstract package list parser.

   ##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#define APT_COMPATIBILITY 986

#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/error.h>
#include <apt-pkg/version.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/sptr.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/macros.h>

#include <apt-pkg/tagfile.h>

#include <apti18n.h>

#include <vector>

#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
/*}}}*/
typedef vector<pkgIndexFile *>::iterator FileIterator;

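// A rough sketch of how this generator is typically driven (illustrative
// only - the real entry points are MakeStatusCache()/MakeOnlyStatusCache()
// further down; map size and error handling are simplified here):
#if 0
   DynamicMMap Map(0, 4*1024*1024);     // in-memory map that will hold the cache
   OpProgress Prog;
   pkgCacheGenerator Gen(&Map, &Prog);  // writes the header and sets the dirty flag
   // for each index file: Gen.SelectFile(...) followed by MergeList(), usually
   // via pkgIndexFile's Merge()
   Gen.FinishCache(&Prog);              // adds the implicit MultiArch relations
#endif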
// CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* We set the dirty flag and make sure that it is written to the disk */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
                    Map(*pMap), Cache(pMap,false), Progress(Prog),
                    FoundFileDeps(0)
{
   CurrentFile = 0;
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
         return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = WriteStringInMap(_system->VS->Label);
      Cache.HeaderP->Architecture = WriteStringInMap(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      if (Cache.VS != _system->VS)
      {
         _error->Error(_("Cache has an incompatible versioning system"));
         return;
      }
   }

   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
/*}}}*/
// CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* We sync the data then unset the dirty flag in two steps so as to
   avoid a problem during a crash */
pkgCacheGenerator::~pkgCacheGenerator()
{
   if (_error->PendingError() == true)
      return;
   if (Map.Sync() == false)
      return;

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
/*}}}*/
// CacheGenerator::WriteStringInMap /*{{{*/
unsigned long pkgCacheGenerator::WriteStringInMap(const char *String,
                                                  const unsigned long &Len) {
   return Map.WriteString(String, Len);
}
/*}}}*/
// CacheGenerator::WriteStringInMap /*{{{*/
unsigned long pkgCacheGenerator::WriteStringInMap(const char *String) {
   return Map.WriteString(String);
}
/*}}}*/
unsigned long pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
   return Map.Allocate(size);
}
/*}}}*/
// CacheGenerator::MergeList - Merge the package list /*{{{*/
// ---------------------------------------------------------------------
/* This provides the generation of the entries in the cache. Each loop
   goes through a single package record from the underlying parse engine. */
bool pkgCacheGenerator::MergeList(ListParser &List,
                                  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;

      /* Since we handle Architecture: all packages as architecture-bound,
         we add their information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
         genArch = APT::Configuration::getArchitectures();
         if (genArch.size() != 1)
            genArch.push_back("all");
      } else
         genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
           arch != genArch.end(); ++arch)
      {
         // Get a pointer to the package structure
         pkgCache::PkgIterator Pkg;
         if (NewPackage(Pkg, PackageName, *arch) == false)
            return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
         Counter++;
         if (Counter % 100 == 0 && Progress != 0)
            Progress->Progress(List.Offset());

         /* Get a pointer to the version structure. We know the list is sorted
            so we use that fact in the search. Insertion of new versions is
            done with correct sorting */
         string Version = List.Version();
         if (Version.empty() == true)
         {
            // we first process the package, then the descriptions
            // (this has the bonus that we get an MMap error when we run
            // out of MMap space)
            if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
               return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
                                    PackageName.c_str());

            // Find the right version to write the description
            MD5SumValue CurMd5 = List.Description_md5();
            pkgCache::VerIterator Ver = Pkg.VersionList();
            map_ptrloc *LastVer = &Pkg->VersionList;

            for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
            {
               pkgCache::DescIterator Desc = Ver.DescriptionList();
               map_ptrloc *LastDesc = &Ver->DescriptionList;
               bool duplicate=false;

               // don't add a new description if we have one for the given
               // md5 && language
               for ( ; Desc.end() == false; Desc++)
                  if (MD5SumValue(Desc.md5()) == CurMd5 &&
                      Desc.LanguageCode() == List.DescriptionLanguage())
                     duplicate=true;
               if(duplicate)
                  continue;

               for (Desc = Ver.DescriptionList();
                    Desc.end() == false;
                    LastDesc = &Desc->NextDesc, Desc++)
               {
                  if (MD5SumValue(Desc.md5()) == CurMd5)
                  {
                     // Add new description
                     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
                     Desc->ParentPkg = Pkg.Index();

                     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
                        return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
                     break;
                  }
               }
            }

            continue;
         }

         pkgCache::VerIterator Ver = Pkg.VersionList();
         map_ptrloc *LastVer = &Pkg->VersionList;
         int Res = 1;
         unsigned long const Hash = List.VersionHash();
         for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
         {
            Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
            // Version is higher than the current version - insert here
            if (Res > 0)
               break;
            // Version strings are equal - is the hash also equal?
            if (Res == 0 && Ver->Hash == Hash)
               break;
            // otherwise continue with the next entry until we either find
            // an exact match or a lower version (then we insert before it)
         }

         /* We already have a version for this item, record that we saw it */
         if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
         {
            if (List.UsePackage(Pkg,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
                                    PackageName.c_str());

            if (NewFileVer(Ver,List) == false)
               return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
                                    PackageName.c_str());

            // Read only a single record and return
            if (OutVer != 0)
            {
               *OutVer = Ver;
               FoundFileDeps |= List.HasFileDeps();
               return true;
            }

            continue;
         }

         // Add a new version
         *LastVer = NewVersion(Ver,Version,*LastVer);
         Ver->ParentPkg = Pkg.Index();
         Ver->Hash = Hash;

         if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
            return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
                                 PackageName.c_str());

         if (List.UsePackage(Pkg,Ver) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
                                 PackageName.c_str());

         if (NewFileVer(Ver,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
                                 PackageName.c_str());

         // Read only a single record and return
         if (OutVer != 0)
         {
            *OutVer = Ver;
            FoundFileDeps |= List.HasFileDeps();
            return true;
         }

         /* Record the Description data. Description data always exist in
            Packages and Translation-* files. */
         pkgCache::DescIterator Desc = Ver.DescriptionList();
         map_ptrloc *LastDesc = &Ver->DescriptionList;

         // Skip to the end of description set
         for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

         // Add new description
         *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
         Desc->ParentPkg = Pkg.Index();

         if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
                             "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
                             "this APT is capable of."));
   return true;
}
/*}}}*/
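// The linked lists inside the cache are threaded through map_ptrloc offsets
// rather than raw pointers. The append idiom used above (and again in
// NewFileVer, NewFileDesc and NewDepends below) boils down to this rough
// sketch (illustrative pseudo-identifiers, not real cache fields):
#if 0
   map_ptrloc *Last = &Head;                       // offset of the list head
   for (Iterator I = First(); I.end() == false;    // walk to the tail, keeping
        Last = &I->Next, I++);                     // a pointer to the last link
   NewItem->Next = *Last;                          // usually 0 at the tail
   *Last = NewItemOffset;                          // splice the new element in
#endif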
// CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
// ---------------------------------------------------------------------
/* If we found any file depends while parsing the main list we need to
   resolve them. Since it is undesirable to load the entire list of files
   into the cache as virtual packages, we make a two-stage effort: MergeList
   identifies the file depends and this creates Provides for them by
   re-parsing all the indexes. */
bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;
      string Version = List.Version();
      if (Version.empty() == true)
         continue;

      pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
      if (Pkg.end() == true)
         return _error->Error(_("Error occurred while processing %s (FindPkg)"),
                              PackageName.c_str());
      Counter++;
      if (Counter % 100 == 0 && Progress != 0)
         Progress->Progress(List.Offset());

      unsigned long Hash = List.VersionHash();
      pkgCache::VerIterator Ver = Pkg.VersionList();
      for (; Ver.end() == false; Ver++)
      {
         if (Ver->Hash == Hash && Version == Ver.VerStr())
         {
            if (List.CollectFileProvides(Cache,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
            break;
         }
      }

      if (Ver.end() == true)
         _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
   }

   return true;
}
/*}}}*/
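// Note: this second pass only runs when the first MergeList() pass saw file
// dependencies (HasFileDeps() returns true); BuildCache() further down then
// re-parses every index a second time so the matching Provides can be added.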
// CacheGenerator::NewGroup - Add a new group /*{{{*/
// ---------------------------------------------------------------------
/* This creates a new group structure and adds it to the hash table */
bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
{
   Grp = Cache.FindGrp(Name);
   if (Grp.end() == false)
      return true;

   // Get a structure
   unsigned long const Group = AllocateInMap(sizeof(pkgCache::Group));
   if (unlikely(Group == 0))
      return false;

   Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
   Grp->Name = WriteStringInMap(Name);
   if (unlikely(Grp->Name == 0))
      return false;

   // Insert it into the hash table
   unsigned long const Hash = Cache.Hash(Name);
   Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
   Cache.HeaderP->GrpHashTable[Hash] = Group;

   Grp->ID = Cache.HeaderP->GroupCount++;
   return true;
}
/*}}}*/
// CacheGenerator::NewPackage - Add a new package /*{{{*/
// ---------------------------------------------------------------------
/* This creates a new package structure and adds it to the hash table */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
                                   const string &Arch) {
   pkgCache::GrpIterator Grp;
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = AllocateInMap(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert the package into our package list
   if (Grp->FirstPackage == 0) // the group is new
   {
      // Insert it into the hash table
      unsigned long const Hash = Cache.Hash(Name);
      Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
      Cache.HeaderP->PkgHashTable[Hash] = Package;
      Grp->FirstPackage = Package;
   }
   else // Group the Packages together
   {
      // this package is the new last package
      pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
      Pkg->NextPackage = LastPkg->NextPackage;
      LastPkg->NextPackage = Package;
   }
   Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
/*}}}*/
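// Only the first package of a group is linked into PkgHashTable above; the
// other architectures of the same group are reached through FirstPackage /
// NextPackage, so one hash chain walk is enough to find the whole group.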
// CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
// ---------------------------------------------------------------------
/* This links the given version to the index file currently being parsed
   (the one selected with SelectFile). */
bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
                                   ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
   if (VerFile == 0)
      return 0;

   pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
   VF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Ver->FileList;
   for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
      Last = &V->NextFile;
   VF->NextFile = *Last;
   *Last = VF.Index();

   VF->Offset = List.Offset();
   VF->Size = List.Size();
   if (Cache.HeaderP->MaxVerFileSize < VF->Size)
      Cache.HeaderP->MaxVerFileSize = VF->Size;
   Cache.HeaderP->VerFileCount++;

   return true;
}
/*}}}*/
// CacheGenerator::NewVersion - Create a new Version /*{{{*/
// ---------------------------------------------------------------------
/* This puts a version structure in the linked list */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
                                            const string &VerStr,
                                            unsigned long Next)
{
   // Get a structure
   unsigned long Version = AllocateInMap(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;

   // Fill it in
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = WriteStringInMap(VerStr);
   if (Ver->VerStr == 0)
      return 0;

   return Version;
}
/*}}}*/
// CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
// ---------------------------------------------------------------------
/* This links the given description to the index file currently being parsed. */
bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
                                    ListParser &List)
{
   if (CurrentFile == 0)
      return true;

   // Get a structure
   unsigned long DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
   if (DescFile == 0)
      return false;

   pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
   DF->File = CurrentFile - Cache.PkgFileP;

   // Link it to the end of the list
   map_ptrloc *Last = &Desc->FileList;
   for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
      Last = &D->NextFile;

   DF->NextFile = *Last;
   *Last = DF.Index();

   DF->Offset = List.Offset();
   DF->Size = List.Size();
   if (Cache.HeaderP->MaxDescFileSize < DF->Size)
      Cache.HeaderP->MaxDescFileSize = DF->Size;
   Cache.HeaderP->DescFileCount++;

   return true;
}
/*}}}*/
// CacheGenerator::NewDescription - Create a new Description /*{{{*/
// ---------------------------------------------------------------------
/* This puts a description structure in the linked list */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
                                             const string &Lang,
                                             const MD5SumValue &md5sum,
                                             map_ptrloc Next)
{
   // Get a structure
   map_ptrloc Description = AllocateInMap(sizeof(pkgCache::Description));
   if (Description == 0)
      return 0;

   // Fill it in
   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = WriteStringInMap(Lang);
   Desc->md5sum = WriteStringInMap(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
      return 0;

   return Description;
}
/*}}}*/
// CacheGenerator::FinishCache - do various finish operations /*{{{*/
// ---------------------------------------------------------------------
/* This prepares the Cache for delivery */
bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
{
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in our groups?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1)
   {
      // Create the implicit dependencies between the group members
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++)
      {
         string const PkgName = G.Name();
         for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P))
         {
            if (strcmp(P.Arch(),"all") == 0)
               continue;
            pkgCache::PkgIterator allPkg;
            for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++)
            {
               string const Arch = V.Arch(true);
               map_ptrloc *OldDepLast = NULL;
               /* MultiArch handling introduces a lot of implicit Dependencies:
                  - MultiArch: same → Co-Installable if they have the same version
                  - Architecture: all → Need to be Co-Installable for internal reasons
                  - All others conflict with all other group members */
               bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
                                       V->MultiArch == pkgCache::Version::Same);
               if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
                  allPkg = G.FindPkg("all");
               for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
               {
                  if (*A == Arch)
                     continue;
                  /* We allow only one installed arch at a time
                     per group, therefore each group member conflicts
                     with all other group members */
                  pkgCache::PkgIterator D = G.FindPkg(*A);
                  if (D.end() == true)
                     continue;
                  if (coInstall == true)
                  {
                     // Replaces: ${self}:other ( << ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::Replaces,
                                OldDepLast);
                     // Breaks: ${self}:other (!= ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     if (V->MultiArch == pkgCache::Version::All)
                     {
                        // Depend on ${self}:all which does depend on nothing
                        NewDepends(allPkg, V, V.VerStr(),
                                   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
                                   OldDepLast);
                     }
                  } else {
                     // Conflicts: ${self}:other
                     NewDepends(D, V, "",
                                pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
                                OldDepLast);
                  }
               }
            }
         }
      }
   }
   return true;
}
/*}}}*/
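// Illustration (hypothetical package, not taken from any real archive): for a
// group "libfoo" built for amd64 and i386 with MultiArch: same, FinishCache()
// above gives the amd64 version roughly
//    Replaces: libfoo:i386 (<< ${binary:Version})
//    Breaks:   libfoo:i386 (!= ${binary:Version})
// while a version without a co-installation promise simply gets
//    Conflicts: libfoo:i386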
// CacheGenerator::NewDepends - Create a dependency element /*{{{*/
// ---------------------------------------------------------------------
/* This creates a dependency element in the tree. It is linked to the
   version and to the package that it is pointing to. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
                                   pkgCache::VerIterator &Ver,
                                   string const &Version,
                                   unsigned int const &Op,
                                   unsigned int const &Type,
                                   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*    for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
         if (I->Version != 0 && I.TargetVer() == Version)
            Dep->Version = I->Version;*/
      if (Dep->Version == 0)
         if (unlikely((Dep->Version = WriteStringInMap(Version)) == 0))
            return false;
   }

   // Link it to the package
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
         OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   OldDepLast = &Dep->NextDepends;

   return true;
}
/*}}}*/
// ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
// ---------------------------------------------------------------------
/* This creates a Group and the Package to link this dependency to if
   needed, and also handles caching of the old end point */
bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
                                               const string &PackageName,
                                               const string &Arch,
                                               const string &Version,
                                               unsigned int Op,
                                               unsigned int Type)
{
   pkgCache::GrpIterator Grp;
   if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == true) {
      if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
         return false;
   }

   // Is it a file dependency?
   if (unlikely(PackageName[0] == '/'))
      FoundFileDeps = true;

   /* Caching the old end point speeds up generation substantially */
   if (OldDepVer != Ver) {
      OldDepLast = NULL;
      OldDepVer = Ver;
   }

   return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
}
/*}}}*/
// ListParser::NewProvides - Create a Provides element /*{{{*/
// ---------------------------------------------------------------------
/* This creates a provides element and links it to both the providing
   version and the provided package. */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
                                                const string &PkgName,
                                                const string &PkgArch,
                                                const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;

   // Link it to the package
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
/*}}}*/
// CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
// ---------------------------------------------------------------------
/* This is used to select which file is to be associated with all newly
   added versions. The caller is responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
                                   const pkgIndexFile &Index,
                                   unsigned long Flags)
{
   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + AllocateInMap(sizeof(*CurrentFile));
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in
   CurrentFile->FileName = WriteStringInMap(File);
   CurrentFile->Site = WriteUniqString(Site);
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   PkgFileName = File;
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
/*}}}*/
// CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
// ---------------------------------------------------------------------
/* This is used to create handles to strings. Given the same text it
   always returns the same number */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
                                                 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search for an insertion point
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
         break;
   }

   // Match
   if (Res == 0)
   {
      Bucket = I;
      return I->String;
   }

   // Get a structure
   unsigned long Item = AllocateInMap(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = WriteStringInMap(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
/*}}}*/
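// The UniqHash bucket above is only a transient, in-memory accelerator; the
// persistent structure is the sorted StringItem list inside the map. A rough
// usage sketch (illustrative values only):
#if 0
   unsigned long A = WriteUniqString("amd64", 5);   // first call inserts the string
   unsigned long B = WriteUniqString("amd64", 5);   // second call hits the bucket
   // A == B - identical text always yields the same offset into the cache
#endif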
// CheckValidity - Check that a cache is up-to-date /*{{{*/
// ---------------------------------------------------------------------
/* This just verifies that each file in the list of index files exists,
   has attributes matching the cache, and that the cache does not contain
   any extra files. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
         std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
         std::clog << "Errors are pending or Map is empty()" << std::endl;
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
         std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
         if (Debug == true)
            std::clog << "Has NO packages" << std::endl;
         continue;
      }

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
         _error->WarningE("stat",_("Couldn't stat source package list %s"),
                          (*Start)->Describe().c_str());
#endif
         if (Debug == true)
            std::clog << "file doesn't exist" << std::endl;
         continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
         if (Debug == true)
            std::clog << "FindInCache returned end-Pointer" << std::endl;
         return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
         std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }

   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
         if (Debug == true)
            std::clog << "File with ID" << I << " wasn't visited" << std::endl;
         return false;
      }

   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
         std::clog << "Validity failed because of pending errors:" << std::endl;
         _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }

   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
/*}}}*/
// ComputeSize - Compute the total size of a bunch of files /*{{{*/
// ---------------------------------------------------------------------
/* Size is kind of an abstract notion that is only used for the progress
   meter */
static unsigned long ComputeSize(FileIterator Start,FileIterator End)
{
   unsigned long TotalSize = 0;
   for (; Start != End; Start++)
   {
      if ((*Start)->HasPackages() == false)
         continue;
      TotalSize += (*Start)->Size();
   }
   return TotalSize;
}
/*}}}*/
// BuildCache - Merge the list of index files into the cache /*{{{*/
// ---------------------------------------------------------------------
/* Merges every given index file into the cache and, if file dependencies
   were found, runs a second pass to collect the file provides. */
static bool BuildCache(pkgCacheGenerator &Gen,
                       OpProgress *Progress,
                       unsigned long &CurrentSize,unsigned long TotalSize,
                       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
         continue;

      if ((*I)->Exists() == false)
         continue;

      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
         _error->Warning("Duplicate sources.list entry %s",
                         (*I)->Describe().c_str());
         continue;
      }

      unsigned long Size = (*I)->Size();
      if (Progress != NULL)
         Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
         return false;
   }

   if (Gen.HasFileDeps() == true)
   {
      if (Progress != NULL)
         Progress->Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
         unsigned long Size = (*I)->Size();
         if (Progress != NULL)
            Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
         CurrentSize += Size;
         if ((*I)->MergeFileProvides(Gen,Progress) == false)
            return false;
      }
   }

   return true;
}
/*}}}*/
// CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
// ---------------------------------------------------------------------
/* This makes sure that the status cache (the cache that has all
   index files from the sources list and all local ones) is ready
   to be mmapped. If OutMap is not zero then an MMap object representing
   the cache will be stored there. This is pretty much mandatory if you
   are using AllowMem. AllowMem lets the function be run as non-root
   where it builds the cache 'fast' into a memory buffer. */
__deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
                                     MMap **OutMap, bool AllowMem)
{ return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
                                        MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
           j != Indexes->end();
           j++)
         Files.push_back (*j);
   }

   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // ensure the cache directory exists
   if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
   {
      string dir = _config->FindDir("Dir::Cache");
      size_t const len = dir.size();
      if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
         dir = dir.substr(0, len - 5);
      if (CacheFile.empty() == false)
         CreateDirectory(dir, flNotFile(CacheFile));
      if (SrcCacheFile.empty() == false)
         CreateDirectory(dir, flNotFile(SrcCacheFile));
   }

   // Decide if we can write to the cache
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
         Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   if (Progress != NULL)
      Progress->OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      if (Progress != NULL)
         Progress->OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
         std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
      std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
         return false;
      if (Debug == true)
         std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
         std::clog << "Open memory Map (not filebased)" << std::endl;
   }

   // Let's try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
                     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      if ((alloc == 0 && _error->PendingError())
          || SCacheF.Read((unsigned char *)Map->Data() + alloc,
                          SCacheF.Size()) == false)
         return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache
      pkgCacheGenerator Gen(Map.Get(),Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
         std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),Progress);
      if (_error->PendingError() == true)
         return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin(),Files.begin()+EndOfSource) == false)
         return false;

      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
         FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
         if (_error->PendingError() == true)
            return false;

         fchmod(SCacheF.Fd(),0644);

         // Write out the main data
         if (SCacheF.Write(Map->Data(),Map->Size()) == false)
            return _error->Error(_("IO Error saving source cache"));
         SCacheF.Sync();

         // Write out the proper header
         Gen.GetCache().HeaderP->Dirty = false;
         if (SCacheF.Seek(0) == false ||
             SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
            return _error->Error(_("IO Error saving source cache"));
         Gen.GetCache().HeaderP->Dirty = true;
         SCacheF.Sync();
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                     Files.begin()+EndOfSource,Files.end()) == false)
         return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
         delete Map.UnGuard();
         *OutMap = new MMap(*CacheF,0);
      }
      else
      {
         *OutMap = Map.UnGuard();
      }
   }

   return true;
}
/*}}}*/
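// A minimal sketch of how a front-end typically drives this entry point
// (illustrative only - configuration setup and error handling are omitted,
// and the sources list is assumed to be readable):
#if 0
   pkgSourceList List;
   List.ReadMainList();
   OpProgress Prog;
   MMap *Map = 0;
   if (pkgCacheGenerator::MakeStatusCache(List, &Prog, &Map, true) == true)
   {
      pkgCache Cache(Map);       // ready-to-use package cache
      // ... iterate with Cache.PkgBegin() ...
      delete Map;
   }
#endif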
// CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
// ---------------------------------------------------------------------
/* Builds an in-memory cache that contains only the status files. */
__deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
{ return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
{
   unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
   vector<pkgIndexFile *> Files;
   unsigned long EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;

   TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

   // Build the status cache
   if (Progress != NULL)
      Progress->OverallProgress(0,1,1,_("Reading package lists"));
   pkgCacheGenerator Gen(Map.Get(),Progress);
   if (_error->PendingError() == true)
      return false;
   if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
                  Files.begin()+EndOfSource,Files.end()) == false)
      return false;

   // FIXME: move me to a better place
   Gen.FinishCache(Progress);

   if (_error->PendingError() == true)
      return false;
   *OutMap = Map.UnGuard();

   return true;
}
/*}}}*/