// Source: apt (git.saurik.com mirror) - apt-pkg/pkgcachegen.cc
// Commit subject: "add a missing non-const & Accessors to the Iterator class"
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
// Convenience iterator over the list of index files handed to the generator.
typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41 // ---------------------------------------------------------------------
42 /* We set the dirty flag and make sure that is written to the disk */
/* Attach the generator to the map in pMap: either initialize a fresh cache
   (header, pools, version-system label, architecture) or remap an existing
   one and verify it uses the same versioning system. In both cases the
   dirty flag is set and synced to disk so a crash mid-generation leaves a
   detectably broken cache behind. */
pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
                    Map(*pMap), Cache(pMap,false), Progress(Prog),
                    FoundFileDeps(0)
{
   CurrentFile = 0;
   // Clear the transient string-dedup hash used by WriteUniqString
   memset(UniqHash,0,sizeof(UniqHash));

   if (_error->PendingError() == true)
      return;

   if (Map.Size() == 0)
   {
      // Setup the map interface..
      Cache.HeaderP = (pkgCache::Header *)Map.Data();
      if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
         return;

      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));

      // Starting header
      *Cache.HeaderP = pkgCache::Header();
      Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
      Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
      Cache.ReMap();
   }
   else
   {
      // Map directly from the existing file
      Cache.ReMap();
      Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
      // Refuse a cache built by a different versioning system
      if (Cache.VS != _system->VS)
      {
         _error->Error(_("Cache has an incompatible versioning system"));
         return;
      }
   }

   // Mark dirty on disk until the destructor completes successfully
   Cache.HeaderP->Dirty = true;
   Map.Sync(0,sizeof(pkgCache::Header));
}
83 /*}}}*/
84 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85 // ---------------------------------------------------------------------
86 /* We sync the data then unset the dirty flag in two steps so as to
87 advoid a problem during a crash */
/* Finalize the cache: sync all data first, and only if that succeeds clear
   the dirty flag and sync the header. Doing it in two steps means a crash
   in between still leaves the cache marked dirty. */
pkgCacheGenerator::~pkgCacheGenerator()
{
   // Leave the cache flagged dirty if generation failed...
   if (_error->PendingError() == true)
      return;
   // ...or if the data could not be written back
   if (Map.Sync() == false)
      return;

   Cache.HeaderP->Dirty = false;
   Map.Sync(0,sizeof(pkgCache::Header));
}
98 /*}}}*/
99 // CacheGenerator::MergeList - Merge the package list /*{{{*/
100 // ---------------------------------------------------------------------
101 /* This provides the generation of the entries in the cache. Each loop
102 goes through a single package record from the underlying parse engine. */
/* This provides the generation of the entries in the cache. Each loop goes
   through a single package record from the underlying parse engine. Records
   without a version string (e.g. Translation-* entries) only contribute
   description data to versions already in the cache. If OutVer is non-NULL
   only a single record is read and its version is returned through it.
   Returns false on a parse error or a failed sub-operation. */
bool pkgCacheGenerator::MergeList(ListParser &List,
                                  pkgCache::VerIterator *OutVer)
{
   List.Owner = this;

   unsigned int Counter = 0;
   while (List.Step() == true)
   {
      string const PackageName = List.Package();
      if (PackageName.empty() == true)
         return false;

      /* As we handle Arch all packages as architecture bounded
         we add all information to every (simulated) arch package */
      std::vector<string> genArch;
      if (List.ArchitectureAll() == true) {
         genArch = APT::Configuration::getArchitectures();
         // with a single configured architecture a simulated "all" is added
         if (genArch.size() != 1)
            genArch.push_back("all");
      } else
         genArch.push_back(List.Architecture());

      for (std::vector<string>::const_iterator arch = genArch.begin();
           arch != genArch.end(); ++arch)
      {
         // Get a pointer to the package structure
         pkgCache::PkgIterator Pkg;
         if (NewPackage(Pkg, PackageName, *arch) == false)
            return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
         Counter++;
         if (Counter % 100 == 0 && Progress != 0)
            Progress->Progress(List.Offset());

         /* Get a pointer to the version structure. We know the list is sorted
            so we use that fact in the search. Insertion of new versions is
            done with correct sorting */
         string Version = List.Version();
         if (Version.empty() == true)
         {
            // we first process the package, then the descriptions
            // (this has the bonus that we get MMap error when we run out
            //  of MMap space)
            if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
               return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
                                    PackageName.c_str());

            // Find the right version to write the description
            MD5SumValue CurMd5 = List.Description_md5();
            pkgCache::VerIterator Ver = Pkg.VersionList();
            map_ptrloc *LastVer = &Pkg->VersionList;

            for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
            {
               pkgCache::DescIterator Desc = Ver.DescriptionList();
               map_ptrloc *LastDesc = &Ver->DescriptionList;
               bool duplicate=false;

               // don't add a new description if we have one for the given
               // md5 && language
               for ( ; Desc.end() == false; Desc++)
                  if (MD5SumValue(Desc.md5()) == CurMd5 &&
                      Desc.LanguageCode() == List.DescriptionLanguage())
                     duplicate=true;
               if(duplicate)
                  continue;

               // Chain a new description behind the one with the same md5
               for (Desc = Ver.DescriptionList();
                    Desc.end() == false;
                    LastDesc = &Desc->NextDesc, Desc++)
               {
                  if (MD5SumValue(Desc.md5()) == CurMd5)
                  {
                     // Add new description
                     *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
                     Desc->ParentPkg = Pkg.Index();

                     // NewDescription yields 0 only on allocation failure,
                     // signalled through the pending-error state
                     if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
                        return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
                     break;
                  }
               }
            }

            continue;
         }

         // Walk the sorted version list to find the insertion point (or an
         // existing entry with equal version string and hash)
         pkgCache::VerIterator Ver = Pkg.VersionList();
         map_ptrloc *LastVer = &Pkg->VersionList;
         int Res = 1;
         unsigned long const Hash = List.VersionHash();
         for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
         {
            Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
            // Version is higher as current version - insert here
            if (Res > 0)
               break;
            // Versionstrings are equal - is hash also equal?
            if (Res == 0 && Ver->Hash == Hash)
               break;
            // proceed with the next till we have either the right
            // or we found another version (which will be lower)
         }

         /* We already have a version for this item, record that we saw it */
         if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
         {
            if (List.UsePackage(Pkg,Ver) == false)
               return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
                                    PackageName.c_str());

            if (NewFileVer(Ver,List) == false)
               return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
                                    PackageName.c_str());

            // Read only a single record and return
            if (OutVer != 0)
            {
               *OutVer = Ver;
               FoundFileDeps |= List.HasFileDeps();
               return true;
            }

            continue;
         }

         // Add a new version, spliced into the sorted list at LastVer
         *LastVer = NewVersion(Ver,Version,*LastVer);
         Ver->ParentPkg = Pkg.Index();
         Ver->Hash = Hash;

         // NewVersion returns 0 on allocation failure (pending error set)
         if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
            return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
                                 PackageName.c_str());

         if (List.UsePackage(Pkg,Ver) == false)
            return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
                                 PackageName.c_str());

         if (NewFileVer(Ver,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
                                 PackageName.c_str());

         // Read only a single record and return
         if (OutVer != 0)
         {
            *OutVer = Ver;
            FoundFileDeps |= List.HasFileDeps();
            return true;
         }

         /* Record the Description data. Description data always exist in
            Packages and Translation-* files. */
         pkgCache::DescIterator Desc = Ver.DescriptionList();
         map_ptrloc *LastDesc = &Ver->DescriptionList;

         // Skip to the end of description set
         for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);

         // Add new description
         *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
         Desc->ParentPkg = Pkg.Index();

         if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
            return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
      }
   }

   FoundFileDeps |= List.HasFileDeps();

   // The ID fields are fixed-width; exceeding them would corrupt the cache
   if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
      return _error->Error(_("Wow, you exceeded the number of package "
                             "names this APT is capable of."));
   if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of versions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
      return _error->Error(_("Wow, you exceeded the number of descriptions "
                             "this APT is capable of."));
   if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
      return _error->Error(_("Wow, you exceeded the number of dependencies "
                             "this APT is capable of."));
   return true;
}
286 /*}}}*/
287 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
288 // ---------------------------------------------------------------------
289 /* If we found any file depends while parsing the main list we need to
290 resolve them. Since it is undesired to load the entire list of files
291 into the cache as virtual packages we do a two stage effort. MergeList
292 identifies the file depends and this creates Provdies for them by
293 re-parsing all the indexs. */
294 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
295 {
296 List.Owner = this;
297
298 unsigned int Counter = 0;
299 while (List.Step() == true)
300 {
301 string PackageName = List.Package();
302 if (PackageName.empty() == true)
303 return false;
304 string Version = List.Version();
305 if (Version.empty() == true)
306 continue;
307
308 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
309 if (Pkg.end() == true)
310 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
311 PackageName.c_str());
312 Counter++;
313 if (Counter % 100 == 0 && Progress != 0)
314 Progress->Progress(List.Offset());
315
316 unsigned long Hash = List.VersionHash();
317 pkgCache::VerIterator Ver = Pkg.VersionList();
318 for (; Ver.end() == false; Ver++)
319 {
320 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
321 {
322 if (List.CollectFileProvides(Cache,Ver) == false)
323 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
324 break;
325 }
326 }
327
328 if (Ver.end() == true)
329 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
330 }
331
332 return true;
333 }
334 /*}}}*/
335 // CacheGenerator::NewGroup - Add a new group /*{{{*/
336 // ---------------------------------------------------------------------
337 /* This creates a new group structure and adds it to the hash table */
338 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
339 Grp = Cache.FindGrp(Name);
340 if (Grp.end() == false)
341 return true;
342
343 // Get a structure
344 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
345 if (unlikely(Group == 0))
346 return false;
347
348 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
349 Grp->Name = Map.WriteString(Name);
350 if (unlikely(Grp->Name == 0))
351 return false;
352
353 // Insert it into the hash table
354 unsigned long const Hash = Cache.Hash(Name);
355 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
356 Cache.HeaderP->GrpHashTable[Hash] = Group;
357
358 Cache.HeaderP->GroupCount++;
359
360 return true;
361 }
362 /*}}}*/
363 // CacheGenerator::NewPackage - Add a new package /*{{{*/
364 // ---------------------------------------------------------------------
365 /* This creates a new package structure and adds it to the hash table */
/* Find or create the package structure for Name/Arch, creating the owning
   group on demand, and register it in the package hash table. */
bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
                                   const string &Arch) {
   pkgCache::GrpIterator Grp;
   // Every package lives inside the (possibly freshly created) group of its name
   if (unlikely(NewGroup(Grp, Name) == false))
      return false;

   // Already present for this architecture - nothing to do
   Pkg = Grp.FindPkg(Arch);
   if (Pkg.end() == false)
      return true;

   // Get a structure
   unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
   if (unlikely(Package == 0))
      return false;
   Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);

   // Insert it at the head of its name-hash chain
   unsigned long const Hash = Cache.Hash(Name);
   Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
   Cache.HeaderP->PkgHashTable[Hash] = Package;

   // remember the packages in the group
   // NOTE(review): FirstPackage is overwritten on every insertion while
   // LastPackage is set only once - consistent with head-insertion into the
   // hash chain, but confirm group members stay adjacent when unrelated
   // packages share the same hash bucket.
   Grp->FirstPackage = Package;
   if (Grp->LastPackage == 0)
      Grp->LastPackage = Package;

   // Set the name, arch and the ID
   Pkg->Name = Grp->Name;   // share the string already written for the group
   Pkg->Group = Grp.Index();
   Pkg->Arch = WriteUniqString(Arch.c_str());
   if (unlikely(Pkg->Arch == 0))
      return false;
   Pkg->ID = Cache.HeaderP->PackageCount++;

   return true;
}
402 /*}}}*/
403 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
404 // ---------------------------------------------------------------------
405 /* */
406 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
407 ListParser &List)
408 {
409 if (CurrentFile == 0)
410 return true;
411
412 // Get a structure
413 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
414 if (VerFile == 0)
415 return 0;
416
417 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
418 VF->File = CurrentFile - Cache.PkgFileP;
419
420 // Link it to the end of the list
421 map_ptrloc *Last = &Ver->FileList;
422 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
423 Last = &V->NextFile;
424 VF->NextFile = *Last;
425 *Last = VF.Index();
426
427 VF->Offset = List.Offset();
428 VF->Size = List.Size();
429 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
430 Cache.HeaderP->MaxVerFileSize = VF->Size;
431 Cache.HeaderP->VerFileCount++;
432
433 return true;
434 }
435 /*}}}*/
436 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
437 // ---------------------------------------------------------------------
438 /* This puts a version structure in the linked list */
/* Allocate a new Version structure holding VerStr, chain it in front of
   Next, and return its offset in the map (0 on allocation failure, with the
   error state set by the map). */
unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
                                            const string &VerStr,
                                            unsigned long Next)
{
   // Get a structure
   unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
   if (Version == 0)
      return 0;

   // Fill it in
   Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
   Ver->NextVer = Next;
   Ver->ID = Cache.HeaderP->VersionCount++;
   Ver->VerStr = Map.WriteString(VerStr);
   if (Ver->VerStr == 0)
      return 0;

   return Version;
}
458 /*}}}*/
459 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
460 // ---------------------------------------------------------------------
461 /* */
462 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
463 ListParser &List)
464 {
465 if (CurrentFile == 0)
466 return true;
467
468 // Get a structure
469 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
470 if (DescFile == 0)
471 return false;
472
473 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
474 DF->File = CurrentFile - Cache.PkgFileP;
475
476 // Link it to the end of the list
477 map_ptrloc *Last = &Desc->FileList;
478 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
479 Last = &D->NextFile;
480
481 DF->NextFile = *Last;
482 *Last = DF.Index();
483
484 DF->Offset = List.Offset();
485 DF->Size = List.Size();
486 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
487 Cache.HeaderP->MaxDescFileSize = DF->Size;
488 Cache.HeaderP->DescFileCount++;
489
490 return true;
491 }
492 /*}}}*/
493 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
494 // ---------------------------------------------------------------------
495 /* This puts a description structure in the linked list */
/* Allocate a new Description structure (language code + md5 of the text),
   chain it in front of Next, and return its offset in the map (0 on
   allocation failure). */
map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
                                             const string &Lang,
                                             const MD5SumValue &md5sum,
                                             map_ptrloc Next)
{
   // Get a structure
   map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
   if (Description == 0)
      return 0;

   // Fill it in
   Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
   Desc->NextDesc = Next;
   Desc->ID = Cache.HeaderP->DescriptionCount++;
   Desc->language_code = Map.WriteString(Lang);
   Desc->md5sum = Map.WriteString(md5sum.Value());
   if (Desc->language_code == 0 || Desc->md5sum == 0)
      return 0;

   return Description;
}
517 /*}}}*/
518 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
519 // ---------------------------------------------------------------------
520 /* This prepares the Cache for delivery */
/* Prepare the cache for delivery by synthesizing the implicit MultiArch
   relations: with more than one configured architecture, members of the
   same group either conflict outright or - for MultiArch: same/all - get
   Replaces/Breaks relations that force equal versions. */
bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
   // FIXME: add progress reporting for this operation
   // Do we have different architectures in your groups ?
   vector<string> archs = APT::Configuration::getArchitectures();
   if (archs.size() > 1) {
      // Create Conflicts in between the group
      for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
         string const PkgName = G.Name();   // NOTE(review): currently unused
         for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
            // the "all" pseudo-package itself gets no conflicts
            if (strcmp(P.Arch(),"all") == 0)
               continue;
            pkgCache::PkgIterator allPkg;
            for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
               string const Arch = V.Arch(true);
               map_ptrloc *OldDepLast = NULL;
               /* MultiArch handling introduces a lot of implicit Dependencies:
                  - MultiArch: same → Co-Installable if they have the same version
                  - Architecture: all → Need to be Co-Installable for internal reasons
                  - All others conflict with all other group members */
               bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
                                       V->MultiArch == pkgCache::Version::Same);
               // lazily resolve the "all" member once per package
               if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
                  allPkg = G.FindPkg("all");
               for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
                  if (*A == Arch)
                     continue;
                  /* We allow only one installed arch at the time
                     per group, therefore each group member conflicts
                     with all other group members */
                  pkgCache::PkgIterator D = G.FindPkg(*A);
                  if (D.end() == true)
                     continue;
                  if (coInstall == true) {
                     // Replaces: ${self}:other ( << ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::Replaces,
                                OldDepLast);
                     // Breaks: ${self}:other (!= ${binary:Version})
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Less, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     NewDepends(D, V, V.VerStr(),
                                pkgCache::Dep::Greater, pkgCache::Dep::DpkgBreaks,
                                OldDepLast);
                     if (V->MultiArch == pkgCache::Version::All) {
                        // Depend on ${self}:all which does depend on nothing
                        NewDepends(allPkg, V, V.VerStr(),
                                   pkgCache::Dep::Equals, pkgCache::Dep::Depends,
                                   OldDepLast);
                     }
                  } else {
                     // Conflicts: ${self}:other
                     NewDepends(D, V, "",
                                pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
                                OldDepLast);
                  }
               }
            }
         }
      }
   }
   return true;
}
584 /*}}}*/
585 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
586 // ---------------------------------------------------------------------
587 /* This creates a dependency element in the tree. It is linked to the
588 version and to the package that it is pointing to. */
/* Create a dependency element of the given Type/Op from Ver onto Pkg and
   link it into both the target package's reverse-depends list and the
   parent version's depends list. OldDepLast may point at a cached tail of
   the version's depends list to avoid rescanning it. */
bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
                                   pkgCache::VerIterator &Ver,
                                   string const &Version,
                                   unsigned int const &Op,
                                   unsigned int const &Type,
                                   map_ptrloc *OldDepLast)
{
   // Get a structure
   unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
   if (unlikely(Dependency == 0))
      return false;

   // Fill it in
   pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
   Dep->ParentVer = Ver.Index();
   Dep->Type = Type;
   Dep->CompareOp = Op;
   Dep->ID = Cache.HeaderP->DependsCount++;

   // Probe the reverse dependency list for a version string that matches
   if (Version.empty() == false)
   {
/*      for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
         if (I->Version != 0 && I.TargetVer() == Version)
            Dep->Version = I->Version;*/
      // NOTE(review): with the probe above disabled, Dep->Version is only
      // zero here if Allocate() hands out zero-filled memory - confirm.
      if (Dep->Version == 0)
         if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
            return false;
   }

   // Link it to the package's reverse-depends chain (prepend)
   Dep->Package = Pkg.Index();
   Dep->NextRevDepends = Pkg->RevDepends;
   Pkg->RevDepends = Dep.Index();

   // Do we know where to link the Dependency to?
   if (OldDepLast == NULL)
   {
      OldDepLast = &Ver->DependsList;
      for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
         OldDepLast = &D->NextDepends;
   }

   Dep->NextDepends = *OldDepLast;
   *OldDepLast = Dep.Index();
   // NOTE(review): OldDepLast is passed by value, so this final update is a
   // dead store - callers caching the insertion point never see the new
   // tail. Consider taking the parameter by reference (map_ptrloc *&).
   OldDepLast = &Dep->NextDepends;

   return true;
}
638 /*}}}*/
639 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
640 // ---------------------------------------------------------------------
641 /* This creates a Group and the Package to link this dependency to if
642 needed and handles also the caching of the old endpoint */
643 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
644 const string &PackageName,
645 const string &Arch,
646 const string &Version,
647 unsigned int Op,
648 unsigned int Type)
649 {
650 pkgCache::GrpIterator Grp;
651 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
652 return false;
653
654 // Locate the target package
655 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
656 if (Pkg.end() == true) {
657 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
658 return false;
659 }
660
661 // Is it a file dependency?
662 if (unlikely(PackageName[0] == '/'))
663 FoundFileDeps = true;
664
665 /* Caching the old end point speeds up generation substantially */
666 if (OldDepVer != Ver) {
667 OldDepLast = NULL;
668 OldDepVer = Ver;
669 }
670
671 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
672 }
673 /*}}}*/
674 // ListParser::NewProvides - Create a Provides element /*{{{*/
675 // ---------------------------------------------------------------------
676 /* */
/* Create a Provides element linking Ver (the providing version) to the
   package PkgName/PkgArch it provides, optionally with a provide-version
   string. Self-referencing provides are silently skipped. */
bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
                                                const string &PkgName,
                                                const string &PkgArch,
                                                const string &Version)
{
   pkgCache &Cache = Owner->Cache;

   // We do not add self referencing provides
   if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
      return true;

   // Get a structure
   unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
   if (unlikely(Provides == 0))
      return false;
   Cache.HeaderP->ProvidesCount++;

   // Fill it in, prepending to the version's provides list
   pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
   Prv->Version = Ver.Index();
   Prv->NextPkgProv = Ver->ProvidesList;
   Ver->ProvidesList = Prv.Index();
   if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
      return false;

   // Locate the target package, creating it on demand
   pkgCache::PkgIterator Pkg;
   if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
      return false;

   // Link it into the package's provides list (prepend)
   Prv->ParentPkg = Pkg.Index();
   Prv->NextProvides = Pkg->ProvidesList;
   Pkg->ProvidesList = Prv.Index();

   return true;
}
714 /*}}}*/
715 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
716 // ---------------------------------------------------------------------
717 /* This is used to select which file is to be associated with all newly
718 added versions. The caller is responsible for setting the IMS fields. */
/* Allocate a PkgFile structure for File/Site and make it the file that all
   newly added versions and descriptions are associated with. The caller is
   responsible for setting the IMS fields. */
bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
                                   const pkgIndexFile &Index,
                                   unsigned long Flags)
{
   // Get some space for the structure
   CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
   // Allocate returning 0 would leave CurrentFile equal to the base pointer
   if (CurrentFile == Cache.PkgFileP)
      return false;

   // Fill it in
   CurrentFile->FileName = Map.WriteString(File);
   CurrentFile->Site = WriteUniqString(Site);
   // Prepend to the cache-wide list of package files
   CurrentFile->NextFile = Cache.HeaderP->FileList;
   CurrentFile->Flags = Flags;
   CurrentFile->ID = Cache.HeaderP->PackageFileCount;
   CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
   PkgFileName = File;
   Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
   Cache.HeaderP->PackageFileCount++;

   // WriteString failure is detected via the zero offset
   if (CurrentFile->FileName == 0)
      return false;

   if (Progress != 0)
      Progress->SubProgress(Index.Size());
   return true;
}
746 /*}}}*/
747 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
748 // ---------------------------------------------------------------------
749 /* This is used to create handles to strings. Given the same text it
750 always returns the same number */
/* Return a handle to the string S of length Size such that identical text
   always yields the same offset. The strings are kept in a sorted linked
   list inside the cache; a new entry is spliced in at the right position
   on first use. Returns 0 on allocation failure. */
unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
                                                 unsigned int Size)
{
   /* We use a very small transient hash table here, this speeds up generation
      by a fair amount on slower machines */
   pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
   if (Bucket != 0 &&
       stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
      return Bucket->String;

   // Search the sorted list for an insertion point (or an exact match)
   pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
   int Res = 1;
   map_ptrloc *Last = &Cache.HeaderP->StringList;
   for (; I != Cache.StringItemP; Last = &I->NextItem,
        I = Cache.StringItemP + I->NextItem)
   {
      Res = stringcmp(S,S+Size,Cache.StrP + I->String);
      if (Res >= 0)
         break;
   }

   // Match
   if (Res == 0)
   {
      Bucket = I;   // remember the hit for the next lookup
      return I->String;
   }

   // Get a structure
   unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
   if (Item == 0)
      return 0;

   // Fill in the structure, splicing it in front of I
   pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
   ItemP->NextItem = I - Cache.StringItemP;
   *Last = Item;
   ItemP->String = Map.WriteString(S,Size);
   if (ItemP->String == 0)
      return 0;

   Bucket = ItemP;
   return ItemP->String;
}
796 /*}}}*/
797 // CheckValidity - Check that a cache is up-to-date /*{{{*/
798 // ---------------------------------------------------------------------
799 /* This just verifies that each file in the list of index files exists,
800 has matching attributes with the cache and the cache does not have
801 any extra files. */
/* Check that the cache in CacheFile is up-to-date with respect to the index
   files in [Start,End): every index with packages must exist on disk and be
   found in the cache (with a passing IMS check), and the cache must not
   reference any file that is no longer in the list. On success the
   still-open map of the cache is handed back through OutMap. */
static bool CheckValidity(const string &CacheFile, FileIterator Start,
                          FileIterator End,MMap **OutMap = 0)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // No file, certainly invalid
   if (CacheFile.empty() == true || FileExists(CacheFile) == false)
   {
      if (Debug == true)
         std::clog << "CacheFile doesn't exist" << std::endl;
      return false;
   }

   // Map it
   FileFd CacheF(CacheFile,FileFd::ReadOnly);
   SPtr<MMap> Map = new MMap(CacheF,0);
   pkgCache Cache(Map);
   if (_error->PendingError() == true || Map->Size() == 0)
   {
      if (Debug == true)
         std::clog << "Errors are pending or Map is empty()" << std::endl;
      _error->Discard();
      return false;
   }

   /* Now we check every index file, see if it is in the cache,
      verify the IMS data and check that it is on the disk too.. */
   SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
   memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
   for (; Start != End; Start++)
   {
      if (Debug == true)
         std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
      if ((*Start)->HasPackages() == false)
      {
         if (Debug == true)
            std::clog << "Has NO packages" << std::endl;
         continue;
      }

      if ((*Start)->Exists() == false)
      {
#if 0 // mvo: we no longer give a message here (Default Sources spec)
         _error->WarningE("stat",_("Couldn't stat source package list %s"),
                          (*Start)->Describe().c_str());
#endif
         if (Debug == true)
            std::clog << "file doesn't exist" << std::endl;
         continue;
      }

      // FindInCache is also expected to do an IMS check.
      pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
      if (File.end() == true)
      {
         if (Debug == true)
            std::clog << "FindInCache returned end-Pointer" << std::endl;
         return false;
      }

      Visited[File->ID] = true;
      if (Debug == true)
         std::clog << "with ID " << File->ID << " is valid" << std::endl;
   }

   // Any cached file we did not visit belongs to a removed source
   for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
      if (Visited[I] == false)
      {
         if (Debug == true)
            std::clog << "File with ID" << I << " wasn't visited" << std::endl;
         return false;
      }

   if (_error->PendingError() == true)
   {
      if (Debug == true)
      {
         std::clog << "Validity failed because of pending errors:" << std::endl;
         _error->DumpErrors();
      }
      _error->Discard();
      return false;
   }

   // Hand the still-mapped cache to the caller
   if (OutMap != 0)
      *OutMap = Map.UnGuard();
   return true;
}
889 /*}}}*/
890 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
891 // ---------------------------------------------------------------------
892 /* Size is kind of an abstract notion that is only used for the progress
893 meter */
894 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
895 {
896 unsigned long TotalSize = 0;
897 for (; Start != End; Start++)
898 {
899 if ((*Start)->HasPackages() == false)
900 continue;
901 TotalSize += (*Start)->Size();
902 }
903 return TotalSize;
904 }
905 /*}}}*/
906 // BuildCache - Merge the list of index files into the cache /*{{{*/
907 // ---------------------------------------------------------------------
908 /* */
/* Merge every index file in [Start,End) into the cache via Gen while
   updating the progress meter; then, if any file dependencies were seen, do
   a second pass to collect the file provides. */
static bool BuildCache(pkgCacheGenerator &Gen,
                       OpProgress &Progress,
                       unsigned long &CurrentSize,unsigned long TotalSize,
                       FileIterator Start, FileIterator End)
{
   FileIterator I;
   for (I = Start; I != End; I++)
   {
      if ((*I)->HasPackages() == false)
         continue;

      if ((*I)->Exists() == false)
         continue;

      // Merging a file already in the cache would duplicate its entries
      if ((*I)->FindInCache(Gen.GetCache()).end() == false)
      {
         _error->Warning("Duplicate sources.list entry %s",
                         (*I)->Describe().c_str());
         continue;
      }

      unsigned long Size = (*I)->Size();
      Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
      CurrentSize += Size;

      if ((*I)->Merge(Gen,Progress) == false)
         return false;
   }

   // Second pass: resolve the file dependencies found while merging
   if (Gen.HasFileDeps() == true)
   {
      Progress.Done();
      TotalSize = ComputeSize(Start, End);
      CurrentSize = 0;
      for (I = Start; I != End; I++)
      {
         unsigned long Size = (*I)->Size();
         Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
         CurrentSize += Size;
         if ((*I)->MergeFileProvides(Gen,Progress) == false)
            return false;
      }
   }

   return true;
}
955 /*}}}*/
956 // MakeStatusCache - Construct the status cache /*{{{*/
957 // ---------------------------------------------------------------------
958 /* This makes sure that the status cache (the cache that has all
959 index files from the sources list and all local ones) is ready
960 to be mmaped. If OutMap is not zero then a MMap object representing
the cache will be stored there. This is pretty much mandatory if you
962 are using AllowMem. AllowMem lets the function be run as non-root
963 where it builds the cache 'fast' into a memory buffer. */
bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
			MMap **OutMap,bool AllowMem)
{
   bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
   // Upper bound for the dynamic map the cache is generated into
   unsigned long const MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);

   // Gather every index file from every sources.list entry
   vector<pkgIndexFile *> Files;
   for (vector<metaIndex *>::const_iterator i = List.begin();
        i != List.end();
        i++)
   {
      vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
      for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
	   j != Indexes->end();
	   j++)
         Files.push_back (*j);
   }

   // Everything in Files before this mark came from the sources list;
   // everything appended after it is a local status file
   unsigned long const EndOfSource = Files.size();
   if (_system->AddStatusFiles(Files) == false)
      return false;

   // Decide if we can write to the files..
   string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
   string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");

   // Decide if we can write to the cache
   // (write access is probed on the containing directory, since the cache
   // file itself may not exist yet)
   bool Writeable = false;
   if (CacheFile.empty() == false)
      Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
   else
      if (SrcCacheFile.empty() == false)
	 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
   if (Debug == true)
      std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;

   // Without write access and without permission to build in memory we are stuck
   if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
      return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());

   Progress.OverallProgress(0,1,1,_("Reading package lists"));

   // Cache is OK, Fin.
   if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
   {
      Progress.OverallProgress(1,1,1,_("Reading package lists"));
      if (Debug == true)
	 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
      return true;
   }
   else if (Debug == true)
      std::clog << "pkgcache.bin is NOT valid" << std::endl;

   /* At this point we know we need to reconstruct the package cache,
      begin. */
   // SPtr guards free the map/file automatically on any early return below
   SPtr<FileFd> CacheF;
   SPtr<DynamicMMap> Map;
   if (Writeable == true && CacheFile.empty() == false)
   {
      // Build straight into the on-disk cache file
      unlink(CacheFile.c_str());
      CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
      fchmod(CacheF->Fd(),0644);
      Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
      if (_error->PendingError() == true)
	 return false;
      if (Debug == true)
	 std::clog << "Open filebased MMap" << std::endl;
   }
   else
   {
      // Just build it in memory..
      Map = new DynamicMMap(0,MapSize);
      if (Debug == true)
	 std::clog << "Open memory Map (not filebased)" << std::endl;
   }

   // Lets try the source cache.
   unsigned long CurrentSize = 0;
   unsigned long TotalSize = 0;
   if (CheckValidity(SrcCacheFile,Files.begin(),
		     Files.begin()+EndOfSource) == true)
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
      // Preload the map with the source cache
      FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
      unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
      // RawAllocate returning 0 is only an error if an error is pending
      if ((alloc == 0 && _error->PendingError())
		|| SCacheF.Read((unsigned char *)Map->Data() + alloc,
				SCacheF.Size()) == false)
	 return false;

      TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());

      // Build the status cache on top of the preloaded source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   else
   {
      if (Debug == true)
	 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
      TotalSize = ComputeSize(Files.begin(),Files.end());

      // Build the source cache
      pkgCacheGenerator Gen(Map.Get(),&Progress);
      if (_error->PendingError() == true)
	 return false;
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin(),Files.begin()+EndOfSource) == false)
	 return false;

      // Write it back
      if (Writeable == true && SrcCacheFile.empty() == false)
      {
	 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
	 if (_error->PendingError() == true)
	    return false;

	 fchmod(SCacheF.Fd(),0644);

	 // Write out the main data
	 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 SCacheF.Sync();

	 // Write out the proper header: the on-disk copy is marked clean,
	 // while the in-memory header stays dirty for the ongoing build
	 Gen.GetCache().HeaderP->Dirty = false;
	 if (SCacheF.Seek(0) == false ||
	     SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
	    return _error->Error(_("IO Error saving source cache"));
	 Gen.GetCache().HeaderP->Dirty = true;
	 SCacheF.Sync();
      }

      // Build the status cache
      if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
		     Files.begin()+EndOfSource,Files.end()) == false)
	 return false;

      // FIXME: move me to a better place
      Gen.FinishCache(Progress);
   }
   if (Debug == true)
      std::clog << "Caches are ready for shipping" << std::endl;

   if (_error->PendingError() == true)
      return false;
   if (OutMap != 0)
   {
      if (CacheF != 0)
      {
	 // File-backed build: drop the dynamic map (flushing it) and hand
	 // the caller a plain read map of the finished cache file
	 delete Map.UnGuard();
	 *OutMap = new MMap(*CacheF,0);
      }
      else
      {
	 // Memory build: transfer ownership of the map to the caller
	 *OutMap = Map.UnGuard();
      }
   }

   return true;
}
1133 /*}}}*/
1134 // MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1135 // ---------------------------------------------------------------------
1136 /* */
1137 bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1138 {
1139 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1140 vector<pkgIndexFile *> Files;
1141 unsigned long EndOfSource = Files.size();
1142 if (_system->AddStatusFiles(Files) == false)
1143 return false;
1144
1145 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1146 unsigned long CurrentSize = 0;
1147 unsigned long TotalSize = 0;
1148
1149 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1150
1151 // Build the status cache
1152 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1153 pkgCacheGenerator Gen(Map.Get(),&Progress);
1154 if (_error->PendingError() == true)
1155 return false;
1156 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1157 Files.begin()+EndOfSource,Files.end()) == false)
1158 return false;
1159
1160 // FIXME: move me to a better place
1161 Gen.FinishCache(Progress);
1162
1163 if (_error->PendingError() == true)
1164 return false;
1165 *OutMap = Map.UnGuard();
1166
1167 return true;
1168 }
1169 /*}}}*/