1// -*- mode: cpp; mode: fold -*-
2// Description /*{{{*/
3// $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4/* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12// Include Files /*{{{*/
13#define APT_COMPATIBILITY 986
14
15#include <apt-pkg/pkgcachegen.h>
16#include <apt-pkg/error.h>
17#include <apt-pkg/version.h>
18#include <apt-pkg/progress.h>
19#include <apt-pkg/sourcelist.h>
20#include <apt-pkg/configuration.h>
21#include <apt-pkg/aptconfiguration.h>
22#include <apt-pkg/strutl.h>
23#include <apt-pkg/sptr.h>
24#include <apt-pkg/pkgsystem.h>
25
26#include <apt-pkg/tagfile.h>
27
28#include <apti18n.h>
29
30#include <vector>
31
32#include <sys/stat.h>
33#include <unistd.h>
34#include <errno.h>
35#include <stdio.h>
36#include <system.h>
37 /*}}}*/
38typedef vector<pkgIndexFile *>::iterator FileIterator;
39
40// CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
41// ---------------------------------------------------------------------
42/* We set the dirty flag and make sure that is written to the disk */
43pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
44 Map(*pMap), Cache(pMap,false), Progress(Prog),
45 FoundFileDeps(0)
46{
47 CurrentFile = 0;
48 memset(UniqHash,0,sizeof(UniqHash));
49
50 if (_error->PendingError() == true)
51 return;
52
53 if (Map.Size() == 0)
54 {
55 // Setup the map interface..
56 Cache.HeaderP = (pkgCache::Header *)Map.Data();
57 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
58 return;
59
60 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
61
62 // Starting header
63 *Cache.HeaderP = pkgCache::Header();
64 Cache.HeaderP->VerSysName = Map.WriteString(_system->VS->Label);
65 Cache.HeaderP->Architecture = Map.WriteString(_config->Find("APT::Architecture"));
66 Cache.ReMap();
67 }
68 else
69 {
70 // Map directly from the existing file
71 Cache.ReMap();
72 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
73 if (Cache.VS != _system->VS)
74 {
75 _error->Error(_("Cache has an incompatible versioning system"));
76 return;
77 }
78 }
79
80 Cache.HeaderP->Dirty = true;
81 Map.Sync(0,sizeof(pkgCache::Header));
82}
83 /*}}}*/
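// Illustrative usage sketch (not part of the original file): pairing a
// DynamicMMap with this generator, mirroring pkgMakeOnlyStatusCache() near
// the end of this file. The 24 MB map size is an assumption taken from the
// APT::Cache-Limit default used below; OpProgress is a silent reporter.
#if 0
#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/error.h>
#include <apt-pkg/mmap.h>

static bool BuildCacheInMemory()
{
   OpProgress Prog;                          // base class: reports nothing
   DynamicMMap Map(0, 24*1024*1024);         // anonymous memory, no backing file
   pkgCacheGenerator Gen(&Map, &Prog);       // marks the header Dirty and syncs it
   if (_error->PendingError() == true)
      return false;
   // ... SelectFile()/MergeList() calls for each index file would go here ...
   return true;                              // ~pkgCacheGenerator clears Dirty on success
}
#endif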
84// CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
85// ---------------------------------------------------------------------
86/* We sync the data then unset the dirty flag in two steps so as to
87 avoid a problem during a crash */
88pkgCacheGenerator::~pkgCacheGenerator()
89{
90 if (_error->PendingError() == true)
91 return;
92 if (Map.Sync() == false)
93 return;
94
95 Cache.HeaderP->Dirty = false;
96 Map.Sync(0,sizeof(pkgCache::Header));
97}
98 /*}}}*/
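// Sketch (hypothetical helper, using only calls seen in this file) of the
// crash-safety ordering implemented by the constructor/destructor pair:
// set Dirty and sync the header before touching the payload, and clear it
// only after everything else has reached the disk.
#if 0
static bool RebuildSafely(DynamicMMap &Map, pkgCache::Header *Hdr)
{
   Hdr->Dirty = true;
   Map.Sync(0, sizeof(*Hdr));        // the dirty mark hits the disk first
   // ... write or extend the cache payload here ...
   if (Map.Sync() == false)          // flush payload and header together
      return false;
   Hdr->Dirty = false;
   Map.Sync(0, sizeof(*Hdr));        // only now is the cache trustworthy again
   return true;
}
#endif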
99// CacheGenerator::MergeList - Merge the package list /*{{{*/
100// ---------------------------------------------------------------------
101/* This drives the generation of the entries in the cache. Each pass of the
102 loop processes a single package record from the underlying parse engine. */
103bool pkgCacheGenerator::MergeList(ListParser &List,
104 pkgCache::VerIterator *OutVer)
105{
106 List.Owner = this;
107
108 unsigned int Counter = 0;
109 while (List.Step() == true)
110 {
111 string const PackageName = List.Package();
112 if (PackageName.empty() == true)
113 return false;
114
115 /* Since we handle Arch all packages as architecture-bound,
116 we add all information to every (simulated) arch package */
117 std::vector<string> genArch;
118 if (List.ArchitectureAll() == true)
119 genArch = APT::Configuration::getArchitectures();
120 else
121 genArch.push_back(List.Architecture());
122
123 for (std::vector<string>::const_iterator arch = genArch.begin();
124 arch != genArch.end(); ++arch)
125 {
126 // Get a pointer to the package structure
127 pkgCache::PkgIterator Pkg;
128 if (NewPackage(Pkg, PackageName, *arch) == false)
129 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
130 Counter++;
131 if (Counter % 100 == 0 && Progress != 0)
132 Progress->Progress(List.Offset());
133
134 /* Get a pointer to the version structure. We know the list is sorted
135 so we use that fact in the search. Insertion of new versions is
136 done with correct sorting */
137 string Version = List.Version();
138 if (Version.empty() == true)
139 {
140 // we first process the package, then the descriptions
141 // (this has the bonus that we get an MMap error when we run out
142 // of MMap space)
143 if (List.UsePackage(Pkg,pkgCache::VerIterator(Cache)) == false)
144 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
145 PackageName.c_str());
146
147 // Find the right version to write the description
148 MD5SumValue CurMd5 = List.Description_md5();
149 pkgCache::VerIterator Ver = Pkg.VersionList();
150 map_ptrloc *LastVer = &Pkg->VersionList;
151
152 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
153 {
154 pkgCache::DescIterator Desc = Ver.DescriptionList();
155 map_ptrloc *LastDesc = &Ver->DescriptionList;
156 bool duplicate=false;
157
158 // don't add a new description if we have one for the given
159 // md5 && language
160 for ( ; Desc.end() == false; Desc++)
161 if (MD5SumValue(Desc.md5()) == CurMd5 &&
162 Desc.LanguageCode() == List.DescriptionLanguage())
163 duplicate=true;
164 if(duplicate)
165 continue;
166
167 for (Desc = Ver.DescriptionList();
168 Desc.end() == false;
169 LastDesc = &Desc->NextDesc, Desc++)
170 {
171 if (MD5SumValue(Desc.md5()) == CurMd5)
172 {
173 // Add new description
174 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
175 Desc->ParentPkg = Pkg.Index();
176
177 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
178 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
179 break;
180 }
181 }
182 }
183
184 continue;
185 }
186
187 pkgCache::VerIterator Ver = Pkg.VersionList();
188 map_ptrloc *LastVer = &Pkg->VersionList;
189 int Res = 1;
190 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
191 {
192 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
193 if (Res >= 0)
194 break;
195 }
196
197 /* We already have a version for this item, record that we
198 saw it */
199 unsigned long Hash = List.VersionHash();
200 if (Res == 0 && Ver->Hash == Hash)
201 {
202 if (List.UsePackage(Pkg,Ver) == false)
203 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
204 PackageName.c_str());
205
206 if (NewFileVer(Ver,List) == false)
207 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
208 PackageName.c_str());
209
210 // Read only a single record and return
211 if (OutVer != 0)
212 {
213 *OutVer = Ver;
214 FoundFileDeps |= List.HasFileDeps();
215 return true;
216 }
217
218 continue;
219 }
220
221 // Skip to the end of the same version set.
222 if (Res == 0)
223 {
224 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
225 {
226 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
227 if (Res != 0)
228 break;
229 }
230 }
231
232 // Add a new version
233 *LastVer = NewVersion(Ver,Version,*LastVer);
234 Ver->ParentPkg = Pkg.Index();
235 Ver->Hash = Hash;
236
237 if ((*LastVer == 0 && _error->PendingError()) || List.NewVersion(Ver) == false)
238 return _error->Error(_("Error occurred while processing %s (NewVersion1)"),
239 PackageName.c_str());
240
241 if (List.UsePackage(Pkg,Ver) == false)
242 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
243 PackageName.c_str());
244
245 if (NewFileVer(Ver,List) == false)
246 return _error->Error(_("Error occurred while processing %s (NewVersion2)"),
247 PackageName.c_str());
248
249 // Read only a single record and return
250 if (OutVer != 0)
251 {
252 *OutVer = Ver;
253 FoundFileDeps |= List.HasFileDeps();
254 return true;
255 }
256
257 /* Record the Description data. Description data always exist in
258 Packages and Translation-* files. */
259 pkgCache::DescIterator Desc = Ver.DescriptionList();
260 map_ptrloc *LastDesc = &Ver->DescriptionList;
261
262 // Skip to the end of description set
263 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
264
265 // Add new description
266 *LastDesc = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
267 Desc->ParentPkg = Pkg.Index();
268
269 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
270 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
271 }
272 }
273
274 FoundFileDeps |= List.HasFileDeps();
275
276 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
277 return _error->Error(_("Wow, you exceeded the number of package "
278 "names this APT is capable of."));
279 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
280 return _error->Error(_("Wow, you exceeded the number of versions "
281 "this APT is capable of."));
282 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
283 return _error->Error(_("Wow, you exceeded the number of descriptions "
284 "this APT is capable of."));
285 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
286 return _error->Error(_("Wow, you exceeded the number of dependencies "
287 "this APT is capable of."));
288 return true;
289}
290 /*}}}*/
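// Standalone sketch (hypothetical types, strcmp as a stand-in for
// Cache.VS->CmpVersion) of the insertion pattern used in MergeList() above:
// walk the descending-sorted version chain with a pointer to the previous
// link and splice the new node in front of the first entry that does not
// compare newer.
#if 0
#include <cstring>

struct Node { const char *VerStr; Node *Next; };

static void InsertSorted(Node **Head, Node *New)
{
   Node **Last = Head;
   for (Node *N = *Head; N != 0; Last = &N->Next, N = N->Next)
      if (strcmp(New->VerStr, N->VerStr) >= 0)
         break;                      // insert before the first older-or-equal entry
   New->Next = *Last;
   *Last = New;
}
#endif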
291// CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
292// ---------------------------------------------------------------------
293/* If we found any file depends while parsing the main list we need to
294 resolve them. Since it is undesired to load the entire list of files
295 into the cache as virtual packages we do a two stage effort. MergeList
296 identifies the file depends and this creates Provides for them by
297 re-parsing all the indexes.
298bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
299{
300 List.Owner = this;
301
302 unsigned int Counter = 0;
303 while (List.Step() == true)
304 {
305 string PackageName = List.Package();
306 if (PackageName.empty() == true)
307 return false;
308 string Version = List.Version();
309 if (Version.empty() == true)
310 continue;
311
312 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
313 if (Pkg.end() == true)
314 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
315 PackageName.c_str());
316 Counter++;
317 if (Counter % 100 == 0 && Progress != 0)
318 Progress->Progress(List.Offset());
319
320 unsigned long Hash = List.VersionHash();
321 pkgCache::VerIterator Ver = Pkg.VersionList();
322 for (; Ver.end() == false; Ver++)
323 {
324 if (Ver->Hash == Hash && Version.c_str() == Ver.VerStr())
325 {
326 if (List.CollectFileProvides(Cache,Ver) == false)
327 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
328 break;
329 }
330 }
331
332 if (Ver.end() == true)
333 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
334 }
335
336 return true;
337}
338 /*}}}*/
339// CacheGenerator::NewGroup - Add a new group /*{{{*/
340// ---------------------------------------------------------------------
341/* This creates a new group structure and adds it to the hash table */
342bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name) {
343 Grp = Cache.FindGrp(Name);
344 if (Grp.end() == false)
345 return true;
346
347 // Get a structure
348 unsigned long const Group = Map.Allocate(sizeof(pkgCache::Group));
349 if (unlikely(Group == 0))
350 return false;
351
352 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
353 Grp->Name = Map.WriteString(Name);
354 if (unlikely(Grp->Name == 0))
355 return false;
356
357 // Insert it into the hash table
358 unsigned long const Hash = Cache.Hash(Name);
359 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
360 Cache.HeaderP->GrpHashTable[Hash] = Group;
361
362 Cache.HeaderP->GroupCount++;
363
364 return true;
365}
366 /*}}}*/
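// Sketch (hypothetical, with a simplified hash function) of the bucket
// insertion NewGroup() and NewPackage() perform: the new record is prepended
// to the singly linked chain of its hash bucket, with 0 acting as
// end-of-chain just as in the cache itself.
#if 0
#include <string>

struct Rec { std::string Name; unsigned long Next; };

static unsigned long SimpleHash(const std::string &Name, unsigned long Buckets)
{
   unsigned long H = 0;
   for (std::string::size_type I = 0; I != Name.size(); ++I)
      H = 5*H + (unsigned char)Name[I];
   return H % Buckets;
}

static void InsertRec(Rec *Pool, unsigned long *HashTable, unsigned long Buckets,
                      unsigned long NewIdx, const std::string &Name)
{
   unsigned long const H = SimpleHash(Name, Buckets);
   Pool[NewIdx].Name = Name;
   Pool[NewIdx].Next = HashTable[H];    // old chain head becomes our successor
   HashTable[H] = NewIdx;               // we become the new chain head
}
#endif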
367// CacheGenerator::NewPackage - Add a new package /*{{{*/
368// ---------------------------------------------------------------------
369/* This creates a new package structure and adds it to the hash table */
370bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
371 const string &Arch) {
372 pkgCache::GrpIterator Grp;
373 if (unlikely(NewGroup(Grp, Name) == false))
374 return false;
375
376 Pkg = Grp.FindPkg(Arch);
377 if (Pkg.end() == false)
378 return true;
379
380 // Get a structure
381 unsigned long const Package = Map.Allocate(sizeof(pkgCache::Package));
382 if (unlikely(Package == 0))
383 return false;
384 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
385
386 // Insert it into the hash table
387 unsigned long const Hash = Cache.Hash(Name);
388 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
389 Cache.HeaderP->PkgHashTable[Hash] = Package;
390
391 // remember the packages in the group
392 Grp->FirstPackage = Package;
393 if (Grp->LastPackage == 0)
394 Grp->LastPackage = Package;
395
396 // Set the name, arch and the ID
397 Pkg->Name = Grp->Name;
398 Pkg->Group = Grp.Index();
399 Pkg->Arch = WriteUniqString(Arch.c_str());
400 if (unlikely(Pkg->Arch == 0))
401 return false;
402 Pkg->ID = Cache.HeaderP->PackageCount++;
403
404 return true;
405}
406 /*}}}*/
407// CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
408// ---------------------------------------------------------------------
409/* */
410bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
411 ListParser &List)
412{
413 if (CurrentFile == 0)
414 return true;
415
416 // Get a structure
417 unsigned long VerFile = Map.Allocate(sizeof(pkgCache::VerFile));
418 if (VerFile == 0)
419 return 0;
420
421 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
422 VF->File = CurrentFile - Cache.PkgFileP;
423
424 // Link it to the end of the list
425 map_ptrloc *Last = &Ver->FileList;
426 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
427 Last = &V->NextFile;
428 VF->NextFile = *Last;
429 *Last = VF.Index();
430
431 VF->Offset = List.Offset();
432 VF->Size = List.Size();
433 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
434 Cache.HeaderP->MaxVerFileSize = VF->Size;
435 Cache.HeaderP->VerFileCount++;
436
437 return true;
438}
439 /*}}}*/
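// Sketch (hypothetical) of why records store base-relative indices such as
// "CurrentFile - Cache.PkgFileP" rather than raw pointers: the whole cache
// lives in one mmap'd region that may move when it grows, so only offsets
// from the current base survive a ReMap().
#if 0
struct PkgFile { unsigned long FileName; /* ... */ };

static unsigned long ToIndex(PkgFile const *Base, PkgFile const *P)
{
   return P - Base;                  // safe to store inside the cache
}

static PkgFile *FromIndex(PkgFile *Base, unsigned long Idx)
{
   return Base + Idx;                // recomputed against the current base
}
#endif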
440// CacheGenerator::NewVersion - Create a new Version /*{{{*/
441// ---------------------------------------------------------------------
442/* This puts a version structure in the linked list */
443unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
444 const string &VerStr,
445 unsigned long Next)
446{
447 // Get a structure
448 unsigned long Version = Map.Allocate(sizeof(pkgCache::Version));
449 if (Version == 0)
450 return 0;
451
452 // Fill it in
453 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
454 Ver->NextVer = Next;
455 Ver->ID = Cache.HeaderP->VersionCount++;
456 Ver->VerStr = Map.WriteString(VerStr);
457 if (Ver->VerStr == 0)
458 return 0;
459
460 return Version;
461}
462 /*}}}*/
463// CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
464// ---------------------------------------------------------------------
465/* */
466bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
467 ListParser &List)
468{
469 if (CurrentFile == 0)
470 return true;
471
472 // Get a structure
473 unsigned long DescFile = Map.Allocate(sizeof(pkgCache::DescFile));
474 if (DescFile == 0)
475 return false;
476
477 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
478 DF->File = CurrentFile - Cache.PkgFileP;
479
480 // Link it to the end of the list
481 map_ptrloc *Last = &Desc->FileList;
482 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
483 Last = &D->NextFile;
484
485 DF->NextFile = *Last;
486 *Last = DF.Index();
487
488 DF->Offset = List.Offset();
489 DF->Size = List.Size();
490 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
491 Cache.HeaderP->MaxDescFileSize = DF->Size;
492 Cache.HeaderP->DescFileCount++;
493
494 return true;
495}
496 /*}}}*/
497// CacheGenerator::NewDescription - Create a new Description /*{{{*/
498// ---------------------------------------------------------------------
499/* This puts a description structure in the linked list */
500map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
501 const string &Lang,
502 const MD5SumValue &md5sum,
503 map_ptrloc Next)
504{
505 // Get a structure
506 map_ptrloc Description = Map.Allocate(sizeof(pkgCache::Description));
507 if (Description == 0)
508 return 0;
509
510 // Fill it in
511 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
512 Desc->NextDesc = Next;
513 Desc->ID = Cache.HeaderP->DescriptionCount++;
514 Desc->language_code = Map.WriteString(Lang);
515 Desc->md5sum = Map.WriteString(md5sum.Value());
516 if (Desc->language_code == 0 || Desc->md5sum == 0)
517 return 0;
518
519 return Description;
520}
521 /*}}}*/
522// CacheGenerator::FinishCache - do various finish operations /*{{{*/
523// ---------------------------------------------------------------------
524/* This prepares the Cache for delivery */
525bool pkgCacheGenerator::FinishCache(OpProgress &Progress) {
526 // FIXME: add progress reporting for this operation
527 // Do we have different architectures in our groups?
528 vector<string> archs = APT::Configuration::getArchitectures();
529 if (archs.size() > 1) {
530 // Create Conflicts in between the group
531 for (pkgCache::GrpIterator G = GetCache().GrpBegin(); G.end() != true; G++) {
532 string const PkgName = G.Name();
533 for (pkgCache::PkgIterator P = G.PackageList(); P.end() != true; P = G.NextPkg(P)) {
534 for (pkgCache::VerIterator V = P.VersionList(); V.end() != true; V++) {
535 // Arch all packages are "co-installable"
536 if (V->MultiArch == pkgCache::Version::All)
537 continue;
538 string const Arch = V.Arch();
539 map_ptrloc *OldDepLast = NULL;
540 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A) {
541 if (*A == Arch)
542 continue;
543 /* We allow only one installed arch at a time
544 per group, therefore each group member conflicts
545 with all other group members */
546 pkgCache::PkgIterator D = G.FindPkg(*A);
547 if (D.end() == true)
548 continue;
549 // Conflicts: ${self}:other
550 NewDepends(D, V, "",
551 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
552 OldDepLast);
553 }
554 }
555 }
556 }
557 }
558 return true;
559}
560 /*}}}*/
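// Standalone sketch (hypothetical data, pre-MultiArch rule) of what
// FinishCache() encodes above: every version that is not "Arch: all"
// conflicts with the same package name on every other configured
// architecture, so only one architecture of a package can be installed.
#if 0
#include <string>
#include <vector>
#include <iostream>

int main()
{
   std::vector<std::string> archs;
   archs.push_back("amd64");
   archs.push_back("i386");
   std::string const pkg = "libfoo";

   for (std::vector<std::string>::const_iterator self = archs.begin();
        self != archs.end(); ++self)
      for (std::vector<std::string>::const_iterator other = archs.begin();
           other != archs.end(); ++other)
         if (*self != *other)
            std::cout << pkg << ":" << *self << " Conflicts: "
                      << pkg << ":" << *other << std::endl;
   return 0;
}
#endif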
561// CacheGenerator::NewDepends - Create a dependency element /*{{{*/
562// ---------------------------------------------------------------------
563/* This creates a dependency element in the tree. It is linked to the
564 version and to the package that it is pointing to. */
565bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
566 pkgCache::VerIterator &Ver,
567 string const &Version,
568 unsigned int const &Op,
569 unsigned int const &Type,
570 map_ptrloc *OldDepLast)
571{
572 // Get a structure
573 unsigned long const Dependency = Map.Allocate(sizeof(pkgCache::Dependency));
574 if (unlikely(Dependency == 0))
575 return false;
576
577 // Fill it in
578 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
579 Dep->ParentVer = Ver.Index();
580 Dep->Type = Type;
581 Dep->CompareOp = Op;
582 Dep->ID = Cache.HeaderP->DependsCount++;
583
584 // Probe the reverse dependency list for a version string that matches
585 if (Version.empty() == false)
586 {
587/* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
588 if (I->Version != 0 && I.TargetVer() == Version)
589 Dep->Version = I->Version;*/
590 if (Dep->Version == 0)
591 if (unlikely((Dep->Version = Map.WriteString(Version)) == 0))
592 return false;
593 }
594
595 // Link it to the package
596 Dep->Package = Pkg.Index();
597 Dep->NextRevDepends = Pkg->RevDepends;
598 Pkg->RevDepends = Dep.Index();
599
600 // Do we know where to link the Dependency to?
601 if (OldDepLast == NULL)
602 {
603 OldDepLast = &Ver->DependsList;
604 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
605 OldDepLast = &D->NextDepends;
606 }
607
608 Dep->NextDepends = *OldDepLast;
609 *OldDepLast = Dep.Index();
610 OldDepLast = &Dep->NextDepends;
611
612 return true;
613}
614 /*}}}*/
615// ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
616// ---------------------------------------------------------------------
617/* This creates a Group and the Package to link this dependency to if
618 needed and also handles caching of the old endpoint */
619bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator Ver,
620 const string &PackageName,
621 const string &Arch,
622 const string &Version,
623 unsigned int Op,
624 unsigned int Type)
625{
626 pkgCache::GrpIterator Grp;
627 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
628 return false;
629
630 // Locate the target package
631 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
632 if (Pkg.end() == true) {
633 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
634 return false;
635 }
636
637 // Is it a file dependency?
638 if (unlikely(PackageName[0] == '/'))
639 FoundFileDeps = true;
640
641 /* Caching the old end point speeds up generation substantially */
642 if (OldDepVer != Ver) {
643 OldDepLast = NULL;
644 OldDepVer = Ver;
645 }
646
647 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
648}
649 /*}}}*/
650// ListParser::NewProvides - Create a Provides element /*{{{*/
651// ---------------------------------------------------------------------
652/* */
653bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator Ver,
654 const string &PackageName,
655 const string &Version)
656{
657 pkgCache &Cache = Owner->Cache;
658
659 // We do not add self referencing provides
660 if (unlikely(Ver.ParentPkg().Name() == PackageName))
661 return true;
662
663 // Get a structure
664 unsigned long const Provides = Owner->Map.Allocate(sizeof(pkgCache::Provides));
665 if (unlikely(Provides == 0))
666 return false;
667 Cache.HeaderP->ProvidesCount++;
668
669 // Fill it in
670 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
671 Prv->Version = Ver.Index();
672 Prv->NextPkgProv = Ver->ProvidesList;
673 Ver->ProvidesList = Prv.Index();
674 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
675 return false;
676
677 // Locate the target package
678 pkgCache::PkgIterator Pkg;
679 if (unlikely(Owner->NewPackage(Pkg,PackageName,string(Ver.Arch())) == false))
680 return false;
681
682 // Link it to the package
683 Prv->ParentPkg = Pkg.Index();
684 Prv->NextProvides = Pkg->ProvidesList;
685 Pkg->ProvidesList = Prv.Index();
686
687 return true;
688}
689 /*}}}*/
690// CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
691// ---------------------------------------------------------------------
692/* This is used to select which file is to be associated with all newly
693 added versions. The caller is responsible for setting the IMS fields. */
694bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
695 const pkgIndexFile &Index,
696 unsigned long Flags)
697{
698 // Get some space for the structure
699 CurrentFile = Cache.PkgFileP + Map.Allocate(sizeof(*CurrentFile));
700 if (CurrentFile == Cache.PkgFileP)
701 return false;
702
703 // Fill it in
704 CurrentFile->FileName = Map.WriteString(File);
705 CurrentFile->Site = WriteUniqString(Site);
706 CurrentFile->NextFile = Cache.HeaderP->FileList;
707 CurrentFile->Flags = Flags;
708 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
709 CurrentFile->IndexType = WriteUniqString(Index.GetType()->Label);
710 PkgFileName = File;
711 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
712 Cache.HeaderP->PackageFileCount++;
713
714 if (CurrentFile->FileName == 0)
715 return false;
716
717 if (Progress != 0)
718 Progress->SubProgress(Index.Size());
719 return true;
720}
721 /*}}}*/
722// CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
723// ---------------------------------------------------------------------
724/* This is used to create handles to strings. Given the same text it
725 always returns the same number */
726unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
727 unsigned int Size)
728{
729 /* We use a very small transient hash table here, this speeds up generation
730 by a fair amount on slower machines */
731 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
732 if (Bucket != 0 &&
733 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
734 return Bucket->String;
735
736 // Search for an insertion point
737 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
738 int Res = 1;
739 map_ptrloc *Last = &Cache.HeaderP->StringList;
740 for (; I != Cache.StringItemP; Last = &I->NextItem,
741 I = Cache.StringItemP + I->NextItem)
742 {
743 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
744 if (Res >= 0)
745 break;
746 }
747
748 // Match
749 if (Res == 0)
750 {
751 Bucket = I;
752 return I->String;
753 }
754
755 // Get a structure
756 unsigned long Item = Map.Allocate(sizeof(pkgCache::StringItem));
757 if (Item == 0)
758 return 0;
759
760 // Fill in the structure
761 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
762 ItemP->NextItem = I - Cache.StringItemP;
763 *Last = Item;
764 ItemP->String = Map.WriteString(S,Size);
765 if (ItemP->String == 0)
766 return 0;
767
768 Bucket = ItemP;
769 return ItemP->String;
770}
771 /*}}}*/
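// Sketch (hypothetical stand-ins: a std::set plays the role of the sorted
// string list) of the transient cache in front of WriteUniqString(): a small
// bucket array keyed on the first two characters remembers the last string
// interned per bucket and short-circuits the full search when the same
// string is written repeatedly. Assumes S is a non-empty C string.
#if 0
#include <set>
#include <string>

static std::set<std::string> Pool;                  // slow, authoritative store
static const std::string *Buckets[1024] = { 0 };    // fast transient cache

static const std::string &Intern(const char *S)
{
   const std::string *&Slot =
      Buckets[((unsigned char)S[0]*5 + (unsigned char)S[1]) % 1024];
   if (Slot != 0 && *Slot == S)
      return *Slot;                                 // bucket hit, no search
   Slot = &*Pool.insert(std::string(S)).first;      // miss: do the real lookup
   return *Slot;
}
#endif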
772// CheckValidity - Check that a cache is up-to-date /*{{{*/
773// ---------------------------------------------------------------------
774/* This just verifies that each file in the list of index files exists,
775 has attributes matching the cache, and that the cache does not contain
776 any extra files. */
777static bool CheckValidity(const string &CacheFile, FileIterator Start,
778 FileIterator End,MMap **OutMap = 0)
779{
780 // No file, certainly invalid
781 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
782 return false;
783
784 // Map it
785 FileFd CacheF(CacheFile,FileFd::ReadOnly);
786 SPtr<MMap> Map = new MMap(CacheF,0);
787 pkgCache Cache(Map);
788 if (_error->PendingError() == true || Map->Size() == 0)
789 {
790 _error->Discard();
791 return false;
792 }
793
794 /* Now we check every index file, see if it is in the cache,
795 verify the IMS data and check that it is on the disk too.. */
796 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
797 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
798 for (; Start != End; Start++)
799 {
800 if ((*Start)->HasPackages() == false)
801 continue;
802
803 if ((*Start)->Exists() == false)
804 {
805#if 0 // mvo: we no longer give a message here (Default Sources spec)
806 _error->WarningE("stat",_("Couldn't stat source package list %s"),
807 (*Start)->Describe().c_str());
808#endif
809 continue;
810 }
811
812 // FindInCache is also expected to do an IMS check.
813 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
814 if (File.end() == true)
815 return false;
816
817 Visited[File->ID] = true;
818 }
819
820 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
821 if (Visited[I] == false)
822 return false;
823
824 if (_error->PendingError() == true)
825 {
826 _error->Discard();
827 return false;
828 }
829
830 if (OutMap != 0)
831 *OutMap = Map.UnGuard();
832 return true;
833}
834 /*}}}*/
835// ComputeSize - Compute the total size of a bunch of files /*{{{*/
836// ---------------------------------------------------------------------
837/* Size is kind of an abstract notion that is only used for the progress
838 meter */
839static unsigned long ComputeSize(FileIterator Start,FileIterator End)
840{
841 unsigned long TotalSize = 0;
842 for (; Start != End; Start++)
843 {
844 if ((*Start)->HasPackages() == false)
845 continue;
846 TotalSize += (*Start)->Size();
847 }
848 return TotalSize;
849}
850 /*}}}*/
851// BuildCache - Merge the list of index files into the cache /*{{{*/
852// ---------------------------------------------------------------------
853/* */
854static bool BuildCache(pkgCacheGenerator &Gen,
855 OpProgress &Progress,
856 unsigned long &CurrentSize,unsigned long TotalSize,
857 FileIterator Start, FileIterator End)
858{
859 FileIterator I;
860 for (I = Start; I != End; I++)
861 {
862 if ((*I)->HasPackages() == false)
863 continue;
864
865 if ((*I)->Exists() == false)
866 continue;
867
868 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
869 {
870 _error->Warning("Duplicate sources.list entry %s",
871 (*I)->Describe().c_str());
872 continue;
873 }
874
875 unsigned long Size = (*I)->Size();
876 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
877 CurrentSize += Size;
878
879 if ((*I)->Merge(Gen,Progress) == false)
880 return false;
881 }
882
883 if (Gen.HasFileDeps() == true)
884 {
885 Progress.Done();
886 TotalSize = ComputeSize(Start, End);
887 CurrentSize = 0;
888 for (I = Start; I != End; I++)
889 {
890 unsigned long Size = (*I)->Size();
891 Progress.OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
892 CurrentSize += Size;
893 if ((*I)->MergeFileProvides(Gen,Progress) == false)
894 return false;
895 }
896 }
897
898 return true;
899}
900 /*}}}*/
901// MakeStatusCache - Construct the status cache /*{{{*/
902// ---------------------------------------------------------------------
903/* This makes sure that the status cache (the cache that has all
904 index files from the sources list and all local ones) is ready
905 to be mmapped. If OutMap is not zero then an MMap object representing
906 the cache will be stored there. This is pretty much mandatory if you
907 are using AllowMem. AllowMem lets the function be run as non-root
908 where it builds the cache 'fast' into a memory buffer. */
909bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
910 MMap **OutMap,bool AllowMem)
911{
912 unsigned long MapSize = _config->FindI("APT::Cache-Limit",24*1024*1024);
913
914 vector<pkgIndexFile *> Files;
915 for (vector<metaIndex *>::const_iterator i = List.begin();
916 i != List.end();
917 i++)
918 {
919 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
920 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
921 j != Indexes->end();
922 j++)
923 Files.push_back (*j);
924 }
925
926 unsigned long EndOfSource = Files.size();
927 if (_system->AddStatusFiles(Files) == false)
928 return false;
929
930 // Decide if we can write to the files..
931 string CacheFile = _config->FindFile("Dir::Cache::pkgcache");
932 string SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
933
934 // Decide if we can write to the cache
935 bool Writeable = false;
936 if (CacheFile.empty() == false)
937 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
938 else
939 if (SrcCacheFile.empty() == false)
940 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
941
942 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
943 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
944
945 Progress.OverallProgress(0,1,1,_("Reading package lists"));
946
947 // Cache is OK, Fin.
948 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
949 {
950 Progress.OverallProgress(1,1,1,_("Reading package lists"));
951 return true;
952 }
953
954 /* At this point we know we need to reconstruct the package cache,
955 begin. */
956 SPtr<FileFd> CacheF;
957 SPtr<DynamicMMap> Map;
958 if (Writeable == true && CacheFile.empty() == false)
959 {
960 unlink(CacheFile.c_str());
961 CacheF = new FileFd(CacheFile,FileFd::WriteEmpty);
962 fchmod(CacheF->Fd(),0644);
963 Map = new DynamicMMap(*CacheF,MMap::Public,MapSize);
964 if (_error->PendingError() == true)
965 return false;
966 }
967 else
968 {
969 // Just build it in memory..
970 Map = new DynamicMMap(0,MapSize);
971 }
972
973 // Let's try the source cache.
974 unsigned long CurrentSize = 0;
975 unsigned long TotalSize = 0;
976 if (CheckValidity(SrcCacheFile,Files.begin(),
977 Files.begin()+EndOfSource) == true)
978 {
979 // Preload the map with the source cache
980 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
981 unsigned long alloc = Map->RawAllocate(SCacheF.Size());
982 if ((alloc == 0 && _error->PendingError())
983 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
984 SCacheF.Size()) == false)
985 return false;
986
987 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
988
989 // Build the status cache
990 pkgCacheGenerator Gen(Map.Get(),&Progress);
991 if (_error->PendingError() == true)
992 return false;
993 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
994 Files.begin()+EndOfSource,Files.end()) == false)
995 return false;
996
997 // FIXME: move me to a better place
998 Gen.FinishCache(Progress);
999 }
1000 else
1001 {
1002 TotalSize = ComputeSize(Files.begin(),Files.end());
1003
1004 // Build the source cache
1005 pkgCacheGenerator Gen(Map.Get(),&Progress);
1006 if (_error->PendingError() == true)
1007 return false;
1008 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1009 Files.begin(),Files.begin()+EndOfSource) == false)
1010 return false;
1011
1012 // Write it back
1013 if (Writeable == true && SrcCacheFile.empty() == false)
1014 {
1015 FileFd SCacheF(SrcCacheFile,FileFd::WriteEmpty);
1016 if (_error->PendingError() == true)
1017 return false;
1018
1019 fchmod(SCacheF.Fd(),0644);
1020
1021 // Write out the main data
1022 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1023 return _error->Error(_("IO Error saving source cache"));
1024 SCacheF.Sync();
1025
1026 // Write out the proper header
1027 Gen.GetCache().HeaderP->Dirty = false;
1028 if (SCacheF.Seek(0) == false ||
1029 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1030 return _error->Error(_("IO Error saving source cache"));
1031 Gen.GetCache().HeaderP->Dirty = true;
1032 SCacheF.Sync();
1033 }
1034
1035 // Build the status cache
1036 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1037 Files.begin()+EndOfSource,Files.end()) == false)
1038 return false;
1039
1040 // FIXME: move me to a better place
1041 Gen.FinishCache(Progress);
1042 }
1043
1044 if (_error->PendingError() == true)
1045 return false;
1046 if (OutMap != 0)
1047 {
1048 if (CacheF != 0)
1049 {
1050 delete Map.UnGuard();
1051 *OutMap = new MMap(*CacheF,0);
1052 }
1053 else
1054 {
1055 *OutMap = Map.UnGuard();
1056 }
1057 }
1058
1059 return true;
1060}
1061 /*}}}*/
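// Usage sketch (assumes an initialized APT configuration/system and that
// pkgSourceList::ReadMainList() is available in this tree): how a caller
// obtains the status cache built or validated by pkgMakeStatusCache().
#if 0
#include <apt-pkg/sourcelist.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/error.h>

static MMap *OpenStatusCache()
{
   pkgSourceList List;
   if (List.ReadMainList() == false)
      return 0;

   OpProgress Prog;                  // silent progress reporter
   MMap *Map = 0;
   if (pkgMakeStatusCache(List, Prog, &Map, true) == false)
   {
      _error->DumpErrors();
      return 0;
   }
   return Map;                       // caller owns the returned MMap
}
#endif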
1062// MakeOnlyStatusCache - Build a cache with just the status files /*{{{*/
1063// ---------------------------------------------------------------------
1064/* */
1065bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1066{
1067 unsigned long MapSize = _config->FindI("APT::Cache-Limit",20*1024*1024);
1068 vector<pkgIndexFile *> Files;
1069 unsigned long EndOfSource = Files.size();
1070 if (_system->AddStatusFiles(Files) == false)
1071 return false;
1072
1073 SPtr<DynamicMMap> Map = new DynamicMMap(0,MapSize);
1074 unsigned long CurrentSize = 0;
1075 unsigned long TotalSize = 0;
1076
1077 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1078
1079 // Build the status cache
1080 Progress.OverallProgress(0,1,1,_("Reading package lists"));
1081 pkgCacheGenerator Gen(Map.Get(),&Progress);
1082 if (_error->PendingError() == true)
1083 return false;
1084 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1085 Files.begin()+EndOfSource,Files.end()) == false)
1086 return false;
1087
1088 // FIXME: move me to a better place
1089 Gen.FinishCache(Progress);
1090
1091 if (_error->PendingError() == true)
1092 return false;
1093 *OutMap = Map.UnGuard();
1094
1095 return true;
1096}
1097 /*}}}*/