1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39 template <typename Iter> std::vector<Iter*> pkgCacheGenerator::Dynamic<Iter>::toReMap;
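// Dynamic<Iter> registers the given iterator in toReMap for as long as it
// is alive, so that pkgCacheGenerator::ReMap() can rebase it whenever the
// underlying DynamicMMap has to move.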
40
41 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
42 // ---------------------------------------------------------------------
43 /* We set the dirty flag and make sure that it is written to disk */
44 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
45 Map(*pMap), Cache(pMap,false), Progress(Prog),
46 FoundFileDeps(0)
47 {
48 CurrentFile = 0;
49 memset(UniqHash,0,sizeof(UniqHash));
50
51 if (_error->PendingError() == true)
52 return;
53
54 if (Map.Size() == 0)
55 {
56 // Setup the map interface..
57 Cache.HeaderP = (pkgCache::Header *)Map.Data();
58 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
59 return;
60
61 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
62
63 // Starting header
64 *Cache.HeaderP = pkgCache::Header();
65 map_ptrloc const idxVerSysName = WriteStringInMap(_system->VS->Label);
66 Cache.HeaderP->VerSysName = idxVerSysName;
67 map_ptrloc const idxArchitecture = WriteStringInMap(_config->Find("APT::Architecture"));
68 Cache.HeaderP->Architecture = idxArchitecture;
69 if (unlikely(idxVerSysName == 0 || idxArchitecture == 0))
70 return;
71 Cache.ReMap();
72 }
73 else
74 {
75 // Map directly from the existing file
76 Cache.ReMap();
77 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
78 if (Cache.VS != _system->VS)
79 {
80 _error->Error(_("Cache has an incompatible versioning system"));
81 return;
82 }
83 }
84
85 Cache.HeaderP->Dirty = true;
86 Map.Sync(0,sizeof(pkgCache::Header));
87 }
88 /*}}}*/
89 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
90 // ---------------------------------------------------------------------
91 /* We sync the data then unset the dirty flag in two steps so as to
92 avoid a problem during a crash */
93 pkgCacheGenerator::~pkgCacheGenerator()
94 {
95 if (_error->PendingError() == true)
96 return;
97 if (Map.Sync() == false)
98 return;
99
100 Cache.HeaderP->Dirty = false;
101 Map.Sync(0,sizeof(pkgCache::Header));
102 }
103 /*}}}*/
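// CacheGenerator::ReMap - Rebase pointers after the mmap has moved
// ---------------------------------------------------------------------
/* Growing the DynamicMMap may relocate the whole cache in memory. Every raw
   pointer the generator still holds into the old mapping, as well as every
   iterator registered through Dynamic<>, is shifted by the distance between
   the old and the new base address. */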
104 void pkgCacheGenerator::ReMap(void const * const oldMap, void const * const newMap) {/*{{{*/
105 if (oldMap == newMap)
106 return;
107
108 Cache.ReMap(false);
109
110 CurrentFile += (pkgCache::PackageFile*) newMap - (pkgCache::PackageFile*) oldMap;
111
112 for (size_t i = 0; i < _count(UniqHash); ++i)
113 if (UniqHash[i] != 0)
114 UniqHash[i] += (pkgCache::StringItem*) newMap - (pkgCache::StringItem*) oldMap;
115
116 for (std::vector<pkgCache::GrpIterator*>::const_iterator i = Dynamic<pkgCache::GrpIterator>::toReMap.begin();
117 i != Dynamic<pkgCache::GrpIterator>::toReMap.end(); ++i)
118 (*i)->ReMap(oldMap, newMap);
119 for (std::vector<pkgCache::PkgIterator*>::const_iterator i = Dynamic<pkgCache::PkgIterator>::toReMap.begin();
120 i != Dynamic<pkgCache::PkgIterator>::toReMap.end(); ++i)
121 (*i)->ReMap(oldMap, newMap);
122 for (std::vector<pkgCache::VerIterator*>::const_iterator i = Dynamic<pkgCache::VerIterator>::toReMap.begin();
123 i != Dynamic<pkgCache::VerIterator>::toReMap.end(); ++i)
124 (*i)->ReMap(oldMap, newMap);
125 for (std::vector<pkgCache::DepIterator*>::const_iterator i = Dynamic<pkgCache::DepIterator>::toReMap.begin();
126 i != Dynamic<pkgCache::DepIterator>::toReMap.end(); ++i)
127 (*i)->ReMap(oldMap, newMap);
128 for (std::vector<pkgCache::DescIterator*>::const_iterator i = Dynamic<pkgCache::DescIterator>::toReMap.begin();
129 i != Dynamic<pkgCache::DescIterator>::toReMap.end(); ++i)
130 (*i)->ReMap(oldMap, newMap);
131 for (std::vector<pkgCache::PrvIterator*>::const_iterator i = Dynamic<pkgCache::PrvIterator>::toReMap.begin();
132 i != Dynamic<pkgCache::PrvIterator>::toReMap.end(); ++i)
133 (*i)->ReMap(oldMap, newMap);
134 for (std::vector<pkgCache::PkgFileIterator*>::const_iterator i = Dynamic<pkgCache::PkgFileIterator>::toReMap.begin();
135 i != Dynamic<pkgCache::PkgFileIterator>::toReMap.end(); ++i)
136 (*i)->ReMap(oldMap, newMap);
137 } /*}}}*/
138 // CacheGenerator::WriteStringInMap /*{{{*/
139 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String,
140 const unsigned long &Len) {
141 void const * const oldMap = Map.Data();
142 map_ptrloc const index = Map.WriteString(String, Len);
143 if (index != 0)
144 ReMap(oldMap, Map.Data());
145 return index;
146 }
147 /*}}}*/
148 // CacheGenerator::WriteStringInMap /*{{{*/
149 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String) {
150 void const * const oldMap = Map.Data();
151 map_ptrloc const index = Map.WriteString(String);
152 if (index != 0)
153 ReMap(oldMap, Map.Data());
154 return index;
155 }
156 /*}}}*/
157 map_ptrloc pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
158 void const * const oldMap = Map.Data();
159 map_ptrloc const index = Map.Allocate(size);
160 if (index != 0)
161 ReMap(oldMap, Map.Data());
162 return index;
163 }
164 /*}}}*/
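// Note for callers: each of the helpers above can grow (and thereby move)
// the mmap. A raw pointer taken before such a call therefore has to be
// rebased afterwards, following the pattern used in MergeList below
// (Str and Last stand for the caller's own variables):
//    void const * const oldMap = Map.Data();
//    map_ptrloc const idx = WriteStringInMap(Str);
//    if (oldMap != Map.Data())
//       Last += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;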
165 // CacheGenerator::MergeList - Merge the package list /*{{{*/
166 // ---------------------------------------------------------------------
167 /* This generates the entries in the cache. Each loop iteration
168 processes a single package record from the underlying parse engine. */
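/* Records without a version (typically Translation-* entries) only attach
   additional descriptions to versions that are already known; records with a
   version are merged into the package's sorted version list. */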
169 bool pkgCacheGenerator::MergeList(ListParser &List,
170 pkgCache::VerIterator *OutVer)
171 {
172 List.Owner = this;
173
174 unsigned int Counter = 0;
175 while (List.Step() == true)
176 {
177 string const PackageName = List.Package();
178 if (PackageName.empty() == true)
179 return false;
180
181 string const Arch = List.Architecture();
182
183 // Get a pointer to the package structure
184 pkgCache::PkgIterator Pkg;
185 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
186 if (NewPackage(Pkg, PackageName, Arch) == false)
187 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
188 Counter++;
189 if (Counter % 100 == 0 && Progress != 0)
190 Progress->Progress(List.Offset());
191
192 /* Get a pointer to the version structure. We know the list is sorted
193 so we use that fact in the search. Insertion of new versions is
194 done with correct sorting */
195 string Version = List.Version();
196 if (Version.empty() == true)
197 {
198 // we first process the package, then the descriptions
199 // (this has the bonus that we get an MMap error when we run out
200 // of MMap space)
201 pkgCache::VerIterator Ver(Cache);
202 Dynamic<pkgCache::VerIterator> DynVer(Ver);
203 if (List.UsePackage(Pkg, Ver) == false)
204 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
205 PackageName.c_str());
206
207 // Find the right version to write the description
208 MD5SumValue CurMd5 = List.Description_md5();
209 Ver = Pkg.VersionList();
210
211 for (; Ver.end() == false; ++Ver)
212 {
213 pkgCache::DescIterator Desc = Ver.DescriptionList();
214 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
215 map_ptrloc *LastDesc = &Ver->DescriptionList;
216 bool duplicate=false;
217
218 // don't add a new description if we have one for the given
219 // md5 && language
220 for ( ; Desc.end() == false; Desc++)
221 if (MD5SumValue(Desc.md5()) == CurMd5 &&
222 Desc.LanguageCode() == List.DescriptionLanguage())
223 duplicate=true;
224 if(duplicate)
225 continue;
226
227 for (Desc = Ver.DescriptionList();
228 Desc.end() == false;
229 LastDesc = &Desc->NextDesc, Desc++)
230 {
231 if (MD5SumValue(Desc.md5()) == CurMd5)
232 {
233 // Add new description
234 void const * const oldMap = Map.Data();
235 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
236 if (oldMap != Map.Data())
237 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
238 *LastDesc = descindex;
239 Desc->ParentPkg = Pkg.Index();
240
241 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
242 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
243 break;
244 }
245 }
246 }
247
248 continue;
249 }
250
251 pkgCache::VerIterator Ver = Pkg.VersionList();
252 Dynamic<pkgCache::VerIterator> DynVer(Ver);
253 map_ptrloc *LastVer = &Pkg->VersionList;
254 void const * oldMap = Map.Data();
255 int Res = 1;
256 unsigned long const Hash = List.VersionHash();
257 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
258 {
259 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
260 // Version is higher than the current version - insert here
261 if (Res > 0)
262 break;
263 // Version strings are equal - is the hash also equal?
264 if (Res == 0 && Ver->Hash == Hash)
265 break;
266 // proceed to the next one until we either find the right one
267 // or hit an older version (which will sort lower)
268 }
269
270 /* We already have a version for this item, record that we saw it */
271 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
272 {
273 if (List.UsePackage(Pkg,Ver) == false)
274 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
275 PackageName.c_str());
276
277 if (NewFileVer(Ver,List) == false)
278 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
279 PackageName.c_str());
280
281 // Read only a single record and return
282 if (OutVer != 0)
283 {
284 *OutVer = Ver;
285 FoundFileDeps |= List.HasFileDeps();
286 return true;
287 }
288
289 continue;
290 }
291
292 // Add a new version
293 map_ptrloc const verindex = NewVersion(Ver,Version,*LastVer);
294 if (verindex == 0 && _error->PendingError())
295 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
296 PackageName.c_str(), 1);
297
298 if (oldMap != Map.Data())
299 LastVer += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
300 *LastVer = verindex;
301 Ver->ParentPkg = Pkg.Index();
302 Ver->Hash = Hash;
303
304 if (List.NewVersion(Ver) == false)
305 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
306 PackageName.c_str(), 2);
307
308 if (List.UsePackage(Pkg,Ver) == false)
309 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
310 PackageName.c_str());
311
312 if (NewFileVer(Ver,List) == false)
313 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
314 PackageName.c_str(), 3);
315
316 // Read only a single record and return
317 if (OutVer != 0)
318 {
319 *OutVer = Ver;
320 FoundFileDeps |= List.HasFileDeps();
321 return true;
322 }
323
324 /* Record the Description data. Description data always exists in
325 Packages and Translation-* files. */
326 pkgCache::DescIterator Desc = Ver.DescriptionList();
327 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
328 map_ptrloc *LastDesc = &Ver->DescriptionList;
329
330 // Skip to the end of description set
331 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
332
333 // Add new description
334 oldMap = Map.Data();
335 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
336 if (oldMap != Map.Data())
337 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
338 *LastDesc = descindex;
339 Desc->ParentPkg = Pkg.Index();
340
341 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
342 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
343 }
344
345 FoundFileDeps |= List.HasFileDeps();
346
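// The ID members of the cache structures have a fixed width, so refuse to
// build a cache with more entries of a kind than such an ID can address.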
347 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
348 return _error->Error(_("Wow, you exceeded the number of package "
349 "names this APT is capable of."));
350 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
351 return _error->Error(_("Wow, you exceeded the number of versions "
352 "this APT is capable of."));
353 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
354 return _error->Error(_("Wow, you exceeded the number of descriptions "
355 "this APT is capable of."));
356 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
357 return _error->Error(_("Wow, you exceeded the number of dependencies "
358 "this APT is capable of."));
359 return true;
360 }
361 /*}}}*/
362 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
363 // ---------------------------------------------------------------------
364 /* If we found any file depends while parsing the main list we need to
365 resolve them. Since it is undesirable to load the entire list of files
366 into the cache as virtual packages we do a two-stage effort. MergeList
367 identifies the file depends and this creates Provides for them by
368 re-parsing all the indexes. */
369 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
370 {
371 List.Owner = this;
372
373 unsigned int Counter = 0;
374 while (List.Step() == true)
375 {
376 string PackageName = List.Package();
377 if (PackageName.empty() == true)
378 return false;
379 string Version = List.Version();
380 if (Version.empty() == true)
381 continue;
382
383 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
384 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
385 if (Pkg.end() == true)
386 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
387 PackageName.c_str());
388 Counter++;
389 if (Counter % 100 == 0 && Progress != 0)
390 Progress->Progress(List.Offset());
391
392 unsigned long Hash = List.VersionHash();
393 pkgCache::VerIterator Ver = Pkg.VersionList();
394 Dynamic<pkgCache::VerIterator> DynVer(Ver);
395 for (; Ver.end() == false; Ver++)
396 {
397 if (Ver->Hash == Hash && Version == Ver.VerStr())
398 {
399 if (List.CollectFileProvides(Cache,Ver) == false)
400 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
401 break;
402 }
403 }
404
405 if (Ver.end() == true)
406 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
407 }
408
409 return true;
410 }
411 /*}}}*/
412 // CacheGenerator::NewGroup - Add a new group /*{{{*/
413 // ---------------------------------------------------------------------
414 /* This creates a new group structure and adds it to the hash table */
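/* A Group bundles all packages that share a name across architectures;
   NewPackage below chains the per-architecture pkgCache::Package entries
   into it via FirstPackage/LastPackage. */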
415 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
416 {
417 Grp = Cache.FindGrp(Name);
418 if (Grp.end() == false)
419 return true;
420
421 // Get a structure
422 map_ptrloc const Group = AllocateInMap(sizeof(pkgCache::Group));
423 if (unlikely(Group == 0))
424 return false;
425
426 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
427 map_ptrloc const idxName = WriteStringInMap(Name);
428 if (unlikely(idxName == 0))
429 return false;
430 Grp->Name = idxName;
431
432 // Insert it into the hash table
433 unsigned long const Hash = Cache.Hash(Name);
434 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
435 Cache.HeaderP->GrpHashTable[Hash] = Group;
436
437 Grp->ID = Cache.HeaderP->GroupCount++;
438 return true;
439 }
440 /*}}}*/
441 // CacheGenerator::NewPackage - Add a new package /*{{{*/
442 // ---------------------------------------------------------------------
443 /* This creates a new package structure and adds it to the hash table */
444 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
445 const string &Arch) {
446 pkgCache::GrpIterator Grp;
447 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
448 if (unlikely(NewGroup(Grp, Name) == false))
449 return false;
450
451 Pkg = Grp.FindPkg(Arch);
452 if (Pkg.end() == false)
453 return true;
454
455 // Get a structure
456 map_ptrloc const Package = AllocateInMap(sizeof(pkgCache::Package));
457 if (unlikely(Package == 0))
458 return false;
459 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
460
461 // Insert the package into our package list
462 if (Grp->FirstPackage == 0) // the group is new
463 {
464 // Insert it into the hash table
465 unsigned long const Hash = Cache.Hash(Name);
466 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
467 Cache.HeaderP->PkgHashTable[Hash] = Package;
468 Grp->FirstPackage = Package;
469 }
470 else // Group the Packages together
471 {
472 // this package is the new last package
473 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
474 Pkg->NextPackage = LastPkg->NextPackage;
475 LastPkg->NextPackage = Package;
476 }
477 Grp->LastPackage = Package;
478
479 // Set the name, arch and the ID
480 Pkg->Name = Grp->Name;
481 Pkg->Group = Grp.Index();
482 // arch "all" is mapped to the native architecture
483 map_ptrloc const idxArch = (Arch == "all") ? Cache.HeaderP->Architecture : WriteUniqString(Arch.c_str());
484 if (unlikely(idxArch == 0))
485 return false;
486 Pkg->Arch = idxArch;
487 Pkg->ID = Cache.HeaderP->PackageCount++;
488
489 return true;
490 }
491 /*}}}*/
492 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
493 // ---------------------------------------------------------------------
494 /* This records in which index file the version was found by linking a new VerFile structure into the version's file list. */
495 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
496 ListParser &List)
497 {
498 if (CurrentFile == 0)
499 return true;
500
501 // Get a structure
502 map_ptrloc const VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
503 if (VerFile == 0)
504 return false;
505
506 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
507 VF->File = CurrentFile - Cache.PkgFileP;
508
509 // Link it to the end of the list
510 map_ptrloc *Last = &Ver->FileList;
511 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
512 Last = &V->NextFile;
513 VF->NextFile = *Last;
514 *Last = VF.Index();
515
516 VF->Offset = List.Offset();
517 VF->Size = List.Size();
518 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
519 Cache.HeaderP->MaxVerFileSize = VF->Size;
520 Cache.HeaderP->VerFileCount++;
521
522 return true;
523 }
524 /*}}}*/
525 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
526 // ---------------------------------------------------------------------
527 /* This puts a version structure in the linked list */
528 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
529 const string &VerStr,
530 unsigned long Next)
531 {
532 // Get a structure
533 map_ptrloc const Version = AllocateInMap(sizeof(pkgCache::Version));
534 if (Version == 0)
535 return 0;
536
537 // Fill it in
538 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
539 Ver->NextVer = Next;
540 Ver->ID = Cache.HeaderP->VersionCount++;
541 map_ptrloc const idxVerStr = WriteStringInMap(VerStr);
542 if (unlikely(idxVerStr == 0))
543 return 0;
544 Ver->VerStr = idxVerStr;
545
546 return Version;
547 }
548 /*}}}*/
549 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
550 // ---------------------------------------------------------------------
551 /* This records in which index file the description was found by linking a new DescFile structure into the description's file list. */
552 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
553 ListParser &List)
554 {
555 if (CurrentFile == 0)
556 return true;
557
558 // Get a structure
559 map_ptrloc const DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
560 if (DescFile == 0)
561 return false;
562
563 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
564 DF->File = CurrentFile - Cache.PkgFileP;
565
566 // Link it to the end of the list
567 map_ptrloc *Last = &Desc->FileList;
568 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
569 Last = &D->NextFile;
570
571 DF->NextFile = *Last;
572 *Last = DF.Index();
573
574 DF->Offset = List.Offset();
575 DF->Size = List.Size();
576 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
577 Cache.HeaderP->MaxDescFileSize = DF->Size;
578 Cache.HeaderP->DescFileCount++;
579
580 return true;
581 }
582 /*}}}*/
583 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
584 // ---------------------------------------------------------------------
585 /* This puts a description structure in the linked list */
586 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
587 const string &Lang,
588 const MD5SumValue &md5sum,
589 map_ptrloc Next)
590 {
591 // Get a structure
592 map_ptrloc const Description = AllocateInMap(sizeof(pkgCache::Description));
593 if (Description == 0)
594 return 0;
595
596 // Fill it in
597 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
598 Desc->NextDesc = Next;
599 Desc->ID = Cache.HeaderP->DescriptionCount++;
600 map_ptrloc const idxlanguage_code = WriteStringInMap(Lang);
601 map_ptrloc const idxmd5sum = WriteStringInMap(md5sum.Value());
602 if (unlikely(idxlanguage_code == 0 || idxmd5sum == 0))
603 return 0;
604 Desc->language_code = idxlanguage_code;
605 Desc->md5sum = idxmd5sum;
606
607 return Description;
608 }
609 /*}}}*/
610 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
611 // ---------------------------------------------------------------------
612 /* This prepares the Cache for delivery */
613 bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
614 {
615 // FIXME: add progress reporting for this operation
616 // Do we have different architectures in our groups?
617 vector<string> archs = APT::Configuration::getArchitectures();
618 if (archs.size() > 1)
619 {
620 // Create Conflicts between the members of each group
621 pkgCache::GrpIterator G = GetCache().GrpBegin();
622 Dynamic<pkgCache::GrpIterator> DynG(G);
623 for (; G.end() != true; G++)
624 {
625 string const PkgName = G.Name();
626 pkgCache::PkgIterator P = G.PackageList();
627 Dynamic<pkgCache::PkgIterator> DynP(P);
628 for (; P.end() != true; P = G.NextPkg(P))
629 {
630 pkgCache::PkgIterator allPkg;
631 Dynamic<pkgCache::PkgIterator> DynallPkg(allPkg);
632 pkgCache::VerIterator V = P.VersionList();
633 Dynamic<pkgCache::VerIterator> DynV(V);
634 for (; V.end() != true; V++)
635 {
636 char const * const Arch = P.Arch();
637 map_ptrloc *OldDepLast = NULL;
638 /* MultiArch handling introduces a lot of implicit Dependencies:
639 - MultiArch: same → Co-Installable if they have the same version
640 - Architecture: all → Need to be Co-Installable for internal reasons
641 - All others conflict with all other group members */
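/* For a hypothetical MultiArch: same package "libfoo" built for amd64 and
   i386 this adds to each version, per foreign architecture:
      Replaces: libfoo:i386 (<< ${binary:Version})
      Breaks:   libfoo:i386 (!= ${binary:Version})
   while a version that is not co-installable simply gets
      Conflicts: libfoo:i386 */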
642 bool const coInstall = ((V->MultiArch & pkgCache::Version::Same) == pkgCache::Version::Same);
643 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
644 {
645 if (*A == Arch)
646 continue;
647 /* We allow only one installed arch at a time
648 per group, therefore each group member conflicts
649 with all other group members */
650 pkgCache::PkgIterator D = G.FindPkg(*A);
651 Dynamic<pkgCache::PkgIterator> DynD(D);
652 if (D.end() == true)
653 continue;
654 if (coInstall == true)
655 {
656 // Replaces: ${self}:other ( << ${binary:Version})
657 NewDepends(D, V, V.VerStr(),
658 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
659 OldDepLast);
660 // Breaks: ${self}:other (!= ${binary:Version})
661 NewDepends(D, V, V.VerStr(),
662 pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
663 OldDepLast);
664 } else {
665 // Conflicts: ${self}:other
666 NewDepends(D, V, "",
667 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
668 OldDepLast);
669 }
670 }
671 }
672 }
673 }
674 }
675 return true;
676 }
677 /*}}}*/
678 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
679 // ---------------------------------------------------------------------
680 /* This creates a dependency element in the tree. It is linked to the
681 version and to the package that it is pointing to. */
682 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
683 pkgCache::VerIterator &Ver,
684 string const &Version,
685 unsigned int const &Op,
686 unsigned int const &Type,
687 map_ptrloc *OldDepLast)
688 {
689 void const * const oldMap = Map.Data();
690 // Get a structure
691 map_ptrloc const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
692 if (unlikely(Dependency == 0))
693 return false;
694
695 // Fill it in
696 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
697 Dynamic<pkgCache::DepIterator> DynDep(Dep);
698 Dep->ParentVer = Ver.Index();
699 Dep->Type = Type;
700 Dep->CompareOp = Op;
701 Dep->ID = Cache.HeaderP->DependsCount++;
702
703 // Probe the reverse dependency list for a version string that matches
704 if (Version.empty() == false)
705 {
706 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
707 if (I->Version != 0 && I.TargetVer() == Version)
708 Dep->Version = I->Version;*/
709 if (Dep->Version == 0) {
710 map_ptrloc const index = WriteStringInMap(Version);
711 if (unlikely(index == 0))
712 return false;
713 Dep->Version = index;
714 }
715 }
716
717 // Link it to the package
718 Dep->Package = Pkg.Index();
719 Dep->NextRevDepends = Pkg->RevDepends;
720 Pkg->RevDepends = Dep.Index();
721
722 // Do we know where to link the Dependency to?
723 if (OldDepLast == NULL)
724 {
725 OldDepLast = &Ver->DependsList;
726 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
727 OldDepLast = &D->NextDepends;
728 } else if (oldMap != Map.Data())
729 OldDepLast += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
730
731 Dep->NextDepends = *OldDepLast;
732 *OldDepLast = Dep.Index();
733 OldDepLast = &Dep->NextDepends;
734
735 return true;
736 }
737 /*}}}*/
738 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
739 // ---------------------------------------------------------------------
740 /* This creates the Group and the Package to link this dependency to if
741 needed, and also handles caching of the old endpoint */
742 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator &Ver,
743 const string &PackageName,
744 const string &Arch,
745 const string &Version,
746 unsigned int Op,
747 unsigned int Type)
748 {
749 pkgCache::GrpIterator Grp;
750 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
751 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
752 return false;
753
754 // Locate the target package
755 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
756 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
757 if (Pkg.end() == true) {
758 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
759 return false;
760 }
761
762 // Is it a file dependency?
763 if (unlikely(PackageName[0] == '/'))
764 FoundFileDeps = true;
765
766 /* Caching the old end point speeds up generation substantially */
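/* (Without it Owner->NewDepends would rescan the version's complete
   DependsList for every single dependency, which quickly becomes quadratic
   for packages with many dependencies.) */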
767 if (OldDepVer != Ver) {
768 OldDepLast = NULL;
769 OldDepVer = Ver;
770 }
771
772 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
773 }
774 /*}}}*/
775 // ListParser::NewProvides - Create a Provides element /*{{{*/
776 // ---------------------------------------------------------------------
777 /* This creates a new Provides structure and links it into both the providing version and the provided package. */
778 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator &Ver,
779 const string &PkgName,
780 const string &PkgArch,
781 const string &Version)
782 {
783 pkgCache &Cache = Owner->Cache;
784
785 // We do not add self referencing provides
786 if (Ver.ParentPkg().Name() == PkgName && (PkgArch == Ver.ParentPkg().Arch() ||
787 (PkgArch == "all" && strcmp((Cache.StrP + Cache.HeaderP->Architecture), Ver.ParentPkg().Arch()) == 0)))
788 return true;
789
790 // Get a structure
791 map_ptrloc const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
792 if (unlikely(Provides == 0))
793 return false;
794 Cache.HeaderP->ProvidesCount++;
795
796 // Fill it in
797 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
798 Dynamic<pkgCache::PrvIterator> DynPrv(Prv);
799 Prv->Version = Ver.Index();
800 Prv->NextPkgProv = Ver->ProvidesList;
801 Ver->ProvidesList = Prv.Index();
802 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
803 return false;
804
805 // Locate the target package
806 pkgCache::PkgIterator Pkg;
807 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
808 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
809 return false;
810
811 // Link it to the package
812 Prv->ParentPkg = Pkg.Index();
813 Prv->NextProvides = Pkg->ProvidesList;
814 Pkg->ProvidesList = Prv.Index();
815
816 return true;
817 }
818 /*}}}*/
819 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
820 // ---------------------------------------------------------------------
821 /* This is used to select which file is to be associated with all newly
822 added versions. The caller is responsible for setting the IMS fields. */
823 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
824 const pkgIndexFile &Index,
825 unsigned long Flags)
826 {
827 // Get some space for the structure
828 map_ptrloc const idxFile = AllocateInMap(sizeof(*CurrentFile));
829 if (unlikely(idxFile == 0))
830 return false;
831 CurrentFile = Cache.PkgFileP + idxFile;
832
833 // Fill it in
834 map_ptrloc const idxFileName = WriteStringInMap(File);
835 map_ptrloc const idxSite = WriteUniqString(Site);
836 if (unlikely(idxFileName == 0 || idxSite == 0))
837 return false;
838 CurrentFile->FileName = idxFileName;
839 CurrentFile->Site = idxSite;
840 CurrentFile->NextFile = Cache.HeaderP->FileList;
841 CurrentFile->Flags = Flags;
842 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
843 map_ptrloc const idxIndexType = WriteUniqString(Index.GetType()->Label);
844 if (unlikely(idxIndexType == 0))
845 return false;
846 CurrentFile->IndexType = idxIndexType;
847 PkgFileName = File;
848 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
849 Cache.HeaderP->PackageFileCount++;
850
851 if (Progress != 0)
852 Progress->SubProgress(Index.Size());
853 return true;
854 }
855 /*}}}*/
856 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
857 // ---------------------------------------------------------------------
858 /* This is used to create handles to strings. Given the same text it
859 always returns the same number */
860 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
861 unsigned int Size)
862 {
863 /* We use a very small transient hash table here; this speeds up generation
864 by a fair amount on slower machines */
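// The bucket index looks only at the first two characters of the string, so
// this table is merely a small cache in front of the global string list that
// is searched below.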
865 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
866 if (Bucket != 0 &&
867 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
868 return Bucket->String;
869
870 // Search for an insertion point
871 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
872 int Res = 1;
873 map_ptrloc *Last = &Cache.HeaderP->StringList;
874 for (; I != Cache.StringItemP; Last = &I->NextItem,
875 I = Cache.StringItemP + I->NextItem)
876 {
877 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
878 if (Res >= 0)
879 break;
880 }
881
882 // Match
883 if (Res == 0)
884 {
885 Bucket = I;
886 return I->String;
887 }
888
889 // Get a structure
890 void const * const oldMap = Map.Data();
891 map_ptrloc const Item = AllocateInMap(sizeof(pkgCache::StringItem));
892 if (Item == 0)
893 return 0;
894
895 map_ptrloc const idxString = WriteStringInMap(S,Size);
896 if (unlikely(idxString == 0))
897 return 0;
898 if (oldMap != Map.Data()) {
899 Last += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
900 I += (pkgCache::StringItem*) Map.Data() - (pkgCache::StringItem*) oldMap;
901 }
902 *Last = Item;
903
904 // Fill in the structure
905 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
906 ItemP->NextItem = I - Cache.StringItemP;
907 ItemP->String = idxString;
908
909 Bucket = ItemP;
910 return ItemP->String;
911 }
912 /*}}}*/
913 // CheckValidity - Check that a cache is up-to-date /*{{{*/
914 // ---------------------------------------------------------------------
915 /* This just verifies that each file in the list of index files exists,
916 has attributes matching the cache, and that the cache does not contain
917 any extra files. */
918 static bool CheckValidity(const string &CacheFile,
919 pkgSourceList &List,
920 FileIterator Start,
921 FileIterator End,
922 MMap **OutMap = 0)
923 {
924 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
925 // No file, certainly invalid
926 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
927 {
928 if (Debug == true)
929 std::clog << "CacheFile doesn't exist" << std::endl;
930 return false;
931 }
932
933 if (List.GetLastModifiedTime() > GetModificationTime(CacheFile))
934 {
935 if (Debug == true)
936 std::clog << "sources.list is newer than the cache" << std::endl;
937 return false;
938 }
939
940 // Map it
941 FileFd CacheF(CacheFile,FileFd::ReadOnly);
942 SPtr<MMap> Map = new MMap(CacheF,0);
943 pkgCache Cache(Map);
944 if (_error->PendingError() == true || Map->Size() == 0)
945 {
946 if (Debug == true)
947 std::clog << "Errors are pending or Map is empty()" << std::endl;
948 _error->Discard();
949 return false;
950 }
951
952 /* Now we check every index file, see if it is in the cache,
953 verify the IMS data and check that it is on the disk too.. */
954 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
955 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
956 for (; Start != End; Start++)
957 {
958 if (Debug == true)
959 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
960 if ((*Start)->HasPackages() == false)
961 {
962 if (Debug == true)
963 std::clog << "Has NO packages" << std::endl;
964 continue;
965 }
966
967 if ((*Start)->Exists() == false)
968 {
969 #if 0 // mvo: we no longer give a message here (Default Sources spec)
970 _error->WarningE("stat",_("Couldn't stat source package list %s"),
971 (*Start)->Describe().c_str());
972 #endif
973 if (Debug == true)
974 std::clog << "file doesn't exist" << std::endl;
975 continue;
976 }
977
978 // FindInCache is also expected to do an IMS check.
979 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
980 if (File.end() == true)
981 {
982 if (Debug == true)
983 std::clog << "FindInCache returned end-Pointer" << std::endl;
984 return false;
985 }
986
987 Visited[File->ID] = true;
988 if (Debug == true)
989 std::clog << "with ID " << File->ID << " is valid" << std::endl;
990 }
991
992 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
993 if (Visited[I] == false)
994 {
995 if (Debug == true)
996 std::clog << "File with ID" << I << " wasn't visited" << std::endl;
997 return false;
998 }
999
1000 if (_error->PendingError() == true)
1001 {
1002 if (Debug == true)
1003 {
1004 std::clog << "Validity failed because of pending errors:" << std::endl;
1005 _error->DumpErrors();
1006 }
1007 _error->Discard();
1008 return false;
1009 }
1010
1011 if (OutMap != 0)
1012 *OutMap = Map.UnGuard();
1013 return true;
1014 }
1015 /*}}}*/
1016 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
1017 // ---------------------------------------------------------------------
1018 /* Size is kind of an abstract notion that is only used for the progress
1019 meter */
1020 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
1021 {
1022 unsigned long TotalSize = 0;
1023 for (; Start != End; Start++)
1024 {
1025 if ((*Start)->HasPackages() == false)
1026 continue;
1027 TotalSize += (*Start)->Size();
1028 }
1029 return TotalSize;
1030 }
1031 /*}}}*/
1032 // BuildCache - Merge the list of index files into the cache /*{{{*/
1033 // ---------------------------------------------------------------------
1034 /* This merges every index file that is not yet in the cache into the generator, and optionally does a second pass to collect file provides. */
1035 static bool BuildCache(pkgCacheGenerator &Gen,
1036 OpProgress *Progress,
1037 unsigned long &CurrentSize,unsigned long TotalSize,
1038 FileIterator Start, FileIterator End)
1039 {
1040 FileIterator I;
1041 for (I = Start; I != End; I++)
1042 {
1043 if ((*I)->HasPackages() == false)
1044 continue;
1045
1046 if ((*I)->Exists() == false)
1047 continue;
1048
1049 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
1050 {
1051 _error->Warning("Duplicate sources.list entry %s",
1052 (*I)->Describe().c_str());
1053 continue;
1054 }
1055
1056 unsigned long Size = (*I)->Size();
1057 if (Progress != NULL)
1058 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
1059 CurrentSize += Size;
1060
1061 if ((*I)->Merge(Gen,Progress) == false)
1062 return false;
1063 }
1064
1065 if (Gen.HasFileDeps() == true)
1066 {
1067 if (Progress != NULL)
1068 Progress->Done();
1069 TotalSize = ComputeSize(Start, End);
1070 CurrentSize = 0;
1071 for (I = Start; I != End; I++)
1072 {
1073 unsigned long Size = (*I)->Size();
1074 if (Progress != NULL)
1075 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
1076 CurrentSize += Size;
1077 if ((*I)->MergeFileProvides(Gen,Progress) == false)
1078 return false;
1079 }
1080 }
1081
1082 return true;
1083 }
1084 /*}}}*/
1085 // CacheGenerator::CreateDynamicMMap - load an mmap with configuration options /*{{{*/
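/* Start size, grow step and hard limit of the map can be tuned in the
   configuration, for example (hypothetical values, in bytes):
      APT::Cache-Start "50331648";
      APT::Cache-Grow  "2097152";
      APT::Cache-Limit "0";
   The values are passed straight on to DynamicMMap, and MMap::Fallback
   behaviour can be requested with APT::Cache-Fallback "true". */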
1086 DynamicMMap* pkgCacheGenerator::CreateDynamicMMap(FileFd *CacheF, unsigned long Flags) {
1087 unsigned long const MapStart = _config->FindI("APT::Cache-Start", 24*1024*1024);
1088 unsigned long const MapGrow = _config->FindI("APT::Cache-Grow", 1*1024*1024);
1089 unsigned long const MapLimit = _config->FindI("APT::Cache-Limit", 0);
1090 Flags |= MMap::Moveable;
1091 if (_config->FindB("APT::Cache-Fallback", false) == true)
1092 Flags |= MMap::Fallback;
1093 if (CacheF != NULL)
1094 return new DynamicMMap(*CacheF, Flags, MapStart, MapGrow, MapLimit);
1095 else
1096 return new DynamicMMap(Flags, MapStart, MapGrow, MapLimit);
1097 }
1098 /*}}}*/
1099 // CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
1100 // ---------------------------------------------------------------------
1101 /* This makes sure that the status cache (the cache that has all
1102 index files from the sources list and all local ones) is ready
1103 to be mmaped. If OutMap is not zero then a MMap object representing
1104 the cache will be stored there. This is pretty much mandatory if you
1105 are using AllowMem. AllowMem lets the function be run as non-root
1106 where it builds the cache 'fast' into a memory buffer. */
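/* A minimal caller, as a sketch only (error handling and progress reporting
   omitted; pkgSourceList::ReadMainList() is assumed to be available):
      pkgSourceList List;
      List.ReadMainList();
      MMap *Map = 0;
      if (pkgCacheGenerator::MakeStatusCache(List, NULL, &Map, true) == true)
      {
         pkgCache Cache(Map);
         // ... work with Cache ...
         delete Map;
      }
 */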
1107 __deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
1108 MMap **OutMap, bool AllowMem)
1109 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
1110 bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
1111 MMap **OutMap,bool AllowMem)
1112 {
1113 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
1114
1115 vector<pkgIndexFile *> Files;
1116 for (vector<metaIndex *>::const_iterator i = List.begin();
1117 i != List.end();
1118 i++)
1119 {
1120 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
1121 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
1122 j != Indexes->end();
1123 j++)
1124 Files.push_back (*j);
1125 }
1126
1127 unsigned long const EndOfSource = Files.size();
1128 if (_system->AddStatusFiles(Files) == false)
1129 return false;
1130
1131 // Decide if we can write to the files..
1132 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1133 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1134
1135 // ensure the cache directory exists
1136 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1137 {
1138 string dir = _config->FindDir("Dir::Cache");
1139 size_t const len = dir.size();
1140 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1141 dir = dir.substr(0, len - 5);
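// Dir::Cache normally ends in ".../apt/"; the trailing component is cut off
// above, presumably so that CreateDirectory() starts from a parent that
// already exists and creates the apt/ directory itself if it is missing.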
1142 if (CacheFile.empty() == false)
1143 CreateDirectory(dir, flNotFile(CacheFile));
1144 if (SrcCacheFile.empty() == false)
1145 CreateDirectory(dir, flNotFile(SrcCacheFile));
1146 }
1147
1148 // Decide if we can write to the cache
1149 bool Writeable = false;
1150 if (CacheFile.empty() == false)
1151 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1152 else
1153 if (SrcCacheFile.empty() == false)
1154 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1155 if (Debug == true)
1156 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1157
1158 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1159 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1160
1161 if (Progress != NULL)
1162 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1163
1164 // Cache is OK, Fin.
1165 if (CheckValidity(CacheFile, List, Files.begin(),Files.end(),OutMap) == true)
1166 {
1167 if (Progress != NULL)
1168 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1169 if (Debug == true)
1170 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1171 return true;
1172 }
1173 else if (Debug == true)
1174 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1175
1176 /* At this point we know we need to reconstruct the package cache,
1177 begin. */
1178 SPtr<FileFd> CacheF;
1179 SPtr<DynamicMMap> Map;
1180 if (Writeable == true && CacheFile.empty() == false)
1181 {
1182 unlink(CacheFile.c_str());
1183 CacheF = new FileFd(CacheFile,FileFd::WriteAtomic);
1184 fchmod(CacheF->Fd(),0644);
1185 Map = CreateDynamicMMap(CacheF, MMap::Public);
1186 if (_error->PendingError() == true)
1187 return false;
1188 if (Debug == true)
1189 std::clog << "Open filebased MMap" << std::endl;
1190 }
1191 else
1192 {
1193 // Just build it in memory..
1194 Map = CreateDynamicMMap(NULL);
1195 if (Debug == true)
1196 std::clog << "Open memory Map (not filebased)" << std::endl;
1197 }
1198
1199 // Let's try the source cache.
1200 unsigned long CurrentSize = 0;
1201 unsigned long TotalSize = 0;
1202 if (CheckValidity(SrcCacheFile, List, Files.begin(),
1203 Files.begin()+EndOfSource) == true)
1204 {
1205 if (Debug == true)
1206 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1207 // Preload the map with the source cache
1208 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1209 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1210 if ((alloc == 0 && _error->PendingError())
1211 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1212 SCacheF.Size()) == false)
1213 return false;
1214
1215 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1216
1217 // Build the status cache
1218 pkgCacheGenerator Gen(Map.Get(),Progress);
1219 if (_error->PendingError() == true)
1220 return false;
1221 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1222 Files.begin()+EndOfSource,Files.end()) == false)
1223 return false;
1224
1225 // FIXME: move me to a better place
1226 Gen.FinishCache(Progress);
1227 }
1228 else
1229 {
1230 if (Debug == true)
1231 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1232 TotalSize = ComputeSize(Files.begin(),Files.end());
1233
1234 // Build the source cache
1235 pkgCacheGenerator Gen(Map.Get(),Progress);
1236 if (_error->PendingError() == true)
1237 return false;
1238 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1239 Files.begin(),Files.begin()+EndOfSource) == false)
1240 return false;
1241
1242 // Write it back
1243 if (Writeable == true && SrcCacheFile.empty() == false)
1244 {
1245 FileFd SCacheF(SrcCacheFile,FileFd::WriteAtomic);
1246 if (_error->PendingError() == true)
1247 return false;
1248
1249 fchmod(SCacheF.Fd(),0644);
1250
1251 // Write out the main data
1252 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1253 return _error->Error(_("IO Error saving source cache"));
1254 SCacheF.Sync();
1255
1256 // Write out the proper header
1257 Gen.GetCache().HeaderP->Dirty = false;
1258 if (SCacheF.Seek(0) == false ||
1259 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1260 return _error->Error(_("IO Error saving source cache"));
1261 Gen.GetCache().HeaderP->Dirty = true;
1262 SCacheF.Sync();
1263 }
1264
1265 // Build the status cache
1266 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1267 Files.begin()+EndOfSource,Files.end()) == false)
1268 return false;
1269
1270 // FIXME: move me to a better place
1271 Gen.FinishCache(Progress);
1272 }
1273 if (Debug == true)
1274 std::clog << "Caches are ready for shipping" << std::endl;
1275
1276 if (_error->PendingError() == true)
1277 return false;
1278 if (OutMap != 0)
1279 {
1280 if (CacheF != 0)
1281 {
1282 delete Map.UnGuard();
1283 *OutMap = new MMap(*CacheF,0);
1284 }
1285 else
1286 {
1287 *OutMap = Map.UnGuard();
1288 }
1289 }
1290
1291 return true;
1292 }
1293 /*}}}*/
1294 // CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1295 // ---------------------------------------------------------------------
1296 /* This builds a cache that contains only the status files added by the system (typically the dpkg status file), without any package indexes. */
1297 __deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1298 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1299 bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1300 {
1301 vector<pkgIndexFile *> Files;
1302 unsigned long EndOfSource = Files.size();
1303 if (_system->AddStatusFiles(Files) == false)
1304 return false;
1305
1306 SPtr<DynamicMMap> Map = CreateDynamicMMap(NULL);
1307 unsigned long CurrentSize = 0;
1308 unsigned long TotalSize = 0;
1309
1310 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1311
1312 // Build the status cache
1313 if (Progress != NULL)
1314 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1315 pkgCacheGenerator Gen(Map.Get(),Progress);
1316 if (_error->PendingError() == true)
1317 return false;
1318 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1319 Files.begin()+EndOfSource,Files.end()) == false)
1320 return false;
1321
1322 // FIXME: move me to a better place
1323 Gen.FinishCache(Progress);
1324
1325 if (_error->PendingError() == true)
1326 return false;
1327 *OutMap = Map.UnGuard();
1328
1329 return true;
1330 }
1331 /*}}}*/