1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
39 template <typename Iter> std::vector<Iter*> pkgCacheGenerator::Dynamic<Iter>::toReMap;
40
41 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
42 // ---------------------------------------------------------------------
43 /* We set the dirty flag and make sure that is written to the disk */
44 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
45 Map(*pMap), Cache(pMap,false), Progress(Prog),
46 FoundFileDeps(0)
47 {
48 CurrentFile = 0;
49 memset(UniqHash,0,sizeof(UniqHash));
50
51 if (_error->PendingError() == true)
52 return;
53
54 if (Map.Size() == 0)
55 {
56 // Set up the map interface..
57 Cache.HeaderP = (pkgCache::Header *)Map.Data();
58 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
59 return;
60
61 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
62
63 // Starting header
64 *Cache.HeaderP = pkgCache::Header();
65 map_ptrloc const idxVerSysName = WriteStringInMap(_system->VS->Label);
66 Cache.HeaderP->VerSysName = idxVerSysName;
67 map_ptrloc const idxArchitecture = WriteStringInMap(_config->Find("APT::Architecture"));
68 Cache.HeaderP->Architecture = idxArchitecture;
69 if (unlikely(idxVerSysName == 0 || idxArchitecture == 0))
70 return;
71 Cache.ReMap();
72 }
73 else
74 {
75 // Map directly from the existing file
76 Cache.ReMap();
77 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
78 if (Cache.VS != _system->VS)
79 {
80 _error->Error(_("Cache has an incompatible versioning system"));
81 return;
82 }
83 }
84
85 Cache.HeaderP->Dirty = true;
86 Map.Sync(0,sizeof(pkgCache::Header));
87 }
88 /*}}}*/
89 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
90 // ---------------------------------------------------------------------
91 /* We sync the data then unset the dirty flag in two steps so as to
92 avoid a problem during a crash */
93 pkgCacheGenerator::~pkgCacheGenerator()
94 {
95 if (_error->PendingError() == true)
96 return;
97 if (Map.Sync() == false)
98 return;
99
100 Cache.HeaderP->Dirty = false;
101 Map.Sync(0,sizeof(pkgCache::Header));
102 }
103 /*}}}*/
104 void pkgCacheGenerator::ReMap(void const * const oldMap, void const * const newMap) {/*{{{*/
105 if (oldMap == newMap)
106 return;
107
108 Cache.ReMap(false);
109
110 CurrentFile += (pkgCache::PackageFile*) newMap - (pkgCache::PackageFile*) oldMap;
111
112 for (size_t i = 0; i < _count(UniqHash); ++i)
113 if (UniqHash[i] != 0)
114 UniqHash[i] += (pkgCache::StringItem*) newMap - (pkgCache::StringItem*) oldMap;
115
116 for (std::vector<pkgCache::GrpIterator*>::const_iterator i = Dynamic<pkgCache::GrpIterator>::toReMap.begin();
117 i != Dynamic<pkgCache::GrpIterator>::toReMap.end(); ++i)
118 (*i)->ReMap(oldMap, newMap);
119 for (std::vector<pkgCache::PkgIterator*>::const_iterator i = Dynamic<pkgCache::PkgIterator>::toReMap.begin();
120 i != Dynamic<pkgCache::PkgIterator>::toReMap.end(); ++i)
121 (*i)->ReMap(oldMap, newMap);
122 for (std::vector<pkgCache::VerIterator*>::const_iterator i = Dynamic<pkgCache::VerIterator>::toReMap.begin();
123 i != Dynamic<pkgCache::VerIterator>::toReMap.end(); ++i)
124 (*i)->ReMap(oldMap, newMap);
125 for (std::vector<pkgCache::DepIterator*>::const_iterator i = Dynamic<pkgCache::DepIterator>::toReMap.begin();
126 i != Dynamic<pkgCache::DepIterator>::toReMap.end(); ++i)
127 (*i)->ReMap(oldMap, newMap);
128 for (std::vector<pkgCache::DescIterator*>::const_iterator i = Dynamic<pkgCache::DescIterator>::toReMap.begin();
129 i != Dynamic<pkgCache::DescIterator>::toReMap.end(); ++i)
130 (*i)->ReMap(oldMap, newMap);
131 for (std::vector<pkgCache::PrvIterator*>::const_iterator i = Dynamic<pkgCache::PrvIterator>::toReMap.begin();
132 i != Dynamic<pkgCache::PrvIterator>::toReMap.end(); ++i)
133 (*i)->ReMap(oldMap, newMap);
134 for (std::vector<pkgCache::PkgFileIterator*>::const_iterator i = Dynamic<pkgCache::PkgFileIterator>::toReMap.begin();
135 i != Dynamic<pkgCache::PkgFileIterator>::toReMap.end(); ++i)
136 (*i)->ReMap(oldMap, newMap);
137 } /*}}}*/
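// A minimal sketch of the registration pattern the re-mapping above relies
// on (it appears throughout MergeList below): any cache iterator that is
// held across an allocation is wrapped in Dynamic<>, which records its
// address in toReMap so ReMap() can shift it if the DynamicMMap grows and
// relocates. The names used here are only placeholders.
#if 0
   pkgCache::PkgIterator Pkg;
   Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);          // registered for re-mapping
   map_ptrloc const idx = WriteStringInMap("example");  // may move the whole map ...
   // ... in which case ReMap(oldMap, Map.Data()) has already adjusted Pkg.
#endif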
138 // CacheGenerator::WriteStringInMap /*{{{*/
139 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String,
140 const unsigned long &Len) {
141 void const * const oldMap = Map.Data();
142 map_ptrloc const index = Map.WriteString(String, Len);
143 if (index != 0)
144 ReMap(oldMap, Map.Data());
145 return index;
146 }
147 /*}}}*/
148 // CacheGenerator::WriteStringInMap /*{{{*/
149 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String) {
150 void const * const oldMap = Map.Data();
151 map_ptrloc const index = Map.WriteString(String);
152 if (index != 0)
153 ReMap(oldMap, Map.Data());
154 return index;
155 }
156 /*}}}*/
157 map_ptrloc pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
158 void const * const oldMap = Map.Data();
159 map_ptrloc const index = Map.Allocate(size);
160 if (index != 0)
161 ReMap(oldMap, Map.Data());
162 return index;
163 }
164 /*}}}*/
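// Raw pointers into the map (such as the map_ptrloc* list cursors used in
// MergeList below) are not covered by Dynamic<>, so callers capture
// Map.Data() before an allocation and re-base the pointer by the difference
// afterwards. A sketch of that idiom; Cursor and Text are placeholders:
#if 0
   void const * const oldMap = Map.Data();
   map_ptrloc const idx = WriteStringInMap(Text);   // may grow and move the map
   if (oldMap != Map.Data())
      Cursor += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
   *Cursor = idx;
#endif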
165 // CacheGenerator::MergeList - Merge the package list /*{{{*/
166 // ---------------------------------------------------------------------
167 /* This provides the generation of the entries in the cache. Each loop
168 goes through a single package record from the underlying parse engine. */
169 bool pkgCacheGenerator::MergeList(ListParser &List,
170 pkgCache::VerIterator *OutVer)
171 {
172 List.Owner = this;
173
174 unsigned int Counter = 0;
175 while (List.Step() == true)
176 {
177 string const PackageName = List.Package();
178 if (PackageName.empty() == true)
179 return false;
180
181 /* As we handle Architecture: all packages as architecture-bound,
182 we add all information to every (simulated) arch package */
183 std::vector<string> genArch;
184 if (List.ArchitectureAll() == true) {
185 genArch = APT::Configuration::getArchitectures();
186 if (genArch.size() != 1)
187 genArch.push_back("all");
188 } else
189 genArch.push_back(List.Architecture());
190
191 for (std::vector<string>::const_iterator arch = genArch.begin();
192 arch != genArch.end(); ++arch)
193 {
194 // Get a pointer to the package structure
195 pkgCache::PkgIterator Pkg;
196 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
197 if (NewPackage(Pkg, PackageName, *arch) == false)
198 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
199 Counter++;
200 if (Counter % 100 == 0 && Progress != 0)
201 Progress->Progress(List.Offset());
202
203 /* Get a pointer to the version structure. We know the list is sorted
204 so we use that fact in the search. Insertion of new versions is
205 done with correct sorting */
206 string Version = List.Version();
207 if (Version.empty() == true)
208 {
209 // we first process the package, then the descriptions
210 // (this has the bonus that we get a MMap error when we run out
211 // of MMap space)
212 pkgCache::VerIterator Ver(Cache);
213 Dynamic<pkgCache::VerIterator> DynVer(Ver);
214 if (List.UsePackage(Pkg, Ver) == false)
215 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
216 PackageName.c_str());
217
218 // Find the right version to write the description
219 MD5SumValue CurMd5 = List.Description_md5();
220 Ver = Pkg.VersionList();
221
222 for (; Ver.end() == false; ++Ver)
223 {
224 pkgCache::DescIterator Desc = Ver.DescriptionList();
225 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
226 map_ptrloc *LastDesc = &Ver->DescriptionList;
227 bool duplicate=false;
228
229 // don't add a new description if we have one for the given
230 // md5 && language
231 for ( ; Desc.end() == false; Desc++)
232 if (MD5SumValue(Desc.md5()) == CurMd5 &&
233 Desc.LanguageCode() == List.DescriptionLanguage())
234 duplicate=true;
235 if(duplicate)
236 continue;
237
238 for (Desc = Ver.DescriptionList();
239 Desc.end() == false;
240 LastDesc = &Desc->NextDesc, Desc++)
241 {
242 if (MD5SumValue(Desc.md5()) == CurMd5)
243 {
244 // Add new description
245 void const * const oldMap = Map.Data();
246 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
247 if (oldMap != Map.Data())
248 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
249 *LastDesc = descindex;
250 Desc->ParentPkg = Pkg.Index();
251
252 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
253 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
254 break;
255 }
256 }
257 }
258
259 continue;
260 }
261
262 pkgCache::VerIterator Ver = Pkg.VersionList();
263 Dynamic<pkgCache::VerIterator> DynVer(Ver);
264 map_ptrloc *LastVer = &Pkg->VersionList;
265 void const * oldMap = Map.Data();
266 int Res = 1;
267 unsigned long const Hash = List.VersionHash();
268 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
269 {
270 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
271 // Version is higher than the current version - insert here
272 if (Res > 0)
273 break;
274 // Version strings are equal - is the hash also equal?
275 if (Res == 0 && Ver->Hash == Hash)
276 break;
277 // proceed with the next one until we have either found the right version
278 // or a lower one (which is where the new version gets inserted)
279 }
280
281 /* We already have a version for this item, record that we saw it */
282 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
283 {
284 if (List.UsePackage(Pkg,Ver) == false)
285 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
286 PackageName.c_str());
287
288 if (NewFileVer(Ver,List) == false)
289 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
290 PackageName.c_str());
291
292 // Read only a single record and return
293 if (OutVer != 0)
294 {
295 *OutVer = Ver;
296 FoundFileDeps |= List.HasFileDeps();
297 return true;
298 }
299
300 continue;
301 }
302
303 // Add a new version
304 map_ptrloc const verindex = NewVersion(Ver,Version,*LastVer);
305 if (verindex == 0 && _error->PendingError())
306 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
307 PackageName.c_str(), 1);
308
309 if (oldMap != Map.Data())
310 LastVer += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
311 *LastVer = verindex;
312 Ver->ParentPkg = Pkg.Index();
313 Ver->Hash = Hash;
314
315 if (List.NewVersion(Ver) == false)
316 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
317 PackageName.c_str(), 2);
318
319 if (List.UsePackage(Pkg,Ver) == false)
320 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
321 PackageName.c_str());
322
323 if (NewFileVer(Ver,List) == false)
324 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
325 PackageName.c_str(), 3);
326
327 // Read only a single record and return
328 if (OutVer != 0)
329 {
330 *OutVer = Ver;
331 FoundFileDeps |= List.HasFileDeps();
332 return true;
333 }
334
335 /* Record the Description data. Description data always exist in
336 Packages and Translation-* files. */
337 pkgCache::DescIterator Desc = Ver.DescriptionList();
338 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
339 map_ptrloc *LastDesc = &Ver->DescriptionList;
340
341 // Skip to the end of the description set
342 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
343
344 // Add new description
345 oldMap = Map.Data();
346 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
347 if (oldMap != Map.Data())
348 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
349 *LastDesc = descindex;
350 Desc->ParentPkg = Pkg.Index();
351
352 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
353 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
354 }
355 }
356
357 FoundFileDeps |= List.HasFileDeps();
358
359 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
360 return _error->Error(_("Wow, you exceeded the number of package "
361 "names this APT is capable of."));
362 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
363 return _error->Error(_("Wow, you exceeded the number of versions "
364 "this APT is capable of."));
365 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
366 return _error->Error(_("Wow, you exceeded the number of descriptions "
367 "this APT is capable of."));
368 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
369 return _error->Error(_("Wow, you exceeded the number of dependencies "
370 "this APT is capable of."));
371 return true;
372 }
373 /*}}}*/
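// Rough sketch of how an index file implementation drives the generator
// (compare BuildCache() below, which calls pkgIndexFile::Merge() for each
// index): the file is selected first, then its parser is handed to
// MergeList. Gen is a pkgCacheGenerator, Idx a pkgIndexFile, Parser a
// hypothetical ListParser subclass for the index format; FileName and Site
// are placeholder strings.
#if 0
   if (Gen.SelectFile(FileName, Site, Idx, 0) == false)
      return _error->Error("Unable to select the index file");
   if (Gen.MergeList(Parser) == false)
      return _error->Error("Problem merging the package list");
#endif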
374 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
375 // ---------------------------------------------------------------------
376 /* If we found any file depends while parsing the main list we need to
377 resolve them. Since it is undesired to load the entire list of files
378 into the cache as virtual packages we do a two-stage effort. MergeList
379 identifies the file depends and this creates Provides for them by
380 re-parsing all the indexes. */
381 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
382 {
383 List.Owner = this;
384
385 unsigned int Counter = 0;
386 while (List.Step() == true)
387 {
388 string PackageName = List.Package();
389 if (PackageName.empty() == true)
390 return false;
391 string Version = List.Version();
392 if (Version.empty() == true)
393 continue;
394
395 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
396 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
397 if (Pkg.end() == true)
398 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
399 PackageName.c_str());
400 Counter++;
401 if (Counter % 100 == 0 && Progress != 0)
402 Progress->Progress(List.Offset());
403
404 unsigned long Hash = List.VersionHash();
405 pkgCache::VerIterator Ver = Pkg.VersionList();
406 Dynamic<pkgCache::VerIterator> DynVer(Ver);
407 for (; Ver.end() == false; Ver++)
408 {
409 if (Ver->Hash == Hash && Version == Ver.VerStr())
410 {
411 if (List.CollectFileProvides(Cache,Ver) == false)
412 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
413 break;
414 }
415 }
416
417 if (Ver.end() == true)
418 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
419 }
420
421 return true;
422 }
423 /*}}}*/
424 // CacheGenerator::NewGroup - Add a new group /*{{{*/
425 // ---------------------------------------------------------------------
426 /* This creates a new group structure and adds it to the hash table */
427 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
428 {
429 Grp = Cache.FindGrp(Name);
430 if (Grp.end() == false)
431 return true;
432
433 // Get a structure
434 map_ptrloc const Group = AllocateInMap(sizeof(pkgCache::Group));
435 if (unlikely(Group == 0))
436 return false;
437
438 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
439 map_ptrloc const idxName = WriteStringInMap(Name);
440 if (unlikely(idxName == 0))
441 return false;
442 Grp->Name = idxName;
443
444 // Insert it into the hash table
445 unsigned long const Hash = Cache.Hash(Name);
446 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
447 Cache.HeaderP->GrpHashTable[Hash] = Group;
448
449 Grp->ID = Cache.HeaderP->GroupCount++;
450 return true;
451 }
452 /*}}}*/
453 // CacheGenerator::NewPackage - Add a new package /*{{{*/
454 // ---------------------------------------------------------------------
455 /* This creates a new package structure and adds it to the hash table */
456 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
457 const string &Arch) {
458 pkgCache::GrpIterator Grp;
459 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
460 if (unlikely(NewGroup(Grp, Name) == false))
461 return false;
462
463 Pkg = Grp.FindPkg(Arch);
464 if (Pkg.end() == false)
465 return true;
466
467 // Get a structure
468 map_ptrloc const Package = AllocateInMap(sizeof(pkgCache::Package));
469 if (unlikely(Package == 0))
470 return false;
471 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
472
473 // Insert the package into our package list
474 if (Grp->FirstPackage == 0) // the group is new
475 {
476 // Insert it into the hash table
477 unsigned long const Hash = Cache.Hash(Name);
478 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
479 Cache.HeaderP->PkgHashTable[Hash] = Package;
480 Grp->FirstPackage = Package;
481 }
482 else // Group the Packages together
483 {
484 // this package is the new last package
485 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
486 Pkg->NextPackage = LastPkg->NextPackage;
487 LastPkg->NextPackage = Package;
488 }
489 Grp->LastPackage = Package;
490
491 // Set the name, arch and the ID
492 Pkg->Name = Grp->Name;
493 Pkg->Group = Grp.Index();
494 map_ptrloc const idxArch = WriteUniqString(Arch.c_str());
495 if (unlikely(idxArch == 0))
496 return false;
497 Pkg->Arch = idxArch;
498 Pkg->ID = Cache.HeaderP->PackageCount++;
499
500 return true;
501 }
502 /*}}}*/
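// The packages of one group are chained through NextPackage between
// FirstPackage and LastPackage, which is what FinishCache() below relies on
// when it walks all architectures of a group. A minimal sketch of such a
// walk ("libfoo" is just an example name, Cache a pkgCache):
#if 0
   pkgCache::GrpIterator Grp = Cache.FindGrp("libfoo");
   for (pkgCache::PkgIterator P = Grp.PackageList();
        P.end() == false; P = Grp.NextPkg(P))
      std::clog << P.Name() << ":" << P.Arch() << std::endl;
#endif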
503 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
504 // ---------------------------------------------------------------------
505 /* */
506 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
507 ListParser &List)
508 {
509 if (CurrentFile == 0)
510 return true;
511
512 // Get a structure
513 map_ptrloc const VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
514 if (VerFile == 0)
515 return false;
516
517 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
518 VF->File = CurrentFile - Cache.PkgFileP;
519
520 // Link it to the end of the list
521 map_ptrloc *Last = &Ver->FileList;
522 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
523 Last = &V->NextFile;
524 VF->NextFile = *Last;
525 *Last = VF.Index();
526
527 VF->Offset = List.Offset();
528 VF->Size = List.Size();
529 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
530 Cache.HeaderP->MaxVerFileSize = VF->Size;
531 Cache.HeaderP->VerFileCount++;
532
533 return true;
534 }
535 /*}}}*/
536 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
537 // ---------------------------------------------------------------------
538 /* This puts a version structure in the linked list */
539 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
540 const string &VerStr,
541 unsigned long Next)
542 {
543 // Get a structure
544 map_ptrloc const Version = AllocateInMap(sizeof(pkgCache::Version));
545 if (Version == 0)
546 return 0;
547
548 // Fill it in
549 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
550 Ver->NextVer = Next;
551 Ver->ID = Cache.HeaderP->VersionCount++;
552 map_ptrloc const idxVerStr = WriteStringInMap(VerStr);
553 if (unlikely(idxVerStr == 0))
554 return 0;
555 Ver->VerStr = idxVerStr;
556
557 return Version;
558 }
559 /*}}}*/
560 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
561 // ---------------------------------------------------------------------
562 /* */
563 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
564 ListParser &List)
565 {
566 if (CurrentFile == 0)
567 return true;
568
569 // Get a structure
570 map_ptrloc const DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
571 if (DescFile == 0)
572 return false;
573
574 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
575 DF->File = CurrentFile - Cache.PkgFileP;
576
577 // Link it to the end of the list
578 map_ptrloc *Last = &Desc->FileList;
579 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
580 Last = &D->NextFile;
581
582 DF->NextFile = *Last;
583 *Last = DF.Index();
584
585 DF->Offset = List.Offset();
586 DF->Size = List.Size();
587 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
588 Cache.HeaderP->MaxDescFileSize = DF->Size;
589 Cache.HeaderP->DescFileCount++;
590
591 return true;
592 }
593 /*}}}*/
594 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
595 // ---------------------------------------------------------------------
596 /* This puts a description structure in the linked list */
597 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
598 const string &Lang,
599 const MD5SumValue &md5sum,
600 map_ptrloc Next)
601 {
602 // Get a structure
603 map_ptrloc const Description = AllocateInMap(sizeof(pkgCache::Description));
604 if (Description == 0)
605 return 0;
606
607 // Fill it in
608 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
609 Desc->NextDesc = Next;
610 Desc->ID = Cache.HeaderP->DescriptionCount++;
611 map_ptrloc const idxlanguage_code = WriteStringInMap(Lang);
612 map_ptrloc const idxmd5sum = WriteStringInMap(md5sum.Value());
613 if (unlikely(idxlanguage_code == 0 || idxmd5sum == 0))
614 return 0;
615 Desc->language_code = idxlanguage_code;
616 Desc->md5sum = idxmd5sum;
617
618 return Description;
619 }
620 /*}}}*/
621 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
622 // ---------------------------------------------------------------------
623 /* This prepares the Cache for delivery */
624 bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
625 {
626 // FIXME: add progress reporting for this operation
627 // Do we have different architectures in our groups?
628 vector<string> archs = APT::Configuration::getArchitectures();
629 if (archs.size() > 1)
630 {
631 // Create Conflicts between the group members
632 pkgCache::GrpIterator G = GetCache().GrpBegin();
633 Dynamic<pkgCache::GrpIterator> DynG(G);
634 for (; G.end() != true; G++)
635 {
636 string const PkgName = G.Name();
637 pkgCache::PkgIterator P = G.PackageList();
638 Dynamic<pkgCache::PkgIterator> DynP(P);
639 for (; P.end() != true; P = G.NextPkg(P))
640 {
641 pkgCache::PkgIterator allPkg;
642 Dynamic<pkgCache::PkgIterator> DynallPkg(allPkg);
643 pkgCache::VerIterator V = P.VersionList();
644 Dynamic<pkgCache::VerIterator> DynV(V);
645 for (; V.end() != true; V++)
646 {
647 char const * const Arch = P.Arch();
648 map_ptrloc *OldDepLast = NULL;
649 /* MultiArch handling introduces a lot of implicit Dependencies:
650 - MultiArch: same → Co-Installable if they have the same version
651 - Architecture: all → Need to be Co-Installable for internal reasons
652 - All others conflict with all other group members */
653 bool const coInstall = ((V->MultiArch == pkgCache::Version::All && strcmp(Arch, "all") != 0) ||
654 V->MultiArch == pkgCache::Version::Same);
655 if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
656 allPkg = G.FindPkg("all");
657 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
658 {
659 if (*A == Arch)
660 continue;
661 /* We allow only one installed arch at a time
662 per group, therefore each group member conflicts
663 with all other group members */
664 pkgCache::PkgIterator D = G.FindPkg(*A);
665 Dynamic<pkgCache::PkgIterator> DynD(D);
666 if (D.end() == true)
667 continue;
668 if (coInstall == true)
669 {
670 // Replaces: ${self}:other ( << ${binary:Version})
671 NewDepends(D, V, V.VerStr(),
672 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
673 OldDepLast);
674 // Breaks: ${self}:other (!= ${binary:Version})
675 NewDepends(D, V, V.VerStr(),
676 pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
677 OldDepLast);
678 if (V->MultiArch == pkgCache::Version::All)
679 {
680 // Depend on ${self}:all which does depend on nothing
681 NewDepends(allPkg, V, V.VerStr(),
682 pkgCache::Dep::Equals, pkgCache::Dep::Depends,
683 OldDepLast);
684 }
685 } else {
686 // Conflicts: ${self}:other
687 if (strcmp(Arch, "all") == 0) {
688 NewDepends(D, V, V.VerStr(),
689 pkgCache::Dep::NotEquals, pkgCache::Dep::Conflicts,
690 OldDepLast);
691 } else {
692 NewDepends(D, V, "",
693 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
694 OldDepLast);
695 }
696 }
697 }
698 }
699 }
700 }
701 }
702 return true;
703 }
704 /*}}}*/
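// Worked example of the implicit dependencies FinishCache() creates above,
// assuming the configured architectures are amd64 and i386 (package names
// and versions are only illustrative):
//  - libfoo:amd64 1.0 with "Multi-Arch: same" gets, against its i386 sibling,
//       Replaces: libfoo:i386 (<< 1.0)  and  Breaks: libfoo:i386 (!= 1.0)
//    so both architectures are co-installable only at exactly the same version.
//  - bar:amd64 without a Multi-Arch field gets an unversioned
//       Conflicts: bar:i386
//    so only one architecture of bar can be installed at a time.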
705 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
706 // ---------------------------------------------------------------------
707 /* This creates a dependency element in the tree. It is linked to the
708 version and to the package that it is pointing to. */
709 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
710 pkgCache::VerIterator &Ver,
711 string const &Version,
712 unsigned int const &Op,
713 unsigned int const &Type,
714 map_ptrloc *OldDepLast)
715 {
716 void const * const oldMap = Map.Data();
717 // Get a structure
718 map_ptrloc const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
719 if (unlikely(Dependency == 0))
720 return false;
721
722 // Fill it in
723 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
724 Dynamic<pkgCache::DepIterator> DynDep(Dep);
725 Dep->ParentVer = Ver.Index();
726 Dep->Type = Type;
727 Dep->CompareOp = Op;
728 Dep->ID = Cache.HeaderP->DependsCount++;
729
730 // Probe the reverse dependency list for a version string that matches
731 if (Version.empty() == false)
732 {
733 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
734 if (I->Version != 0 && I.TargetVer() == Version)
735 Dep->Version = I->Version;*/
736 if (Dep->Version == 0) {
737 map_ptrloc const index = WriteStringInMap(Version);
738 if (unlikely(index == 0))
739 return false;
740 Dep->Version = index;
741 }
742 }
743
744 // Link it to the package
745 Dep->Package = Pkg.Index();
746 Dep->NextRevDepends = Pkg->RevDepends;
747 Pkg->RevDepends = Dep.Index();
748
749 // Do we know where to link the Dependency to?
750 if (OldDepLast == NULL)
751 {
752 OldDepLast = &Ver->DependsList;
753 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
754 OldDepLast = &D->NextDepends;
755 } else if (oldMap != Map.Data())
756 OldDepLast += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
757
758 Dep->NextDepends = *OldDepLast;
759 *OldDepLast = Dep.Index();
760 OldDepLast = &Dep->NextDepends;
761
762 return true;
763 }
764 /*}}}*/
765 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
766 // ---------------------------------------------------------------------
767 /* This creates a Group and the Package to link this dependency to if
768 needed, and also handles caching of the old endpoint */
769 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator &Ver,
770 const string &PackageName,
771 const string &Arch,
772 const string &Version,
773 unsigned int Op,
774 unsigned int Type)
775 {
776 pkgCache::GrpIterator Grp;
777 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
778 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
779 return false;
780
781 // Locate the target package
782 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
783 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
784 if (Pkg.end() == true) {
785 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
786 return false;
787 }
788
789 // Is it a file dependency?
790 if (unlikely(PackageName[0] == '/'))
791 FoundFileDeps = true;
792
793 /* Caching the old end point speeds up generation substantially */
794 if (OldDepVer != Ver) {
795 OldDepLast = NULL;
796 OldDepVer = Ver;
797 }
798
799 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
800 }
801 /*}}}*/
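// Sketch of the call pattern a concrete parser uses while reading a single
// record: every parsed dependency is forwarded here, and thanks to the
// OldDepVer/OldDepLast caching the repeated appends to the same version stay
// cheap. The literals below are only illustrative.
#if 0
   // "Depends: libc6 (>= 2.3), foo" for the version currently being built:
   NewDepends(Ver, "libc6", "amd64", "2.3", pkgCache::Dep::GreaterEq, pkgCache::Dep::Depends);
   NewDepends(Ver, "foo",   "amd64", "",    pkgCache::Dep::NoOp,      pkgCache::Dep::Depends);
#endif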
802 // ListParser::NewProvides - Create a Provides element /*{{{*/
803 // ---------------------------------------------------------------------
804 /* */
805 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator &Ver,
806 const string &PkgName,
807 const string &PkgArch,
808 const string &Version)
809 {
810 pkgCache &Cache = Owner->Cache;
811
812 // We do not add self-referencing provides
813 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
814 return true;
815
816 // Get a structure
817 map_ptrloc const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
818 if (unlikely(Provides == 0))
819 return false;
820 Cache.HeaderP->ProvidesCount++;
821
822 // Fill it in
823 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
824 Dynamic<pkgCache::PrvIterator> DynPrv(Prv);
825 Prv->Version = Ver.Index();
826 Prv->NextPkgProv = Ver->ProvidesList;
827 Ver->ProvidesList = Prv.Index();
828 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
829 return false;
830
831 // Locate the target package
832 pkgCache::PkgIterator Pkg;
833 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
834 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
835 return false;
836
837 // Link it to the package
838 Prv->ParentPkg = Pkg.Index();
839 Prv->NextProvides = Pkg->ProvidesList;
840 Pkg->ProvidesList = Prv.Index();
841
842 return true;
843 }
844 /*}}}*/
845 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
846 // ---------------------------------------------------------------------
847 /* This is used to select which file is to be associated with all newly
848 added versions. The caller is responsible for setting the IMS fields. */
849 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
850 const pkgIndexFile &Index,
851 unsigned long Flags)
852 {
853 // Get some space for the structure
854 map_ptrloc const idxFile = AllocateInMap(sizeof(*CurrentFile));
855 if (unlikely(idxFile == 0))
856 return false;
857 CurrentFile = Cache.PkgFileP + idxFile;
858
859 // Fill it in
860 map_ptrloc const idxFileName = WriteStringInMap(File);
861 map_ptrloc const idxSite = WriteUniqString(Site);
862 if (unlikely(idxFileName == 0 || idxSite == 0))
863 return false;
864 CurrentFile->FileName = idxFileName;
865 CurrentFile->Site = idxSite;
866 CurrentFile->NextFile = Cache.HeaderP->FileList;
867 CurrentFile->Flags = Flags;
868 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
869 map_ptrloc const idxIndexType = WriteUniqString(Index.GetType()->Label);
870 if (unlikely(idxIndexType == 0))
871 return false;
872 CurrentFile->IndexType = idxIndexType;
873 PkgFileName = File;
874 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
875 Cache.HeaderP->PackageFileCount++;
876
877 if (Progress != 0)
878 Progress->SubProgress(Index.Size());
879 return true;
880 }
881 /*}}}*/
882 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
883 // ---------------------------------------------------------------------
884 /* This is used to create handles to strings. Given the same text it
885 always returns the same number */
886 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
887 unsigned int Size)
888 {
889 /* We use a very small transient hash table here; this speeds up generation
890 by a fair amount on slower machines */
891 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
892 if (Bucket != 0 &&
893 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
894 return Bucket->String;
895
896 // Search for an insertion point
897 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
898 int Res = 1;
899 map_ptrloc *Last = &Cache.HeaderP->StringList;
900 for (; I != Cache.StringItemP; Last = &I->NextItem,
901 I = Cache.StringItemP + I->NextItem)
902 {
903 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
904 if (Res >= 0)
905 break;
906 }
907
908 // Match
909 if (Res == 0)
910 {
911 Bucket = I;
912 return I->String;
913 }
914
915 // Get a structure
916 void const * const oldMap = Map.Data();
917 map_ptrloc const Item = AllocateInMap(sizeof(pkgCache::StringItem));
918 if (Item == 0)
919 return 0;
920
921 map_ptrloc const idxString = WriteStringInMap(S,Size);
922 if (unlikely(idxString == 0))
923 return 0;
924 if (oldMap != Map.Data()) {
925 Last += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
926 I += (pkgCache::StringItem*) Map.Data() - (pkgCache::StringItem*) oldMap;
927 }
928 *Last = Item;
929
930 // Fill in the structure
931 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
932 ItemP->NextItem = I - Cache.StringItemP;
933 ItemP->String = idxString;
934
935 Bucket = ItemP;
936 return ItemP->String;
937 }
938 /*}}}*/
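// The transient bucket above only hashes the first two characters, so for
// S = "amd64" the slot is ('a'*5 + 'm') % _count(UniqHash); on a miss the
// sorted, cache-resident string list is searched instead. Typical call
// (the literal is only an example):
#if 0
   map_ptrloc const idxArch = WriteUniqString("amd64");
#endif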
939 // CheckValidity - Check that a cache is up-to-date /*{{{*/
940 // ---------------------------------------------------------------------
941 /* This just verifies that each file in the list of index files exists,
942 has attributes matching the cache, and that the cache does not have
943 any extra files. */
944 static bool CheckValidity(const string &CacheFile, FileIterator Start,
945 FileIterator End,MMap **OutMap = 0)
946 {
947 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
948 // No file, certainly invalid
949 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
950 {
951 if (Debug == true)
952 std::clog << "CacheFile doesn't exist" << std::endl;
953 return false;
954 }
955
956 // Map it
957 FileFd CacheF(CacheFile,FileFd::ReadOnly);
958 SPtr<MMap> Map = new MMap(CacheF,0);
959 pkgCache Cache(Map);
960 if (_error->PendingError() == true || Map->Size() == 0)
961 {
962 if (Debug == true)
963 std::clog << "Errors are pending or Map is empty()" << std::endl;
964 _error->Discard();
965 return false;
966 }
967
968 /* Now we check every index file, see if it is in the cache,
969 verify the IMS data and check that it is on the disk too.. */
970 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
971 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
972 for (; Start != End; Start++)
973 {
974 if (Debug == true)
975 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
976 if ((*Start)->HasPackages() == false)
977 {
978 if (Debug == true)
979 std::clog << "Has NO packages" << std::endl;
980 continue;
981 }
982
983 if ((*Start)->Exists() == false)
984 {
985 #if 0 // mvo: we no longer give a message here (Default Sources spec)
986 _error->WarningE("stat",_("Couldn't stat source package list %s"),
987 (*Start)->Describe().c_str());
988 #endif
989 if (Debug == true)
990 std::clog << "file doesn't exist" << std::endl;
991 continue;
992 }
993
994 // FindInCache is also expected to do an IMS check.
995 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
996 if (File.end() == true)
997 {
998 if (Debug == true)
999 std::clog << "FindInCache returned end-Pointer" << std::endl;
1000 return false;
1001 }
1002
1003 Visited[File->ID] = true;
1004 if (Debug == true)
1005 std::clog << "with ID " << File->ID << " is valid" << std::endl;
1006 }
1007
1008 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
1009 if (Visited[I] == false)
1010 {
1011 if (Debug == true)
1012 std::clog << "File with ID" << I << " wasn't visited" << std::endl;
1013 return false;
1014 }
1015
1016 if (_error->PendingError() == true)
1017 {
1018 if (Debug == true)
1019 {
1020 std::clog << "Validity failed because of pending errors:" << std::endl;
1021 _error->DumpErrors();
1022 }
1023 _error->Discard();
1024 return false;
1025 }
1026
1027 if (OutMap != 0)
1028 *OutMap = Map.UnGuard();
1029 return true;
1030 }
1031 /*}}}*/
1032 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
1033 // ---------------------------------------------------------------------
1034 /* Size is kind of an abstract notion that is only used for the progress
1035 meter */
1036 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
1037 {
1038 unsigned long TotalSize = 0;
1039 for (; Start != End; Start++)
1040 {
1041 if ((*Start)->HasPackages() == false)
1042 continue;
1043 TotalSize += (*Start)->Size();
1044 }
1045 return TotalSize;
1046 }
1047 /*}}}*/
1048 // BuildCache - Merge the list of index files into the cache /*{{{*/
1049 // ---------------------------------------------------------------------
1050 /* */
1051 static bool BuildCache(pkgCacheGenerator &Gen,
1052 OpProgress *Progress,
1053 unsigned long &CurrentSize,unsigned long TotalSize,
1054 FileIterator Start, FileIterator End)
1055 {
1056 FileIterator I;
1057 for (I = Start; I != End; I++)
1058 {
1059 if ((*I)->HasPackages() == false)
1060 continue;
1061
1062 if ((*I)->Exists() == false)
1063 continue;
1064
1065 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
1066 {
1067 _error->Warning("Duplicate sources.list entry %s",
1068 (*I)->Describe().c_str());
1069 continue;
1070 }
1071
1072 unsigned long Size = (*I)->Size();
1073 if (Progress != NULL)
1074 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
1075 CurrentSize += Size;
1076
1077 if ((*I)->Merge(Gen,Progress) == false)
1078 return false;
1079 }
1080
1081 if (Gen.HasFileDeps() == true)
1082 {
1083 if (Progress != NULL)
1084 Progress->Done();
1085 TotalSize = ComputeSize(Start, End);
1086 CurrentSize = 0;
1087 for (I = Start; I != End; I++)
1088 {
1089 unsigned long Size = (*I)->Size();
1090 if (Progress != NULL)
1091 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
1092 CurrentSize += Size;
1093 if ((*I)->MergeFileProvides(Gen,Progress) == false)
1094 return false;
1095 }
1096 }
1097
1098 return true;
1099 }
1100 /*}}}*/
1101 // CacheGenerator::CreateDynamicMMap - load an mmap with configuration options /*{{{*/
1102 DynamicMMap* pkgCacheGenerator::CreateDynamicMMap(FileFd *CacheF, unsigned long Flags) {
1103 unsigned long const MapStart = _config->FindI("APT::Cache-Start", 24*1024*1024);
1104 unsigned long const MapGrow = _config->FindI("APT::Cache-Grow", 1*1024*1024);
1105 unsigned long const MapLimit = _config->FindI("APT::Cache-Limit", 0);
1106 Flags |= MMap::Moveable;
1107 if (_config->FindB("APT::Cache-Fallback", false) == true)
1108 Flags |= MMap::Fallback;
1109 if (CacheF != NULL)
1110 return new DynamicMMap(*CacheF, Flags, MapStart, MapGrow, MapLimit);
1111 else
1112 return new DynamicMMap(Flags, MapStart, MapGrow, MapLimit);
1113 }
1114 /*}}}*/
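// The mmap geometry read above can be tuned through configuration before
// the cache is built; a minimal sketch (the values are only examples, the
// coded defaults being a 24 MB start, 1 MB growth step and no limit):
#if 0
   _config->Set("APT::Cache-Start", 48*1024*1024);   // initial allocation in bytes
   _config->Set("APT::Cache-Grow", 2*1024*1024);     // growth step when the map fills up
   _config->Set("APT::Cache-Limit", 0);              // 0 means no upper bound
   // picked up by CreateDynamicMMap() above the next time a cache is built,
   // e.g. through MakeStatusCache() below
#endif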
1115 // CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
1116 // ---------------------------------------------------------------------
1117 /* This makes sure that the status cache (the cache that has all
1118 index files from the sources list and all local ones) is ready
1119 to be mmapped. If OutMap is not zero then a MMap object representing
1120 the cache will be stored there. This is pretty much mandatory if you
1121 are using AllowMem. AllowMem lets the function be run as non-root
1122 where it builds the cache 'fast' into a memory buffer. */
1123 __deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
1124 MMap **OutMap, bool AllowMem)
1125 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
1126 bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
1127 MMap **OutMap,bool AllowMem)
1128 {
1129 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
1130
1131 vector<pkgIndexFile *> Files;
1132 for (vector<metaIndex *>::const_iterator i = List.begin();
1133 i != List.end();
1134 i++)
1135 {
1136 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
1137 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
1138 j != Indexes->end();
1139 j++)
1140 Files.push_back (*j);
1141 }
1142
1143 unsigned long const EndOfSource = Files.size();
1144 if (_system->AddStatusFiles(Files) == false)
1145 return false;
1146
1147 // Decide if we can write to the files..
1148 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1149 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1150
1151 // ensure the cache directory exists
1152 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1153 {
1154 string dir = _config->FindDir("Dir::Cache");
1155 size_t const len = dir.size();
1156 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1157 dir = dir.substr(0, len - 5);
1158 if (CacheFile.empty() == false)
1159 CreateDirectory(dir, flNotFile(CacheFile));
1160 if (SrcCacheFile.empty() == false)
1161 CreateDirectory(dir, flNotFile(SrcCacheFile));
1162 }
1163
1164 // Decide if we can write to the cache
1165 bool Writeable = false;
1166 if (CacheFile.empty() == false)
1167 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1168 else
1169 if (SrcCacheFile.empty() == false)
1170 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1171 if (Debug == true)
1172 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1173
1174 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1175 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1176
1177 if (Progress != NULL)
1178 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1179
1180 // Cache is OK, Fin.
1181 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1182 {
1183 if (Progress != NULL)
1184 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1185 if (Debug == true)
1186 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1187 return true;
1188 }
1189 else if (Debug == true)
1190 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1191
1192 /* At this point we know we need to reconstruct the package cache,
1193 begin. */
1194 SPtr<FileFd> CacheF;
1195 SPtr<DynamicMMap> Map;
1196 if (Writeable == true && CacheFile.empty() == false)
1197 {
1198 unlink(CacheFile.c_str());
1199 CacheF = new FileFd(CacheFile,FileFd::WriteAtomic);
1200 fchmod(CacheF->Fd(),0644);
1201 Map = CreateDynamicMMap(CacheF, MMap::Public);
1202 if (_error->PendingError() == true)
1203 return false;
1204 if (Debug == true)
1205 std::clog << "Open filebased MMap" << std::endl;
1206 }
1207 else
1208 {
1209 // Just build it in memory..
1210 Map = CreateDynamicMMap(NULL);
1211 if (Debug == true)
1212 std::clog << "Open memory Map (not filebased)" << std::endl;
1213 }
1214
1215 // Let's try the source cache.
1216 unsigned long CurrentSize = 0;
1217 unsigned long TotalSize = 0;
1218 if (CheckValidity(SrcCacheFile,Files.begin(),
1219 Files.begin()+EndOfSource) == true)
1220 {
1221 if (Debug == true)
1222 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1223 // Preload the map with the source cache
1224 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1225 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1226 if ((alloc == 0 && _error->PendingError())
1227 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1228 SCacheF.Size()) == false)
1229 return false;
1230
1231 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1232
1233 // Build the status cache
1234 pkgCacheGenerator Gen(Map.Get(),Progress);
1235 if (_error->PendingError() == true)
1236 return false;
1237 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1238 Files.begin()+EndOfSource,Files.end()) == false)
1239 return false;
1240
1241 // FIXME: move me to a better place
1242 Gen.FinishCache(Progress);
1243 }
1244 else
1245 {
1246 if (Debug == true)
1247 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1248 TotalSize = ComputeSize(Files.begin(),Files.end());
1249
1250 // Build the source cache
1251 pkgCacheGenerator Gen(Map.Get(),Progress);
1252 if (_error->PendingError() == true)
1253 return false;
1254 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1255 Files.begin(),Files.begin()+EndOfSource) == false)
1256 return false;
1257
1258 // Write it back
1259 if (Writeable == true && SrcCacheFile.empty() == false)
1260 {
1261 FileFd SCacheF(SrcCacheFile,FileFd::WriteAtomic);
1262 if (_error->PendingError() == true)
1263 return false;
1264
1265 fchmod(SCacheF.Fd(),0644);
1266
1267 // Write out the main data
1268 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1269 return _error->Error(_("IO Error saving source cache"));
1270 SCacheF.Sync();
1271
1272 // Write out the proper header
1273 Gen.GetCache().HeaderP->Dirty = false;
1274 if (SCacheF.Seek(0) == false ||
1275 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1276 return _error->Error(_("IO Error saving source cache"));
1277 Gen.GetCache().HeaderP->Dirty = true;
1278 SCacheF.Sync();
1279 }
1280
1281 // Build the status cache
1282 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1283 Files.begin()+EndOfSource,Files.end()) == false)
1284 return false;
1285
1286 // FIXME: move me to a better place
1287 Gen.FinishCache(Progress);
1288 }
1289 if (Debug == true)
1290 std::clog << "Caches are ready for shipping" << std::endl;
1291
1292 if (_error->PendingError() == true)
1293 return false;
1294 if (OutMap != 0)
1295 {
1296 if (CacheF != 0)
1297 {
1298 delete Map.UnGuard();
1299 *OutMap = new MMap(*CacheF,0);
1300 }
1301 else
1302 {
1303 *OutMap = Map.UnGuard();
1304 }
1305 }
1306
1307 return true;
1308 }
1309 /*}}}*/
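// Typical call sequence from a front-end (a condensed sketch; error and
// progress handling are trimmed, and ownership of OutMap passes to the
// caller):
#if 0
   pkgSourceList List;
   if (List.ReadMainList() == false)
      return false;
   OpProgress Prog;
   MMap *OutMap = NULL;
   if (pkgCacheGenerator::MakeStatusCache(List, &Prog, &OutMap, true) == false)
      return false;
   pkgCache Cache(OutMap);      // mmap-backed cache, ready for use
#endif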
1310 // CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1311 // ---------------------------------------------------------------------
1312 /* */
1313 __deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1314 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1315 bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1316 {
1317 vector<pkgIndexFile *> Files;
1318 unsigned long EndOfSource = Files.size();
1319 if (_system->AddStatusFiles(Files) == false)
1320 return false;
1321
1322 SPtr<DynamicMMap> Map = CreateDynamicMMap(NULL);
1323 unsigned long CurrentSize = 0;
1324 unsigned long TotalSize = 0;
1325
1326 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1327
1328 // Build the status cache
1329 if (Progress != NULL)
1330 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1331 pkgCacheGenerator Gen(Map.Get(),Progress);
1332 if (_error->PendingError() == true)
1333 return false;
1334 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1335 Files.begin()+EndOfSource,Files.end()) == false)
1336 return false;
1337
1338 // FIXME: move me to a better place
1339 Gen.FinishCache(Progress);
1340
1341 if (_error->PendingError() == true)
1342 return false;
1343 *OutMap = Map.UnGuard();
1344
1345 return true;
1346 }
1347 /*}}}*/