1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: pkgcachegen.cc,v 1.53.2.1 2003/12/24 23:09:17 mdz Exp $
4 /* ######################################################################
5
6 Package Cache Generator - Generator for the cache structure.
7
8 This builds the cache structure from the abstract package list parser.
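   The usual entry points are the static MakeStatusCache() and
   MakeOnlyStatusCache() helpers at the bottom of this file: they create a
   DynamicMMap, let BuildCache() merge every index file into the generator
   and finally call FinishCache().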
9
10 ##################################################################### */
11 /*}}}*/
12 // Include Files /*{{{*/
13 #define APT_COMPATIBILITY 986
14
15 #include <apt-pkg/pkgcachegen.h>
16 #include <apt-pkg/error.h>
17 #include <apt-pkg/version.h>
18 #include <apt-pkg/progress.h>
19 #include <apt-pkg/sourcelist.h>
20 #include <apt-pkg/configuration.h>
21 #include <apt-pkg/aptconfiguration.h>
22 #include <apt-pkg/strutl.h>
23 #include <apt-pkg/sptr.h>
24 #include <apt-pkg/pkgsystem.h>
25 #include <apt-pkg/macros.h>
26
27 #include <apt-pkg/tagfile.h>
28
29 #include <apti18n.h>
30
31 #include <vector>
32
33 #include <sys/stat.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <stdio.h>
37 /*}}}*/
38 typedef vector<pkgIndexFile *>::iterator FileIterator;
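// Dynamic<Iter> registers live cache iterators so that ReMap() below can
// adjust them whenever a write causes the underlying DynamicMMap to move.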
39 template <typename Iter> std::vector<Iter*> pkgCacheGenerator::Dynamic<Iter>::toReMap;
40
41 // CacheGenerator::pkgCacheGenerator - Constructor /*{{{*/
42 // ---------------------------------------------------------------------
43 /* We set the dirty flag and make sure that it is written to the disk */
44 pkgCacheGenerator::pkgCacheGenerator(DynamicMMap *pMap,OpProgress *Prog) :
45 Map(*pMap), Cache(pMap,false), Progress(Prog),
46 FoundFileDeps(0)
47 {
48 CurrentFile = 0;
49 memset(UniqHash,0,sizeof(UniqHash));
50
51 if (_error->PendingError() == true)
52 return;
53
54 if (Map.Size() == 0)
55 {
56 // Set up the map interface.
57 Cache.HeaderP = (pkgCache::Header *)Map.Data();
58 if (Map.RawAllocate(sizeof(pkgCache::Header)) == 0 && _error->PendingError() == true)
59 return;
60
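// Partition the map into pools so that equal-sized cache structures are
// allocated in larger chunks (see DynamicMMap::UsePools).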
61 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
62
63 // Starting header
64 *Cache.HeaderP = pkgCache::Header();
65 map_ptrloc const idxVerSysName = WriteStringInMap(_system->VS->Label);
66 Cache.HeaderP->VerSysName = idxVerSysName;
67 map_ptrloc const idxArchitecture = WriteStringInMap(_config->Find("APT::Architecture"));
68 Cache.HeaderP->Architecture = idxArchitecture;
69 if (unlikely(idxVerSysName == 0 || idxArchitecture == 0))
70 return;
71 Cache.ReMap();
72 }
73 else
74 {
75 // Map directly from the existing file
76 Cache.ReMap();
77 Map.UsePools(*Cache.HeaderP->Pools,sizeof(Cache.HeaderP->Pools)/sizeof(Cache.HeaderP->Pools[0]));
78 if (Cache.VS != _system->VS)
79 {
80 _error->Error(_("Cache has an incompatible versioning system"));
81 return;
82 }
83 }
84
85 Cache.HeaderP->Dirty = true;
86 Map.Sync(0,sizeof(pkgCache::Header));
87 }
88 /*}}}*/
89 // CacheGenerator::~pkgCacheGenerator - Destructor /*{{{*/
90 // ---------------------------------------------------------------------
91 /* We sync the data then unset the dirty flag in two steps so as to
92 avoid a problem during a crash */
93 pkgCacheGenerator::~pkgCacheGenerator()
94 {
95 if (_error->PendingError() == true)
96 return;
97 if (Map.Sync() == false)
98 return;
99
100 Cache.HeaderP->Dirty = false;
101 Map.Sync(0,sizeof(pkgCache::Header));
102 }
103 /*}}}*/
104 void pkgCacheGenerator::ReMap(void const * const oldMap, void const * const newMap) {/*{{{*/
105 if (oldMap == newMap)
106 return;
107
108 Cache.ReMap(false);
109
110 CurrentFile += (pkgCache::PackageFile*) newMap - (pkgCache::PackageFile*) oldMap;
111
112 for (size_t i = 0; i < _count(UniqHash); ++i)
113 if (UniqHash[i] != 0)
114 UniqHash[i] += (pkgCache::StringItem*) newMap - (pkgCache::StringItem*) oldMap;
115
116 for (std::vector<pkgCache::GrpIterator*>::const_iterator i = Dynamic<pkgCache::GrpIterator>::toReMap.begin();
117 i != Dynamic<pkgCache::GrpIterator>::toReMap.end(); ++i)
118 (*i)->ReMap(oldMap, newMap);
119 for (std::vector<pkgCache::PkgIterator*>::const_iterator i = Dynamic<pkgCache::PkgIterator>::toReMap.begin();
120 i != Dynamic<pkgCache::PkgIterator>::toReMap.end(); ++i)
121 (*i)->ReMap(oldMap, newMap);
122 for (std::vector<pkgCache::VerIterator*>::const_iterator i = Dynamic<pkgCache::VerIterator>::toReMap.begin();
123 i != Dynamic<pkgCache::VerIterator>::toReMap.end(); ++i)
124 (*i)->ReMap(oldMap, newMap);
125 for (std::vector<pkgCache::DepIterator*>::const_iterator i = Dynamic<pkgCache::DepIterator>::toReMap.begin();
126 i != Dynamic<pkgCache::DepIterator>::toReMap.end(); ++i)
127 (*i)->ReMap(oldMap, newMap);
128 for (std::vector<pkgCache::DescIterator*>::const_iterator i = Dynamic<pkgCache::DescIterator>::toReMap.begin();
129 i != Dynamic<pkgCache::DescIterator>::toReMap.end(); ++i)
130 (*i)->ReMap(oldMap, newMap);
131 for (std::vector<pkgCache::PrvIterator*>::const_iterator i = Dynamic<pkgCache::PrvIterator>::toReMap.begin();
132 i != Dynamic<pkgCache::PrvIterator>::toReMap.end(); ++i)
133 (*i)->ReMap(oldMap, newMap);
134 for (std::vector<pkgCache::PkgFileIterator*>::const_iterator i = Dynamic<pkgCache::PkgFileIterator>::toReMap.begin();
135 i != Dynamic<pkgCache::PkgFileIterator>::toReMap.end(); ++i)
136 (*i)->ReMap(oldMap, newMap);
137 } /*}}}*/
138 // CacheGenerator::WriteStringInMap /*{{{*/
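// ---------------------------------------------------------------------
/* Writing a string can grow (and therefore move) the map, so ReMap() is
   called afterwards to fix up all registered iterators and pointers;
   callers holding raw pointers into the map must re-derive them. */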
139 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String,
140 const unsigned long &Len) {
141 void const * const oldMap = Map.Data();
142 map_ptrloc const index = Map.WriteString(String, Len);
143 if (index != 0)
144 ReMap(oldMap, Map.Data());
145 return index;
146 }
147 /*}}}*/
148 // CacheGenerator::WriteStringInMap /*{{{*/
149 map_ptrloc pkgCacheGenerator::WriteStringInMap(const char *String) {
150 void const * const oldMap = Map.Data();
151 map_ptrloc const index = Map.WriteString(String);
152 if (index != 0)
153 ReMap(oldMap, Map.Data());
154 return index;
155 }
156 /*}}}*/
157 map_ptrloc pkgCacheGenerator::AllocateInMap(const unsigned long &size) {/*{{{*/
158 void const * const oldMap = Map.Data();
159 map_ptrloc const index = Map.Allocate(size);
160 if (index != 0)
161 ReMap(oldMap, Map.Data());
162 return index;
163 }
164 /*}}}*/
165 // CacheGenerator::MergeList - Merge the package list /*{{{*/
166 // ---------------------------------------------------------------------
167 /* This provides the generation of the entries in the cache. Each loop
168 goes through a single package record from the underlying parse engine. */
169 bool pkgCacheGenerator::MergeList(ListParser &List,
170 pkgCache::VerIterator *OutVer)
171 {
172 List.Owner = this;
173
174 unsigned int Counter = 0;
175 while (List.Step() == true)
176 {
177 string const PackageName = List.Package();
178 if (PackageName.empty() == true)
179 return false;
180
181 /* 'Architecture: all' packages are handled as if they were bound to
182 each architecture, so we add all information to every (simulated) arch package */
183 std::vector<string> genArch;
184 if (List.ArchitectureAll() == true) {
185 genArch = APT::Configuration::getArchitectures();
186 if (genArch.size() != 1)
187 genArch.push_back("all");
188 } else
189 genArch.push_back(List.Architecture());
190
191 for (std::vector<string>::const_iterator arch = genArch.begin();
192 arch != genArch.end(); ++arch)
193 {
194 // Get a pointer to the package structure
195 pkgCache::PkgIterator Pkg;
196 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
197 if (NewPackage(Pkg, PackageName, *arch) == false)
198 return _error->Error(_("Error occurred while processing %s (NewPackage)"),PackageName.c_str());
199 Counter++;
200 if (Counter % 100 == 0 && Progress != 0)
201 Progress->Progress(List.Offset());
202
203 /* Get a pointer to the version structure. We know the list is sorted
204 so we use that fact in the search. Insertion of new versions is
205 done with correct sorting */
206 string Version = List.Version();
207 if (Version.empty() == true)
208 {
209 // we first process the package, then the descriptions
210 // (this has the bonus that we get a MMap error when we run
211 // out of MMap space)
212 pkgCache::VerIterator Ver(Cache);
213 Dynamic<pkgCache::VerIterator> DynVer(Ver);
214 if (List.UsePackage(Pkg, Ver) == false)
215 return _error->Error(_("Error occurred while processing %s (UsePackage1)"),
216 PackageName.c_str());
217
218 // Find the right version to write the description
219 MD5SumValue CurMd5 = List.Description_md5();
220 Ver = Pkg.VersionList();
221
222 for (; Ver.end() == false; ++Ver)
223 {
224 pkgCache::DescIterator Desc = Ver.DescriptionList();
225 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
226 map_ptrloc *LastDesc = &Ver->DescriptionList;
227 bool duplicate=false;
228
229 // don't add a new description if we already have one for the
230 // given md5 and language
231 for ( ; Desc.end() == false; Desc++)
232 if (MD5SumValue(Desc.md5()) == CurMd5 &&
233 Desc.LanguageCode() == List.DescriptionLanguage())
234 duplicate=true;
235 if(duplicate)
236 continue;
237
238 for (Desc = Ver.DescriptionList();
239 Desc.end() == false;
240 LastDesc = &Desc->NextDesc, Desc++)
241 {
242 if (MD5SumValue(Desc.md5()) == CurMd5)
243 {
244 // Add new description
245 void const * const oldMap = Map.Data();
246 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), CurMd5, *LastDesc);
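// NewDescription may have grown and moved the map, so rebase LastDesc
// into the new mapping before writing through it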
247 if (oldMap != Map.Data())
248 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
249 *LastDesc = descindex;
250 Desc->ParentPkg = Pkg.Index();
251
252 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
253 return _error->Error(_("Error occurred while processing %s (NewFileDesc1)"),PackageName.c_str());
254 break;
255 }
256 }
257 }
258
259 continue;
260 }
261
262 pkgCache::VerIterator Ver = Pkg.VersionList();
263 Dynamic<pkgCache::VerIterator> DynVer(Ver);
264 map_ptrloc *LastVer = &Pkg->VersionList;
265 void const * oldMap = Map.Data();
266 int Res = 1;
267 unsigned long const Hash = List.VersionHash();
268 for (; Ver.end() == false; LastVer = &Ver->NextVer, Ver++)
269 {
270 Res = Cache.VS->CmpVersion(Version,Ver.VerStr());
271 // Version is higher than the current version - insert here
272 if (Res > 0)
273 break;
274 // Version strings are equal - is the hash also equal?
275 if (Res == 0 && Ver->Hash == Hash)
276 break;
277 // proceed with the next one until we have either found the right
278 // version or reached a lower one, in front of which we will insert
279 }
280
281 /* We already have a version for this item, record that we saw it */
282 if (Res == 0 && Ver.end() == false && Ver->Hash == Hash)
283 {
284 if (List.UsePackage(Pkg,Ver) == false)
285 return _error->Error(_("Error occurred while processing %s (UsePackage2)"),
286 PackageName.c_str());
287
288 if (NewFileVer(Ver,List) == false)
289 return _error->Error(_("Error occurred while processing %s (NewFileVer1)"),
290 PackageName.c_str());
291
292 // Read only a single record and return
293 if (OutVer != 0)
294 {
295 *OutVer = Ver;
296 FoundFileDeps |= List.HasFileDeps();
297 return true;
298 }
299
300 continue;
301 }
302
303 // Add a new version
304 map_ptrloc const verindex = NewVersion(Ver,Version,*LastVer);
305 if (verindex == 0 && _error->PendingError())
306 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
307 PackageName.c_str(), 1);
308
309 if (oldMap != Map.Data())
310 LastVer += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
311 *LastVer = verindex;
312 Ver->ParentPkg = Pkg.Index();
313 Ver->Hash = Hash;
314
315 if (List.NewVersion(Ver) == false)
316 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
317 PackageName.c_str(), 2);
318
319 if (List.UsePackage(Pkg,Ver) == false)
320 return _error->Error(_("Error occurred while processing %s (UsePackage3)"),
321 PackageName.c_str());
322
323 if (NewFileVer(Ver,List) == false)
324 return _error->Error(_("Error occurred while processing %s (NewVersion%d)"),
325 PackageName.c_str(), 3);
326
327 // Read only a single record and return
328 if (OutVer != 0)
329 {
330 *OutVer = Ver;
331 FoundFileDeps |= List.HasFileDeps();
332 return true;
333 }
334
335 /* Record the Description data. Description data always exist in
336 Packages and Translation-* files. */
337 pkgCache::DescIterator Desc = Ver.DescriptionList();
338 Dynamic<pkgCache::DescIterator> DynDesc(Desc);
339 map_ptrloc *LastDesc = &Ver->DescriptionList;
340
341 // Skip to the end of description set
342 for (; Desc.end() == false; LastDesc = &Desc->NextDesc, Desc++);
343
344 // Add new description
345 oldMap = Map.Data();
346 map_ptrloc const descindex = NewDescription(Desc, List.DescriptionLanguage(), List.Description_md5(), *LastDesc);
347 if (oldMap != Map.Data())
348 LastDesc += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
349 *LastDesc = descindex;
350 Desc->ParentPkg = Pkg.Index();
351
352 if ((*LastDesc == 0 && _error->PendingError()) || NewFileDesc(Desc,List) == false)
353 return _error->Error(_("Error occurred while processing %s (NewFileDesc2)"),PackageName.c_str());
354 }
355 }
356
357 FoundFileDeps |= List.HasFileDeps();
358
359 if (Cache.HeaderP->PackageCount >= (1ULL<<sizeof(Cache.PkgP->ID)*8)-1)
360 return _error->Error(_("Wow, you exceeded the number of package "
361 "names this APT is capable of."));
362 if (Cache.HeaderP->VersionCount >= (1ULL<<(sizeof(Cache.VerP->ID)*8))-1)
363 return _error->Error(_("Wow, you exceeded the number of versions "
364 "this APT is capable of."));
365 if (Cache.HeaderP->DescriptionCount >= (1ULL<<(sizeof(Cache.DescP->ID)*8))-1)
366 return _error->Error(_("Wow, you exceeded the number of descriptions "
367 "this APT is capable of."));
368 if (Cache.HeaderP->DependsCount >= (1ULL<<(sizeof(Cache.DepP->ID)*8))-1ULL)
369 return _error->Error(_("Wow, you exceeded the number of dependencies "
370 "this APT is capable of."));
371 return true;
372 }
373 /*}}}*/
374 // CacheGenerator::MergeFileProvides - Merge file provides /*{{{*/
375 // ---------------------------------------------------------------------
376 /* If we found any file depends while parsing the main list we need to
377 resolve them. Since it is undesirable to load the entire list of files
378 into the cache as virtual packages we do a two-stage effort. MergeList
379 identifies the file depends and this creates Provides for them by
380 re-parsing all the indexes. */
381 bool pkgCacheGenerator::MergeFileProvides(ListParser &List)
382 {
383 List.Owner = this;
384
385 unsigned int Counter = 0;
386 while (List.Step() == true)
387 {
388 string PackageName = List.Package();
389 if (PackageName.empty() == true)
390 return false;
391 string Version = List.Version();
392 if (Version.empty() == true)
393 continue;
394
395 pkgCache::PkgIterator Pkg = Cache.FindPkg(PackageName);
396 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
397 if (Pkg.end() == true)
398 return _error->Error(_("Error occurred while processing %s (FindPkg)"),
399 PackageName.c_str());
400 Counter++;
401 if (Counter % 100 == 0 && Progress != 0)
402 Progress->Progress(List.Offset());
403
404 unsigned long Hash = List.VersionHash();
405 pkgCache::VerIterator Ver = Pkg.VersionList();
406 Dynamic<pkgCache::VerIterator> DynVer(Ver);
407 for (; Ver.end() == false; Ver++)
408 {
409 if (Ver->Hash == Hash && Version == Ver.VerStr()) // compare contents, not pointers
410 {
411 if (List.CollectFileProvides(Cache,Ver) == false)
412 return _error->Error(_("Error occurred while processing %s (CollectFileProvides)"),PackageName.c_str());
413 break;
414 }
415 }
416
417 if (Ver.end() == true)
418 _error->Warning(_("Package %s %s was not found while processing file dependencies"),PackageName.c_str(),Version.c_str());
419 }
420
421 return true;
422 }
423 /*}}}*/
424 // CacheGenerator::NewGroup - Add a new group /*{{{*/
425 // ---------------------------------------------------------------------
426 /* This creates a new group structure and adds it to the hash table */
427 bool pkgCacheGenerator::NewGroup(pkgCache::GrpIterator &Grp, const string &Name)
428 {
429 Grp = Cache.FindGrp(Name);
430 if (Grp.end() == false)
431 return true;
432
433 // Get a structure
434 map_ptrloc const Group = AllocateInMap(sizeof(pkgCache::Group));
435 if (unlikely(Group == 0))
436 return false;
437
438 Grp = pkgCache::GrpIterator(Cache, Cache.GrpP + Group);
439 map_ptrloc const idxName = WriteStringInMap(Name);
440 if (unlikely(idxName == 0))
441 return false;
442 Grp->Name = idxName;
443
444 // Insert it into the hash table
445 unsigned long const Hash = Cache.Hash(Name);
446 Grp->Next = Cache.HeaderP->GrpHashTable[Hash];
447 Cache.HeaderP->GrpHashTable[Hash] = Group;
448
449 Grp->ID = Cache.HeaderP->GroupCount++;
450 return true;
451 }
452 /*}}}*/
453 // CacheGenerator::NewPackage - Add a new package /*{{{*/
454 // ---------------------------------------------------------------------
455 /* This creates a new package structure and adds it to the hash table */
456 bool pkgCacheGenerator::NewPackage(pkgCache::PkgIterator &Pkg,const string &Name,
457 const string &Arch) {
458 pkgCache::GrpIterator Grp;
459 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
460 if (unlikely(NewGroup(Grp, Name) == false))
461 return false;
462
463 Pkg = Grp.FindPkg(Arch);
464 if (Pkg.end() == false)
465 return true;
466
467 // Get a structure
468 map_ptrloc const Package = AllocateInMap(sizeof(pkgCache::Package));
469 if (unlikely(Package == 0))
470 return false;
471 Pkg = pkgCache::PkgIterator(Cache,Cache.PkgP + Package);
472
473 // Insert the package into our package list
474 if (Grp->FirstPackage == 0) // the group is new
475 {
476 // Insert it into the hash table
477 unsigned long const Hash = Cache.Hash(Name);
478 Pkg->NextPackage = Cache.HeaderP->PkgHashTable[Hash];
479 Cache.HeaderP->PkgHashTable[Hash] = Package;
480 Grp->FirstPackage = Package;
481 }
482 else // Group the Packages together
483 {
484 // this package is the new last package
485 pkgCache::PkgIterator LastPkg(Cache, Cache.PkgP + Grp->LastPackage);
486 Pkg->NextPackage = LastPkg->NextPackage;
487 LastPkg->NextPackage = Package;
488 }
489 Grp->LastPackage = Package;
490
491 // Set the name, arch and the ID
492 Pkg->Name = Grp->Name;
493 Pkg->Group = Grp.Index();
494 map_ptrloc const idxArch = WriteUniqString(Arch.c_str());
495 if (unlikely(idxArch == 0))
496 return false;
497 Pkg->Arch = idxArch;
498 Pkg->ID = Cache.HeaderP->PackageCount++;
499
500 return true;
501 }
502 /*}}}*/
503 // CacheGenerator::NewFileVer - Create a new File<->Version association /*{{{*/
504 // ---------------------------------------------------------------------
505 /* Links the given version to the package file currently being parsed
    (CurrentFile), recording the offset and size of its record. */
506 bool pkgCacheGenerator::NewFileVer(pkgCache::VerIterator &Ver,
507 ListParser &List)
508 {
509 if (CurrentFile == 0)
510 return true;
511
512 // Get a structure
513 map_ptrloc const VerFile = AllocateInMap(sizeof(pkgCache::VerFile));
514 if (VerFile == 0)
515 return false;
516
517 pkgCache::VerFileIterator VF(Cache,Cache.VerFileP + VerFile);
518 VF->File = CurrentFile - Cache.PkgFileP;
519
520 // Link it to the end of the list
521 map_ptrloc *Last = &Ver->FileList;
522 for (pkgCache::VerFileIterator V = Ver.FileList(); V.end() == false; V++)
523 Last = &V->NextFile;
524 VF->NextFile = *Last;
525 *Last = VF.Index();
526
527 VF->Offset = List.Offset();
528 VF->Size = List.Size();
529 if (Cache.HeaderP->MaxVerFileSize < VF->Size)
530 Cache.HeaderP->MaxVerFileSize = VF->Size;
531 Cache.HeaderP->VerFileCount++;
532
533 return true;
534 }
535 /*}}}*/
536 // CacheGenerator::NewVersion - Create a new Version /*{{{*/
537 // ---------------------------------------------------------------------
538 /* This puts a version structure in the linked list */
539 unsigned long pkgCacheGenerator::NewVersion(pkgCache::VerIterator &Ver,
540 const string &VerStr,
541 unsigned long Next)
542 {
543 // Get a structure
544 map_ptrloc const Version = AllocateInMap(sizeof(pkgCache::Version));
545 if (Version == 0)
546 return 0;
547
548 // Fill it in
549 Ver = pkgCache::VerIterator(Cache,Cache.VerP + Version);
550 Ver->NextVer = Next;
551 Ver->ID = Cache.HeaderP->VersionCount++;
552 map_ptrloc const idxVerStr = WriteStringInMap(VerStr);
553 if (unlikely(idxVerStr == 0))
554 return 0;
555 Ver->VerStr = idxVerStr;
556
557 return Version;
558 }
559 /*}}}*/
560 // CacheGenerator::NewFileDesc - Create a new File<->Desc association /*{{{*/
561 // ---------------------------------------------------------------------
562 /* Links the given description to the package file currently being
    parsed, recording the offset and size of its record. */
563 bool pkgCacheGenerator::NewFileDesc(pkgCache::DescIterator &Desc,
564 ListParser &List)
565 {
566 if (CurrentFile == 0)
567 return true;
568
569 // Get a structure
570 map_ptrloc const DescFile = AllocateInMap(sizeof(pkgCache::DescFile));
571 if (DescFile == 0)
572 return false;
573
574 pkgCache::DescFileIterator DF(Cache,Cache.DescFileP + DescFile);
575 DF->File = CurrentFile - Cache.PkgFileP;
576
577 // Link it to the end of the list
578 map_ptrloc *Last = &Desc->FileList;
579 for (pkgCache::DescFileIterator D = Desc.FileList(); D.end() == false; D++)
580 Last = &D->NextFile;
581
582 DF->NextFile = *Last;
583 *Last = DF.Index();
584
585 DF->Offset = List.Offset();
586 DF->Size = List.Size();
587 if (Cache.HeaderP->MaxDescFileSize < DF->Size)
588 Cache.HeaderP->MaxDescFileSize = DF->Size;
589 Cache.HeaderP->DescFileCount++;
590
591 return true;
592 }
593 /*}}}*/
594 // CacheGenerator::NewDescription - Create a new Description /*{{{*/
595 // ---------------------------------------------------------------------
596 /* This puts a description structure in the linked list */
597 map_ptrloc pkgCacheGenerator::NewDescription(pkgCache::DescIterator &Desc,
598 const string &Lang,
599 const MD5SumValue &md5sum,
600 map_ptrloc Next)
601 {
602 // Get a structure
603 map_ptrloc const Description = AllocateInMap(sizeof(pkgCache::Description));
604 if (Description == 0)
605 return 0;
606
607 // Fill it in
608 Desc = pkgCache::DescIterator(Cache,Cache.DescP + Description);
609 Desc->NextDesc = Next;
610 Desc->ID = Cache.HeaderP->DescriptionCount++;
611 map_ptrloc const idxlanguage_code = WriteStringInMap(Lang);
612 map_ptrloc const idxmd5sum = WriteStringInMap(md5sum.Value());
613 if (unlikely(idxlanguage_code == 0 || idxmd5sum == 0))
614 return 0;
615 Desc->language_code = idxlanguage_code;
616 Desc->md5sum = idxmd5sum;
617
618 return Description;
619 }
620 /*}}}*/
621 // CacheGenerator::FinishCache - do various finish operations /*{{{*/
622 // ---------------------------------------------------------------------
623 /* This prepares the Cache for delivery */
624 bool pkgCacheGenerator::FinishCache(OpProgress *Progress)
625 {
626 // FIXME: add progress reporting for this operation
627 // Do we have different architectures in our groups?
628 vector<string> archs = APT::Configuration::getArchitectures();
629 if (archs.size() > 1)
630 {
631 // Create Conflicts between the members of each group
632 pkgCache::GrpIterator G = GetCache().GrpBegin();
633 Dynamic<pkgCache::GrpIterator> DynG(G);
634 for (; G.end() != true; G++)
635 {
636 string const PkgName = G.Name();
637 pkgCache::PkgIterator P = G.PackageList();
638 Dynamic<pkgCache::PkgIterator> DynP(P);
639 for (; P.end() != true; P = G.NextPkg(P))
640 {
641 if (strcmp(P.Arch(),"all") == 0)
642 continue;
643 pkgCache::PkgIterator allPkg;
644 Dynamic<pkgCache::PkgIterator> DynallPkg(allPkg);
645 pkgCache::VerIterator V = P.VersionList();
646 Dynamic<pkgCache::VerIterator> DynV(V);
647 for (; V.end() != true; V++)
648 {
649 string const Arch = V.Arch(true);
650 map_ptrloc *OldDepLast = NULL;
651 /* MultiArch handling introduces a lot of implicit Dependencies:
652 - MultiArch: same → Co-Installable if they have the same version
653 - Architecture: all → Need to be Co-Installable for internal reasons
654 - All others conflict with all other group members */
655 bool const coInstall = (V->MultiArch == pkgCache::Version::All ||
656 V->MultiArch == pkgCache::Version::Same);
657 if (V->MultiArch == pkgCache::Version::All && allPkg.end() == true)
658 allPkg = G.FindPkg("all");
659 for (vector<string>::const_iterator A = archs.begin(); A != archs.end(); ++A)
660 {
661 if (*A == Arch)
662 continue;
663 /* We allow only one installed arch at a time
664 per group, therefore each group member conflicts
665 with all other group members */
666 pkgCache::PkgIterator D = G.FindPkg(*A);
667 Dynamic<pkgCache::PkgIterator> DynD(D);
668 if (D.end() == true)
669 continue;
670 if (coInstall == true)
671 {
672 // Replaces: ${self}:other ( << ${binary:Version})
673 NewDepends(D, V, V.VerStr(),
674 pkgCache::Dep::Less, pkgCache::Dep::Replaces,
675 OldDepLast);
676 // Breaks: ${self}:other (!= ${binary:Version})
677 NewDepends(D, V, V.VerStr(),
678 pkgCache::Dep::NotEquals, pkgCache::Dep::DpkgBreaks,
679 OldDepLast);
680 if (V->MultiArch == pkgCache::Version::All)
681 {
682 // Depend on ${self}:all, which itself depends on nothing
683 NewDepends(allPkg, V, V.VerStr(),
684 pkgCache::Dep::Equals, pkgCache::Dep::Depends,
685 OldDepLast);
686 }
687 } else {
688 // Conflicts: ${self}:other
689 NewDepends(D, V, "",
690 pkgCache::Dep::NoOp, pkgCache::Dep::Conflicts,
691 OldDepLast);
692 }
693 }
694 }
695 }
696 }
697 }
698 return true;
699 }
700 /*}}}*/
701 // CacheGenerator::NewDepends - Create a dependency element /*{{{*/
702 // ---------------------------------------------------------------------
703 /* This creates a dependency element in the tree. It is linked to the
704 version and to the package that it is pointing to. */
705 bool pkgCacheGenerator::NewDepends(pkgCache::PkgIterator &Pkg,
706 pkgCache::VerIterator &Ver,
707 string const &Version,
708 unsigned int const &Op,
709 unsigned int const &Type,
710 map_ptrloc *OldDepLast)
711 {
712 void const * const oldMap = Map.Data();
713 // Get a structure
714 map_ptrloc const Dependency = AllocateInMap(sizeof(pkgCache::Dependency));
715 if (unlikely(Dependency == 0))
716 return false;
717
718 // Fill it in
719 pkgCache::DepIterator Dep(Cache,Cache.DepP + Dependency);
720 Dynamic<pkgCache::DepIterator> DynDep(Dep);
721 Dep->ParentVer = Ver.Index();
722 Dep->Type = Type;
723 Dep->CompareOp = Op;
724 Dep->ID = Cache.HeaderP->DependsCount++;
725
726 // Probe the reverse dependency list for a version string that matches
727 if (Version.empty() == false)
728 {
729 /* for (pkgCache::DepIterator I = Pkg.RevDependsList(); I.end() == false; I++)
730 if (I->Version != 0 && I.TargetVer() == Version)
731 Dep->Version = I->Version;*/
732 if (Dep->Version == 0) {
733 map_ptrloc const index = WriteStringInMap(Version);
734 if (unlikely(index == 0))
735 return false;
736 Dep->Version = index;
737 }
738 }
739
740 // Link it to the package
741 Dep->Package = Pkg.Index();
742 Dep->NextRevDepends = Pkg->RevDepends;
743 Pkg->RevDepends = Dep.Index();
744
745 // Do we know where to link the Dependency to?
746 if (OldDepLast == NULL)
747 {
748 OldDepLast = &Ver->DependsList;
749 for (pkgCache::DepIterator D = Ver.DependsList(); D.end() == false; D++)
750 OldDepLast = &D->NextDepends;
751 } else if (oldMap != Map.Data())
752 OldDepLast += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
753
754 Dep->NextDepends = *OldDepLast;
755 *OldDepLast = Dep.Index();
756 OldDepLast = &Dep->NextDepends;
757
758 return true;
759 }
760 /*}}}*/
761 // ListParser::NewDepends - Create the environment for a new dependency /*{{{*/
762 // ---------------------------------------------------------------------
763 /* This creates a Group and the Package to link this dependency to if
764 needed and also handles the caching of the old endpoint */
765 bool pkgCacheGenerator::ListParser::NewDepends(pkgCache::VerIterator &Ver,
766 const string &PackageName,
767 const string &Arch,
768 const string &Version,
769 unsigned int Op,
770 unsigned int Type)
771 {
772 pkgCache::GrpIterator Grp;
773 Dynamic<pkgCache::GrpIterator> DynGrp(Grp);
774 if (unlikely(Owner->NewGroup(Grp, PackageName) == false))
775 return false;
776
777 // Locate the target package
778 pkgCache::PkgIterator Pkg = Grp.FindPkg(Arch);
779 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
780 if (Pkg.end() == true) {
781 if (unlikely(Owner->NewPackage(Pkg, PackageName, Arch) == false))
782 return false;
783 }
784
785 // Is it a file dependency?
786 if (unlikely(PackageName[0] == '/'))
787 FoundFileDeps = true;
788
789 /* Caching the old end point speeds up generation substantially */
790 if (OldDepVer != Ver) {
791 OldDepLast = NULL;
792 OldDepVer = Ver;
793 }
794
795 return Owner->NewDepends(Pkg, Ver, Version, Op, Type, OldDepLast);
796 }
797 /*}}}*/
798 // ListParser::NewProvides - Create a Provides element /*{{{*/
799 // ---------------------------------------------------------------------
800 /* Creates a Provides entry and links it to both the providing version
    and the provided (possibly newly created) package. */
801 bool pkgCacheGenerator::ListParser::NewProvides(pkgCache::VerIterator &Ver,
802 const string &PkgName,
803 const string &PkgArch,
804 const string &Version)
805 {
806 pkgCache &Cache = Owner->Cache;
807
808 // We do not add self referencing provides
809 if (Ver.ParentPkg().Name() == PkgName && PkgArch == Ver.Arch(true))
810 return true;
811
812 // Get a structure
813 map_ptrloc const Provides = Owner->AllocateInMap(sizeof(pkgCache::Provides));
814 if (unlikely(Provides == 0))
815 return false;
816 Cache.HeaderP->ProvidesCount++;
817
818 // Fill it in
819 pkgCache::PrvIterator Prv(Cache,Cache.ProvideP + Provides,Cache.PkgP);
820 Dynamic<pkgCache::PrvIterator> DynPrv(Prv);
821 Prv->Version = Ver.Index();
822 Prv->NextPkgProv = Ver->ProvidesList;
823 Ver->ProvidesList = Prv.Index();
824 if (Version.empty() == false && unlikely((Prv->ProvideVersion = WriteString(Version)) == 0))
825 return false;
826
827 // Locate the target package
828 pkgCache::PkgIterator Pkg;
829 Dynamic<pkgCache::PkgIterator> DynPkg(Pkg);
830 if (unlikely(Owner->NewPackage(Pkg,PkgName, PkgArch) == false))
831 return false;
832
833 // Link it to the package
834 Prv->ParentPkg = Pkg.Index();
835 Prv->NextProvides = Pkg->ProvidesList;
836 Pkg->ProvidesList = Prv.Index();
837
838 return true;
839 }
840 /*}}}*/
841 // CacheGenerator::SelectFile - Select the current file being parsed /*{{{*/
842 // ---------------------------------------------------------------------
843 /* This is used to select which file is to be associated with all newly
844 added versions. The caller is responsible for setting the IMS fields. */
845 bool pkgCacheGenerator::SelectFile(const string &File,const string &Site,
846 const pkgIndexFile &Index,
847 unsigned long Flags)
848 {
849 // Get some space for the structure
850 map_ptrloc const idxFile = AllocateInMap(sizeof(*CurrentFile));
851 if (unlikely(idxFile == 0))
852 return false;
853 CurrentFile = Cache.PkgFileP + idxFile;
854
855 // Fill it in
856 map_ptrloc const idxFileName = WriteStringInMap(File);
857 map_ptrloc const idxSite = WriteUniqString(Site);
858 if (unlikely(idxFileName == 0 || idxSite == 0))
859 return false;
860 CurrentFile->FileName = idxFileName;
861 CurrentFile->Site = idxSite;
862 CurrentFile->NextFile = Cache.HeaderP->FileList;
863 CurrentFile->Flags = Flags;
864 CurrentFile->ID = Cache.HeaderP->PackageFileCount;
865 map_ptrloc const idxIndexType = WriteUniqString(Index.GetType()->Label);
866 if (unlikely(idxIndexType == 0))
867 return false;
868 CurrentFile->IndexType = idxIndexType;
869 PkgFileName = File;
870 Cache.HeaderP->FileList = CurrentFile - Cache.PkgFileP;
871 Cache.HeaderP->PackageFileCount++;
872
873 if (Progress != 0)
874 Progress->SubProgress(Index.Size());
875 return true;
876 }
877 /*}}}*/
878 // CacheGenerator::WriteUniqueString - Insert a unique string /*{{{*/
879 // ---------------------------------------------------------------------
880 /* This is used to create handles to strings. Given the same text it
881 always returns the same number */
882 unsigned long pkgCacheGenerator::WriteUniqString(const char *S,
883 unsigned int Size)
884 {
885 /* We use a very small transient hash table here; this speeds up generation
886 by a fair amount on slower machines */
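// The bucket is keyed on the first two characters only, so it acts as a
// small most-recently-used memo rather than a full hash table.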
887 pkgCache::StringItem *&Bucket = UniqHash[(S[0]*5 + S[1]) % _count(UniqHash)];
888 if (Bucket != 0 &&
889 stringcmp(S,S+Size,Cache.StrP + Bucket->String) == 0)
890 return Bucket->String;
891
892 // Search for an insertion point
893 pkgCache::StringItem *I = Cache.StringItemP + Cache.HeaderP->StringList;
894 int Res = 1;
895 map_ptrloc *Last = &Cache.HeaderP->StringList;
896 for (; I != Cache.StringItemP; Last = &I->NextItem,
897 I = Cache.StringItemP + I->NextItem)
898 {
899 Res = stringcmp(S,S+Size,Cache.StrP + I->String);
900 if (Res >= 0)
901 break;
902 }
903
904 // Match
905 if (Res == 0)
906 {
907 Bucket = I;
908 return I->String;
909 }
910
911 // Get a structure
912 void const * const oldMap = Map.Data();
913 map_ptrloc const Item = AllocateInMap(sizeof(pkgCache::StringItem));
914 if (Item == 0)
915 return 0;
916
917 map_ptrloc const idxString = WriteStringInMap(S,Size);
918 if (unlikely(idxString == 0))
919 return 0;
920 if (oldMap != Map.Data()) {
921 Last += (map_ptrloc*) Map.Data() - (map_ptrloc*) oldMap;
922 I += (pkgCache::StringItem*) Map.Data() - (pkgCache::StringItem*) oldMap;
923 }
924 *Last = Item;
925
926 // Fill in the structure
927 pkgCache::StringItem *ItemP = Cache.StringItemP + Item;
928 ItemP->NextItem = I - Cache.StringItemP;
929 ItemP->String = idxString;
930
931 Bucket = ItemP;
932 return ItemP->String;
933 }
934 /*}}}*/
935 // CheckValidity - Check that a cache is up-to-date /*{{{*/
936 // ---------------------------------------------------------------------
937 /* This just verifies that each file in the list of index files exists,
938 has matching attributes with the cache, and that the cache does not
939 have any extra files. */
940 static bool CheckValidity(const string &CacheFile, FileIterator Start,
941 FileIterator End,MMap **OutMap = 0)
942 {
943 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
944 // No file, certainly invalid
945 if (CacheFile.empty() == true || FileExists(CacheFile) == false)
946 {
947 if (Debug == true)
948 std::clog << "CacheFile doesn't exist" << std::endl;
949 return false;
950 }
951
952 // Map it
953 FileFd CacheF(CacheFile,FileFd::ReadOnly);
954 SPtr<MMap> Map = new MMap(CacheF,0);
955 pkgCache Cache(Map);
956 if (_error->PendingError() == true || Map->Size() == 0)
957 {
958 if (Debug == true)
959 std::clog << "Errors are pending or Map is empty()" << std::endl;
960 _error->Discard();
961 return false;
962 }
963
964 /* Now we check every index file, see if it is in the cache,
965 verify the IMS data and check that it is on the disk too. */
966 SPtrArray<bool> Visited = new bool[Cache.HeaderP->PackageFileCount];
967 memset(Visited,0,sizeof(*Visited)*Cache.HeaderP->PackageFileCount);
968 for (; Start != End; Start++)
969 {
970 if (Debug == true)
971 std::clog << "Checking PkgFile " << (*Start)->Describe() << ": ";
972 if ((*Start)->HasPackages() == false)
973 {
974 if (Debug == true)
975 std::clog << "Has NO packages" << std::endl;
976 continue;
977 }
978
979 if ((*Start)->Exists() == false)
980 {
981 #if 0 // mvo: we no longer give a message here (Default Sources spec)
982 _error->WarningE("stat",_("Couldn't stat source package list %s"),
983 (*Start)->Describe().c_str());
984 #endif
985 if (Debug == true)
986 std::clog << "file doesn't exist" << std::endl;
987 continue;
988 }
989
990 // FindInCache is also expected to do an IMS check.
991 pkgCache::PkgFileIterator File = (*Start)->FindInCache(Cache);
992 if (File.end() == true)
993 {
994 if (Debug == true)
995 std::clog << "FindInCache returned end-Pointer" << std::endl;
996 return false;
997 }
998
999 Visited[File->ID] = true;
1000 if (Debug == true)
1001 std::clog << "with ID " << File->ID << " is valid" << std::endl;
1002 }
1003
1004 for (unsigned I = 0; I != Cache.HeaderP->PackageFileCount; I++)
1005 if (Visited[I] == false)
1006 {
1007 if (Debug == true)
1008 std::clog << "File with ID " << I << " wasn't visited" << std::endl;
1009 return false;
1010 }
1011
1012 if (_error->PendingError() == true)
1013 {
1014 if (Debug == true)
1015 {
1016 std::clog << "Validity failed because of pending errors:" << std::endl;
1017 _error->DumpErrors();
1018 }
1019 _error->Discard();
1020 return false;
1021 }
1022
1023 if (OutMap != 0)
1024 *OutMap = Map.UnGuard();
1025 return true;
1026 }
1027 /*}}}*/
1028 // ComputeSize - Compute the total size of a bunch of files /*{{{*/
1029 // ---------------------------------------------------------------------
1030 /* Size is kind of an abstract notion that is only used for the progress
1031 meter */
1032 static unsigned long ComputeSize(FileIterator Start,FileIterator End)
1033 {
1034 unsigned long TotalSize = 0;
1035 for (; Start != End; Start++)
1036 {
1037 if ((*Start)->HasPackages() == false)
1038 continue;
1039 TotalSize += (*Start)->Size();
1040 }
1041 return TotalSize;
1042 }
1043 /*}}}*/
1044 // BuildCache - Merge the list of index files into the cache /*{{{*/
1045 // ---------------------------------------------------------------------
1046 /* Merges each existing index file that has packages into the generator
     and, if file dependencies were found, does a second pass to collect
     the file provides. */
1047 static bool BuildCache(pkgCacheGenerator &Gen,
1048 OpProgress *Progress,
1049 unsigned long &CurrentSize,unsigned long TotalSize,
1050 FileIterator Start, FileIterator End)
1051 {
1052 FileIterator I;
1053 for (I = Start; I != End; I++)
1054 {
1055 if ((*I)->HasPackages() == false)
1056 continue;
1057
1058 if ((*I)->Exists() == false)
1059 continue;
1060
1061 if ((*I)->FindInCache(Gen.GetCache()).end() == false)
1062 {
1063 _error->Warning("Duplicate sources.list entry %s",
1064 (*I)->Describe().c_str());
1065 continue;
1066 }
1067
1068 unsigned long Size = (*I)->Size();
1069 if (Progress != NULL)
1070 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Reading package lists"));
1071 CurrentSize += Size;
1072
1073 if ((*I)->Merge(Gen,Progress) == false)
1074 return false;
1075 }
1076
1077 if (Gen.HasFileDeps() == true)
1078 {
1079 if (Progress != NULL)
1080 Progress->Done();
1081 TotalSize = ComputeSize(Start, End);
1082 CurrentSize = 0;
1083 for (I = Start; I != End; I++)
1084 {
1085 unsigned long Size = (*I)->Size();
1086 if (Progress != NULL)
1087 Progress->OverallProgress(CurrentSize,TotalSize,Size,_("Collecting File Provides"));
1088 CurrentSize += Size;
1089 if ((*I)->MergeFileProvides(Gen,Progress) == false)
1090 return false;
1091 }
1092 }
1093
1094 return true;
1095 }
1096 /*}}}*/
1097 // CacheGenerator::CreateDynamicMMap - load an mmap with configuration options /*{{{*/
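// ---------------------------------------------------------------------
/* The initial size, the grow step and the hard limit of the map are taken
   from APT::Cache-Start, APT::Cache-Grow and APT::Cache-Limit (0 means no
   limit); APT::Cache-Fallback additionally allows falling back to a plain
   in-memory buffer if mmap is not usable. */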
1098 DynamicMMap* pkgCacheGenerator::CreateDynamicMMap(FileFd *CacheF, unsigned long Flags) {
1099 unsigned long const MapStart = _config->FindI("APT::Cache-Start", 24*1024*1024);
1100 unsigned long const MapGrow = _config->FindI("APT::Cache-Grow", 1*1024*1024);
1101 unsigned long const MapLimit = _config->FindI("APT::Cache-Limit", 0);
1102 Flags |= MMap::Moveable;
1103 if (_config->FindB("APT::Cache-Fallback", false) == true)
1104 Flags |= MMap::Fallback;
1105 if (CacheF != NULL)
1106 return new DynamicMMap(*CacheF, Flags, MapStart, MapGrow, MapLimit);
1107 else
1108 return new DynamicMMap(Flags, MapStart, MapGrow, MapLimit);
1109 }
1110 /*}}}*/
1111 // CacheGenerator::MakeStatusCache - Construct the status cache /*{{{*/
1112 // ---------------------------------------------------------------------
1113 /* This makes sure that the status cache (the cache that has all
1114 index files from the sources list and all local ones) is ready
1115 to be mmapped. If OutMap is not zero then a MMap object representing
1116 the cache will be stored there. This is pretty much mandatory if you
1117 are using AllowMem. AllowMem lets the function be run as non-root
1118 where it builds the cache 'fast' into a memory buffer. */
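/* A typical caller (sketch, not taken from this file) would do roughly:

      pkgSourceList List;
      if (List.ReadMainList() == false)
         return false;
      MMap *Map = 0;
      OpTextProgress Prog;
      if (pkgCacheGenerator::MakeStatusCache(List, &Prog, &Map, true) == false)
         return false;
      pkgCache Cache(Map);

   i.e. read the sources, build or refresh the caches and construct a
   pkgCache on top of the resulting MMap. */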
1119 __deprecated bool pkgMakeStatusCache(pkgSourceList &List,OpProgress &Progress,
1120 MMap **OutMap, bool AllowMem)
1121 { return pkgCacheGenerator::MakeStatusCache(List, &Progress, OutMap, AllowMem); }
1122 bool pkgCacheGenerator::MakeStatusCache(pkgSourceList &List,OpProgress *Progress,
1123 MMap **OutMap,bool AllowMem)
1124 {
1125 bool const Debug = _config->FindB("Debug::pkgCacheGen", false);
1126
1127 vector<pkgIndexFile *> Files;
1128 for (vector<metaIndex *>::const_iterator i = List.begin();
1129 i != List.end();
1130 i++)
1131 {
1132 vector <pkgIndexFile *> *Indexes = (*i)->GetIndexFiles();
1133 for (vector<pkgIndexFile *>::const_iterator j = Indexes->begin();
1134 j != Indexes->end();
1135 j++)
1136 Files.push_back (*j);
1137 }
1138
1139 unsigned long const EndOfSource = Files.size();
1140 if (_system->AddStatusFiles(Files) == false)
1141 return false;
1142
1143 // Decide if we can write to the files..
1144 string const CacheFile = _config->FindFile("Dir::Cache::pkgcache");
1145 string const SrcCacheFile = _config->FindFile("Dir::Cache::srcpkgcache");
1146
1147 // ensure the cache directory exists
1148 if (CacheFile.empty() == false || SrcCacheFile.empty() == false)
1149 {
1150 string dir = _config->FindDir("Dir::Cache");
1151 size_t const len = dir.size();
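// If Dir::Cache ends in ".../apt/" strip that last component, so that
// CreateDirectory() below can create both the apt/ directory and the
// directory of the cache file beneath it.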
1152 if (len > 5 && dir.find("/apt/", len - 6, 5) == len - 5)
1153 dir = dir.substr(0, len - 5);
1154 if (CacheFile.empty() == false)
1155 CreateDirectory(dir, flNotFile(CacheFile));
1156 if (SrcCacheFile.empty() == false)
1157 CreateDirectory(dir, flNotFile(SrcCacheFile));
1158 }
1159
1160 // Decide if we can write to the cache
1161 bool Writeable = false;
1162 if (CacheFile.empty() == false)
1163 Writeable = access(flNotFile(CacheFile).c_str(),W_OK) == 0;
1164 else
1165 if (SrcCacheFile.empty() == false)
1166 Writeable = access(flNotFile(SrcCacheFile).c_str(),W_OK) == 0;
1167 if (Debug == true)
1168 std::clog << "Do we have write-access to the cache files? " << (Writeable ? "YES" : "NO") << std::endl;
1169
1170 if (Writeable == false && AllowMem == false && CacheFile.empty() == false)
1171 return _error->Error(_("Unable to write to %s"),flNotFile(CacheFile).c_str());
1172
1173 if (Progress != NULL)
1174 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1175
1176 // Cache is OK, Fin.
1177 if (CheckValidity(CacheFile,Files.begin(),Files.end(),OutMap) == true)
1178 {
1179 if (Progress != NULL)
1180 Progress->OverallProgress(1,1,1,_("Reading package lists"));
1181 if (Debug == true)
1182 std::clog << "pkgcache.bin is valid - no need to build anything" << std::endl;
1183 return true;
1184 }
1185 else if (Debug == true)
1186 std::clog << "pkgcache.bin is NOT valid" << std::endl;
1187
1188 /* At this point we know we need to reconstruct the package cache,
1189 begin. */
1190 SPtr<FileFd> CacheF;
1191 SPtr<DynamicMMap> Map;
1192 if (Writeable == true && CacheFile.empty() == false)
1193 {
1194 unlink(CacheFile.c_str());
1195 CacheF = new FileFd(CacheFile,FileFd::WriteAtomic);
1196 fchmod(CacheF->Fd(),0644);
1197 Map = CreateDynamicMMap(CacheF, MMap::Public);
1198 if (_error->PendingError() == true)
1199 return false;
1200 if (Debug == true)
1201 std::clog << "Open filebased MMap" << std::endl;
1202 }
1203 else
1204 {
1205 // Just build it in memory..
1206 Map = CreateDynamicMMap(NULL);
1207 if (Debug == true)
1208 std::clog << "Open memory Map (not filebased)" << std::endl;
1209 }
1210
1211 // Let's try the source cache.
1212 unsigned long CurrentSize = 0;
1213 unsigned long TotalSize = 0;
1214 if (CheckValidity(SrcCacheFile,Files.begin(),
1215 Files.begin()+EndOfSource) == true)
1216 {
1217 if (Debug == true)
1218 std::clog << "srcpkgcache.bin is valid - populate MMap with it." << std::endl;
1219 // Preload the map with the source cache
1220 FileFd SCacheF(SrcCacheFile,FileFd::ReadOnly);
1221 unsigned long const alloc = Map->RawAllocate(SCacheF.Size());
1222 if ((alloc == 0 && _error->PendingError())
1223 || SCacheF.Read((unsigned char *)Map->Data() + alloc,
1224 SCacheF.Size()) == false)
1225 return false;
1226
1227 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1228
1229 // Build the status cache
1230 pkgCacheGenerator Gen(Map.Get(),Progress);
1231 if (_error->PendingError() == true)
1232 return false;
1233 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1234 Files.begin()+EndOfSource,Files.end()) == false)
1235 return false;
1236
1237 // FIXME: move me to a better place
1238 Gen.FinishCache(Progress);
1239 }
1240 else
1241 {
1242 if (Debug == true)
1243 std::clog << "srcpkgcache.bin is NOT valid - rebuild" << std::endl;
1244 TotalSize = ComputeSize(Files.begin(),Files.end());
1245
1246 // Build the source cache
1247 pkgCacheGenerator Gen(Map.Get(),Progress);
1248 if (_error->PendingError() == true)
1249 return false;
1250 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1251 Files.begin(),Files.begin()+EndOfSource) == false)
1252 return false;
1253
1254 // Write it back
1255 if (Writeable == true && SrcCacheFile.empty() == false)
1256 {
1257 FileFd SCacheF(SrcCacheFile,FileFd::WriteAtomic);
1258 if (_error->PendingError() == true)
1259 return false;
1260
1261 fchmod(SCacheF.Fd(),0644);
1262
1263 // Write out the main data
1264 if (SCacheF.Write(Map->Data(),Map->Size()) == false)
1265 return _error->Error(_("IO Error saving source cache"));
1266 SCacheF.Sync();
1267
1268 // Write out the proper header
1269 Gen.GetCache().HeaderP->Dirty = false;
1270 if (SCacheF.Seek(0) == false ||
1271 SCacheF.Write(Map->Data(),sizeof(*Gen.GetCache().HeaderP)) == false)
1272 return _error->Error(_("IO Error saving source cache"));
1273 Gen.GetCache().HeaderP->Dirty = true;
1274 SCacheF.Sync();
1275 }
1276
1277 // Build the status cache
1278 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1279 Files.begin()+EndOfSource,Files.end()) == false)
1280 return false;
1281
1282 // FIXME: move me to a better place
1283 Gen.FinishCache(Progress);
1284 }
1285 if (Debug == true)
1286 std::clog << "Caches are ready for shipping" << std::endl;
1287
1288 if (_error->PendingError() == true)
1289 return false;
1290 if (OutMap != 0)
1291 {
1292 if (CacheF != 0)
1293 {
1294 delete Map.UnGuard();
1295 *OutMap = new MMap(*CacheF,0);
1296 }
1297 else
1298 {
1299 *OutMap = Map.UnGuard();
1300 }
1301 }
1302
1303 return true;
1304 }
1305 /*}}}*/
1306 // CacheGenerator::MakeOnlyStatusCache - Build only a status files cache/*{{{*/
1307 // ---------------------------------------------------------------------
1308 /* Builds an in-memory cache that contains only the status files of the
     system. */
1309 __deprecated bool pkgMakeOnlyStatusCache(OpProgress &Progress,DynamicMMap **OutMap)
1310 { return pkgCacheGenerator::MakeOnlyStatusCache(&Progress, OutMap); }
1311 bool pkgCacheGenerator::MakeOnlyStatusCache(OpProgress *Progress,DynamicMMap **OutMap)
1312 {
1313 vector<pkgIndexFile *> Files;
1314 unsigned long EndOfSource = Files.size();
1315 if (_system->AddStatusFiles(Files) == false)
1316 return false;
1317
1318 SPtr<DynamicMMap> Map = CreateDynamicMMap(NULL);
1319 unsigned long CurrentSize = 0;
1320 unsigned long TotalSize = 0;
1321
1322 TotalSize = ComputeSize(Files.begin()+EndOfSource,Files.end());
1323
1324 // Build the status cache
1325 if (Progress != NULL)
1326 Progress->OverallProgress(0,1,1,_("Reading package lists"));
1327 pkgCacheGenerator Gen(Map.Get(),Progress);
1328 if (_error->PendingError() == true)
1329 return false;
1330 if (BuildCache(Gen,Progress,CurrentSize,TotalSize,
1331 Files.begin()+EndOfSource,Files.end()) == false)
1332 return false;
1333
1334 // FIXME: move me to a better place
1335 Gen.FinishCache(Progress);
1336
1337 if (_error->PendingError() == true)
1338 return false;
1339 *OutMap = Map.UnGuard();
1340
1341 return true;
1342 }
1343 /*}}}*/