1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: acquire-item.cc,v 1.46.2.9 2004/01/16 18:51:11 mdz Exp $
4 /* ######################################################################
5
6 Acquire Item - Item to acquire
7
8 Each item can download to exactly one file at a time. This means you
9 cannot create an item that fetches two URIs to two files at the same
10 time. The pkgAcqIndex class creates a second class upon instantiation
11 to fetch the other index files because of this.
12
13 ##################################################################### */
14 /*}}}*/
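/* A rough usage sketch, assuming only the public pkgAcquire/pkgAcqFile
   interfaces declared in acquire.h and acquire-item.h (exact constructor
   parameters vary between APT versions): a caller creates items against a
   fetcher, which owns them, and then runs it.

     pkgAcquire Fetcher;
     new pkgAcqFile(&Fetcher, "http://example.org/pool/foo.deb",
                    HashStringList(), 0,    // no expected hashes, unknown size
                    "foo.deb", "foo");      // description and short description
     Fetcher.Run();                         // each item downloads to its single DestFile
*/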
15 // Include Files /*{{{*/
16 #include <config.h>
17
18 #include <apt-pkg/acquire-item.h>
19 #include <apt-pkg/configuration.h>
20 #include <apt-pkg/aptconfiguration.h>
21 #include <apt-pkg/sourcelist.h>
22 #include <apt-pkg/error.h>
23 #include <apt-pkg/strutl.h>
24 #include <apt-pkg/fileutl.h>
25 #include <apt-pkg/tagfile.h>
26 #include <apt-pkg/metaindex.h>
27 #include <apt-pkg/acquire.h>
28 #include <apt-pkg/hashes.h>
29 #include <apt-pkg/indexfile.h>
30 #include <apt-pkg/pkgcache.h>
31 #include <apt-pkg/cacheiterators.h>
32 #include <apt-pkg/pkgrecords.h>
33 #include <apt-pkg/gpgv.h>
34
35 #include <algorithm>
36 #include <stddef.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <iostream>
40 #include <vector>
41 #include <sys/stat.h>
42 #include <unistd.h>
43 #include <errno.h>
44 #include <string>
45 #include <stdio.h>
46 #include <ctime>
47 #include <sstream>
48 #include <numeric>
49
50 #include <apti18n.h>
51 /*}}}*/
52
53 using namespace std;
54
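// Dump the expected and the actually computed hashes to stderr when the
// Debug::Acquire::HashSumMismatch option is enabled.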
55 static void printHashSumComparison(std::string const &URI, HashStringList const &Expected, HashStringList const &Actual) /*{{{*/
56 {
57 if (_config->FindB("Debug::Acquire::HashSumMismatch", false) == false)
58 return;
59 std::cerr << std::endl << URI << ":" << std::endl << " Expected Hash: " << std::endl;
60 for (HashStringList::const_iterator hs = Expected.begin(); hs != Expected.end(); ++hs)
61 std::cerr << "\t- " << hs->toStr() << std::endl;
62 std::cerr << " Actual Hash: " << std::endl;
63 for (HashStringList::const_iterator hs = Actual.begin(); hs != Actual.end(); ++hs)
64 std::cerr << "\t- " << hs->toStr() << std::endl;
65 }
66 /*}}}*/
67 static std::string GetPartialFileName(std::string const &file) /*{{{*/
68 {
69 std::string DestFile = _config->FindDir("Dir::State::lists") + "partial/";
70 DestFile += file;
71 return DestFile;
72 }
73 /*}}}*/
74 static std::string GetPartialFileNameFromURI(std::string const &uri) /*{{{*/
75 {
76 return GetPartialFileName(URItoFileName(uri));
77 }
78 /*}}}*/
79 static std::string GetFinalFileNameFromURI(std::string const &uri) /*{{{*/
80 {
81 return _config->FindDir("Dir::State::lists") + URItoFileName(uri);
82 }
83 /*}}}*/
84 static std::string GetKeepCompressedFileName(std::string file, IndexTarget const &Target)/*{{{*/
85 {
86 if (Target.KeepCompressed == false)
87 return file;
88
89 std::string const KeepCompressedAs = Target.Option(IndexTarget::KEEPCOMPRESSEDAS);
90 if (KeepCompressedAs.empty() == false)
91 {
92 std::string const ext = KeepCompressedAs.substr(0, KeepCompressedAs.find(' '));
93 if (ext != "uncompressed")
94 file.append(".").append(ext);
95 }
96 return file;
97 }
98 /*}}}*/
99 static std::string GetMergeDiffsPatchFileName(std::string const &Final, std::string const &Patch)/*{{{*/
100 {
101 // rred expects the patch as $FinalFile.ed.$patchname.gz
102 return Final + ".ed." + Patch + ".gz";
103 }
104 /*}}}*/
105 static std::string GetDiffsPatchFileName(std::string const &Final) /*{{{*/
106 {
107 // rred expects the patch as $FinalFile.ed
108 return Final + ".ed";
109 }
110 /*}}}*/
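// Return File if it exists on disk; otherwise try File with each known
// compressor extension appended and return the first match, or "" if none exists.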
111 static std::string GetExistingFilename(std::string const &File) /*{{{*/
112 {
113 if (RealFileExists(File))
114 return File;
115 for (auto const &type : APT::Configuration::getCompressorExtensions())
116 {
117 std::string const Final = File + type;
118 if (RealFileExists(Final))
119 return Final;
120 }
121 return "";
122 }
123 /*}}}*/
124 static std::string GetDiffIndexFileName(std::string const &Name) /*{{{*/
125 {
126 return Name + ".diff/Index";
127 }
128 /*}}}*/
129 static std::string GetDiffIndexURI(IndexTarget const &Target) /*{{{*/
130 {
131 return Target.URI + ".diff/Index";
132 }
133 /*}}}*/
134
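// Run the configured mirror-failure script (Methods::Mirror::ProblemReporting)
// with the used mirror, URI, fail code and details; a no-op if no mirror was
// used or the script does not exist.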
135 static void ReportMirrorFailureToCentral(pkgAcquire::Item const &I, std::string const &FailCode, std::string const &Details)/*{{{*/
136 {
137 // we only act if a mirror was used at all
138 if(I.UsedMirror.empty())
139 return;
140 #if 0
141 std::cerr << "\nReportMirrorFailure: "
142 << I.UsedMirror
143 << " Uri: " << I.DescURI()
144 << " FailCode: "
145 << FailCode << std::endl;
146 #endif
147 string const report = _config->Find("Methods::Mirror::ProblemReporting",
148 "/usr/lib/apt/apt-report-mirror-failure");
149 if(!FileExists(report))
150 return;
151
152 std::vector<char const*> const Args = {
153 report.c_str(),
154 I.UsedMirror.c_str(),
155 I.DescURI().c_str(),
156 FailCode.c_str(),
157 Details.c_str(),
158 NULL
159 };
160
161 pid_t pid = ExecFork();
162 if(pid < 0)
163 {
164 _error->Error("ReportMirrorFailure Fork failed");
165 return;
166 }
167 else if(pid == 0)
168 {
169 execvp(Args[0], (char**)Args.data());
170 std::cerr << "Could not exec " << Args[0] << std::endl;
171 _exit(100);
172 }
173 if(!ExecWait(pid, "report-mirror-failure"))
174 _error->Warning("Couldn't report problem to '%s'", report.c_str());
175 }
176 /*}}}*/
177
178 static bool MessageInsecureRepository(bool const isError, std::string const &msg)/*{{{*/
179 {
180 if (isError)
181 {
182 _error->Error("%s", msg.c_str());
183 _error->Notice("%s", _("Updating from such a repository can't be done securely, and is therefore disabled by default."));
184 }
185 else
186 {
187 _error->Warning("%s", msg.c_str());
188 _error->Notice("%s", _("Data from such a repository can't be authenticated and is therefore potentially dangerous to use."));
189 }
190 _error->Notice("%s", _("See apt-secure(8) manpage for repository creation and user configuration details."));
191 return false;
192 }
193 static bool APT_NONNULL(2) MessageInsecureRepository(bool const isError, char const * const msg, std::string const &repo)
194 {
195 std::string m;
196 strprintf(m, msg, repo.c_str());
197 return MessageInsecureRepository(isError, m);
198 }
199 /*}}}*/
200 static bool APT_NONNULL(1, 3, 4, 5) AllowInsecureRepositories(char const * const msg, std::string const &repo,/*{{{*/
201 metaIndex const * const MetaIndexParser, pkgAcqMetaClearSig * const TransactionManager, pkgAcquire::Item * const I)
202 {
203 if(MetaIndexParser->GetTrusted() == metaIndex::TRI_YES)
204 return true;
205
206 if (_config->FindB("Acquire::AllowInsecureRepositories") == true)
207 {
208 MessageInsecureRepository(false, msg, repo);
209 return true;
210 }
211
212 MessageInsecureRepository(true, msg, repo);
213 TransactionManager->AbortTransaction();
214 I->Status = pkgAcquire::Item::StatError;
215 return false;
216 }
217 /*}}}*/
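// Look up the hashes recorded for MetaKey in the given Release file parser;
// returns an empty list if there is no parser or no matching entry.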
218 static HashStringList GetExpectedHashesFromFor(metaIndex * const Parser, std::string const &MetaKey)/*{{{*/
219 {
220 if (Parser == NULL)
221 return HashStringList();
222 metaIndex::checkSum * const R = Parser->Lookup(MetaKey);
223 if (R == NULL)
224 return HashStringList();
225 return R->Hashes;
226 }
227 /*}}}*/
228
229 // all ::HashesRequired and ::GetExpectedHashes implementations /*{{{*/
230 /* ::GetExpectedHashes is abstract and has to be implemented by all subclasses.
231 It is best to implement it as broadly as possible, while ::HashesRequired defaults
232 to true and should be as restrictive as possible for false cases. Note that if
233 a hash is returned by ::GetExpectedHashes it must match. Only if none is
234 returned is ::HashesRequired called to evaluate whether it's okay to have no hashes. */
235 APT_CONST bool pkgAcqTransactionItem::HashesRequired() const
236 {
237 /* signed repositories obviously have a parser and good hashes.
238 unsigned repositories, too, as even if we can't trust them for security,
239 we can at least trust them for integrity of the download itself.
240 Only repositories without a Release file can (obviously) not have
241 hashes – and they are very uncommon and strongly discouraged */
242 return TransactionManager->MetaIndexParser->GetLoadedSuccessfully() == metaIndex::TRI_YES;
243 }
244 HashStringList pkgAcqTransactionItem::GetExpectedHashes() const
245 {
246 return GetExpectedHashesFor(GetMetaKey());
247 }
248
249 APT_CONST bool pkgAcqMetaBase::HashesRequired() const
250 {
251 // Release and co have no hashes 'by design'.
252 return false;
253 }
254 HashStringList pkgAcqMetaBase::GetExpectedHashes() const
255 {
256 return HashStringList();
257 }
258
259 APT_CONST bool pkgAcqIndexDiffs::HashesRequired() const
260 {
261 /* We can't check hashes of rred result as we don't know what the
262 hash of the file will be. We just know the hash of the patch(es),
263 the hash of the file they will apply on and the hash of the resulting
264 file. */
265 if (State == StateFetchDiff)
266 return true;
267 return false;
268 }
269 HashStringList pkgAcqIndexDiffs::GetExpectedHashes() const
270 {
271 if (State == StateFetchDiff)
272 return available_patches[0].download_hashes;
273 return HashStringList();
274 }
275
276 APT_CONST bool pkgAcqIndexMergeDiffs::HashesRequired() const
277 {
278 /* @see #pkgAcqIndexDiffs::HashesRequired, with the difference that
279 we can check the rred result after all patches are applied as
280 we know the expected result rather than potentially applying more patches */
281 if (State == StateFetchDiff)
282 return true;
283 return State == StateApplyDiff;
284 }
285 HashStringList pkgAcqIndexMergeDiffs::GetExpectedHashes() const
286 {
287 if (State == StateFetchDiff)
288 return patch.download_hashes;
289 else if (State == StateApplyDiff)
290 return GetExpectedHashesFor(Target.MetaKey);
291 return HashStringList();
292 }
293
294 APT_CONST bool pkgAcqArchive::HashesRequired() const
295 {
296 return LocalSource == false;
297 }
298 HashStringList pkgAcqArchive::GetExpectedHashes() const
299 {
300 // figured out while parsing the records
301 return ExpectedHashes;
302 }
303
304 APT_CONST bool pkgAcqFile::HashesRequired() const
305 {
306 // supplied as parameter at creation time, so the caller decides
307 return ExpectedHashes.usable();
308 }
309 HashStringList pkgAcqFile::GetExpectedHashes() const
310 {
311 return ExpectedHashes;
312 }
313 /*}}}*/
314 // Acquire::Item::QueueURI and specialisations from child classes /*{{{*/
315 bool pkgAcquire::Item::QueueURI(pkgAcquire::ItemDesc &Item)
316 {
317 Owner->Enqueue(Item);
318 return true;
319 }
320 /* The idea here is that an item isn't queued if it exists on disk and the
321 transaction manager was a hit, as this means that the files it contains
322 the checksums for can't be updated either (or they are, and we are asking
323 for a hashsum mismatch to happen, which helps nobody) */
324 bool pkgAcqTransactionItem::QueueURI(pkgAcquire::ItemDesc &Item)
325 {
326 if (TransactionManager->State != TransactionStarted)
327 {
328 if (_config->FindB("Debug::Acquire::Transaction", false))
329 std::clog << "Skip " << Target.URI << " as transaction was already dealt with!" << std::endl;
330 return false;
331 }
332 std::string const FinalFile = GetFinalFilename();
333 if (TransactionManager->IMSHit == true && FileExists(FinalFile) == true)
334 {
335 PartialFile = DestFile = FinalFile;
336 Status = StatDone;
337 return false;
338 }
339 // If we got the InRelease file via a mirror, pick all indexes directly from this mirror, too
340 if (TransactionManager->BaseURI.empty() == false &&
341 URI::SiteOnly(Item.URI) != URI::SiteOnly(TransactionManager->BaseURI))
342 {
343 // this ensures we rewrite only once and only in the first step
344 auto const OldBaseURI = Target.Option(IndexTarget::BASE_URI);
345 if (OldBaseURI.empty() == false && APT::String::Startswith(Item.URI, OldBaseURI))
346 {
347 auto const ExtraPath = Item.URI.substr(OldBaseURI.length());
348 Item.URI = flCombine(TransactionManager->BaseURI, ExtraPath);
349 UsedMirror = TransactionManager->UsedMirror;
350 if (Item.Description.find(" ") != string::npos)
351 Item.Description.replace(0, Item.Description.find(" "), UsedMirror);
352 }
353 }
354 return pkgAcquire::Item::QueueURI(Item);
355 }
356 /* The transaction manager's InRelease itself (or its older siblings
357 Release & Release.gpg) is always queued as this allows us to rerun gpgv
358 on it to verify that we aren't stalled with old files */
359 bool pkgAcqMetaBase::QueueURI(pkgAcquire::ItemDesc &Item)
360 {
361 return pkgAcquire::Item::QueueURI(Item);
362 }
363 /* the Diff/Index also needs to queue the up-to-date complete index file
364 to ensure that the list cleaner isn't eating it */
365 bool pkgAcqDiffIndex::QueueURI(pkgAcquire::ItemDesc &Item)
366 {
367 if (pkgAcqTransactionItem::QueueURI(Item) == true)
368 return true;
369 QueueOnIMSHit();
370 return false;
371 }
372 /*}}}*/
373 // Acquire::Item::GetFinalFilename and specialisations for child classes /*{{{*/
374 std::string pkgAcquire::Item::GetFinalFilename() const
375 {
376 // Beware: Desc.URI is modified by redirections
377 return GetFinalFileNameFromURI(Desc.URI);
378 }
379 std::string pkgAcqDiffIndex::GetFinalFilename() const
380 {
381 return GetFinalFileNameFromURI(GetDiffIndexURI(Target));
382 }
383 std::string pkgAcqIndex::GetFinalFilename() const
384 {
385 std::string const FinalFile = GetFinalFileNameFromURI(Target.URI);
386 return GetKeepCompressedFileName(FinalFile, Target);
387 }
388 std::string pkgAcqMetaSig::GetFinalFilename() const
389 {
390 return GetFinalFileNameFromURI(Target.URI);
391 }
392 std::string pkgAcqBaseIndex::GetFinalFilename() const
393 {
394 return GetFinalFileNameFromURI(Target.URI);
395 }
396 std::string pkgAcqMetaBase::GetFinalFilename() const
397 {
398 return GetFinalFileNameFromURI(Target.URI);
399 }
400 std::string pkgAcqArchive::GetFinalFilename() const
401 {
402 return _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename);
403 }
404 /*}}}*/
405 // pkgAcqTransactionItem::GetMetaKey and specialisations for child classes /*{{{*/
406 std::string pkgAcqTransactionItem::GetMetaKey() const
407 {
408 return Target.MetaKey;
409 }
410 std::string pkgAcqIndex::GetMetaKey() const
411 {
412 if (Stage == STAGE_DECOMPRESS_AND_VERIFY || CurrentCompressionExtension == "uncompressed")
413 return Target.MetaKey;
414 return Target.MetaKey + "." + CurrentCompressionExtension;
415 }
416 std::string pkgAcqDiffIndex::GetMetaKey() const
417 {
418 return GetDiffIndexFileName(Target.MetaKey);
419 }
420 /*}}}*/
421 //pkgAcqTransactionItem::TransactionState and specialisations for child classes /*{{{*/
422 bool pkgAcqTransactionItem::TransactionState(TransactionStates const state)
423 {
424 bool const Debug = _config->FindB("Debug::Acquire::Transaction", false);
425 switch(state)
426 {
427 case TransactionStarted: _error->Fatal("Item %s changed to invalid transaction start state!", Target.URI.c_str()); break;
428 case TransactionAbort:
429 if(Debug == true)
430 std::clog << " Cancel: " << DestFile << std::endl;
431 if (Status == pkgAcquire::Item::StatIdle)
432 {
433 Status = pkgAcquire::Item::StatDone;
434 Dequeue();
435 }
436 break;
437 case TransactionCommit:
438 if(PartialFile.empty() == false)
439 {
440 bool sameFile = (PartialFile == DestFile);
441 // we use symlinks on IMS-Hit to avoid copies
442 if (RealFileExists(DestFile))
443 {
444 struct stat Buf;
445 if (lstat(PartialFile.c_str(), &Buf) != -1)
446 {
447 if (S_ISLNK(Buf.st_mode) && Buf.st_size > 0)
448 {
449 char partial[Buf.st_size + 1];
450 ssize_t const sp = readlink(PartialFile.c_str(), partial, Buf.st_size);
451 if (sp == -1)
452 _error->Errno("pkgAcqTransactionItem::TransactionState-sp", _("Failed to readlink %s"), PartialFile.c_str());
453 else
454 {
455 partial[sp] = '\0';
456 sameFile = (DestFile == partial);
457 }
458 }
459 }
460 else
461 _error->Errno("pkgAcqTransactionItem::TransactionState-stat", _("Failed to stat %s"), PartialFile.c_str());
462 }
463 if (sameFile == false)
464 {
465 // ensure that even without lists-cleanup all compressions are nuked
466 std::string FinalFile = GetFinalFileNameFromURI(Target.URI);
467 if (FileExists(FinalFile))
468 {
469 if(Debug == true)
470 std::clog << "rm " << FinalFile << " # " << DescURI() << std::endl;
471 if (RemoveFile("TransactionStates-Cleanup", FinalFile) == false)
472 return false;
473 }
474 for (auto const &ext: APT::Configuration::getCompressorExtensions())
475 {
476 auto const Final = FinalFile + ext;
477 if (FileExists(Final))
478 {
479 if(Debug == true)
480 std::clog << "rm " << Final << " # " << DescURI() << std::endl;
481 if (RemoveFile("TransactionStates-Cleanup", Final) == false)
482 return false;
483 }
484 }
485 if(Debug == true)
486 std::clog << "mv " << PartialFile << " -> "<< DestFile << " # " << DescURI() << std::endl;
487 if (Rename(PartialFile, DestFile) == false)
488 return false;
489 }
490 else if(Debug == true)
491 std::clog << "keep " << PartialFile << " # " << DescURI() << std::endl;
492
493 } else {
494 if(Debug == true)
495 std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
496 if (RemoveFile("TransItem::TransactionCommit", DestFile) == false)
497 return false;
498 }
499 break;
500 }
501 return true;
502 }
503 bool pkgAcqMetaBase::TransactionState(TransactionStates const state)
504 {
505 // Do not remove InRelease on IMSHit of Release.gpg [yes, this is very edgecasey]
506 if (TransactionManager->IMSHit == false)
507 return pkgAcqTransactionItem::TransactionState(state);
508 return true;
509 }
510 bool pkgAcqIndex::TransactionState(TransactionStates const state)
511 {
512 if (pkgAcqTransactionItem::TransactionState(state) == false)
513 return false;
514
515 switch (state)
516 {
517 case TransactionStarted: _error->Fatal("AcqIndex %s changed to invalid transaction start state!", Target.URI.c_str()); break;
518 case TransactionAbort:
519 if (Stage == STAGE_DECOMPRESS_AND_VERIFY)
520 {
521 // keep the compressed file, but drop the decompressed
522 EraseFileName.clear();
523 if (PartialFile.empty() == false && flExtension(PartialFile) != CurrentCompressionExtension)
524 RemoveFile("TransactionAbort", PartialFile);
525 }
526 break;
527 case TransactionCommit:
528 if (EraseFileName.empty() == false)
529 RemoveFile("AcqIndex::TransactionCommit", EraseFileName);
530 break;
531 }
532 return true;
533 }
534 bool pkgAcqDiffIndex::TransactionState(TransactionStates const state)
535 {
536 if (pkgAcqTransactionItem::TransactionState(state) == false)
537 return false;
538
539 switch (state)
540 {
541 case TransactionStarted: _error->Fatal("Item %s changed to invalid transaction start state!", Target.URI.c_str()); break;
542 case TransactionCommit:
543 break;
544 case TransactionAbort:
545 std::string const Partial = GetPartialFileNameFromURI(Target.URI);
546 RemoveFile("TransactionAbort", Partial);
547 break;
548 }
549
550 return true;
551 }
552 /*}}}*/
553
554 class APT_HIDDEN NoActionItem : public pkgAcquire::Item /*{{{*/
555 /* The sole purpose of this class is having an item which does nothing to
556 reach its done state to prevent cleanup deleting the mentioned file.
557 Handy in cases in which we know we have the file already, like IMS-Hits. */
558 {
559 IndexTarget const Target;
560 public:
561 virtual std::string DescURI() const APT_OVERRIDE {return Target.URI;};
562 virtual HashStringList GetExpectedHashes() const APT_OVERRIDE {return HashStringList();};
563
564 NoActionItem(pkgAcquire * const Owner, IndexTarget const &Target) :
565 pkgAcquire::Item(Owner), Target(Target)
566 {
567 Status = StatDone;
568 DestFile = GetFinalFileNameFromURI(Target.URI);
569 }
570 NoActionItem(pkgAcquire * const Owner, IndexTarget const &Target, std::string const &FinalFile) :
571 pkgAcquire::Item(Owner), Target(Target)
572 {
573 Status = StatDone;
574 DestFile = FinalFile;
575 }
576 };
577 /*}}}*/
578 class APT_HIDDEN CleanupItem : public pkgAcqTransactionItem /*{{{*/
579 /* This class ensures that a file which was configured but isn't downloaded
580 for various reasons isn't kept in an old version in the lists directory.
581 In a way it's the reverse of NoActionItem, as it helps with removing files
582 even if the lists-cleanup is deactivated. */
583 {
584 public:
585 virtual std::string DescURI() const APT_OVERRIDE {return Target.URI;};
586 virtual HashStringList GetExpectedHashes() const APT_OVERRIDE {return HashStringList();};
587
588 CleanupItem(pkgAcquire * const Owner, pkgAcqMetaClearSig * const TransactionManager, IndexTarget const &Target) :
589 pkgAcqTransactionItem(Owner, TransactionManager, Target)
590 {
591 Status = StatDone;
592 DestFile = GetFinalFileNameFromURI(Target.URI);
593 }
594 bool TransactionState(TransactionStates const state) APT_OVERRIDE
595 {
596 switch (state)
597 {
598 case TransactionStarted:
599 break;
600 case TransactionAbort:
601 break;
602 case TransactionCommit:
603 if (_config->FindB("Debug::Acquire::Transaction", false) == true)
604 std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
605 if (RemoveFile("TransItem::TransactionCommit", DestFile) == false)
606 return false;
607 break;
608 }
609 return true;
610 }
611 };
612 /*}}}*/
613
614 // Acquire::Item::Item - Constructor /*{{{*/
615 APT_IGNORE_DEPRECATED_PUSH
616 pkgAcquire::Item::Item(pkgAcquire * const owner) :
617 FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false), Local(false),
618 QueueCounter(0), ExpectedAdditionalItems(0), Owner(owner), d(NULL)
619 {
620 Owner->Add(this);
621 Status = StatIdle;
622 }
623 APT_IGNORE_DEPRECATED_POP
624 /*}}}*/
625 // Acquire::Item::~Item - Destructor /*{{{*/
626 pkgAcquire::Item::~Item()
627 {
628 Owner->Remove(this);
629 }
630 /*}}}*/
631 std::string pkgAcquire::Item::Custom600Headers() const /*{{{*/
632 {
633 return std::string();
634 }
635 /*}}}*/
636 std::string pkgAcquire::Item::ShortDesc() const /*{{{*/
637 {
638 return DescURI();
639 }
640 /*}}}*/
641 APT_CONST void pkgAcquire::Item::Finished() /*{{{*/
642 {
643 }
644 /*}}}*/
645 APT_PURE pkgAcquire * pkgAcquire::Item::GetOwner() const /*{{{*/
646 {
647 return Owner;
648 }
649 /*}}}*/
650 APT_CONST pkgAcquire::ItemDesc &pkgAcquire::Item::GetItemDesc() /*{{{*/
651 {
652 return Desc;
653 }
654 /*}}}*/
655 APT_CONST bool pkgAcquire::Item::IsTrusted() const /*{{{*/
656 {
657 return false;
658 }
659 /*}}}*/
660 // Acquire::Item::Failed - Item failed to download /*{{{*/
661 // ---------------------------------------------------------------------
662 /* We return to an idle state if there are still other queues that could
663 fetch this object */
664 void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
665 {
666 if (QueueCounter <= 1)
667 {
668 /* This indicates that the file is not available right now but might
669 be sometime later. If we do a retry cycle then this should be
670 retried [CDROMs] */
671 if (Cnf != NULL && Cnf->LocalOnly == true &&
672 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
673 {
674 Status = StatIdle;
675 Dequeue();
676 return;
677 }
678
679 switch (Status)
680 {
681 case StatIdle:
682 case StatFetching:
683 case StatDone:
684 Status = StatError;
685 break;
686 case StatAuthError:
687 case StatError:
688 case StatTransientNetworkError:
689 break;
690 }
691 Complete = false;
692 Dequeue();
693 }
694
695 string const FailReason = LookupTag(Message, "FailReason");
696 enum { MAXIMUM_SIZE_EXCEEDED, HASHSUM_MISMATCH, OTHER } failreason = OTHER;
697 if (FailReason == "MaximumSizeExceeded")
698 failreason = MAXIMUM_SIZE_EXCEEDED;
699 else if (Status == StatAuthError)
700 failreason = HASHSUM_MISMATCH;
701
702 if(ErrorText.empty())
703 {
704 if (Status == StatAuthError)
705 {
706 std::ostringstream out;
707 switch (failreason)
708 {
709 case HASHSUM_MISMATCH:
710 out << _("Hash Sum mismatch") << std::endl;
711 break;
712 case MAXIMUM_SIZE_EXCEEDED:
713 case OTHER:
714 out << LookupTag(Message, "Message") << std::endl;
715 break;
716 }
717 auto const ExpectedHashes = GetExpectedHashes();
718 if (ExpectedHashes.empty() == false)
719 {
720 out << "Hashes of expected file:" << std::endl;
721 for (auto const &hs: ExpectedHashes)
722 out << " - " << hs.toStr() << std::endl;
723 }
724 if (failreason == HASHSUM_MISMATCH)
725 {
726 out << "Hashes of received file:" << std::endl;
727 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
728 {
729 std::string const tagname = std::string(*type) + "-Hash";
730 std::string const hashsum = LookupTag(Message, tagname.c_str());
731 if (hashsum.empty() == false)
732 out << " - " << HashString(*type, hashsum).toStr() << std::endl;
733 }
734 out << "Last modification reported: " << LookupTag(Message, "Last-Modified", "<none>") << std::endl;
735 }
736 ErrorText = out.str();
737 }
738 else
739 ErrorText = LookupTag(Message,"Message");
740 }
741
742 switch (failreason)
743 {
744 case MAXIMUM_SIZE_EXCEEDED: RenameOnError(MaximumSizeExceeded); break;
745 case HASHSUM_MISMATCH: RenameOnError(HashSumMismatch); break;
746 case OTHER: break;
747 }
748
749 if (FailReason.empty() == false)
750 ReportMirrorFailureToCentral(*this, FailReason, ErrorText);
751 else
752 ReportMirrorFailureToCentral(*this, ErrorText, ErrorText);
753
754 if (QueueCounter > 1)
755 Status = StatIdle;
756 }
757 /*}}}*/
758 // Acquire::Item::Start - Item has begun to download /*{{{*/
759 // ---------------------------------------------------------------------
760 /* Stash status and the file size. Note that setting Complete means
761 sub-phases of the acquire process such as decompression are operating */
762 void pkgAcquire::Item::Start(string const &/*Message*/, unsigned long long const Size)
763 {
764 Status = StatFetching;
765 ErrorText.clear();
766 if (FileSize == 0 && Complete == false)
767 FileSize = Size;
768 }
769 /*}}}*/
770 // Acquire::Item::VerifyDone - check if Item was downloaded OK /*{{{*/
771 /* Note that hash-verification is 'hardcoded' in acquire-worker and has
772 * already passed if this method is called. */
773 bool pkgAcquire::Item::VerifyDone(std::string const &Message,
774 pkgAcquire::MethodConfig const * const /*Cnf*/)
775 {
776 std::string const FileName = LookupTag(Message,"Filename");
777 if (FileName.empty() == true)
778 {
779 Status = StatError;
780 ErrorText = "Method gave a blank filename";
781 return false;
782 }
783
784 return true;
785 }
786 /*}}}*/
787 // Acquire::Item::Done - Item downloaded OK /*{{{*/
788 void pkgAcquire::Item::Done(string const &/*Message*/, HashStringList const &Hashes,
789 pkgAcquire::MethodConfig const * const /*Cnf*/)
790 {
791 // We just downloaded something..
792 if (FileSize == 0)
793 {
794 unsigned long long const downloadedSize = Hashes.FileSize();
795 if (downloadedSize != 0)
796 {
797 FileSize = downloadedSize;
798 }
799 }
800 Status = StatDone;
801 ErrorText = string();
802 Owner->Dequeue(this);
803 }
804 /*}}}*/
805 // Acquire::Item::Rename - Rename a file /*{{{*/
806 // ---------------------------------------------------------------------
807 /* This helper function is used by a lot of item methods as their final
808 step */
809 bool pkgAcquire::Item::Rename(string const &From,string const &To)
810 {
811 if (From == To || rename(From.c_str(),To.c_str()) == 0)
812 return true;
813
814 std::string S;
815 strprintf(S, _("rename failed, %s (%s -> %s)."), strerror(errno),
816 From.c_str(),To.c_str());
817 Status = StatError;
818 if (ErrorText.empty())
819 ErrorText = S;
820 else
821 ErrorText = ErrorText + ": " + S;
822 return false;
823 }
824 /*}}}*/
825 void pkgAcquire::Item::Dequeue() /*{{{*/
826 {
827 Owner->Dequeue(this);
828 }
829 /*}}}*/
830 bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/
831 {
832 if (RealFileExists(DestFile))
833 Rename(DestFile, DestFile + ".FAILED");
834
835 std::string errtext;
836 switch (error)
837 {
838 case HashSumMismatch:
839 errtext = _("Hash Sum mismatch");
840 break;
841 case SizeMismatch:
842 errtext = _("Size mismatch");
843 Status = StatAuthError;
844 break;
845 case InvalidFormat:
846 errtext = _("Invalid file format");
847 Status = StatError;
848 // do not report, as usually it's not the mirror's fault but a portal/proxy
849 break;
850 case SignatureError:
851 errtext = _("Signature error");
852 Status = StatError;
853 break;
854 case NotClearsigned:
855 strprintf(errtext, _("Clearsigned file isn't valid, got '%s' (does the network require authentication?)"), "NOSPLIT");
856 Status = StatAuthError;
857 break;
858 case MaximumSizeExceeded:
859 // the method is expected to report a good error for this
860 break;
861 case PDiffError:
862 // no handling here, done by callers
863 break;
864 }
865 if (ErrorText.empty())
866 ErrorText = errtext;
867 return false;
868 }
869 /*}}}*/
870 void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/
871 {
872 ActiveSubprocess = subprocess;
873 APT_IGNORE_DEPRECATED(Mode = ActiveSubprocess.c_str();)
874 }
875 /*}}}*/
876 // Acquire::Item::ReportMirrorFailure /*{{{*/
877 void pkgAcquire::Item::ReportMirrorFailure(std::string const &FailCode)
878 {
879 ReportMirrorFailureToCentral(*this, FailCode, FailCode);
880 }
881 /*}}}*/
882 std::string pkgAcquire::Item::HashSum() const /*{{{*/
883 {
884 HashStringList const hashes = GetExpectedHashes();
885 HashString const * const hs = hashes.find(NULL);
886 return hs != NULL ? hs->toStr() : "";
887 }
888 /*}}}*/
889
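// pkgAcqTransactionItem - Constructor: registers the new item with its
// transaction manager (unless the item is the manager itself).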
890 pkgAcqTransactionItem::pkgAcqTransactionItem(pkgAcquire * const Owner, /*{{{*/
891 pkgAcqMetaClearSig * const transactionManager, IndexTarget const &target) :
892 pkgAcquire::Item(Owner), d(NULL), Target(target), TransactionManager(transactionManager)
893 {
894 if (TransactionManager != this)
895 TransactionManager->Add(this);
896 }
897 /*}}}*/
898 pkgAcqTransactionItem::~pkgAcqTransactionItem() /*{{{*/
899 {
900 }
901 /*}}}*/
902 HashStringList pkgAcqTransactionItem::GetExpectedHashesFor(std::string const &MetaKey) const /*{{{*/
903 {
904 return GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, MetaKey);
905 }
906 /*}}}*/
907
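// Load the previously installed (In)Release file into LastMetaIndexParser so
// the freshly downloaded one can be compared against it; skipped on an IMS-Hit,
// and a file that fails to parse is silently discarded.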
908 static void LoadLastMetaIndexParser(pkgAcqMetaClearSig * const TransactionManager, std::string const &FinalRelease, std::string const &FinalInRelease)/*{{{*/
909 {
910 if (TransactionManager->IMSHit == true)
911 return;
912 if (RealFileExists(FinalInRelease) || RealFileExists(FinalRelease))
913 {
914 TransactionManager->LastMetaIndexParser = TransactionManager->MetaIndexParser->UnloadedClone();
915 if (TransactionManager->LastMetaIndexParser != NULL)
916 {
917 _error->PushToStack();
918 if (RealFileExists(FinalInRelease))
919 TransactionManager->LastMetaIndexParser->Load(FinalInRelease, NULL);
920 else
921 TransactionManager->LastMetaIndexParser->Load(FinalRelease, NULL);
922 // it's unlikely to happen, but if what we have is bad, ignore it
923 if (_error->PendingError())
924 {
925 delete TransactionManager->LastMetaIndexParser;
926 TransactionManager->LastMetaIndexParser = NULL;
927 }
928 _error->RevertToStack();
929 }
930 }
931 }
932 /*}}}*/
933
934 // AcqMetaBase - Constructor /*{{{*/
935 pkgAcqMetaBase::pkgAcqMetaBase(pkgAcquire * const Owner,
936 pkgAcqMetaClearSig * const TransactionManager,
937 IndexTarget const &DataTarget)
938 : pkgAcqTransactionItem(Owner, TransactionManager, DataTarget), d(NULL),
939 AuthPass(false), IMSHit(false), State(TransactionStarted)
940 {
941 }
942 /*}}}*/
943 // AcqMetaBase::Add - Add an item to the current Transaction /*{{{*/
944 void pkgAcqMetaBase::Add(pkgAcqTransactionItem * const I)
945 {
946 Transaction.push_back(I);
947 }
948 /*}}}*/
949 // AcqMetaBase::AbortTransaction - Abort the current Transaction /*{{{*/
950 void pkgAcqMetaBase::AbortTransaction()
951 {
952 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
953 std::clog << "AbortTransaction: " << TransactionManager << std::endl;
954
955 switch (TransactionManager->State)
956 {
957 case TransactionStarted: break;
958 case TransactionAbort: _error->Fatal("Transaction %s was already aborted and is aborted again", TransactionManager->Target.URI.c_str()); return;
959 case TransactionCommit: _error->Fatal("Transaction %s was already aborted and is now committed", TransactionManager->Target.URI.c_str()); return;
960 }
961 TransactionManager->State = TransactionAbort;
962
963 // ensure the toplevel is in error state too
964 for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
965 I != Transaction.end(); ++I)
966 {
967 if ((*I)->Status != pkgAcquire::Item::StatFetching)
968 Owner->Dequeue(*I);
969 (*I)->TransactionState(TransactionAbort);
970 }
971 Transaction.clear();
972 }
973 /*}}}*/
974 // AcqMetaBase::TransactionHasError - Check for errors in Transaction /*{{{*/
975 APT_PURE bool pkgAcqMetaBase::TransactionHasError() const
976 {
977 for (std::vector<pkgAcqTransactionItem*>::const_iterator I = Transaction.begin();
978 I != Transaction.end(); ++I)
979 {
980 switch((*I)->Status) {
981 case StatDone: break;
982 case StatIdle: break;
983 case StatAuthError: return true;
984 case StatError: return true;
985 case StatTransientNetworkError: return true;
986 case StatFetching: break;
987 }
988 }
989 return false;
990 }
991 /*}}}*/
992 // AcqMetaBase::CommitTransaction - Commit a transaction /*{{{*/
993 void pkgAcqMetaBase::CommitTransaction()
994 {
995 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
996 std::clog << "CommitTransaction: " << this << std::endl;
997
998 switch (TransactionManager->State)
999 {
1000 case TransactionStarted: break;
1001 case TransactionAbort: _error->Fatal("Transaction %s was already committed and is now aborted", TransactionManager->Target.URI.c_str()); return;
1002 case TransactionCommit: _error->Fatal("Transaction %s was already committed and is again committed", TransactionManager->Target.URI.c_str()); return;
1003 }
1004 TransactionManager->State = TransactionCommit;
1005
1006 // move new files into place *and* remove files that are not
1007 // part of the transaction but are still on disk
1008 for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
1009 I != Transaction.end(); ++I)
1010 {
1011 (*I)->TransactionState(TransactionCommit);
1012 }
1013 Transaction.clear();
1014 }
1015 /*}}}*/
1016 // AcqMetaBase::TransactionStageCopy - Stage a file for copying /*{{{*/
1017 void pkgAcqMetaBase::TransactionStageCopy(pkgAcqTransactionItem * const I,
1018 const std::string &From,
1019 const std::string &To)
1020 {
1021 I->PartialFile = From;
1022 I->DestFile = To;
1023 }
1024 /*}}}*/
1025 // AcqMetaBase::TransactionStageRemoval - Stage a file for removal /*{{{*/
1026 void pkgAcqMetaBase::TransactionStageRemoval(pkgAcqTransactionItem * const I,
1027 const std::string &FinalFile)
1028 {
1029 I->PartialFile = "";
1030 I->DestFile = FinalFile;
1031 }
1032 /*}}}*/
1033 // AcqMetaBase::GenerateAuthWarning - Check gpg authentication error /*{{{*/
1034 /* This method is called from ::Failed handlers. If it returns true,
1035 no fallback to other files or modes is performed */
1036 bool pkgAcqMetaBase::CheckStopAuthentication(pkgAcquire::Item * const I, const std::string &Message)
1037 {
1038 string const Final = I->GetFinalFilename();
1039 std::string const GPGError = LookupTag(Message, "Message");
1040 if (FileExists(Final))
1041 {
1042 I->Status = StatTransientNetworkError;
1043 _error->Warning(_("An error occurred during the signature verification. "
1044 "The repository is not updated and the previous index files will be used. "
1045 "GPG error: %s: %s"),
1046 Desc.Description.c_str(),
1047 GPGError.c_str());
1048 RunScripts("APT::Update::Auth-Failure");
1049 return true;
1050 } else if (LookupTag(Message,"Message").find("NODATA") != string::npos) {
1051 /* Invalid signature file, reject (LP: #346386) (Closes: #627642) */
1052 _error->Error(_("GPG error: %s: %s"),
1053 Desc.Description.c_str(),
1054 GPGError.c_str());
1055 I->Status = StatAuthError;
1056 return true;
1057 } else {
1058 _error->Warning(_("GPG error: %s: %s"),
1059 Desc.Description.c_str(),
1060 GPGError.c_str());
1061 }
1062 // gpgv method failed
1063 ReportMirrorFailureToCentral(*this, "GPGFailure", GPGError);
1064 return false;
1065 }
1066 /*}}}*/
1067 // AcqMetaBase::Custom600Headers - Get header for AcqMetaBase /*{{{*/
1068 // ---------------------------------------------------------------------
1069 string pkgAcqMetaBase::Custom600Headers() const
1070 {
1071 std::string Header = "\nIndex-File: true";
1072 std::string MaximumSize;
1073 strprintf(MaximumSize, "\nMaximum-Size: %i",
1074 _config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000));
1075 Header += MaximumSize;
1076
1077 string const FinalFile = GetFinalFilename();
1078 struct stat Buf;
1079 if (stat(FinalFile.c_str(),&Buf) == 0)
1080 Header += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
1081
1082 return Header;
1083 }
1084 /*}}}*/
1085 // AcqMetaBase::QueueForSignatureVerify /*{{{*/
1086 void pkgAcqMetaBase::QueueForSignatureVerify(pkgAcqTransactionItem * const I, std::string const &File, std::string const &Signature)
1087 {
1088 AuthPass = true;
1089 I->Desc.URI = "gpgv:" + Signature;
1090 I->DestFile = File;
1091 QueueURI(I->Desc);
1092 I->SetActiveSubprocess("gpgv");
1093 }
1094 /*}}}*/
1095 // AcqMetaBase::CheckDownloadDone /*{{{*/
1096 bool pkgAcqMetaBase::CheckDownloadDone(pkgAcqTransactionItem * const I, const std::string &Message, HashStringList const &Hashes) const
1097 {
1098 // We have just finished downloading a Release file (it is not
1099 // verified yet)
1100
1101 // Save the final base URI we got this Release file from
1102 if (I->UsedMirror.empty() == false && _config->FindB("Acquire::SameMirrorForAllIndexes", true))
1103 {
1104 if (APT::String::Endswith(I->Desc.URI, "InRelease"))
1105 TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("InRelease"));
1106 else if (APT::String::Endswith(I->Desc.URI, "Release"))
1107 TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("Release"));
1108 }
1109
1110 std::string const FileName = LookupTag(Message,"Filename");
1111 if (FileName != I->DestFile && RealFileExists(I->DestFile) == false)
1112 {
1113 I->Local = true;
1114 I->Desc.URI = "copy:" + FileName;
1115 I->QueueURI(I->Desc);
1116 return false;
1117 }
1118
1119 // make sure to verify against the right file on I-M-S hit
1120 bool IMSHit = StringToBool(LookupTag(Message,"IMS-Hit"), false);
1121 if (IMSHit == false && Hashes.usable())
1122 {
1123 // detect IMS-Hits the server hasn't detected, via hash comparison
1124 std::string const FinalFile = I->GetFinalFilename();
1125 if (RealFileExists(FinalFile) && Hashes.VerifyFile(FinalFile) == true)
1126 {
1127 IMSHit = true;
1128 RemoveFile("CheckDownloadDone", I->DestFile);
1129 }
1130 }
1131
1132 if(IMSHit == true)
1133 {
1134 // for simplicity, the transaction manager is always InRelease
1135 // even if it doesn't exist.
1136 TransactionManager->IMSHit = true;
1137 I->PartialFile = I->DestFile = I->GetFinalFilename();
1138 }
1139
1140 // set Item to complete as the remaining work is all local (verify etc)
1141 I->Complete = true;
1142
1143 return true;
1144 }
1145 /*}}}*/
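// AcqMetaBase::CheckAuthDone - gpgv accepted the signature; verify the content
// of the now-trusted (In)Release file and queue the index files it lists.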
1146 bool pkgAcqMetaBase::CheckAuthDone(string const &Message) /*{{{*/
1147 {
1148 // At this point, the gpgv method has succeeded, so there is a
1149 // valid signature from a key in the trusted keyring. We
1150 // perform additional verification of its contents, and use them
1151 // to verify the indexes we are about to download
1152
1153 if (TransactionManager->IMSHit == false)
1154 {
1155 // open the last (In)Release if we have it
1156 std::string const FinalFile = GetFinalFilename();
1157 std::string FinalRelease;
1158 std::string FinalInRelease;
1159 if (APT::String::Endswith(FinalFile, "InRelease"))
1160 {
1161 FinalInRelease = FinalFile;
1162 FinalRelease = FinalFile.substr(0, FinalFile.length() - strlen("InRelease")) + "Release";
1163 }
1164 else
1165 {
1166 FinalInRelease = FinalFile.substr(0, FinalFile.length() - strlen("Release")) + "InRelease";
1167 FinalRelease = FinalFile;
1168 }
1169 LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
1170 }
1171
1172 if (TransactionManager->MetaIndexParser->Load(DestFile, &ErrorText) == false)
1173 {
1174 Status = StatAuthError;
1175 return false;
1176 }
1177
1178 if (!VerifyVendor(Message))
1179 {
1180 Status = StatAuthError;
1181 return false;
1182 }
1183
1184 if (_config->FindB("Debug::pkgAcquire::Auth", false))
1185 std::cerr << "Signature verification succeeded: "
1186 << DestFile << std::endl;
1187
1188 // Download further indexes with verification
1189 TransactionManager->QueueIndexes(true);
1190
1191 return true;
1192 }
1193 /*}}}*/
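// pkgAcqMetaClearSig::QueueIndexes - Queue every index target listed in the
// (In)Release file, selecting compression types, by-hash and pdiff usage per
// target; with verify == false no hashes are available to be enforced.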
1194 void pkgAcqMetaClearSig::QueueIndexes(bool const verify) /*{{{*/
1195 {
1196 // at this point the real Items are loaded in the fetcher
1197 ExpectedAdditionalItems = 0;
1198
1199 std::set<std::string> targetsSeen;
1200 bool const metaBaseSupportsByHash = TransactionManager->MetaIndexParser->GetSupportsAcquireByHash();
1201 for (auto &Target: TransactionManager->MetaIndexParser->GetIndexTargets())
1202 {
1203 // if we have already seen a target created by the target this one is declared
1204 // a fallback of, we skip acquiring the fallback (but we make sure we clean up)
1205 if (targetsSeen.find(Target.Option(IndexTarget::FALLBACK_OF)) != targetsSeen.end())
1206 {
1207 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1208 new CleanupItem(Owner, TransactionManager, Target);
1209 continue;
1210 }
1211 // 'all' is an implementation detail. Users shouldn't use this as an arch.
1212 // We need this support trickery here as e.g. Debian has binary-all files already,
1213 // but arch:all packages are still in the arch:any files, so we would waste precious
1214 // download time, bandwidth and diskspace for nothing, BUT Debian doesn't feature 'all'
1215 // in the set of supported architectures, so we can filter based on this property rather
1216 // than invent an entirely new flag we would need to carry for all of eternity.
1217 if (Target.Option(IndexTarget::ARCHITECTURE) == "all")
1218 {
1219 if (TransactionManager->MetaIndexParser->IsArchitectureSupported("all") == false ||
1220 TransactionManager->MetaIndexParser->IsArchitectureAllSupportedFor(Target) == false)
1221 {
1222 new CleanupItem(Owner, TransactionManager, Target);
1223 continue;
1224 }
1225 }
1226
1227 bool trypdiff = Target.OptionBool(IndexTarget::PDIFFS);
1228 if (verify == true)
1229 {
1230 if (TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false)
1231 {
1232 // optional targets that we do not have in the Release file are skipped
1233 if (Target.IsOptional)
1234 {
1235 new CleanupItem(Owner, TransactionManager, Target);
1236 continue;
1237 }
1238
1239 std::string const &arch = Target.Option(IndexTarget::ARCHITECTURE);
1240 if (arch.empty() == false)
1241 {
1242 if (TransactionManager->MetaIndexParser->IsArchitectureSupported(arch) == false)
1243 {
1244 new CleanupItem(Owner, TransactionManager, Target);
1245 _error->Notice(_("Skipping acquire of configured file '%s' as repository '%s' doesn't support architecture '%s'"),
1246 Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str(), arch.c_str());
1247 continue;
1248 }
1249 // if the architecture is officially supported but no packages are currently available for it,
1250 // ignore it silently, as this is pretty much the same as just shipping an empty file.
1251 // if we don't know which architectures are supported, we do NOT ignore it, so the user is notified about this
1252 if (TransactionManager->MetaIndexParser->IsArchitectureSupported("*undefined*") == false)
1253 {
1254 new CleanupItem(Owner, TransactionManager, Target);
1255 continue;
1256 }
1257 }
1258
1259 Status = StatAuthError;
1260 strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), Target.MetaKey.c_str());
1261 return;
1262 }
1263 else
1264 {
1265 auto const hashes = GetExpectedHashesFor(Target.MetaKey);
1266 if (hashes.empty() == false)
1267 {
1268 if (hashes.usable() == false)
1269 {
1270 new CleanupItem(Owner, TransactionManager, Target);
1271 _error->Warning(_("Skipping acquire of configured file '%s' as repository '%s' provides only weak security information for it"),
1272 Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str());
1273 continue;
1274 }
1275 // empty files are skipped as acquiring the very small compressed files is a waste of time
1276 else if (hashes.FileSize() == 0)
1277 {
1278 new CleanupItem(Owner, TransactionManager, Target);
1279 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1280 continue;
1281 }
1282 }
1283 }
1284
1285 // autoselect the compression method
1286 std::vector<std::string> types = VectorizeString(Target.Option(IndexTarget::COMPRESSIONTYPES), ' ');
1287 types.erase(std::remove_if(types.begin(), types.end(), [&](std::string const &t) {
1288 if (t == "uncompressed")
1289 return TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false;
1290 std::string const MetaKey = Target.MetaKey + "." + t;
1291 return TransactionManager->MetaIndexParser->Exists(MetaKey) == false;
1292 }), types.end());
1293 if (types.empty() == false)
1294 {
1295 std::ostringstream os;
1296 // add the special compressiontype byhash first if supported
1297 std::string const useByHashConf = Target.Option(IndexTarget::BY_HASH);
1298 bool useByHash = false;
1299 if(useByHashConf == "force")
1300 useByHash = true;
1301 else
1302 useByHash = StringToBool(useByHashConf) == true && metaBaseSupportsByHash;
1303 if (useByHash == true)
1304 os << "by-hash ";
1305 std::copy(types.begin(), types.end()-1, std::ostream_iterator<std::string>(os, " "));
1306 os << *types.rbegin();
1307 Target.Options["COMPRESSIONTYPES"] = os.str();
1308 }
1309 else
1310 Target.Options["COMPRESSIONTYPES"].clear();
1311
1312 std::string filename = GetExistingFilename(GetFinalFileNameFromURI(Target.URI));
1313 if (filename.empty() == false)
1314 {
1315 // if the Release file is a hit and we have an index it must be the current one
1316 if (TransactionManager->IMSHit == true)
1317 ;
1318 else if (TransactionManager->LastMetaIndexParser != NULL)
1319 {
1320 // see if the file changed since the last Release file
1321 // we use the uncompressed files as we might compress differently compared to the server,
1322 // so the hashes might not match, even if they contain the same data.
1323 HashStringList const newFile = GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, Target.MetaKey);
1324 HashStringList const oldFile = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
1325 if (newFile != oldFile)
1326 filename.clear();
1327 }
1328 else
1329 filename.clear();
1330 }
1331 else
1332 trypdiff = false; // no file to patch
1333
1334 if (filename.empty() == false)
1335 {
1336 new NoActionItem(Owner, Target, filename);
1337 std::string const idxfilename = GetFinalFileNameFromURI(GetDiffIndexURI(Target));
1338 if (FileExists(idxfilename))
1339 new NoActionItem(Owner, Target, idxfilename);
1340 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1341 continue;
1342 }
1343
1344 // check if we have patches available
1345 trypdiff &= TransactionManager->MetaIndexParser->Exists(GetDiffIndexFileName(Target.MetaKey));
1346 }
1347 else
1348 {
1349 // if we have no file to patch, no point in trying
1350 trypdiff &= (GetExistingFilename(GetFinalFileNameFromURI(Target.URI)).empty() == false);
1351 }
1352
1353 // no point in patching from local sources
1354 if (trypdiff)
1355 {
1356 std::string const proto = Target.URI.substr(0, strlen("file:/"));
1357 if (proto == "file:/" || proto == "copy:/" || proto == "cdrom:")
1358 trypdiff = false;
1359 }
1360
1361 // Queue the Index file (Packages, Sources, Translation-$foo, …)
1362 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1363 if (trypdiff)
1364 new pkgAcqDiffIndex(Owner, TransactionManager, Target);
1365 else
1366 new pkgAcqIndex(Owner, TransactionManager, Target);
1367 }
1368 }
1369 /*}}}*/
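// AcqMetaBase::VerifyVendor - Sanity-check the (In)Release file: refuse files
// past their Valid-Until date, treat a file older than the one we already have
// as a last-minute IMS-Hit, and warn if the codename doesn't match the expected dist.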
1370 bool pkgAcqMetaBase::VerifyVendor(string const &) /*{{{*/
1371 {
1372 string Transformed = TransactionManager->MetaIndexParser->GetExpectedDist();
1373
1374 if (Transformed == "../project/experimental")
1375 {
1376 Transformed = "experimental";
1377 }
1378
1379 auto pos = Transformed.rfind('/');
1380 if (pos != string::npos)
1381 {
1382 Transformed = Transformed.substr(0, pos);
1383 }
1384
1385 if (Transformed == ".")
1386 {
1387 Transformed = "";
1388 }
1389
1390 if (TransactionManager->MetaIndexParser->GetValidUntil() > 0)
1391 {
1392 time_t const invalid_since = time(NULL) - TransactionManager->MetaIndexParser->GetValidUntil();
1393 if (invalid_since > 0)
1394 {
1395 std::string errmsg;
1396 strprintf(errmsg,
1397 // TRANSLATOR: The first %s is the URL of the bad Release file, the second is
1398 // the time since then the file is invalid - formatted in the same way as in
1399 // the download progress display (e.g. 7d 3h 42min 1s)
1400 _("Release file for %s is expired (invalid since %s). "
1401 "Updates for this repository will not be applied."),
1402 Target.URI.c_str(), TimeToStr(invalid_since).c_str());
1403 if (ErrorText.empty())
1404 ErrorText = errmsg;
1405 return _error->Error("%s", errmsg.c_str());
1406 }
1407 }
1408
1409 /* Did we get a file older than what we have? This is a last minute IMS hit and doubles
1410 as a prevention of downgrading us to older (still valid) files */
1411 if (TransactionManager->IMSHit == false && TransactionManager->LastMetaIndexParser != NULL &&
1412 TransactionManager->LastMetaIndexParser->GetDate() > TransactionManager->MetaIndexParser->GetDate())
1413 {
1414 TransactionManager->IMSHit = true;
1415 RemoveFile("VerifyVendor", DestFile);
1416 PartialFile = DestFile = GetFinalFilename();
1417 // load the 'old' file into the 'new' one instead of flipping pointers, as
1418 // the new one isn't owned by us while the old one is, so cleanup would be confused.
1419 TransactionManager->MetaIndexParser->swapLoad(TransactionManager->LastMetaIndexParser);
1420 delete TransactionManager->LastMetaIndexParser;
1421 TransactionManager->LastMetaIndexParser = NULL;
1422 }
1423
1424 if (_config->FindB("Debug::pkgAcquire::Auth", false))
1425 {
1426 std::cerr << "Got Codename: " << TransactionManager->MetaIndexParser->GetCodename() << std::endl;
1427 std::cerr << "Expecting Dist: " << TransactionManager->MetaIndexParser->GetExpectedDist() << std::endl;
1428 std::cerr << "Transformed Dist: " << Transformed << std::endl;
1429 }
1430
1431 if (TransactionManager->MetaIndexParser->CheckDist(Transformed) == false)
1432 {
1433 // This might become fatal one day
1434 // Status = StatAuthError;
1435 // ErrorText = "Conflicting distribution; expected "
1436 // + MetaIndexParser->GetExpectedDist() + " but got "
1437 // + MetaIndexParser->GetCodename();
1438 // return false;
1439 if (!Transformed.empty())
1440 {
1441 _error->Warning(_("Conflicting distribution: %s (expected %s but got %s)"),
1442 Desc.Description.c_str(),
1443 Transformed.c_str(),
1444 TransactionManager->MetaIndexParser->GetCodename().c_str());
1445 }
1446 }
1447
1448 return true;
1449 }
1450 /*}}}*/
1451 pkgAcqMetaBase::~pkgAcqMetaBase()
1452 {
1453 }
1454
1455 pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire * const Owner, /*{{{*/
1456 IndexTarget const &ClearsignedTarget,
1457 IndexTarget const &DetachedDataTarget, IndexTarget const &DetachedSigTarget,
1458 metaIndex * const MetaIndexParser) :
1459 pkgAcqMetaIndex(Owner, this, ClearsignedTarget, DetachedSigTarget),
1460 d(NULL), ClearsignedTarget(ClearsignedTarget),
1461 DetachedDataTarget(DetachedDataTarget),
1462 MetaIndexParser(MetaIndexParser), LastMetaIndexParser(NULL)
1463 {
1464 // index targets + (worst case:) Release/Release.gpg
1465 ExpectedAdditionalItems = std::numeric_limits<decltype(ExpectedAdditionalItems)>::max();
1466 TransactionManager->Add(this);
1467 }
1468 /*}}}*/
1469 pkgAcqMetaClearSig::~pkgAcqMetaClearSig() /*{{{*/
1470 {
1471 if (LastMetaIndexParser != NULL)
1472 delete LastMetaIndexParser;
1473 }
1474 /*}}}*/
1475 // pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers /*{{{*/
1476 string pkgAcqMetaClearSig::Custom600Headers() const
1477 {
1478 string Header = pkgAcqMetaBase::Custom600Headers();
1479 Header += "\nFail-Ignore: true";
1480 std::string const key = TransactionManager->MetaIndexParser->GetSignedBy();
1481 if (key.empty() == false)
1482 Header += "\nSigned-By: " + key;
1483
1484 return Header;
1485 }
1486 /*}}}*/
1487 void pkgAcqMetaClearSig::Finished() /*{{{*/
1488 {
1489 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1490 std::clog << "Finished: " << DestFile <<std::endl;
1491 if(TransactionManager->State == TransactionStarted &&
1492 TransactionManager->TransactionHasError() == false)
1493 TransactionManager->CommitTransaction();
1494 }
1495 /*}}}*/
1496 bool pkgAcqMetaClearSig::VerifyDone(std::string const &Message, /*{{{*/
1497 pkgAcquire::MethodConfig const * const Cnf)
1498 {
1499 Item::VerifyDone(Message, Cnf);
1500
1501 if (FileExists(DestFile) && !StartsWithGPGClearTextSignature(DestFile))
1502 return RenameOnError(NotClearsigned);
1503
1504 return true;
1505 }
1506 /*}}}*/
1507 // pkgAcqMetaClearSig::Done - We got a file /*{{{*/
1508 void pkgAcqMetaClearSig::Done(std::string const &Message,
1509 HashStringList const &Hashes,
1510 pkgAcquire::MethodConfig const * const Cnf)
1511 {
1512 Item::Done(Message, Hashes, Cnf);
1513
1514 if(AuthPass == false)
1515 {
1516 if(CheckDownloadDone(this, Message, Hashes) == true)
1517 QueueForSignatureVerify(this, DestFile, DestFile);
1518 return;
1519 }
1520 else if(CheckAuthDone(Message) == true)
1521 {
1522 if (TransactionManager->IMSHit == false)
1523 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
1524 else if (RealFileExists(GetFinalFilename()) == false)
1525 {
1526 // We got an IMS-Hit for the InRelease file, but we don't have one on disk, which means
1527 // we had a valid Release/Release.gpg combo stepping in, which we have
1528 // to 'acquire' now to ensure list cleanup isn't removing them
1529 new NoActionItem(Owner, DetachedDataTarget);
1530 new NoActionItem(Owner, DetachedSigTarget);
1531 }
1532 }
1533 }
1534 /*}}}*/
1535 void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf) /*{{{*/
1536 {
1537 Item::Failed(Message, Cnf);
1538
1539 if (AuthPass == false)
1540 {
1541 if (Status == StatAuthError || Status == StatTransientNetworkError)
1542 {
1543 // if we expected a ClearTextSignature (InRelease) but got a network
1544 // error, or got a file that wasn't valid, we end up here (see VerifyDone).
1545 // As this is usually caused by web portals we do not try Release/Release.gpg,
1546 // as that is going to fail anyway; instead we abort our try (LP#346386)
1547 TransactionManager->AbortTransaction();
1548 return;
1549 }
1550
1551 // Queue the 'old' InRelease file for removal if we try Release.gpg
1552 // as otherwise the file would stay around and give a false-auth
1553 // impression (CVE-2012-0214)
1554 TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
1555 Status = StatDone;
1556
1557 new pkgAcqMetaIndex(Owner, TransactionManager, DetachedDataTarget, DetachedSigTarget);
1558 }
1559 else
1560 {
1561 if(CheckStopAuthentication(this, Message))
1562 return;
1563
1564 // No Release file was present, or verification failed, so fall
1565 // back to queueing Packages files without verification.
1566 // Only allow going further if the user explicitly wants it.
1567 if(AllowInsecureRepositories(_("The repository '%s' is not signed."), ClearsignedTarget.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
1568 {
1569 Status = StatDone;
1570
1571 /* InRelease files become Release files, otherwise
1572 * they would be considered as trusted later on */
1573 string const FinalRelease = GetFinalFileNameFromURI(DetachedDataTarget.URI);
1574 string const PartialRelease = GetPartialFileNameFromURI(DetachedDataTarget.URI);
1575 string const FinalReleasegpg = GetFinalFileNameFromURI(DetachedSigTarget.URI);
1576 string const FinalInRelease = GetFinalFilename();
1577 Rename(DestFile, PartialRelease);
1578 TransactionManager->TransactionStageCopy(this, PartialRelease, FinalRelease);
1579 LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
1580
1581 // we parse the indexes here because at this point the user wanted
1582 // a repository that may potentially harm them
1583 if (TransactionManager->MetaIndexParser->Load(PartialRelease, &ErrorText) == false || VerifyVendor(Message) == false)
1584 /* expired Release files are still a problem you need extra force for */;
1585 else
1586 TransactionManager->QueueIndexes(true);
1587 }
1588 }
1589 }
1590 /*}}}*/
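// Rough failure-handling sequence for a clearsigned InRelease, as an
// illustrative summary of the code above (not an exhaustive state machine):
//   1. transient network error or invalid signature data -> abort the whole
//      transaction; no Release/Release.gpg fallback is attempted
//   2. any other failure -> stage the old InRelease for removal
//      (CVE-2012-0214) and queue a pkgAcqMetaIndex for Release + Release.gpg
//   3. if that path is unsigned as well -> indexes are only queued when the
//      user has explicitly allowed insecure repositories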
1591
1592 pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire * const Owner, /*{{{*/
1593 pkgAcqMetaClearSig * const TransactionManager,
1594 IndexTarget const &DataTarget,
1595 IndexTarget const &DetachedSigTarget) :
1596 pkgAcqMetaBase(Owner, TransactionManager, DataTarget), d(NULL),
1597 DetachedSigTarget(DetachedSigTarget)
1598 {
1599 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1600 std::clog << "New pkgAcqMetaIndex with TransactionManager "
1601 << this->TransactionManager << std::endl;
1602
1603 DestFile = GetPartialFileNameFromURI(DataTarget.URI);
1604
1605 // Create the item
1606 Desc.Description = DataTarget.Description;
1607 Desc.Owner = this;
1608 Desc.ShortDesc = DataTarget.ShortDesc;
1609 Desc.URI = DataTarget.URI;
1610 QueueURI(Desc);
1611 }
1612 /*}}}*/
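// The transaction debug output above can be enabled at runtime, e.g.
// (illustrative invocation):
//   apt-get update -o Debug::Acquire::Transaction=true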
1613 void pkgAcqMetaIndex::Done(string const &Message, /*{{{*/
1614 HashStringList const &Hashes,
1615 pkgAcquire::MethodConfig const * const Cfg)
1616 {
1617 Item::Done(Message,Hashes,Cfg);
1618
1619 if(CheckDownloadDone(this, Message, Hashes))
1620 {
1621 // we have a Release file, now download the Signature, all further
1622 // verify/queue for additional downloads will be done in the
1623 // pkgAcqMetaSig::Done() code
1624 new pkgAcqMetaSig(Owner, TransactionManager, DetachedSigTarget, this);
1625 }
1626 }
1627 /*}}}*/
1628 // pkgAcqMetaIndex::Failed - no Release file present /*{{{*/
1629 void pkgAcqMetaIndex::Failed(string const &Message,
1630 pkgAcquire::MethodConfig const * const Cnf)
1631 {
1632 pkgAcquire::Item::Failed(Message, Cnf);
1633 Status = StatDone;
1634
1635 // No Release file was present so fall
1636 // back to queueing Packages files without verification
1637 // only allow going further if the user explicitly wants it
1638 if(AllowInsecureRepositories(_("The repository '%s' does not have a Release file."), Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
1639 {
1640 // ensure old Release files are removed
1641 TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
1642
1643 // queue without any kind of hashsum support
1644 TransactionManager->QueueIndexes(false);
1645 }
1646 }
1647 /*}}}*/
1648 std::string pkgAcqMetaIndex::DescURI() const /*{{{*/
1649 {
1650 return Target.URI;
1651 }
1652 /*}}}*/
1653 pkgAcqMetaIndex::~pkgAcqMetaIndex() {}
1654
1655 // AcqMetaSig::AcqMetaSig - Constructor /*{{{*/
1656 pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire * const Owner,
1657 pkgAcqMetaClearSig * const TransactionManager,
1658 IndexTarget const &Target,
1659 pkgAcqMetaIndex * const MetaIndex) :
1660 pkgAcqTransactionItem(Owner, TransactionManager, Target), d(NULL), MetaIndex(MetaIndex)
1661 {
1662 DestFile = GetPartialFileNameFromURI(Target.URI);
1663
1664 // remove any partially downloaded sig-file in partial/.
1665 // it may confuse proxies and is too small to warrant a
1666 // partial download anyway
1667 RemoveFile("pkgAcqMetaSig", DestFile);
1668
1669 // set the TransactionManager
1670 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1671 std::clog << "New pkgAcqMetaSig with TransactionManager "
1672 << TransactionManager << std::endl;
1673
1674 // Create the item
1675 Desc.Description = Target.Description;
1676 Desc.Owner = this;
1677 Desc.ShortDesc = Target.ShortDesc;
1678 Desc.URI = Target.URI;
1679
1680 // If we got a hit for Release, we will get one for Release.gpg too (or obscure errors),
1681 // so we skip the download step and go instantly to verification
1682 if (TransactionManager->IMSHit == true && RealFileExists(GetFinalFilename()))
1683 {
1684 Complete = true;
1685 Status = StatDone;
1686 PartialFile = DestFile = GetFinalFilename();
1687 MetaIndexFileSignature = DestFile;
1688 MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
1689 }
1690 else
1691 QueueURI(Desc);
1692 }
1693 /*}}}*/
1694 pkgAcqMetaSig::~pkgAcqMetaSig() /*{{{*/
1695 {
1696 }
1697 /*}}}*/
1698 // pkgAcqMetaSig::Custom600Headers - Insert custom request headers /*{{{*/
1699 std::string pkgAcqMetaSig::Custom600Headers() const
1700 {
1701 std::string Header = pkgAcqTransactionItem::Custom600Headers();
1702 std::string const key = TransactionManager->MetaIndexParser->GetSignedBy();
1703 if (key.empty() == false)
1704 Header += "\nSigned-By: " + key;
1705 return Header;
1706 }
1707 /*}}}*/
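// Illustrative shape of the extra header built above; the value is a
// placeholder and would in practice come from the Signed-By information
// known to the metaindex parser:
//   Signed-By: 0123456789ABCDEF0123456789ABCDEF01234567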
1708 // AcqMetaSig::Done - The signature was downloaded/verified /*{{{*/
1709 void pkgAcqMetaSig::Done(string const &Message, HashStringList const &Hashes,
1710 pkgAcquire::MethodConfig const * const Cfg)
1711 {
1712 if (MetaIndexFileSignature.empty() == false)
1713 {
1714 DestFile = MetaIndexFileSignature;
1715 MetaIndexFileSignature.clear();
1716 }
1717 Item::Done(Message, Hashes, Cfg);
1718
1719 if(MetaIndex->AuthPass == false)
1720 {
1721 if(MetaIndex->CheckDownloadDone(this, Message, Hashes) == true)
1722 {
1723 // DestFile will be modified to point to MetaIndexFile for the
1724 // gpgv method, so we need to save it here
1725 MetaIndexFileSignature = DestFile;
1726 MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
1727 }
1728 return;
1729 }
1730 else if(MetaIndex->CheckAuthDone(Message) == true)
1731 {
1732 if (TransactionManager->IMSHit == false)
1733 {
1734 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
1735 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, MetaIndex->GetFinalFilename());
1736 }
1737 }
1738 }
1739 /*}}}*/
1740 void pkgAcqMetaSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
1741 {
1742 Item::Failed(Message,Cnf);
1743
1744 // check if we need to fail at this point
1745 if (MetaIndex->AuthPass == true && MetaIndex->CheckStopAuthentication(this, Message))
1746 return;
1747
1748 string const FinalRelease = MetaIndex->GetFinalFilename();
1749 string const FinalReleasegpg = GetFinalFilename();
1750 string const FinalInRelease = TransactionManager->GetFinalFilename();
1751
1752 if (RealFileExists(FinalReleasegpg) || RealFileExists(FinalInRelease))
1753 {
1754 std::string downgrade_msg;
1755 strprintf(downgrade_msg, _("The repository '%s' is no longer signed."),
1756 MetaIndex->Target.Description.c_str());
1757 if(_config->FindB("Acquire::AllowDowngradeToInsecureRepositories"))
1758 {
1759 // meh, the user wants to take risks (we still mark the packages
1760 // from this repository as unauthenticated)
1761 _error->Warning("%s", downgrade_msg.c_str());
1762 _error->Warning(_("This is normally not allowed, but the option "
1763 "Acquire::AllowDowngradeToInsecureRepositories was "
1764 "given to override it."));
1765 Status = StatDone;
1766 } else {
1767 MessageInsecureRepository(true, downgrade_msg);
1768 if (TransactionManager->IMSHit == false)
1769 Rename(MetaIndex->DestFile, MetaIndex->DestFile + ".FAILED");
1770 Item::Failed("Message: " + downgrade_msg, Cnf);
1771 TransactionManager->AbortTransaction();
1772 return;
1773 }
1774 }
1775
1776 // ensures that a Release.gpg file in the lists/ is removed by the transaction
1777 TransactionManager->TransactionStageRemoval(this, DestFile);
1778
1779 // only allow going further if the user explicitly wants it
1780 if (AllowInsecureRepositories(_("The repository '%s' is not signed."), MetaIndex->Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
1781 {
1782 LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
1783
1784 // we parse the indexes here because at this point the user wanted
1785 // a repository that may potentially harm him
1786 bool const GoodLoad = TransactionManager->MetaIndexParser->Load(MetaIndex->DestFile, &ErrorText);
1787 if (MetaIndex->VerifyVendor(Message) == false)
1788 /* expired Release files are still a problem you need extra force for */;
1789 else
1790 TransactionManager->QueueIndexes(GoodLoad);
1791
1792 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, MetaIndex->GetFinalFilename());
1793 }
1794
1795 // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor
1796 if (Cnf->LocalOnly == true ||
1797 StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
1798 {
1799 // Ignore this
1800 Status = StatDone;
1801 }
1802 }
1803 /*}}}*/
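// The downgrade check above is driven by a plain boolean option; an
// illustrative way to set it for a single run (use with care):
//   apt-get update -o Acquire::AllowDowngradeToInsecureRepositories=true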
1804
1805
1806 // AcqBaseIndex - Constructor /*{{{*/
1807 pkgAcqBaseIndex::pkgAcqBaseIndex(pkgAcquire * const Owner,
1808 pkgAcqMetaClearSig * const TransactionManager,
1809 IndexTarget const &Target)
1810 : pkgAcqTransactionItem(Owner, TransactionManager, Target), d(NULL)
1811 {
1812 }
1813 /*}}}*/
1814 void pkgAcqBaseIndex::Failed(std::string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
1815 {
1816 pkgAcquire::Item::Failed(Message, Cnf);
1817 if (Status != StatAuthError)
1818 return;
1819
1820 ErrorText.append("Release file created at: ");
1821 auto const timespec = TransactionManager->MetaIndexParser->GetDate();
1822 if (timespec == 0)
1823 ErrorText.append("<unknown>");
1824 else
1825 ErrorText.append(TimeRFC1123(timespec));
1826 ErrorText.append("\n");
1827 }
1828 /*}}}*/
1829 pkgAcqBaseIndex::~pkgAcqBaseIndex() {}
1830
1831 // AcqDiffIndex::AcqDiffIndex - Constructor /*{{{*/
1832 // ---------------------------------------------------------------------
1833 /* Get the DiffIndex file first and see if there are patches available
1834 * If so, create a pkgAcqIndexDiffs fetcher that will get and apply the
1835 * patches. If anything goes wrong in that process, it will fall back to
1836 * the original packages file
1837 */
1838 pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire * const Owner,
1839 pkgAcqMetaClearSig * const TransactionManager,
1840 IndexTarget const &Target)
1841 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL), diffs(NULL)
1842 {
1843 // FIXME: Magic number as an upper bound on pdiffs we will reasonably acquire
1844 ExpectedAdditionalItems = 40;
1845
1846 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
1847
1848 Desc.Owner = this;
1849 Desc.Description = GetDiffIndexFileName(Target.Description);
1850 Desc.ShortDesc = Target.ShortDesc;
1851 Desc.URI = GetDiffIndexURI(Target);
1852
1853 DestFile = GetPartialFileNameFromURI(Desc.URI);
1854
1855 if(Debug)
1856 std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
1857
1858 QueueURI(Desc);
1859 }
1860 /*}}}*/
1861 // AcqDiffIndex::Custom600Headers - Insert custom request headers /*{{{*/
1862 // ---------------------------------------------------------------------
1863 /* Besides the Index-File marker the only header we use is the last-modified header. */
1864 string pkgAcqDiffIndex::Custom600Headers() const
1865 {
1866 if (TransactionManager->LastMetaIndexParser != NULL)
1867 return "\nIndex-File: true";
1868
1869 string const Final = GetFinalFilename();
1870
1871 if(Debug)
1872 std::clog << "Custom600Header-IMS: " << Final << std::endl;
1873
1874 struct stat Buf;
1875 if (stat(Final.c_str(),&Buf) != 0)
1876 return "\nIndex-File: true";
1877
1878 return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
1879 }
1880 /*}}}*/
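// Illustrative result of the method above when no previous Release file is
// loaded and a final diff index already exists on disk (the date is a
// placeholder, not real data):
//   Index-File: true
//   Last-Modified: Sat, 01 Jan 2000 00:00:00 GMT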
1881 void pkgAcqDiffIndex::QueueOnIMSHit() const /*{{{*/
1882 {
1883 // list cleanup needs to know that this file as well as the already
1884 // present index is ours, so we create an empty diff to save it for us
1885 new pkgAcqIndexDiffs(Owner, TransactionManager, Target);
1886 }
1887 /*}}}*/
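// ParseDiffIndex below consumes a Packages.diff/Index style file. A heavily
// trimmed, purely illustrative excerpt (hashes, sizes and patch names are
// placeholders):
//   SHA256-Current: <hash-of-current-Packages> 1234567
//   SHA256-History:
//    <hash-of-old-Packages> 1234000 2000-01-01-0000.00
//   SHA256-Patches:
//    <hash-of-ed-patch> 4242 2000-01-01-0000.00
//   SHA256-Download:
//    <hash-of-patch.gz> 1212 2000-01-01-0000.00.gz
//   X-Patch-Precedence: merged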
1888 bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile) /*{{{*/
1889 {
1890 ExpectedAdditionalItems = 0;
1891 // failing here is fine: our caller will take care of trying to
1892 // get the complete file if patching fails
1893 if(Debug)
1894 std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
1895 << std::endl;
1896
1897 FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
1898 pkgTagFile TF(&Fd);
1899 if (Fd.IsOpen() == false || Fd.Failed())
1900 return false;
1901
1902 pkgTagSection Tags;
1903 if(unlikely(TF.Step(Tags) == false))
1904 return false;
1905
1906 HashStringList ServerHashes;
1907 unsigned long long ServerSize = 0;
1908
1909 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
1910 {
1911 std::string tagname = *type;
1912 tagname.append("-Current");
1913 std::string const tmp = Tags.FindS(tagname.c_str());
1914 if (tmp.empty() == true)
1915 continue;
1916
1917 string hash;
1918 unsigned long long size;
1919 std::stringstream ss(tmp);
1920 ss >> hash >> size;
1921 if (unlikely(hash.empty() == true))
1922 continue;
1923 if (unlikely(ServerSize != 0 && ServerSize != size))
1924 continue;
1925 ServerHashes.push_back(HashString(*type, hash));
1926 ServerSize = size;
1927 }
1928
1929 if (ServerHashes.usable() == false)
1930 {
1931 if (Debug == true)
1932 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
1933 return false;
1934 }
1935
1936 std::string const CurrentPackagesFile = GetFinalFileNameFromURI(Target.URI);
1937 HashStringList const TargetFileHashes = GetExpectedHashesFor(Target.MetaKey);
1938 if (TargetFileHashes.usable() == false || ServerHashes != TargetFileHashes)
1939 {
1940 if (Debug == true)
1941 {
1942 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl;
1943 printHashSumComparison(CurrentPackagesFile, ServerHashes, TargetFileHashes);
1944 }
1945 return false;
1946 }
1947
1948 HashStringList LocalHashes;
1949 // try to avoid calculating the hash here as this is costly
1950 if (TransactionManager->LastMetaIndexParser != NULL)
1951 LocalHashes = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
1952 if (LocalHashes.usable() == false)
1953 {
1954 FileFd fd(CurrentPackagesFile, FileFd::ReadOnly, FileFd::Auto);
1955 Hashes LocalHashesCalc(ServerHashes);
1956 LocalHashesCalc.AddFD(fd);
1957 LocalHashes = LocalHashesCalc.GetHashStringList();
1958 }
1959
1960 if (ServerHashes == LocalHashes)
1961 {
1962 // we have the same hashes as the server so we are done here
1963 if(Debug)
1964 std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl;
1965 QueueOnIMSHit();
1966 return true;
1967 }
1968
1969 if(Debug)
1970 std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
1971 << CurrentPackagesFile << " " << LocalHashes.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;
1972
1973 // historically, older hashes have more info than newer ones, so start
1974 // collecting with older ones first to avoid implementing complicated
1975 // information merging techniques… a failure is after all always
1976 // recoverable with a complete file and hashes aren't changed that often.
1977 std::vector<char const *> types;
1978 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
1979 types.push_back(*type);
1980
1981 // parse all of (provided) history
1982 vector<DiffInfo> available_patches;
1983 bool firstAcceptedHashes = true;
1984 for (auto type = types.crbegin(); type != types.crend(); ++type)
1985 {
1986 if (LocalHashes.find(*type) == NULL)
1987 continue;
1988
1989 std::string tagname = *type;
1990 tagname.append("-History");
1991 std::string const tmp = Tags.FindS(tagname.c_str());
1992 if (tmp.empty() == true)
1993 continue;
1994
1995 string hash, filename;
1996 unsigned long long size;
1997 std::stringstream ss(tmp);
1998
1999 while (ss >> hash >> size >> filename)
2000 {
2001 if (unlikely(hash.empty() == true || filename.empty() == true))
2002 continue;
2003
2004 // see if we have a record for this file already
2005 std::vector<DiffInfo>::iterator cur = available_patches.begin();
2006 for (; cur != available_patches.end(); ++cur)
2007 {
2008 if (cur->file != filename)
2009 continue;
2010 cur->result_hashes.push_back(HashString(*type, hash));
2011 break;
2012 }
2013 if (cur != available_patches.end())
2014 continue;
2015 if (firstAcceptedHashes == true)
2016 {
2017 DiffInfo next;
2018 next.file = filename;
2019 next.result_hashes.push_back(HashString(*type, hash));
2020 next.result_hashes.FileSize(size);
2021 available_patches.push_back(next);
2022 }
2023 else
2024 {
2025 if (Debug == true)
2026 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
2027 << " wasn't in the list for the first parsed hash! (history)" << std::endl;
2028 break;
2029 }
2030 }
2031 firstAcceptedHashes = false;
2032 }
2033
2034 if (unlikely(available_patches.empty() == true))
2035 {
2036 if (Debug)
2037 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
2038 << "Couldn't find any patches for the patch series." << std::endl;
2039 return false;
2040 }
2041
2042 for (auto type = types.crbegin(); type != types.crend(); ++type)
2043 {
2044 if (LocalHashes.find(*type) == NULL)
2045 continue;
2046
2047 std::string tagname = *type;
2048 tagname.append("-Patches");
2049 std::string const tmp = Tags.FindS(tagname.c_str());
2050 if (tmp.empty() == true)
2051 continue;
2052
2053 string hash, filename;
2054 unsigned long long size;
2055 std::stringstream ss(tmp);
2056
2057 while (ss >> hash >> size >> filename)
2058 {
2059 if (unlikely(hash.empty() == true || filename.empty() == true))
2060 continue;
2061
2062 // see if we have a record for this file already
2063 std::vector<DiffInfo>::iterator cur = available_patches.begin();
2064 for (; cur != available_patches.end(); ++cur)
2065 {
2066 if (cur->file != filename)
2067 continue;
2068 if (cur->patch_hashes.empty())
2069 cur->patch_hashes.FileSize(size);
2070 cur->patch_hashes.push_back(HashString(*type, hash));
2071 break;
2072 }
2073 if (cur != available_patches.end())
2074 continue;
2075 if (Debug == true)
2076 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
2077 << " wasn't in the list for the first parsed hash! (patches)" << std::endl;
2078 break;
2079 }
2080 }
2081
2082 for (auto type = types.crbegin(); type != types.crend(); ++type)
2083 {
2084 std::string tagname = *type;
2085 tagname.append("-Download");
2086 std::string const tmp = Tags.FindS(tagname.c_str());
2087 if (tmp.empty() == true)
2088 continue;
2089
2090 string hash, filename;
2091 unsigned long long size;
2092 std::stringstream ss(tmp);
2093
2094 // FIXME: pdiff as a whole supports only .gz compressed patches
2095 while (ss >> hash >> size >> filename)
2096 {
2097 if (unlikely(hash.empty() == true || filename.empty() == true))
2098 continue;
2099 if (unlikely(APT::String::Endswith(filename, ".gz") == false))
2100 continue;
2101 filename.erase(filename.length() - 3);
2102
2103 // see if we have a record for this file already
2104 std::vector<DiffInfo>::iterator cur = available_patches.begin();
2105 for (; cur != available_patches.end(); ++cur)
2106 {
2107 if (cur->file != filename)
2108 continue;
2109 if (cur->download_hashes.empty())
2110 cur->download_hashes.FileSize(size);
2111 cur->download_hashes.push_back(HashString(*type, hash));
2112 break;
2113 }
2114 if (cur != available_patches.end())
2115 continue;
2116 if (Debug == true)
2117 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
2118 << " wasn't in the list for the first parsed hash! (download)" << std::endl;
2119 break;
2120 }
2121 }
2122
2123
2124 bool foundStart = false;
2125 for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
2126 cur != available_patches.end(); ++cur)
2127 {
2128 if (LocalHashes != cur->result_hashes)
2129 continue;
2130
2131 available_patches.erase(available_patches.begin(), cur);
2132 foundStart = true;
2133 break;
2134 }
2135
2136 if (foundStart == false || unlikely(available_patches.empty() == true))
2137 {
2138 if (Debug)
2139 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
2140 << "Couldn't find the start of the patch series." << std::endl;
2141 return false;
2142 }
2143
2144 for (auto const &patch: available_patches)
2145 if (patch.result_hashes.usable() == false ||
2146 patch.patch_hashes.usable() == false ||
2147 patch.download_hashes.usable() == false)
2148 {
2149 if (Debug)
2150 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": provides no usable hashes for " << patch.file
2151 << " so fallback to complete download" << std::endl;
2152 return false;
2153 }
2154
2155 // patching with too many files is rather slow compared to a fast download
2156 unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
2157 if (fileLimit != 0 && fileLimit < available_patches.size())
2158 {
2159 if (Debug)
2160 std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
2161 << ") so fallback to complete download" << std::endl;
2162 return false;
2163 }
2164
2165 // calculate the size of all patches we have to get
2166 unsigned short const sizeLimitPercent = _config->FindI("Acquire::PDiffs::SizeLimit", 100);
2167 if (sizeLimitPercent > 0)
2168 {
2169 unsigned long long downloadSize = std::accumulate(available_patches.begin(),
2170 available_patches.end(), 0llu, [](unsigned long long const T, DiffInfo const &I) {
2171 return T + I.download_hashes.FileSize();
2172 });
2173 if (downloadSize != 0)
2174 {
2175 unsigned long long downloadSizeIdx = 0;
2176 auto const types = VectorizeString(Target.Option(IndexTarget::COMPRESSIONTYPES), ' ');
2177 for (auto const &t : types)
2178 {
2179 std::string MetaKey = Target.MetaKey;
2180 if (t != "uncompressed")
2181 MetaKey += '.' + t;
2182 HashStringList const hsl = GetExpectedHashesFor(MetaKey);
2183 if (unlikely(hsl.usable() == false))
2184 continue;
2185 downloadSizeIdx = hsl.FileSize();
2186 break;
2187 }
2188 unsigned long long const sizeLimit = downloadSizeIdx * sizeLimitPercent;
2189 if ((sizeLimit/100) < downloadSize)
2190 {
2191 if (Debug)
2192 std::clog << "Need " << downloadSize << " compressed bytes (Limit is " << (sizeLimit/100) << ", "
2193 << "original is " << downloadSizeIdx << ") so fallback to complete download" << std::endl;
2194 return false;
2195 }
2196 }
2197 }
2198
2199 // we have something, queue the diffs
2200 string::size_type const last_space = Description.rfind(" ");
2201 if(last_space != string::npos)
2202 Description.erase(last_space, Description.size()-last_space);
2203
2204 /* decide if we should download patches one by one or in one go:
2205 The first is good if the server merges patches, but many don't, so client
2206 based merging can be attempted, in which case the second is better.
2207 "bad things" will happen if patches are merged on the server,
2208 but client side merging is attempted as well */
2209 bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
2210 if (pdiff_merge == true)
2211 {
2212 // reprepro adds this flag if it has merged patches on the server
2213 std::string const precedence = Tags.FindS("X-Patch-Precedence");
2214 pdiff_merge = (precedence != "merged");
2215 }
2216
2217 // clean the plate
2218 {
2219 std::string const Final = GetExistingFilename(CurrentPackagesFile);
2220 if (unlikely(Final.empty())) // because we wouldn't be called in such a case
2221 return false;
2222 std::string const PartialFile = GetPartialFileNameFromURI(Target.URI);
2223 if (FileExists(PartialFile) && RemoveFile("Bootstrap-linking", PartialFile) == false)
2224 {
2225 if (Debug)
2226 std::clog << "Bootstrap-linking for patching " << CurrentPackagesFile
2227 << " by removing stale " << PartialFile << " failed!" << std::endl;
2228 return false;
2229 }
2230 for (auto const &ext : APT::Configuration::getCompressorExtensions())
2231 {
2232 std::string const Partial = PartialFile + ext;
2233 if (FileExists(Partial) && RemoveFile("Bootstrap-linking", Partial) == false)
2234 {
2235 if (Debug)
2236 std::clog << "Bootstrap-linking for patching " << CurrentPackagesFile
2237 << " by removing stale " << Partial << " failed!" << std::endl;
2238 return false;
2239 }
2240 }
2241 std::string const Ext = Final.substr(CurrentPackagesFile.length());
2242 std::string const Partial = PartialFile + Ext;
2243 if (symlink(Final.c_str(), Partial.c_str()) != 0)
2244 {
2245 if (Debug)
2246 std::clog << "Bootstrap-linking for patching " << CurrentPackagesFile
2247 << " by linking " << Final << " to " << Partial << " failed!" << std::endl;
2248 return false;
2249 }
2250 }
2251
2252 if (pdiff_merge == false)
2253 new pkgAcqIndexDiffs(Owner, TransactionManager, Target, available_patches);
2254 else
2255 {
2256 diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
2257 for(size_t i = 0; i < available_patches.size(); ++i)
2258 (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, TransactionManager,
2259 Target,
2260 available_patches[i],
2261 diffs);
2262 }
2263
2264 Complete = false;
2265 Status = StatDone;
2266 Dequeue();
2267 return true;
2268 }
2269 /*}}}*/
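// The limits applied above can be tuned via configuration; an illustrative
// apt.conf fragment (the values are examples only, not defaults):
//   Acquire::PDiffs::FileLimit "20";   // fall back if more than 20 patches are needed
//   Acquire::PDiffs::SizeLimit "50";   // fall back if patches exceed 50% of the full index
//   Acquire::PDiffs::Merge "true";     // let the client merge patches via rred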
2270 void pkgAcqDiffIndex::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
2271 {
2272 pkgAcqBaseIndex::Failed(Message,Cnf);
2273 Status = StatDone;
2274 ExpectedAdditionalItems = 0;
2275
2276 if(Debug)
2277 std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl
2278 << "Falling back to normal index file acquire" << std::endl;
2279
2280 new pkgAcqIndex(Owner, TransactionManager, Target);
2281 }
2282 /*}}}*/
2283 void pkgAcqDiffIndex::Done(string const &Message,HashStringList const &Hashes, /*{{{*/
2284 pkgAcquire::MethodConfig const * const Cnf)
2285 {
2286 if(Debug)
2287 std::clog << "pkgAcqDiffIndex::Done(): " << Desc.URI << std::endl;
2288
2289 Item::Done(Message, Hashes, Cnf);
2290
2291 string const FinalFile = GetFinalFilename();
2292 if(StringToBool(LookupTag(Message,"IMS-Hit"),false))
2293 DestFile = FinalFile;
2294
2295 if(ParseDiffIndex(DestFile) == false)
2296 {
2297 Failed("Message: Couldn't parse pdiff index", Cnf);
2298 // queue for final move - this should happen even if we fail
2299 // while parsing (e.g. on sizelimit) and download the complete file.
2300 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
2301 return;
2302 }
2303
2304 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
2305
2306 Complete = true;
2307 Status = StatDone;
2308 Dequeue();
2309
2310 return;
2311 }
2312 /*}}}*/
2313 pkgAcqDiffIndex::~pkgAcqDiffIndex()
2314 {
2315 if (diffs != NULL)
2316 delete diffs;
2317 }
2318
2319 // AcqIndexDiffs::AcqIndexDiffs - Constructor /*{{{*/
2320 // ---------------------------------------------------------------------
2321 /* The package diff is added to the queue. One object is constructed
2322 * for each diff and the index.
2323 */
2324 pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire * const Owner,
2325 pkgAcqMetaClearSig * const TransactionManager,
2326 IndexTarget const &Target,
2327 vector<DiffInfo> const &diffs)
2328 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL),
2329 available_patches(diffs)
2330 {
2331 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
2332
2333 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
2334
2335 Desc.Owner = this;
2336 Description = Target.Description;
2337 Desc.ShortDesc = Target.ShortDesc;
2338
2339 if(available_patches.empty() == true)
2340 {
2341 // we are done (yeah!), check hashes against the final file
2342 DestFile = GetKeepCompressedFileName(GetFinalFileNameFromURI(Target.URI), Target);
2343 Finish(true);
2344 }
2345 else
2346 {
2347 State = StateFetchDiff;
2348 QueueNextDiff();
2349 }
2350 }
2351 /*}}}*/
2352 void pkgAcqIndexDiffs::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
2353 {
2354 pkgAcqBaseIndex::Failed(Message,Cnf);
2355 Status = StatDone;
2356
2357 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
2358 if(Debug)
2359 std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << " with " << Message << std::endl
2360 << "Falling back to normal index file acquire " << std::endl;
2361 RenameOnError(PDiffError);
2362 std::string const patchname = GetDiffsPatchFileName(DestFile);
2363 if (RealFileExists(patchname))
2364 Rename(patchname, patchname + ".FAILED");
2365 std::string const UnpatchedFile = GetExistingFilename(GetPartialFileNameFromURI(Target.URI));
2366 if (UnpatchedFile.empty() == false && FileExists(UnpatchedFile))
2367 Rename(UnpatchedFile, UnpatchedFile + ".FAILED");
2368 new pkgAcqIndex(Owner, TransactionManager, Target);
2369 Finish();
2370 }
2371 /*}}}*/
2372 // Finish - helper that cleans the item out of the fetcher queue /*{{{*/
2373 void pkgAcqIndexDiffs::Finish(bool allDone)
2374 {
2375 if(Debug)
2376 std::clog << "pkgAcqIndexDiffs::Finish(): "
2377 << allDone << " "
2378 << Desc.URI << std::endl;
2379
2380 // we restore the original name; this is required, otherwise
2381 // the file will be cleaned up
2382 if(allDone)
2383 {
2384 std::string const Final = GetKeepCompressedFileName(GetFinalFilename(), Target);
2385 TransactionManager->TransactionStageCopy(this, DestFile, Final);
2386
2387 // this is for the "real" finish
2388 Complete = true;
2389 Status = StatDone;
2390 Dequeue();
2391 if(Debug)
2392 std::clog << "\n\nallDone: " << DestFile << "\n" << std::endl;
2393 return;
2394 }
2395 else
2396 DestFile.clear();
2397
2398 if(Debug)
2399 std::clog << "Finishing: " << Desc.URI << std::endl;
2400 Complete = false;
2401 Status = StatDone;
2402 Dequeue();
2403 return;
2404 }
2405 /*}}}*/
2406 bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
2407 {
2408 // calculate the hashes of the just patched file
2409 std::string const PartialFile = GetExistingFilename(GetPartialFileNameFromURI(Target.URI));
2410 if(unlikely(PartialFile.empty()))
2411 {
2412 Failed("Message: The file " + GetPartialFileNameFromURI(Target.URI) + " isn't available", NULL);
2413 return false;
2414 }
2415
2416 FileFd fd(PartialFile, FileFd::ReadOnly, FileFd::Extension);
2417 Hashes LocalHashesCalc;
2418 LocalHashesCalc.AddFD(fd);
2419 HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
2420
2421 if(Debug)
2422 std::clog << "QueueNextDiff: " << PartialFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl;
2423
2424 HashStringList const TargetFileHashes = GetExpectedHashesFor(Target.MetaKey);
2425 if (unlikely(LocalHashes.usable() == false || TargetFileHashes.usable() == false))
2426 {
2427 Failed("Local/Expected hashes are not usable for " + PartialFile, NULL);
2428 return false;
2429 }
2430
2431 // final file reached before all patches are applied
2432 if(LocalHashes == TargetFileHashes)
2433 {
2434 Finish(true);
2435 return true;
2436 }
2437
2438 // remove all patches until the next matching patch is found
2439 // this requires the Index file to be ordered
2440 available_patches.erase(available_patches.begin(),
2441 std::find_if(available_patches.begin(), available_patches.end(), [&](DiffInfo const &I) {
2442 return I.result_hashes == LocalHashes;
2443 }));
2444
2445 // error checking and falling back if no patch was found
2446 if(available_patches.empty() == true)
2447 {
2448 Failed("No patches left to reach target for " + PartialFile, NULL);
2449 return false;
2450 }
2451
2452 // queue the right diff
2453 Desc.URI = Target.URI + ".diff/" + available_patches[0].file + ".gz";
2454 Desc.Description = Description + " " + available_patches[0].file + string(".pdiff");
2455 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI + ".diff/" + available_patches[0].file), Target);
2456
2457 if(Debug)
2458 std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;
2459
2460 QueueURI(Desc);
2461
2462 return true;
2463 }
2464 /*}}}*/
2465 void pkgAcqIndexDiffs::Done(string const &Message, HashStringList const &Hashes, /*{{{*/
2466 pkgAcquire::MethodConfig const * const Cnf)
2467 {
2468 if (Debug)
2469 std::clog << "pkgAcqIndexDiffs::Done(): " << Desc.URI << std::endl;
2470
2471 Item::Done(Message, Hashes, Cnf);
2472
2473 std::string const UncompressedUnpatchedFile = GetPartialFileNameFromURI(Target.URI);
2474 std::string const UnpatchedFile = GetExistingFilename(UncompressedUnpatchedFile);
2475 std::string const PatchFile = GetDiffsPatchFileName(UnpatchedFile);
2476 std::string const PatchedFile = GetKeepCompressedFileName(UncompressedUnpatchedFile, Target);
2477
2478 switch (State)
2479 {
2480 // success in downloading a diff, enter ApplyDiff state
2481 case StateFetchDiff:
2482 Rename(DestFile, PatchFile);
2483 DestFile = GetKeepCompressedFileName(UncompressedUnpatchedFile + "-patched", Target);
2484 if(Debug)
2485 std::clog << "Sending to rred method: " << UnpatchedFile << std::endl;
2486 State = StateApplyDiff;
2487 Local = true;
2488 Desc.URI = "rred:" + UnpatchedFile;
2489 QueueURI(Desc);
2490 SetActiveSubprocess("rred");
2491 return;
2492 // success in download/apply a diff, queue next (if needed)
2493 case StateApplyDiff:
2494 // remove the just applied patch and base file
2495 available_patches.erase(available_patches.begin());
2496 RemoveFile("pkgAcqIndexDiffs::Done", PatchFile);
2497 RemoveFile("pkgAcqIndexDiffs::Done", UnpatchedFile);
2498 if(Debug)
2499 std::clog << "Moving patched file in place: " << std::endl
2500 << DestFile << " -> " << PatchedFile << std::endl;
2501 Rename(DestFile, PatchedFile);
2502
2503 // see if there is more to download
2504 if(available_patches.empty() == false)
2505 {
2506 new pkgAcqIndexDiffs(Owner, TransactionManager, Target, available_patches);
2507 Finish();
2508 } else {
2509 DestFile = PatchedFile;
2510 Finish(true);
2511 }
2512 return;
2513 }
2514 }
2515 /*}}}*/
2516 std::string pkgAcqIndexDiffs::Custom600Headers() const /*{{{*/
2517 {
2518 if(State != StateApplyDiff)
2519 return pkgAcqBaseIndex::Custom600Headers();
2520 std::ostringstream patchhashes;
2521 HashStringList const ExpectedHashes = available_patches[0].patch_hashes;
2522 for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
2523 patchhashes << "\nPatch-0-" << hs->HashType() << "-Hash: " << hs->HashValue();
2524 patchhashes << pkgAcqBaseIndex::Custom600Headers();
2525 return patchhashes.str();
2526 }
2527 /*}}}*/
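// Illustrative header block handed to the rred method by the code above for
// the single queued patch (hash type and value are placeholders):
//   Patch-0-SHA256-Hash: <hex-digest-of-the-uncompressed-ed-patch>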
2528 pkgAcqIndexDiffs::~pkgAcqIndexDiffs() {}
2529
2530 // AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor /*{{{*/
2531 pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire * const Owner,
2532 pkgAcqMetaClearSig * const TransactionManager,
2533 IndexTarget const &Target,
2534 DiffInfo const &patch,
2535 std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches)
2536 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL),
2537 patch(patch), allPatches(allPatches), State(StateFetchDiff)
2538 {
2539 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
2540
2541 Desc.Owner = this;
2542 Description = Target.Description;
2543 Desc.ShortDesc = Target.ShortDesc;
2544 Desc.URI = Target.URI + ".diff/" + patch.file + ".gz";
2545 Desc.Description = Description + " " + patch.file + ".pdiff";
2546 DestFile = GetPartialFileNameFromURI(Desc.URI);
2547
2548 if(Debug)
2549 std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl;
2550
2551 QueueURI(Desc);
2552 }
2553 /*}}}*/
2554 void pkgAcqIndexMergeDiffs::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
2555 {
2556 if(Debug)
2557 std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl;
2558
2559 pkgAcqBaseIndex::Failed(Message,Cnf);
2560 Status = StatDone;
2561
2562 // check if we are the first to fail, otherwise we are done here
2563 State = StateDoneDiff;
2564 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2565 I != allPatches->end(); ++I)
2566 if ((*I)->State == StateErrorDiff)
2567 {
2568 State = StateErrorDiff;
2569 return;
2570 }
2571
2572 // first failure means we should fall back
2573 State = StateErrorDiff;
2574 if (Debug)
2575 std::clog << "Falling back to normal index file acquire" << std::endl;
2576 RenameOnError(PDiffError);
2577 if (RealFileExists(DestFile))
2578 Rename(DestFile, DestFile + ".FAILED");
2579 std::string const UnpatchedFile = GetExistingFilename(GetPartialFileNameFromURI(Target.URI));
2580 if (UnpatchedFile.empty() == false && FileExists(UnpatchedFile))
2581 Rename(UnpatchedFile, UnpatchedFile + ".FAILED");
2582 DestFile.clear();
2583 new pkgAcqIndex(Owner, TransactionManager, Target);
2584 }
2585 /*}}}*/
2586 void pkgAcqIndexMergeDiffs::Done(string const &Message, HashStringList const &Hashes, /*{{{*/
2587 pkgAcquire::MethodConfig const * const Cnf)
2588 {
2589 if(Debug)
2590 std::clog << "pkgAcqIndexMergeDiffs::Done(): " << Desc.URI << std::endl;
2591
2592 Item::Done(Message, Hashes, Cnf);
2593
2594 if (std::any_of(allPatches->begin(), allPatches->end(),
2595 [](pkgAcqIndexMergeDiffs const * const P) { return P->State == StateErrorDiff; }))
2596 {
2597 if(Debug)
2598 std::clog << "Another patch failed already, no point in processing this one." << std::endl;
2599 State = StateErrorDiff;
2600 return;
2601 }
2602
2603 std::string const UncompressedUnpatchedFile = GetPartialFileNameFromURI(Target.URI);
2604 std::string const UnpatchedFile = GetExistingFilename(UncompressedUnpatchedFile);
2605 if (UnpatchedFile.empty())
2606 {
2607 _error->Fatal("Unpatched file %s doesn't exist (anymore)!", UncompressedUnpatchedFile.c_str());
2608 State = StateErrorDiff;
2609 return;
2610 }
2611 std::string const PatchFile = GetMergeDiffsPatchFileName(UnpatchedFile, patch.file);
2612 std::string const PatchedFile = GetKeepCompressedFileName(UncompressedUnpatchedFile, Target);
2613
2614 switch (State)
2615 {
2616 case StateFetchDiff:
2617 Rename(DestFile, PatchFile);
2618
2619 // check if this is the last completed diff
2620 State = StateDoneDiff;
2621 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2622 I != allPatches->end(); ++I)
2623 if ((*I)->State != StateDoneDiff)
2624 {
2625 if(Debug)
2626 std::clog << "Not the last done diff in the batch: " << Desc.URI << std::endl;
2627 return;
2628 }
2629 // this is the last completed diff, so we are ready to apply now
2630 DestFile = GetKeepCompressedFileName(UncompressedUnpatchedFile + "-patched", Target);
2631 if(Debug)
2632 std::clog << "Sending to rred method: " << UnpatchedFile << std::endl;
2633 State = StateApplyDiff;
2634 Local = true;
2635 Desc.URI = "rred:" + UnpatchedFile;
2636 QueueURI(Desc);
2637 SetActiveSubprocess("rred");
2638 return;
2639 case StateApplyDiff:
2640 // success in downloading & applying all diffs, finalize and clean up
2641 if(Debug)
2642 std::clog << "Queue patched file in place: " << std::endl
2643 << DestFile << " -> " << PatchedFile << std::endl;
2644
2645 // queue for copy by the transaction manager
2646 TransactionManager->TransactionStageCopy(this, DestFile, GetKeepCompressedFileName(GetFinalFilename(), Target));
2647
2648 // ensure the ed patches are gone regardless of list-cleanup
2649 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2650 I != allPatches->end(); ++I)
2651 RemoveFile("pkgAcqIndexMergeDiffs::Done", GetMergeDiffsPatchFileName(UnpatchedFile, (*I)->patch.file));
2652 RemoveFile("pkgAcqIndexMergeDiffs::Done", UnpatchedFile);
2653
2654 // all set and done
2655 Complete = true;
2656 if(Debug)
2657 std::clog << "allDone: " << DestFile << "\n" << std::endl;
2658 return;
2659 case StateDoneDiff: _error->Fatal("Done called for %s which is in an invalid Done state", PatchFile.c_str()); break;
2660 case StateErrorDiff: _error->Fatal("Done called for %s which is in an invalid Error state", PatchFile.c_str()); break;
2661 }
2662 }
2663 /*}}}*/
2664 std::string pkgAcqIndexMergeDiffs::Custom600Headers() const /*{{{*/
2665 {
2666 if(State != StateApplyDiff)
2667 return pkgAcqBaseIndex::Custom600Headers();
2668 std::ostringstream patchhashes;
2669 unsigned int seen_patches = 0;
2670 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2671 I != allPatches->end(); ++I)
2672 {
2673 HashStringList const ExpectedHashes = (*I)->patch.patch_hashes;
2674 for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
2675 patchhashes << "\nPatch-" << seen_patches << "-" << hs->HashType() << "-Hash: " << hs->HashValue();
2676 ++seen_patches;
2677 }
2678 patchhashes << pkgAcqBaseIndex::Custom600Headers();
2679 return patchhashes.str();
2680 }
2681 /*}}}*/
2682 pkgAcqIndexMergeDiffs::~pkgAcqIndexMergeDiffs() {}
2683
2684 // AcqIndex::AcqIndex - Constructor /*{{{*/
2685 pkgAcqIndex::pkgAcqIndex(pkgAcquire * const Owner,
2686 pkgAcqMetaClearSig * const TransactionManager,
2687 IndexTarget const &Target)
2688 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL), Stage(STAGE_DOWNLOAD),
2689 CompressionExtensions(Target.Option(IndexTarget::COMPRESSIONTYPES))
2690 {
2691 Init(Target.URI, Target.Description, Target.ShortDesc);
2692
2693 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
2694 std::clog << "New pkgIndex with TransactionManager "
2695 << TransactionManager << std::endl;
2696 }
2697 /*}}}*/
2698 // AcqIndex::Init - deferred Constructor /*{{{*/
2699 static void NextCompressionExtension(std::string &CurrentCompressionExtension, std::string &CompressionExtensions, bool const preview)
2700 {
2701 size_t const nextExt = CompressionExtensions.find(' ');
2702 if (nextExt == std::string::npos)
2703 {
2704 CurrentCompressionExtension = CompressionExtensions;
2705 if (preview == false)
2706 CompressionExtensions.clear();
2707 }
2708 else
2709 {
2710 CurrentCompressionExtension = CompressionExtensions.substr(0, nextExt);
2711 if (preview == false)
2712 CompressionExtensions = CompressionExtensions.substr(nextExt+1);
2713 }
2714 }
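// NextCompressionExtension simply peels the first word off a space separated
// list; an illustrative walk-through (the actual list contents depend on the
// configured compression types for the target):
//   "xz bz2 gz uncompressed" -> current "xz",           remaining "bz2 gz uncompressed"
//   "uncompressed"           -> current "uncompressed", remaining ""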
2715 void pkgAcqIndex::Init(string const &URI, string const &URIDesc,
2716 string const &ShortDesc)
2717 {
2718 Stage = STAGE_DOWNLOAD;
2719
2720 DestFile = GetPartialFileNameFromURI(URI);
2721 NextCompressionExtension(CurrentCompressionExtension, CompressionExtensions, false);
2722
2723 // store the file size of the download to ensure the fetcher gives
2724 // accurate progress reporting
2725 FileSize = GetExpectedHashes().FileSize();
2726
2727 if (CurrentCompressionExtension == "uncompressed")
2728 {
2729 Desc.URI = URI;
2730 }
2731 else if (CurrentCompressionExtension == "by-hash")
2732 {
2733 NextCompressionExtension(CurrentCompressionExtension, CompressionExtensions, true);
2734 if(unlikely(CurrentCompressionExtension.empty()))
2735 return;
2736 if (CurrentCompressionExtension != "uncompressed")
2737 {
2738 Desc.URI = URI + '.' + CurrentCompressionExtension;
2739 DestFile = DestFile + '.' + CurrentCompressionExtension;
2740 }
2741
2742 HashStringList const Hashes = GetExpectedHashes();
2743 HashString const * const TargetHash = Hashes.find(NULL);
2744 if (unlikely(TargetHash == nullptr))
2745 return;
2746 std::string const ByHash = "/by-hash/" + TargetHash->HashType() + "/" + TargetHash->HashValue();
2747 size_t const trailing_slash = Desc.URI.find_last_of("/");
2748 if (unlikely(trailing_slash == std::string::npos))
2749 return;
2750 Desc.URI = Desc.URI.replace(
2751 trailing_slash,
2752 Desc.URI.substr(trailing_slash+1).size()+1,
2753 ByHash);
2754 }
2755 else if (unlikely(CurrentCompressionExtension.empty()))
2756 return;
2757 else
2758 {
2759 Desc.URI = URI + '.' + CurrentCompressionExtension;
2760 DestFile = DestFile + '.' + CurrentCompressionExtension;
2761 }
2762
2763
2764 Desc.Description = URIDesc;
2765 Desc.Owner = this;
2766 Desc.ShortDesc = ShortDesc;
2767
2768 QueueURI(Desc);
2769 }
2770 /*}}}*/
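// Illustrative by-hash rewrite performed above (host, suite and digest are
// placeholders, not taken from any real repository):
//   http://example.org/debian/dists/unstable/main/binary-amd64/Packages.xz
//   -> http://example.org/debian/dists/unstable/main/binary-amd64/by-hash/SHA256/<hex-digest>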
2771 // AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/
2772 // ---------------------------------------------------------------------
2773 /* Besides Index-File and Fail-Ignore the only header we use is the last-modified header. */
2774 string pkgAcqIndex::Custom600Headers() const
2775 {
2776
2777 string msg = "\nIndex-File: true";
2778
2779 if (TransactionManager->LastMetaIndexParser == NULL)
2780 {
2781 std::string const Final = GetFinalFilename();
2782
2783 struct stat Buf;
2784 if (stat(Final.c_str(),&Buf) == 0)
2785 msg += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
2786 }
2787
2788 if(Target.IsOptional)
2789 msg += "\nFail-Ignore: true";
2790
2791 return msg;
2792 }
2793 /*}}}*/
2794 // AcqIndex::Failed - getting the indexfile failed /*{{{*/
2795 void pkgAcqIndex::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
2796 {
2797 pkgAcqBaseIndex::Failed(Message,Cnf);
2798
2799 // authorisation failures will not be fixed by other compression types
2800 if (Status != StatAuthError)
2801 {
2802 if (CompressionExtensions.empty() == false)
2803 {
2804 Init(Target.URI, Desc.Description, Desc.ShortDesc);
2805 Status = StatIdle;
2806 return;
2807 }
2808 }
2809
2810 if(Target.IsOptional && GetExpectedHashes().empty() && Stage == STAGE_DOWNLOAD)
2811 Status = StatDone;
2812 else
2813 TransactionManager->AbortTransaction();
2814 }
2815 /*}}}*/
2816 // AcqIndex::Done - Finished a fetch /*{{{*/
2817 // ---------------------------------------------------------------------
2818 /* This goes through a number of states. On the initial fetch the
2819 method may return an alternate filename which points
2820 to the uncompressed version of the file. If so, the file
2821 is copied into the partial directory. In all other cases the file
2822 is decompressed from the compressed uri. */
2823 void pkgAcqIndex::Done(string const &Message,
2824 HashStringList const &Hashes,
2825 pkgAcquire::MethodConfig const * const Cfg)
2826 {
2827 Item::Done(Message,Hashes,Cfg);
2828
2829 switch(Stage)
2830 {
2831 case STAGE_DOWNLOAD:
2832 StageDownloadDone(Message);
2833 break;
2834 case STAGE_DECOMPRESS_AND_VERIFY:
2835 StageDecompressDone();
2836 break;
2837 }
2838 }
2839 /*}}}*/
2840 // AcqIndex::StageDownloadDone - Queue for decompress and verify /*{{{*/
2841 void pkgAcqIndex::StageDownloadDone(string const &Message)
2842 {
2843 Local = true;
2844 Complete = true;
2845
2846 std::string const AltFilename = LookupTag(Message,"Alt-Filename");
2847 std::string Filename = LookupTag(Message,"Filename");
2848
2849 // we need to verify the file against the current Release file again
2850 // on an if-modified-since hit to avoid a stale-data attack against us
2851 if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
2852 {
2853 // copy FinalFile into partial/ so that we check the hash again
2854 string const FinalFile = GetExistingFilename(GetFinalFileNameFromURI(Target.URI));
2855 if (symlink(FinalFile.c_str(), DestFile.c_str()) != 0)
2856 _error->WarningE("pkgAcqIndex::StageDownloadDone", "Symlinking final file %s back to %s failed", FinalFile.c_str(), DestFile.c_str());
2857 else
2858 {
2859 EraseFileName = DestFile;
2860 Filename = DestFile;
2861 }
2862 Stage = STAGE_DECOMPRESS_AND_VERIFY;
2863 Desc.URI = "store:" + Filename;
2864 QueueURI(Desc);
2865 SetActiveSubprocess(::URI(Desc.URI).Access);
2866 return;
2867 }
2868 // methods like file:// give us an alternative (uncompressed) file
2869 else if (Target.KeepCompressed == false && AltFilename.empty() == false)
2870 {
2871 Filename = AltFilename;
2872 EraseFileName.clear();
2873 }
2874 // Methods like "file:" will give us a (compressed) FileName that is
2875 // not the "DestFile" we set; in this case we uncompress from the local file
2876 else if (Filename != DestFile && RealFileExists(DestFile) == false)
2877 {
2878 // symlinking ensures that the filename can be used for compression detection,
2879 // which is e.g. needed for by-hash where the URI carries no compression extension
2880 if (symlink(Filename.c_str(),DestFile.c_str()) != 0)
2881 _error->WarningE("pkgAcqIndex::StageDownloadDone", "Symlinking file %s to %s failed", Filename.c_str(), DestFile.c_str());
2882 else
2883 {
2884 EraseFileName = DestFile;
2885 Filename = DestFile;
2886 }
2887 }
2888
2889 Stage = STAGE_DECOMPRESS_AND_VERIFY;
2890 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
2891 if (Filename != DestFile && flExtension(Filename) == flExtension(DestFile))
2892 Desc.URI = "copy:" + Filename;
2893 else
2894 Desc.URI = "store:" + Filename;
2895 if (DestFile == Filename)
2896 {
2897 if (CurrentCompressionExtension == "uncompressed")
2898 return StageDecompressDone();
2899 DestFile = "/dev/null";
2900 }
2901
2902 if (EraseFileName.empty() && Filename != AltFilename)
2903 EraseFileName = Filename;
2904
2905 // queue uri for the next stage
2906 QueueURI(Desc);
2907 SetActiveSubprocess(::URI(Desc.URI).Access);
2908 }
2909 /*}}}*/
2910 // AcqIndex::StageDecompressDone - Final verification /*{{{*/
2911 void pkgAcqIndex::StageDecompressDone()
2912 {
2913 if (DestFile == "/dev/null")
2914 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
2915
2916 // Done, queue for rename on transaction finished
2917 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
2918 }
2919 /*}}}*/
2920 pkgAcqIndex::~pkgAcqIndex() {}
2921
2922
2923 // AcqArchive::AcqArchive - Constructor /*{{{*/
2924 // ---------------------------------------------------------------------
2925 /* This just sets up the initial fetch environment and queues the first
2926 possibility */
2927 pkgAcqArchive::pkgAcqArchive(pkgAcquire * const Owner,pkgSourceList * const Sources,
2928 pkgRecords * const Recs,pkgCache::VerIterator const &Version,
2929 string &StoreFilename) :
2930 Item(Owner), d(NULL), LocalSource(false), Version(Version), Sources(Sources), Recs(Recs),
2931 StoreFilename(StoreFilename), Vf(Version.FileList()),
2932 Trusted(false)
2933 {
2934 Retries = _config->FindI("Acquire::Retries",0);
2935
2936 if (Version.Arch() == 0)
2937 {
2938 _error->Error(_("I wasn't able to locate a file for the %s package. "
2939 "This might mean you need to manually fix this package. "
2940 "(due to missing arch)"),
2941 Version.ParentPkg().FullName().c_str());
2942 return;
2943 }
2944
2945 /* We need to find a filename to determine the extension. We make the
2946 assumption here that all the available sources for this version share
2947 the same extension. */
2948 // Skip entries flagged NotSource, they do not have file fields.
2949 for (; Vf.end() == false; ++Vf)
2950 {
2951 if (Vf.File().Flagged(pkgCache::Flag::NotSource))
2952 continue;
2953 break;
2954 }
2955
2956 // Does not really matter here; we are going to fail out below
2957 if (Vf.end() != true)
2958 {
2959 // If this fails to get a file name we will bomb out below.
2960 pkgRecords::Parser &Parse = Recs->Lookup(Vf);
2961 if (_error->PendingError() == true)
2962 return;
2963
2964 // Generate the final file name as: package_version_arch.foo
2965 StoreFilename = QuoteString(Version.ParentPkg().Name(),"_:") + '_' +
2966 QuoteString(Version.VerStr(),"_:") + '_' +
2967 QuoteString(Version.Arch(),"_:.") +
2968 "." + flExtension(Parse.FileName());
2969 }
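// Illustrative result of the name generation above (package, version and
// architecture are placeholders; the extension is typically .deb):
//   apt_1.2.3_amd64.deb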
2970
2971 // check if we have at least one trusted source for the package. If so, switch
2972 // to "TrustedOnly" mode - but only if not in AllowUnauthenticated mode
2973 bool const allowUnauth = _config->FindB("APT::Get::AllowUnauthenticated", false);
2974 bool const debugAuth = _config->FindB("Debug::pkgAcquire::Auth", false);
2975 bool seenUntrusted = false;
2976 for (pkgCache::VerFileIterator i = Version.FileList(); i.end() == false; ++i)
2977 {
2978 pkgIndexFile *Index;
2979 if (Sources->FindIndex(i.File(),Index) == false)
2980 continue;
2981
2982 if (debugAuth == true)
2983 std::cerr << "Checking index: " << Index->Describe()
2984 << "(Trusted=" << Index->IsTrusted() << ")" << std::endl;
2985
2986 if (Index->IsTrusted() == true)
2987 {
2988 Trusted = true;
2989 if (allowUnauth == false)
2990 break;
2991 }
2992 else
2993 seenUntrusted = true;
2994 }
2995
2996 // "allow-unauthenticated" restores apts old fetching behaviour
2997 // that means that e.g. unauthenticated file:// uris are higher
2998 // priority than authenticated http:// uris
2999 if (allowUnauth == true && seenUntrusted == true)
3000 Trusted = false;
3001
3002 // Select a source
3003 if (QueueNext() == false && _error->PendingError() == false)
3004 _error->Error(_("Can't find a source to download version '%s' of '%s'"),
3005 Version.VerStr(), Version.ParentPkg().FullName(false).c_str());
3006 }
3007 /*}}}*/
3008 // AcqArchive::QueueNext - Queue the next file source /*{{{*/
3009 // ---------------------------------------------------------------------
3010 /* This queues the next available file version for download. It checks if
3011 the archive is already available in the cache and stashes the expected
3012 hashes for checking later. */
3013 bool pkgAcqArchive::QueueNext()
3014 {
3015 for (; Vf.end() == false; ++Vf)
3016 {
3017 pkgCache::PkgFileIterator const PkgF = Vf.File();
3018 // Ignore entries flagged NotSource
3019 if (PkgF.Flagged(pkgCache::Flag::NotSource))
3020 continue;
3021
3022 // Try to cross match against the source list
3023 pkgIndexFile *Index;
3024 if (Sources->FindIndex(PkgF, Index) == false)
3025 continue;
3026 LocalSource = PkgF.Flagged(pkgCache::Flag::LocalSource);
3027
3028 // only try to get a trusted package from another source if that source
3029 // is also trusted
3030 if(Trusted && !Index->IsTrusted())
3031 continue;
3032
3033 // Grab the text package record
3034 pkgRecords::Parser &Parse = Recs->Lookup(Vf);
3035 if (_error->PendingError() == true)
3036 return false;
3037
3038 string PkgFile = Parse.FileName();
3039 ExpectedHashes = Parse.Hashes();
3040
3041 if (PkgFile.empty() == true)
3042 return _error->Error(_("The package index files are corrupted. No Filename: "
3043 "field for package %s."),
3044 Version.ParentPkg().Name());
3045
3046 Desc.URI = Index->ArchiveURI(PkgFile);
3047 Desc.Description = Index->ArchiveInfo(Version);
3048 Desc.Owner = this;
3049 Desc.ShortDesc = Version.ParentPkg().FullName(true);
3050
3051 // See if we already have the file. (Legacy filenames)
3052 FileSize = Version->Size;
3053 string FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(PkgFile);
3054 struct stat Buf;
3055 if (stat(FinalFile.c_str(),&Buf) == 0)
3056 {
3057 // Make sure the size matches
3058 if ((unsigned long long)Buf.st_size == Version->Size)
3059 {
3060 Complete = true;
3061 Local = true;
3062 Status = StatDone;
3063 StoreFilename = DestFile = FinalFile;
3064 return true;
3065 }
3066
3067 /* Hmm, we have a file and its size does not match, this means it is
3068 an old style mismatched arch */
3069 RemoveFile("pkgAcqArchive::QueueNext", FinalFile);
3070 }
3071
3072 // Check it again using the new style output filenames
3073 FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename);
3074 if (stat(FinalFile.c_str(),&Buf) == 0)
3075 {
3076 // Make sure the size matches
3077 if ((unsigned long long)Buf.st_size == Version->Size)
3078 {
3079 Complete = true;
3080 Local = true;
3081 Status = StatDone;
3082 StoreFilename = DestFile = FinalFile;
3083 return true;
3084 }
3085
3086 /* Hmm, we have a file and its size does not match, this shouldn't
3087 happen.. */
3088 RemoveFile("pkgAcqArchive::QueueNext", FinalFile);
3089 }
3090
3091 DestFile = _config->FindDir("Dir::Cache::Archives") + "partial/" + flNotDir(StoreFilename);
3092
3093 // Check the destination file
3094 if (stat(DestFile.c_str(),&Buf) == 0)
3095 {
3096 // Hmm, the partial file is too big, erase it
3097 if ((unsigned long long)Buf.st_size > Version->Size)
3098 RemoveFile("pkgAcqArchive::QueueNext", DestFile);
3099 else
3100 PartialSize = Buf.st_size;
3101 }
3102
3103 // Disables download of archives - useful if no real installation follows,
3104 // e.g. if we are just interested in proposed installation order
3105 if (_config->FindB("Debug::pkgAcqArchive::NoQueue", false) == true)
3106 {
3107 Complete = true;
3108 Local = true;
3109 Status = StatDone;
3110 StoreFilename = DestFile = FinalFile;
3111 return true;
3112 }
3113
3114 // Create the item
3115 Local = false;
3116 QueueURI(Desc);
3117
3118 ++Vf;
3119 return true;
3120 }
3121 return false;
3122 }
3123 /*}}}*/
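// Archive downloads can be suppressed entirely for dry runs via the debug
// option checked above; an illustrative invocation meant for debugging only
// (the package name is a placeholder):
//   apt-get install somepackage -o Debug::pkgAcqArchive::NoQueue=true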
3124 // AcqArchive::Done - Finished fetching /*{{{*/
3125 // ---------------------------------------------------------------------
3126 /* */
3127 void pkgAcqArchive::Done(string const &Message, HashStringList const &Hashes,
3128 pkgAcquire::MethodConfig const * const Cfg)
3129 {
3130 Item::Done(Message, Hashes, Cfg);
3131
3132 // Grab the output filename
3133 std::string const FileName = LookupTag(Message,"Filename");
3134 if (DestFile != FileName && RealFileExists(DestFile) == false)
3135 {
3136 StoreFilename = DestFile = FileName;
3137 Local = true;
3138 Complete = true;
3139 return;
3140 }
3141
3142 // Done, move it into position
3143 string const FinalFile = GetFinalFilename();
3144 Rename(DestFile,FinalFile);
3145 StoreFilename = DestFile = FinalFile;
3146 Complete = true;
3147 }
3148 /*}}}*/
3149 // AcqArchive::Failed - Failure handler /*{{{*/
3150 // ---------------------------------------------------------------------
3151 /* Here we try other sources */
3152 void pkgAcqArchive::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
3153 {
3154 Item::Failed(Message,Cnf);
3155
3156 /* We don't really want to retry on failed media swaps, this prevents
3157 that. An interesting observation is that permanent failures are not
3158 recorded. */
3159 if (Cnf->Removable == true &&
3160 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
3161 {
3162 // Vf = Version.FileList();
3163 while (Vf.end() == false) ++Vf;
3164 StoreFilename = string();
3165 return;
3166 }
3167
3168 Status = StatIdle;
3169 if (QueueNext() == false)
3170 {
3171 // This is the retry counter
3172 if (Retries != 0 &&
3173 Cnf->LocalOnly == false &&
3174 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
3175 {
3176 Retries--;
3177 Vf = Version.FileList();
3178 if (QueueNext() == true)
3179 return;
3180 }
3181
3182 StoreFilename = string();
3183 Status = StatError;
3184 }
3185 }
3186 /*}}}*/
3187 APT_PURE bool pkgAcqArchive::IsTrusted() const /*{{{*/
3188 {
3189 return Trusted;
3190 }
3191 /*}}}*/
3192 void pkgAcqArchive::Finished() /*{{{*/
3193 {
3194 if (Status == pkgAcquire::Item::StatDone &&
3195 Complete == true)
3196 return;
3197 StoreFilename = string();
3198 }
3199 /*}}}*/
3200 std::string pkgAcqArchive::DescURI() const /*{{{*/
3201 {
3202 return Desc.URI;
3203 }
3204 /*}}}*/
3205 std::string pkgAcqArchive::ShortDesc() const /*{{{*/
3206 {
3207 return Desc.ShortDesc;
3208 }
3209 /*}}}*/
3210 pkgAcqArchive::~pkgAcqArchive() {}
3211
3212 // AcqChangelog::pkgAcqChangelog - Constructors /*{{{*/
3213 class pkgAcqChangelog::Private
3214 {
3215 public:
3216 std::string FinalFile;
3217 };
3218 pkgAcqChangelog::pkgAcqChangelog(pkgAcquire * const Owner, pkgCache::VerIterator const &Ver,
3219 std::string const &DestDir, std::string const &DestFilename) :
3220 pkgAcquire::Item(Owner), d(new pkgAcqChangelog::Private()), SrcName(Ver.SourcePkgName()), SrcVersion(Ver.SourceVerStr())
3221 {
3222 Desc.URI = URI(Ver);
3223 Init(DestDir, DestFilename);
3224 }
3225 // some parameters are char* here as they likely come from char* interfaces, which can also return NULL
3226 pkgAcqChangelog::pkgAcqChangelog(pkgAcquire * const Owner, pkgCache::RlsFileIterator const &RlsFile,
3227 char const * const Component, char const * const SrcName, char const * const SrcVersion,
3228 const string &DestDir, const string &DestFilename) :
3229 pkgAcquire::Item(Owner), d(new pkgAcqChangelog::Private()), SrcName(SrcName), SrcVersion(SrcVersion)
3230 {
3231 Desc.URI = URI(RlsFile, Component, SrcName, SrcVersion);
3232 Init(DestDir, DestFilename);
3233 }
3234 pkgAcqChangelog::pkgAcqChangelog(pkgAcquire * const Owner,
3235 std::string const &URI, char const * const SrcName, char const * const SrcVersion,
3236 const string &DestDir, const string &DestFilename) :
3237 pkgAcquire::Item(Owner), d(new pkgAcqChangelog::Private()), SrcName(SrcName), SrcVersion(SrcVersion)
3238 {
3239 Desc.URI = URI;
3240 Init(DestDir, DestFilename);
3241 }
3242 void pkgAcqChangelog::Init(std::string const &DestDir, std::string const &DestFilename)
3243 {
3244 if (Desc.URI.empty())
3245 {
3246 Status = StatError;
3247 // TRANSLATOR: %s=%s is sourcename=sourceversion, e.g. apt=1.1
3248 strprintf(ErrorText, _("Changelog unavailable for %s=%s"), SrcName.c_str(), SrcVersion.c_str());
3249 // Let the error message print something sensible rather than "Failed to fetch /"
3250 if (DestFilename.empty())
3251 DestFile = SrcName + ".changelog";
3252 else
3253 DestFile = DestFilename;
3254 Desc.URI = "changelog:/" + DestFile;
3255 return;
3256 }
3257
3258 std::string DestFileName;
3259 if (DestFilename.empty())
3260 DestFileName = flCombine(DestFile, SrcName + ".changelog");
3261 else
3262 DestFileName = flCombine(DestFile, DestFilename);
3263
3264 std::string const SandboxUser = _config->Find("APT::Sandbox::User");
3265 std::string const systemTemp = GetTempDir(SandboxUser);
3266 char tmpname[1000];
3267 snprintf(tmpname, sizeof(tmpname), "%s/apt-changelog-XXXXXX", systemTemp.c_str());
3268 if (NULL == mkdtemp(tmpname))
3269 {
3270 _error->Errno("mkdtemp", "mkdtemp failed in changelog acquire of %s %s", SrcName.c_str(), SrcVersion.c_str());
3271 Status = StatError;
3272 return;
3273 }
3274 TemporaryDirectory = tmpname;
3275
3276 ChangeOwnerAndPermissionOfFile("Item::QueueURI", TemporaryDirectory.c_str(),
3277 SandboxUser.c_str(), "root", 0700);
3278
3279 DestFile = flCombine(TemporaryDirectory, DestFileName);
3280 if (DestDir.empty() == false)
3281 {
3282 d->FinalFile = flCombine(DestDir, DestFileName);
3283 if (RealFileExists(d->FinalFile))
3284 {
3285 FileFd file1, file2;
3286 if (file1.Open(DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Exclusive) &&
3287 file2.Open(d->FinalFile, FileFd::ReadOnly) && CopyFile(file2, file1))
3288 {
3289 struct timeval times[2];
3290 times[0].tv_sec = times[1].tv_sec = file2.ModificationTime();
3291 times[0].tv_usec = times[1].tv_usec = 0;
3292 utimes(DestFile.c_str(), times);
3293 }
3294 }
3295 }
3296
3297 Desc.ShortDesc = "Changelog";
3298 strprintf(Desc.Description, "%s %s %s Changelog", URI::SiteOnly(Desc.URI).c_str(), SrcName.c_str(), SrcVersion.c_str());
3299 Desc.Owner = this;
3300 QueueURI(Desc);
3301 }
3302 /*}}}*/
3303 std::string pkgAcqChangelog::URI(pkgCache::VerIterator const &Ver) /*{{{*/
3304 {
3305 std::string const confOnline = "Acquire::Changelogs::AlwaysOnline";
3306 bool AlwaysOnline = _config->FindB(confOnline, false);
3307 if (AlwaysOnline == false)
3308 for (pkgCache::VerFileIterator VF = Ver.FileList(); VF.end() == false; ++VF)
3309 {
3310 pkgCache::PkgFileIterator const PF = VF.File();
3311 if (PF.Flagged(pkgCache::Flag::NotSource) || PF->Release == 0)
3312 continue;
3313 pkgCache::RlsFileIterator const RF = PF.ReleaseFile();
3314 if (RF->Origin != 0 && _config->FindB(confOnline + "::Origin::" + RF.Origin(), false))
3315 {
3316 AlwaysOnline = true;
3317 break;
3318 }
3319 }
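// (Illustrative configuration matching the lookup above, origin name assumed:
//    Acquire::Changelogs::AlwaysOnline::Origin::Ubuntu "true";
//  this forces the online changelog even when the installed package could provide one locally.)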
3320 if (AlwaysOnline == false)
3321 {
3322 pkgCache::PkgIterator const Pkg = Ver.ParentPkg();
3323 if (Pkg->CurrentVer != 0 && Pkg.CurrentVer() == Ver)
3324 {
3325 std::string const basename = std::string("/usr/share/doc/") + Pkg.Name() + "/changelog";
3326 std::string const debianname = basename + ".Debian";
3327 if (FileExists(debianname))
3328 return "copy://" + debianname;
3329 else if (FileExists(debianname + ".gz"))
3330 return "gzip://" + debianname + ".gz";
3331 else if (FileExists(basename))
3332 return "copy://" + basename;
3333 else if (FileExists(basename + ".gz"))
3334 return "gzip://" + basename + ".gz";
3335 }
3336 }
3337
3338 char const * const SrcName = Ver.SourcePkgName();
3339 char const * const SrcVersion = Ver.SourceVerStr();
3340 // find the first source for this version which promises a changelog
3341 for (pkgCache::VerFileIterator VF = Ver.FileList(); VF.end() == false; ++VF)
3342 {
3343 pkgCache::PkgFileIterator const PF = VF.File();
3344 if (PF.Flagged(pkgCache::Flag::NotSource) || PF->Release == 0)
3345 continue;
3346 pkgCache::RlsFileIterator const RF = PF.ReleaseFile();
3347 std::string const uri = URI(RF, PF.Component(), SrcName, SrcVersion);
3348 if (uri.empty())
3349 continue;
3350 return uri;
3351 }
3352 return "";
3353 }
3354 std::string pkgAcqChangelog::URITemplate(pkgCache::RlsFileIterator const &Rls)
3355 {
3356 if (Rls.end() == true || (Rls->Label == 0 && Rls->Origin == 0))
3357 return "";
3358 std::string const serverConfig = "Acquire::Changelogs::URI";
3359 std::string server;
3360 #define APT_EMPTY_SERVER \
3361 if (server.empty() == false) \
3362 { \
3363 if (server != "no") \
3364 return server; \
3365 return ""; \
3366 }
3367 #define APT_CHECK_SERVER(X, Y) \
3368 if (Rls->X != 0) \
3369 { \
3370 std::string const specialServerConfig = serverConfig + "::" + Y + #X + "::" + Rls.X(); \
3371 server = _config->Find(specialServerConfig); \
3372 APT_EMPTY_SERVER \
3373 }
3374 // this way e.g. Debian-Security can fallback to Debian
3375 APT_CHECK_SERVER(Label, "Override::")
3376 APT_CHECK_SERVER(Origin, "Override::")
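// (The macro above expands to config lookups of the form
//    Acquire::Changelogs::URI::Override::Label::<label> and
//    Acquire::Changelogs::URI::Override::Origin::<origin>;
//  e.g. an Override entry for a security origin could point at the main archive's
//  changelog server. Concrete names here are illustrative only.)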
3377
3378 if (RealFileExists(Rls.FileName()))
3379 {
3380 _error->PushToStack();
3381 FileFd rf;
3382 /* This can be costly. A caller wanting to get millions of URIs might
3383 want to do this on its own once and use Override settings.
3384 We don't do this here as Origin/Label are not as unique as they
3385 should be, so this could produce request-order-dependent anomalies */
3386 if (OpenMaybeClearSignedFile(Rls.FileName(), rf) == true)
3387 {
3388 pkgTagFile TagFile(&rf, rf.Size());
3389 pkgTagSection Section;
3390 if (TagFile.Step(Section) == true)
3391 server = Section.FindS("Changelogs");
3392 }
3393 _error->RevertToStack();
3394 APT_EMPTY_SERVER
3395 }
3396
3397 APT_CHECK_SERVER(Label, "")
3398 APT_CHECK_SERVER(Origin, "")
3399 #undef APT_CHECK_SERVER
3400 #undef APT_EMPTY_SERVER
3401 return "";
3402 }
3403 std::string pkgAcqChangelog::URI(pkgCache::RlsFileIterator const &Rls,
3404 char const * const Component, char const * const SrcName,
3405 char const * const SrcVersion)
3406 {
3407 return URI(URITemplate(Rls), Component, SrcName, SrcVersion);
3408 }
3409 std::string pkgAcqChangelog::URI(std::string const &Template,
3410 char const * const Component, char const * const SrcName,
3411 char const * const SrcVersion)
3412 {
3413 if (Template.find("@CHANGEPATH@") == std::string::npos)
3414 return "";
3415
3416 // the path is: COMPONENT/SRC/SRCNAME/SRCNAME_SRCVER, e.g. main/a/apt/apt_1.1 or contrib/liba/libapt/libapt_2.0
3417 std::string Src = SrcName;
3418 std::string path = APT::String::Startswith(SrcName, "lib") ? Src.substr(0, 4) : Src.substr(0,1);
3419 path.append("/").append(Src).append("/");
3420 path.append(Src).append("_").append(StripEpoch(SrcVersion));
3421 // we omit component for releases without one (= flat-style repositories)
3422 if (Component != NULL && strlen(Component) != 0)
3423 path = std::string(Component) + "/" + path;
3424
3425 return SubstVar(Template, "@CHANGEPATH@", path);
3426 }
3427 /*}}}*/
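// A minimal worked example for the template expansion above (the server URL is an
// assumption, not a configured default): with Template
//   "https://changelogs.example.org/@CHANGEPATH@_changelog",
// Component "main", SrcName "apt" and SrcVersion "1.1" the result is
//   "https://changelogs.example.org/main/a/apt/apt_1.1_changelog";
// a "lib" source such as libapt would use the four-letter prefix, e.g. "main/liba/libapt/...".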
3428 // AcqChangelog::Failed - Failure handler /*{{{*/
3429 void pkgAcqChangelog::Failed(string const &Message, pkgAcquire::MethodConfig const * const Cnf)
3430 {
3431 Item::Failed(Message,Cnf);
3432
3433 std::string errText;
3434 // TRANSLATOR: %s=%s is sourcename=sourceversion, e.g. apt=1.1
3435 strprintf(errText, _("Changelog unavailable for %s=%s"), SrcName.c_str(), SrcVersion.c_str());
3436
3437 // Error is probably something techy like 404 Not Found
3438 if (ErrorText.empty())
3439 ErrorText = errText;
3440 else
3441 ErrorText = errText + " (" + ErrorText + ")";
3442 }
3443 /*}}}*/
3444 // AcqChangelog::Done - Item downloaded OK /*{{{*/
3445 void pkgAcqChangelog::Done(string const &Message,HashStringList const &CalcHashes,
3446 pkgAcquire::MethodConfig const * const Cnf)
3447 {
3448 Item::Done(Message,CalcHashes,Cnf);
3449 if (d->FinalFile.empty() == false)
3450 {
3451 if (RemoveFile("pkgAcqChangelog::Done", d->FinalFile) == false ||
3452 Rename(DestFile, d->FinalFile) == false)
3453 Status = StatError;
3454 }
3455
3456 Complete = true;
3457 }
3458 /*}}}*/
3459 pkgAcqChangelog::~pkgAcqChangelog() /*{{{*/
3460 {
3461 if (TemporaryDirectory.empty() == false)
3462 {
3463 RemoveFile("~pkgAcqChangelog", DestFile);
3464 rmdir(TemporaryDirectory.c_str());
3465 }
3466 delete d;
3467 }
3468 /*}}}*/
3469
3470 // AcqFile::pkgAcqFile - Constructor /*{{{*/
3471 pkgAcqFile::pkgAcqFile(pkgAcquire * const Owner,string const &URI, HashStringList const &Hashes,
3472 unsigned long long const Size,string const &Dsc,string const &ShortDesc,
3473 const string &DestDir, const string &DestFilename,
3474 bool const IsIndexFile) :
3475 Item(Owner), d(NULL), IsIndexFile(IsIndexFile), ExpectedHashes(Hashes)
3476 {
3477 Retries = _config->FindI("Acquire::Retries",0);
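// (Illustrative: a setting such as Acquire::Retries "3" would allow up to three
//  additional attempts after transient failures; see pkgAcqFile::Failed() below.)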
3478
3479 if(!DestFilename.empty())
3480 DestFile = DestFilename;
3481 else if(!DestDir.empty())
3482 DestFile = DestDir + "/" + flNotDir(URI);
3483 else
3484 DestFile = flNotDir(URI);
3485
3486 // Create the item
3487 Desc.URI = URI;
3488 Desc.Description = Dsc;
3489 Desc.Owner = this;
3490
3491 // Set the short description to the archive component
3492 Desc.ShortDesc = ShortDesc;
3493
3494 // Get the transfer sizes
3495 FileSize = Size;
3496 struct stat Buf;
3497 if (stat(DestFile.c_str(),&Buf) == 0)
3498 {
3499 // Hmm, the partial file is too big; erase it
3500 if ((Size > 0) && (unsigned long long)Buf.st_size > Size)
3501 RemoveFile("pkgAcqFile", DestFile);
3502 else
3503 PartialSize = Buf.st_size;
3504 }
3505
3506 QueueURI(Desc);
3507 }
3508 /*}}}*/
3509 // AcqFile::Done - Item downloaded OK /*{{{*/
3510 void pkgAcqFile::Done(string const &Message,HashStringList const &CalcHashes,
3511 pkgAcquire::MethodConfig const * const Cnf)
3512 {
3513 Item::Done(Message,CalcHashes,Cnf);
3514
3515 std::string const FileName = LookupTag(Message,"Filename");
3516 Complete = true;
3517
3518 // The file's timestamp matches
3519 if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
3520 return;
3521
3522 // We have to copy it into place
3523 if (RealFileExists(DestFile.c_str()) == false)
3524 {
3525 Local = true;
3526 if (_config->FindB("Acquire::Source-Symlinks",true) == false ||
3527 Cnf->Removable == true)
3528 {
3529 Desc.URI = "copy:" + FileName;
3530 QueueURI(Desc);
3531 return;
3532 }
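// (Otherwise a symlink is created below; setting Acquire::Source-Symlinks "false",
//  or fetching from removable media, takes the copy path above instead.)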
3533
3534 // Erase the file if it is a symlink so we can overwrite it
3535 struct stat St;
3536 if (lstat(DestFile.c_str(),&St) == 0)
3537 {
3538 if (S_ISLNK(St.st_mode) != 0)
3539 RemoveFile("pkgAcqFile::Done", DestFile);
3540 }
3541
3542 // Symlink the file
3543 if (symlink(FileName.c_str(),DestFile.c_str()) != 0)
3544 {
3545 _error->PushToStack();
3546 _error->Errno("pkgAcqFile::Done", "Symlinking file %s failed", DestFile.c_str());
3547 std::stringstream msg;
3548 _error->DumpErrors(msg, GlobalError::DEBUG, false);
3549 _error->RevertToStack();
3550 ErrorText = msg.str();
3551 Status = StatError;
3552 Complete = false;
3553 }
3554 }
3555 }
3556 /*}}}*/
3557 // AcqFile::Failed - Failure handler /*{{{*/
3558 // ---------------------------------------------------------------------
3559 /* Here we try other sources */
3560 void pkgAcqFile::Failed(string const &Message, pkgAcquire::MethodConfig const * const Cnf)
3561 {
3562 Item::Failed(Message,Cnf);
3563
3564 // This is the retry counter
3565 if (Retries != 0 &&
3566 Cnf->LocalOnly == false &&
3567 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
3568 {
3569 --Retries;
3570 QueueURI(Desc);
3571 Status = StatIdle;
3572 return;
3573 }
3574
3575 }
3576 /*}}}*/
3577 string pkgAcqFile::Custom600Headers() const /*{{{*/
3578 {
3579 if (IsIndexFile)
3580 return "\nIndex-File: true";
3581 return "";
3582 }
3583 /*}}}*/
3584 pkgAcqFile::~pkgAcqFile() {}