support getting only-uncompressed files via by-hash
[apt.git] / apt-pkg / acquire-item.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: acquire-item.cc,v 1.46.2.9 2004/01/16 18:51:11 mdz Exp $
4 /* ######################################################################
5
6 Acquire Item - Item to acquire
7
8 Each item can download to exactly one file at a time. This means you
9    cannot create an item that fetches two URIs to two files at the same
10 time. The pkgAcqIndex class creates a second class upon instantiation
11 to fetch the other index files because of this.
12
13 ##################################################################### */
14 /*}}}*/
15 // Include Files /*{{{*/
16 #include <config.h>
17
18 #include <apt-pkg/acquire-item.h>
19 #include <apt-pkg/configuration.h>
20 #include <apt-pkg/aptconfiguration.h>
21 #include <apt-pkg/sourcelist.h>
22 #include <apt-pkg/error.h>
23 #include <apt-pkg/strutl.h>
24 #include <apt-pkg/fileutl.h>
25 #include <apt-pkg/tagfile.h>
26 #include <apt-pkg/metaindex.h>
27 #include <apt-pkg/acquire.h>
28 #include <apt-pkg/hashes.h>
29 #include <apt-pkg/indexfile.h>
30 #include <apt-pkg/pkgcache.h>
31 #include <apt-pkg/cacheiterators.h>
32 #include <apt-pkg/pkgrecords.h>
33 #include <apt-pkg/gpgv.h>
34
35 #include <algorithm>
36 #include <stddef.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <iostream>
40 #include <vector>
41 #include <sys/stat.h>
42 #include <unistd.h>
43 #include <errno.h>
44 #include <string>
45 #include <stdio.h>
46 #include <ctime>
47 #include <sstream>
48 #include <numeric>
49
50 #include <apti18n.h>
51 /*}}}*/
52
53 using namespace std;
54
55 static void printHashSumComparison(std::string const &URI, HashStringList const &Expected, HashStringList const &Actual) /*{{{*/
56 {
57 if (_config->FindB("Debug::Acquire::HashSumMismatch", false) == false)
58 return;
59 std::cerr << std::endl << URI << ":" << std::endl << " Expected Hash: " << std::endl;
60 for (HashStringList::const_iterator hs = Expected.begin(); hs != Expected.end(); ++hs)
61 std::cerr << "\t- " << hs->toStr() << std::endl;
62 std::cerr << " Actual Hash: " << std::endl;
63 for (HashStringList::const_iterator hs = Actual.begin(); hs != Actual.end(); ++hs)
64 std::cerr << "\t- " << hs->toStr() << std::endl;
65 }
66 /*}}}*/
67 static std::string GetPartialFileName(std::string const &file) /*{{{*/
68 {
69 std::string DestFile = _config->FindDir("Dir::State::lists") + "partial/";
70 DestFile += file;
71 return DestFile;
72 }
73 /*}}}*/
74 static std::string GetPartialFileNameFromURI(std::string const &uri) /*{{{*/
75 {
76 return GetPartialFileName(URItoFileName(uri));
77 }
78 /*}}}*/
79 static std::string GetFinalFileNameFromURI(std::string const &uri) /*{{{*/
80 {
81 return _config->FindDir("Dir::State::lists") + URItoFileName(uri);
82 }
83 /*}}}*/
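// Illustrative sketch (not compiled): how the helpers above map a URI onto the
// lists directory, assuming the default Dir::State::lists of /var/lib/apt/lists/
// and that URItoFileName flattens the URI roughly by dropping the access method
// and replacing '/' with '_'.
#if 0
   std::string const uri = "http://deb.debian.org/debian/dists/unstable/InRelease";
   GetFinalFileNameFromURI(uri);
   //  -> /var/lib/apt/lists/deb.debian.org_debian_dists_unstable_InRelease
   GetPartialFileNameFromURI(uri);
   //  -> /var/lib/apt/lists/partial/deb.debian.org_debian_dists_unstable_InRelease
#endif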
84 static std::string GetKeepCompressedFileName(std::string file, IndexTarget const &Target)/*{{{*/
85 {
86 if (Target.KeepCompressed == false)
87 return file;
88
89 std::string const KeepCompressedAs = Target.Option(IndexTarget::KEEPCOMPRESSEDAS);
90 if (KeepCompressedAs.empty() == false)
91 {
92 std::string const ext = KeepCompressedAs.substr(0, KeepCompressedAs.find(' '));
93 if (ext != "uncompressed")
94 file.append(".").append(ext);
95 }
96 return file;
97 }
98 /*}}}*/
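// Illustrative sketch (not compiled): GetKeepCompressedFileName only acts if the
// target is flagged KeepCompressed; the first entry of the space-separated
// KEEPCOMPRESSEDAS option decides the extension. The option values are made up.
#if 0
   // KeepCompressed == true, KEEPCOMPRESSEDAS == "xz gz uncompressed"
   GetKeepCompressedFileName("…_Contents-amd64", Target); // -> "…_Contents-amd64.xz"
   // KeepCompressed == true, KEEPCOMPRESSEDAS == "uncompressed xz"
   GetKeepCompressedFileName("…_Contents-amd64", Target); // -> unchanged
#endif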
99 static std::string GetMergeDiffsPatchFileName(std::string const &Final, std::string const &Patch)/*{{{*/
100 {
101 // rred expects the patch as $FinalFile.ed.$patchname.gz
102 return Final + ".ed." + Patch + ".gz";
103 }
104 /*}}}*/
105 static std::string GetDiffsPatchFileName(std::string const &Final) /*{{{*/
106 {
107 // rred expects the patch as $FinalFile.ed
108 return Final + ".ed";
109 }
110 /*}}}*/
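// Illustrative sketch (not compiled): the filenames rred is handed in the two
// patching modes, for a made-up final file and patch name.
#if 0
   std::string const Final = "/var/lib/apt/lists/…_Packages";
   GetMergeDiffsPatchFileName(Final, "2016-01-01-1200.00");
   //  -> "….ed.2016-01-01-1200.00.gz"  (one file per patch, used by pkgAcqIndexMergeDiffs)
   GetDiffsPatchFileName(Final);
   //  -> "….ed"                        (the patch pkgAcqIndexDiffs applies one at a time)
#endif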
111 static std::string GetExistingFilename(std::string const &File) /*{{{*/
112 {
113 if (RealFileExists(File))
114 return File;
115 for (auto const &type : APT::Configuration::getCompressorExtensions())
116 {
117 std::string const Final = File + type;
118 if (RealFileExists(Final))
119 return Final;
120 }
121 return "";
122 }
123 /*}}}*/
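// Illustrative sketch (not compiled): GetExistingFilename probes the uncompressed
// name first and then every configured compressor extension, returning the first
// file that exists (or "" if none does). The extension order shown is an assumption.
#if 0
   // with only …_Packages.xz present on disk:
   GetExistingFilename("/var/lib/apt/lists/…_Packages");
   //  checks …_Packages, then …_Packages.xz, …_Packages.bz2, …_Packages.gz, …
   //  -> "/var/lib/apt/lists/…_Packages.xz"
#endif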
124 static std::string GetDiffIndexFileName(std::string const &Name) /*{{{*/
125 {
126 return Name + ".diff/Index";
127 }
128 /*}}}*/
129 static std::string GetDiffIndexURI(IndexTarget const &Target) /*{{{*/
130 {
131 return Target.URI + ".diff/Index";
132 }
133 /*}}}*/
134
135 static void ReportMirrorFailureToCentral(pkgAcquire::Item const &I, std::string const &FailCode, std::string const &Details)/*{{{*/
136 {
137 // we only act if a mirror was used at all
138 if(I.UsedMirror.empty())
139 return;
140 #if 0
141 std::cerr << "\nReportMirrorFailure: "
142 << UsedMirror
143 << " Uri: " << DescURI()
144 << " FailCode: "
145 << FailCode << std::endl;
146 #endif
147 string const report = _config->Find("Methods::Mirror::ProblemReporting",
148 "/usr/lib/apt/apt-report-mirror-failure");
149 if(!FileExists(report))
150 return;
151
152 std::vector<char const*> const Args = {
153 report.c_str(),
154 I.UsedMirror.c_str(),
155 I.DescURI().c_str(),
156 FailCode.c_str(),
157 Details.c_str(),
158 NULL
159 };
160
161 pid_t pid = ExecFork();
162 if(pid < 0)
163 {
164 _error->Error("ReportMirrorFailure Fork failed");
165 return;
166 }
167 else if(pid == 0)
168 {
169 execvp(Args[0], (char**)Args.data());
170 std::cerr << "Could not exec " << Args[0] << std::endl;
171 _exit(100);
172 }
173 if(!ExecWait(pid, "report-mirror-failure"))
174 _error->Warning("Couldn't report problem to '%s'", report.c_str());
175 }
176 /*}}}*/
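// Illustrative sketch (not compiled): with a mirror recorded for the item and the
// reporting script present, the fork/exec above amounts to a call like
//    /usr/lib/apt/apt-report-mirror-failure <UsedMirror> <DescURI> <FailCode> <Details>
// (script path taken from Methods::Mirror::ProblemReporting, arguments per item).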
177
178 static APT_NONNULL(2) bool MessageInsecureRepository(bool const isError, char const * const msg, std::string const &repo)/*{{{*/
179 {
180 std::string m;
181 strprintf(m, msg, repo.c_str());
182 if (isError)
183 {
184 _error->Error("%s", m.c_str());
185 _error->Notice("%s", _("Updating from such a repository can't be done securely, and is therefore disabled by default."));
186 }
187 else
188 {
189 _error->Warning("%s", m.c_str());
190 _error->Notice("%s", _("Data from such a repository can't be authenticated and is therefore potentially dangerous to use."));
191 }
192 _error->Notice("%s", _("See apt-secure(8) manpage for repository creation and user configuration details."));
193 return false;
194 }
195 /*}}}*/
196 // AllowInsecureRepositories /*{{{*/
197 enum class InsecureType { UNSIGNED, WEAK, NORELEASE };
198 static bool TargetIsAllowedToBe(IndexTarget const &Target, InsecureType const type)
199 {
200 if (_config->FindB("Acquire::AllowInsecureRepositories"))
201 return true;
202
203 if (Target.OptionBool(IndexTarget::ALLOW_INSECURE))
204 return true;
205
206 switch (type)
207 {
208 case InsecureType::UNSIGNED: break;
209 case InsecureType::NORELEASE: break;
210 case InsecureType::WEAK:
211 if (_config->FindB("Acquire::AllowWeakRepositories"))
212 return true;
213 if (Target.OptionBool(IndexTarget::ALLOW_WEAK))
214 return true;
215 break;
216 }
217 return false;
218 }
219 static bool APT_NONNULL(3, 4, 5) AllowInsecureRepositories(InsecureType const msg, std::string const &repo,
220 metaIndex const * const MetaIndexParser, pkgAcqMetaClearSig * const TransactionManager, pkgAcquire::Item * const I)
221 {
222 // we skip weak downgrades as it's unlikely that a repository really gets weaker –
223 // it's more realistic that apt got pickier in a newer version
224 if (msg != InsecureType::WEAK)
225 {
226 std::string const FinalInRelease = TransactionManager->GetFinalFilename();
227 std::string const FinalReleasegpg = FinalInRelease.substr(0, FinalInRelease.length() - strlen("InRelease")) + "Release.gpg";
228 if (RealFileExists(FinalReleasegpg) || RealFileExists(FinalInRelease))
229 {
230 char const * msgstr = nullptr;
231 switch (msg)
232 {
233 case InsecureType::UNSIGNED: msgstr = _("The repository '%s' is no longer signed."); break;
234 case InsecureType::NORELEASE: msgstr = _("The repository '%s' does no longer have a Release file."); break;
235 case InsecureType::WEAK: /* unreachable */ break;
236 }
237 if (_config->FindB("Acquire::AllowDowngradeToInsecureRepositories") ||
238 TransactionManager->Target.OptionBool(IndexTarget::ALLOW_DOWNGRADE_TO_INSECURE))
239 {
240 // meh, the user wants to take risks (we still mark the packages
241 // from this repository as unauthenticated)
242 _error->Warning(msgstr, repo.c_str());
243 _error->Warning(_("This is normally not allowed, but the option "
244 "Acquire::AllowDowngradeToInsecureRepositories was "
245 "given to override it."));
246 } else {
247 MessageInsecureRepository(true, msgstr, repo);
248 TransactionManager->AbortTransaction();
249 I->Status = pkgAcquire::Item::StatError;
250 return false;
251 }
252 }
253 }
254
255 if(MetaIndexParser->GetTrusted() == metaIndex::TRI_YES)
256 return true;
257
258 char const * msgstr = nullptr;
259 switch (msg)
260 {
261 case InsecureType::UNSIGNED: msgstr = _("The repository '%s' is not signed."); break;
262 case InsecureType::NORELEASE: msgstr = _("The repository '%s' does not have a Release file."); break;
263 case InsecureType::WEAK: msgstr = _("The repository '%s' provides only weak security information."); break;
264 }
265
266 if (TargetIsAllowedToBe(TransactionManager->Target, msg) == true)
267 {
268 MessageInsecureRepository(false, msgstr, repo);
269 return true;
270 }
271
272 MessageInsecureRepository(true, msgstr, repo);
273 TransactionManager->AbortTransaction();
274 I->Status = pkgAcquire::Item::StatError;
275 return false;
276 }
277 /*}}}*/
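// Illustrative sketch (not compiled): the switches consulted above, as they could
// appear in apt.conf and per repository in a sources.list line. The sources.list
// option names are assumptions derived from the IndexTarget flags used here.
#if 0
   // apt.conf:
   //   Acquire::AllowInsecureRepositories "true";
   //   Acquire::AllowWeakRepositories "true";
   //   Acquire::AllowDowngradeToInsecureRepositories "true";
   // sources.list:
   //   deb [allow-insecure=yes allow-weak=yes allow-downgrade-to-insecure=yes] http://… suite main
#endif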
278 static HashStringList GetExpectedHashesFromFor(metaIndex * const Parser, std::string const &MetaKey)/*{{{*/
279 {
280 if (Parser == NULL)
281 return HashStringList();
282 metaIndex::checkSum * const R = Parser->Lookup(MetaKey);
283 if (R == NULL)
284 return HashStringList();
285 return R->Hashes;
286 }
287 /*}}}*/
288
289 // all ::HashesRequired and ::GetExpectedHashes implementations /*{{{*/
290 /* ::GetExpectedHashes is abstract and has to be implemented by all subclasses.
291 It is best to implement it as broadly as possible, while ::HashesRequired defaults
292 to true and should be as restrictive as possible for false cases. Note that if
293 a hash is returned by ::GetExpectedHashes it must match. Only if it doesn't
294    ::HashesRequired is called to evaluate if it's okay to have no hashes. */
295 APT_CONST bool pkgAcqTransactionItem::HashesRequired() const
296 {
297 /* signed repositories obviously have a parser and good hashes.
298 unsigned repositories, too, as even if we can't trust them for security,
299 we can at least trust them for integrity of the download itself.
300 Only repositories without a Release file can (obviously) not have
301 hashes – and they are very uncommon and strongly discouraged */
302 if (TransactionManager->MetaIndexParser->GetLoadedSuccessfully() != metaIndex::TRI_YES)
303 return false;
304 if (TargetIsAllowedToBe(Target, InsecureType::WEAK))
305 {
306 /* If we allow weak hashes, we check that we have some (weak) and then
307 declare hashes not needed. That will tip us in the right direction
308 as if hashes exist, they will be used, even if not required */
309 auto const hsl = GetExpectedHashes();
310 if (hsl.usable())
311 return true;
312 if (hsl.empty() == false)
313 return false;
314 }
315 return true;
316 }
317 HashStringList pkgAcqTransactionItem::GetExpectedHashes() const
318 {
319 return GetExpectedHashesFor(GetMetaKey());
320 }
321
322 APT_CONST bool pkgAcqMetaBase::HashesRequired() const
323 {
324 // Release and co have no hashes 'by design'.
325 return false;
326 }
327 HashStringList pkgAcqMetaBase::GetExpectedHashes() const
328 {
329 return HashStringList();
330 }
331
332 APT_CONST bool pkgAcqIndexDiffs::HashesRequired() const
333 {
334 /* We can't check hashes of rred result as we don't know what the
335 hash of the file will be. We just know the hash of the patch(es),
336 the hash of the file they will apply on and the hash of the resulting
337 file. */
338 if (State == StateFetchDiff)
339 return true;
340 return false;
341 }
342 HashStringList pkgAcqIndexDiffs::GetExpectedHashes() const
343 {
344 if (State == StateFetchDiff)
345 return available_patches[0].download_hashes;
346 return HashStringList();
347 }
348
349 APT_CONST bool pkgAcqIndexMergeDiffs::HashesRequired() const
350 {
351 /* @see #pkgAcqIndexDiffs::HashesRequired, with the difference that
352 we can check the rred result after all patches are applied as
353 we know the expected result rather than potentially apply more patches */
354 if (State == StateFetchDiff)
355 return true;
356 return State == StateApplyDiff;
357 }
358 HashStringList pkgAcqIndexMergeDiffs::GetExpectedHashes() const
359 {
360 if (State == StateFetchDiff)
361 return patch.download_hashes;
362 else if (State == StateApplyDiff)
363 return GetExpectedHashesFor(Target.MetaKey);
364 return HashStringList();
365 }
366
367 APT_CONST bool pkgAcqArchive::HashesRequired() const
368 {
369 return LocalSource == false;
370 }
371 HashStringList pkgAcqArchive::GetExpectedHashes() const
372 {
373 // figured out while parsing the records
374 return ExpectedHashes;
375 }
376
377 APT_CONST bool pkgAcqFile::HashesRequired() const
378 {
379 // supplied as parameter at creation time, so the caller decides
380 return ExpectedHashes.usable();
381 }
382 HashStringList pkgAcqFile::GetExpectedHashes() const
383 {
384 return ExpectedHashes;
385 }
386 /*}}}*/
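// Illustrative sketch (not compiled) of the contract described at the top of this
// section, roughly as the acquire system applies it once a download has finished:
#if 0
   HashStringList const hsl = Item->GetExpectedHashes();
   if (hsl.usable())
      ; // the downloaded file must match these hashes
   else if (Item->HashesRequired())
      ; // error: hashes were required, but no usable ones are available
   else
      ; // accept without hash verification (e.g. the Release files themselves)
#endif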
387 // Acquire::Item::QueueURI and specialisations from child classes /*{{{*/
388 bool pkgAcquire::Item::QueueURI(pkgAcquire::ItemDesc &Item)
389 {
390 Owner->Enqueue(Item);
391 return true;
392 }
393 /* The idea here is that an item isn't queued if it exists on disk and the
394    transaction manager was a hit, as this means that the files whose checksums
395    it contains can't be updated either (or they are and we are asking
396 for a hashsum mismatch to happen which helps nobody) */
397 bool pkgAcqTransactionItem::QueueURI(pkgAcquire::ItemDesc &Item)
398 {
399 if (TransactionManager->State != TransactionStarted)
400 {
401 if (_config->FindB("Debug::Acquire::Transaction", false))
402 std::clog << "Skip " << Target.URI << " as transaction was already dealt with!" << std::endl;
403 return false;
404 }
405 std::string const FinalFile = GetFinalFilename();
406 if (TransactionManager->IMSHit == true && FileExists(FinalFile) == true)
407 {
408 PartialFile = DestFile = FinalFile;
409 Status = StatDone;
410 return false;
411 }
412 // If we got the InRelease file via a mirror, pick all indexes directly from this mirror, too
413 if (TransactionManager->BaseURI.empty() == false && UsedMirror.empty() &&
414 URI::SiteOnly(Item.URI) != URI::SiteOnly(TransactionManager->BaseURI))
415 {
416 // this ensures we rewrite only once and only the first step
417 auto const OldBaseURI = Target.Option(IndexTarget::BASE_URI);
418 if (OldBaseURI.empty() == false && APT::String::Startswith(Item.URI, OldBaseURI))
419 {
420 auto const ExtraPath = Item.URI.substr(OldBaseURI.length());
421 Item.URI = flCombine(TransactionManager->BaseURI, ExtraPath);
422 UsedMirror = TransactionManager->UsedMirror;
423 if (Item.Description.find(" ") != string::npos)
424 Item.Description.replace(0, Item.Description.find(" "), UsedMirror);
425 }
426 }
427 return pkgAcquire::Item::QueueURI(Item);
428 }
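// Illustrative sketch (not compiled): the same-mirror rewrite above, with made-up
// URIs: the InRelease file came from a mirror, so the index follows it there.
#if 0
   // Target BASE_URI:             http://deb.debian.org/debian/dists/unstable/
   // Item.URI before:             http://deb.debian.org/debian/dists/unstable/main/binary-amd64/Packages.xz
   // TransactionManager->BaseURI: http://mirror.example.org/debian/dists/unstable/
   // Item.URI after:              http://mirror.example.org/debian/dists/unstable/main/binary-amd64/Packages.xz
#endif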
429 /* The transaction manager InRelease itself (or its older sisters-in-law
430 Release & Release.gpg) is always queued as this allows us to rerun gpgv
431 on it to verify that we aren't stalled with old files */
432 bool pkgAcqMetaBase::QueueURI(pkgAcquire::ItemDesc &Item)
433 {
434 return pkgAcquire::Item::QueueURI(Item);
435 }
436 /* the Diff/Index also needs to queue the up-to-date complete index file
437 to ensure that the list cleaner isn't eating it */
438 bool pkgAcqDiffIndex::QueueURI(pkgAcquire::ItemDesc &Item)
439 {
440 if (pkgAcqTransactionItem::QueueURI(Item) == true)
441 return true;
442 QueueOnIMSHit();
443 return false;
444 }
445 /*}}}*/
446 // Acquire::Item::GetFinalFilename and specialisations for child classes /*{{{*/
447 std::string pkgAcquire::Item::GetFinalFilename() const
448 {
449 // Beware: Desc.URI is modified by redirections
450 return GetFinalFileNameFromURI(Desc.URI);
451 }
452 std::string pkgAcqDiffIndex::GetFinalFilename() const
453 {
454 return GetFinalFileNameFromURI(GetDiffIndexURI(Target));
455 }
456 std::string pkgAcqIndex::GetFinalFilename() const
457 {
458 std::string const FinalFile = GetFinalFileNameFromURI(Target.URI);
459 return GetKeepCompressedFileName(FinalFile, Target);
460 }
461 std::string pkgAcqMetaSig::GetFinalFilename() const
462 {
463 return GetFinalFileNameFromURI(Target.URI);
464 }
465 std::string pkgAcqBaseIndex::GetFinalFilename() const
466 {
467 return GetFinalFileNameFromURI(Target.URI);
468 }
469 std::string pkgAcqMetaBase::GetFinalFilename() const
470 {
471 return GetFinalFileNameFromURI(Target.URI);
472 }
473 std::string pkgAcqArchive::GetFinalFilename() const
474 {
475 return _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename);
476 }
477 /*}}}*/
478 // pkgAcqTransactionItem::GetMetaKey and specialisations for child classes /*{{{*/
479 std::string pkgAcqTransactionItem::GetMetaKey() const
480 {
481 return Target.MetaKey;
482 }
483 std::string pkgAcqIndex::GetMetaKey() const
484 {
485 if (Stage == STAGE_DECOMPRESS_AND_VERIFY || CurrentCompressionExtension == "uncompressed")
486 return Target.MetaKey;
487 return Target.MetaKey + "." + CurrentCompressionExtension;
488 }
489 std::string pkgAcqDiffIndex::GetMetaKey() const
490 {
491 return GetDiffIndexFileName(Target.MetaKey);
492 }
493 /*}}}*/
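// Illustrative sketch (not compiled): the MetaKey is the path an entry is looked
// up under in the Release file; pkgAcqIndex carries the extension of the
// compression currently tried while downloading (values made up):
#if 0
   // Target.MetaKey                       = "main/binary-amd64/Packages"
   // while fetching the .xz variant      -> "main/binary-amd64/Packages.xz"
   // once decompressed (or uncompressed) -> "main/binary-amd64/Packages"
   // pkgAcqDiffIndex                     -> "main/binary-amd64/Packages.diff/Index"
#endif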
494 //pkgAcqTransactionItem::TransactionState and specialisations for child classes /*{{{*/
495 bool pkgAcqTransactionItem::TransactionState(TransactionStates const state)
496 {
497 bool const Debug = _config->FindB("Debug::Acquire::Transaction", false);
498 switch(state)
499 {
500 case TransactionStarted: _error->Fatal("Item %s changed to invalid transaction start state!", Target.URI.c_str()); break;
501 case TransactionAbort:
502 if(Debug == true)
503 std::clog << " Cancel: " << DestFile << std::endl;
504 if (Status == pkgAcquire::Item::StatIdle)
505 {
506 Status = pkgAcquire::Item::StatDone;
507 Dequeue();
508 }
509 break;
510 case TransactionCommit:
511 if(PartialFile.empty() == false)
512 {
513 bool sameFile = (PartialFile == DestFile);
514 // we use symlinks on IMS-Hit to avoid copies
515 if (RealFileExists(DestFile))
516 {
517 struct stat Buf;
518 if (lstat(PartialFile.c_str(), &Buf) != -1)
519 {
520 if (S_ISLNK(Buf.st_mode) && Buf.st_size > 0)
521 {
522 char partial[Buf.st_size + 1];
523 ssize_t const sp = readlink(PartialFile.c_str(), partial, Buf.st_size);
524 if (sp == -1)
525 _error->Errno("pkgAcqTransactionItem::TransactionState-sp", _("Failed to readlink %s"), PartialFile.c_str());
526 else
527 {
528 partial[sp] = '\0';
529 sameFile = (DestFile == partial);
530 }
531 }
532 }
533 else
534 _error->Errno("pkgAcqTransactionItem::TransactionState-stat", _("Failed to stat %s"), PartialFile.c_str());
535 }
536 if (sameFile == false)
537 {
538 // ensure that even without lists-cleanup all compressions are nuked
539 std::string FinalFile = GetFinalFileNameFromURI(Target.URI);
540 if (FileExists(FinalFile))
541 {
542 if(Debug == true)
543 std::clog << "rm " << FinalFile << " # " << DescURI() << std::endl;
544 if (RemoveFile("TransactionStates-Cleanup", FinalFile) == false)
545 return false;
546 }
547 for (auto const &ext: APT::Configuration::getCompressorExtensions())
548 {
549 auto const Final = FinalFile + ext;
550 if (FileExists(Final))
551 {
552 if(Debug == true)
553 std::clog << "rm " << Final << " # " << DescURI() << std::endl;
554 if (RemoveFile("TransactionStates-Cleanup", Final) == false)
555 return false;
556 }
557 }
558 if(Debug == true)
559 std::clog << "mv " << PartialFile << " -> "<< DestFile << " # " << DescURI() << std::endl;
560 if (Rename(PartialFile, DestFile) == false)
561 return false;
562 }
563 else if(Debug == true)
564 std::clog << "keep " << PartialFile << " # " << DescURI() << std::endl;
565
566 } else {
567 if(Debug == true)
568 std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
569 if (RemoveFile("TransItem::TransactionCommit", DestFile) == false)
570 return false;
571 }
572 break;
573 }
574 return true;
575 }
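// Illustrative sketch (not compiled): what committing a single item boils down
// to in the cases handled above:
#if 0
   // IMS hit: PartialFile already is (or symlinks to) DestFile -> keep as is
   // fresh download: remove the stale final file and all its compressed variants,
   //                 then mv PartialFile -> DestFile
   // staged removal (PartialFile == ""):                       -> rm DestFile
#endif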
576 bool pkgAcqMetaBase::TransactionState(TransactionStates const state)
577 {
578 // Do not remove InRelease on IMSHit of Release.gpg [yes, this is very edgecasey]
579 if (TransactionManager->IMSHit == false)
580 return pkgAcqTransactionItem::TransactionState(state);
581 return true;
582 }
583 bool pkgAcqIndex::TransactionState(TransactionStates const state)
584 {
585 if (pkgAcqTransactionItem::TransactionState(state) == false)
586 return false;
587
588 switch (state)
589 {
590 case TransactionStarted: _error->Fatal("AcqIndex %s changed to invalid transaction start state!", Target.URI.c_str()); break;
591 case TransactionAbort:
592 if (Stage == STAGE_DECOMPRESS_AND_VERIFY)
593 {
594 // keep the compressed file, but drop the decompressed
595 EraseFileName.clear();
596 if (PartialFile.empty() == false && flExtension(PartialFile) != CurrentCompressionExtension)
597 RemoveFile("TransactionAbort", PartialFile);
598 }
599 break;
600 case TransactionCommit:
601 if (EraseFileName.empty() == false)
602 RemoveFile("AcqIndex::TransactionCommit", EraseFileName);
603 break;
604 }
605 return true;
606 }
607 bool pkgAcqDiffIndex::TransactionState(TransactionStates const state)
608 {
609 if (pkgAcqTransactionItem::TransactionState(state) == false)
610 return false;
611
612 switch (state)
613 {
614 case TransactionStarted: _error->Fatal("Item %s changed to invalid transaction start state!", Target.URI.c_str()); break;
615 case TransactionCommit:
616 break;
617 case TransactionAbort:
618 std::string const Partial = GetPartialFileNameFromURI(Target.URI);
619 RemoveFile("TransactionAbort", Partial);
620 break;
621 }
622
623 return true;
624 }
625 /*}}}*/
626
627 class APT_HIDDEN NoActionItem : public pkgAcquire::Item /*{{{*/
628 /* The sole purpose of this class is to have an item which does nothing to
629    reach its done state, preventing cleanup from deleting the mentioned file.
630 Handy in cases in which we know we have the file already, like IMS-Hits. */
631 {
632 IndexTarget const Target;
633 public:
634 virtual std::string DescURI() const APT_OVERRIDE {return Target.URI;};
635 virtual HashStringList GetExpectedHashes() const APT_OVERRIDE {return HashStringList();};
636
637 NoActionItem(pkgAcquire * const Owner, IndexTarget const &Target) :
638 pkgAcquire::Item(Owner), Target(Target)
639 {
640 Status = StatDone;
641 DestFile = GetFinalFileNameFromURI(Target.URI);
642 }
643 NoActionItem(pkgAcquire * const Owner, IndexTarget const &Target, std::string const &FinalFile) :
644 pkgAcquire::Item(Owner), Target(Target)
645 {
646 Status = StatDone;
647 DestFile = FinalFile;
648 }
649 };
650 /*}}}*/
651 class APT_HIDDEN CleanupItem : public pkgAcqTransactionItem /*{{{*/
652 /* This class ensures that a file which was configured but isn't downloaded
653 for various reasons isn't kept in an old version in the lists directory.
654    In a way it's the reverse of NoActionItem as it helps with removing files
655 even if the lists-cleanup is deactivated. */
656 {
657 public:
658 virtual std::string DescURI() const APT_OVERRIDE {return Target.URI;};
659 virtual HashStringList GetExpectedHashes() const APT_OVERRIDE {return HashStringList();};
660
661 CleanupItem(pkgAcquire * const Owner, pkgAcqMetaClearSig * const TransactionManager, IndexTarget const &Target) :
662 pkgAcqTransactionItem(Owner, TransactionManager, Target)
663 {
664 Status = StatDone;
665 DestFile = GetFinalFileNameFromURI(Target.URI);
666 }
667 bool TransactionState(TransactionStates const state) APT_OVERRIDE
668 {
669 switch (state)
670 {
671 case TransactionStarted:
672 break;
673 case TransactionAbort:
674 break;
675 case TransactionCommit:
676 if (_config->FindB("Debug::Acquire::Transaction", false) == true)
677 std::clog << "rm " << DestFile << " # " << DescURI() << std::endl;
678 if (RemoveFile("TransItem::TransactionCommit", DestFile) == false)
679 return false;
680 break;
681 }
682 return true;
683 }
684 };
685 /*}}}*/
686
687 // Acquire::Item::Item - Constructor /*{{{*/
688 class pkgAcquire::Item::Private
689 {
690 public:
691 std::vector<std::string> PastRedirections;
692 };
693 APT_IGNORE_DEPRECATED_PUSH
694 pkgAcquire::Item::Item(pkgAcquire * const owner) :
695 FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false), Local(false),
696 QueueCounter(0), ExpectedAdditionalItems(0), Owner(owner), d(new Private())
697 {
698 Owner->Add(this);
699 Status = StatIdle;
700 }
701 APT_IGNORE_DEPRECATED_POP
702 /*}}}*/
703 // Acquire::Item::~Item - Destructor /*{{{*/
704 pkgAcquire::Item::~Item()
705 {
706 Owner->Remove(this);
707 delete d;
708 }
709 /*}}}*/
710 std::string pkgAcquire::Item::Custom600Headers() const /*{{{*/
711 {
712 return std::string();
713 }
714 /*}}}*/
715 std::string pkgAcquire::Item::ShortDesc() const /*{{{*/
716 {
717 return DescURI();
718 }
719 /*}}}*/
720 APT_CONST void pkgAcquire::Item::Finished() /*{{{*/
721 {
722 }
723 /*}}}*/
724 APT_PURE pkgAcquire * pkgAcquire::Item::GetOwner() const /*{{{*/
725 {
726 return Owner;
727 }
728 /*}}}*/
729 APT_CONST pkgAcquire::ItemDesc &pkgAcquire::Item::GetItemDesc() /*{{{*/
730 {
731 return Desc;
732 }
733 /*}}}*/
734 APT_CONST bool pkgAcquire::Item::IsTrusted() const /*{{{*/
735 {
736 return false;
737 }
738 /*}}}*/
739 // Acquire::Item::Failed - Item failed to download /*{{{*/
740 // ---------------------------------------------------------------------
741 /* We return to an idle state if there are still other queues that could
742 fetch this object */
743 void pkgAcquire::Item::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
744 {
745 if (QueueCounter <= 1)
746 {
747 /* This indicates that the file is not available right now but might
748 be sometime later. If we do a retry cycle then this should be
749 retried [CDROMs] */
750 if (Cnf != NULL && Cnf->LocalOnly == true &&
751 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
752 {
753 Status = StatIdle;
754 Dequeue();
755 return;
756 }
757
758 switch (Status)
759 {
760 case StatIdle:
761 case StatFetching:
762 case StatDone:
763 Status = StatError;
764 break;
765 case StatAuthError:
766 case StatError:
767 case StatTransientNetworkError:
768 break;
769 }
770 Complete = false;
771 Dequeue();
772 }
773
774 string const FailReason = LookupTag(Message, "FailReason");
775 enum { MAXIMUM_SIZE_EXCEEDED, HASHSUM_MISMATCH, WEAK_HASHSUMS, REDIRECTION_LOOP, OTHER } failreason = OTHER;
776 if ( FailReason == "MaximumSizeExceeded")
777 failreason = MAXIMUM_SIZE_EXCEEDED;
778 else if ( FailReason == "WeakHashSums")
779 failreason = WEAK_HASHSUMS;
780 else if (FailReason == "RedirectionLoop")
781 failreason = REDIRECTION_LOOP;
782 else if (Status == StatAuthError)
783 failreason = HASHSUM_MISMATCH;
784
785 if(ErrorText.empty())
786 {
787 std::ostringstream out;
788 switch (failreason)
789 {
790 case HASHSUM_MISMATCH:
791 out << _("Hash Sum mismatch") << std::endl;
792 break;
793 case WEAK_HASHSUMS:
794 out << _("Insufficient information available to perform this download securely") << std::endl;
795 break;
796 case REDIRECTION_LOOP:
797 out << "Redirection loop encountered" << std::endl;
798 break;
799 case MAXIMUM_SIZE_EXCEEDED:
800 out << LookupTag(Message, "Message") << std::endl;
801 break;
802 case OTHER:
803 out << LookupTag(Message, "Message");
804 break;
805 }
806
807 if (Status == StatAuthError)
808 {
809 auto const ExpectedHashes = GetExpectedHashes();
810 if (ExpectedHashes.empty() == false)
811 {
812 out << "Hashes of expected file:" << std::endl;
813 for (auto const &hs: ExpectedHashes)
814 {
815 out << " - " << hs.toStr();
816 if (hs.usable() == false)
817 out << " [weak]";
818 out << std::endl;
819 }
820 }
821 if (failreason == HASHSUM_MISMATCH)
822 {
823 out << "Hashes of received file:" << std::endl;
824 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
825 {
826 std::string const tagname = std::string(*type) + "-Hash";
827 std::string const hashsum = LookupTag(Message, tagname.c_str());
828 if (hashsum.empty() == false)
829 {
830 auto const hs = HashString(*type, hashsum);
831 out << " - " << hs.toStr();
832 if (hs.usable() == false)
833 out << " [weak]";
834 out << std::endl;
835 }
836 }
837 out << "Last modification reported: " << LookupTag(Message, "Last-Modified", "<none>") << std::endl;
838 }
839 }
840 ErrorText = out.str();
841 }
842
843 switch (failreason)
844 {
845 case MAXIMUM_SIZE_EXCEEDED: RenameOnError(MaximumSizeExceeded); break;
846 case HASHSUM_MISMATCH: RenameOnError(HashSumMismatch); break;
847 case WEAK_HASHSUMS: break;
848 case REDIRECTION_LOOP: break;
849 case OTHER: break;
850 }
851
852 if (FailReason.empty() == false)
853 ReportMirrorFailureToCentral(*this, FailReason, ErrorText);
854 else
855 ReportMirrorFailureToCentral(*this, ErrorText, ErrorText);
856
857 if (QueueCounter > 1)
858 Status = StatIdle;
859 }
860 /*}}}*/
861 // Acquire::Item::Start - Item has begun to download /*{{{*/
862 // ---------------------------------------------------------------------
863 /* Stash status and the file size. Note that setting Complete means
864    sub-phases of the acquire process such as decompression are operating */
865 void pkgAcquire::Item::Start(string const &/*Message*/, unsigned long long const Size)
866 {
867 Status = StatFetching;
868 ErrorText.clear();
869 if (FileSize == 0 && Complete == false)
870 FileSize = Size;
871 }
872 /*}}}*/
873 // Acquire::Item::VerifyDone - check if Item was downloaded OK /*{{{*/
874 /* Note that hash-verification is 'hardcoded' in acquire-worker and has
875 * already passed if this method is called. */
876 bool pkgAcquire::Item::VerifyDone(std::string const &Message,
877 pkgAcquire::MethodConfig const * const /*Cnf*/)
878 {
879 std::string const FileName = LookupTag(Message,"Filename");
880 if (FileName.empty() == true)
881 {
882 Status = StatError;
883 ErrorText = "Method gave a blank filename";
884 return false;
885 }
886
887 return true;
888 }
889 /*}}}*/
890 // Acquire::Item::Done - Item downloaded OK /*{{{*/
891 void pkgAcquire::Item::Done(string const &/*Message*/, HashStringList const &Hashes,
892 pkgAcquire::MethodConfig const * const /*Cnf*/)
893 {
894 // We just downloaded something..
895 if (FileSize == 0)
896 {
897 unsigned long long const downloadedSize = Hashes.FileSize();
898 if (downloadedSize != 0)
899 {
900 FileSize = downloadedSize;
901 }
902 }
903 Status = StatDone;
904 ErrorText = string();
905 Owner->Dequeue(this);
906 }
907 /*}}}*/
908 // Acquire::Item::Rename - Rename a file /*{{{*/
909 // ---------------------------------------------------------------------
910 /* This helper function is used by a lot of item methods as their final
911 step */
912 bool pkgAcquire::Item::Rename(string const &From,string const &To)
913 {
914 if (From == To || rename(From.c_str(),To.c_str()) == 0)
915 return true;
916
917 std::string S;
918 strprintf(S, _("rename failed, %s (%s -> %s)."), strerror(errno),
919 From.c_str(),To.c_str());
920 Status = StatError;
921 if (ErrorText.empty())
922 ErrorText = S;
923 else
924 ErrorText = ErrorText + ": " + S;
925 return false;
926 }
927 /*}}}*/
928 void pkgAcquire::Item::Dequeue() /*{{{*/
929 {
930 Owner->Dequeue(this);
931 }
932 /*}}}*/
933 bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/
934 {
935 if (RealFileExists(DestFile))
936 Rename(DestFile, DestFile + ".FAILED");
937
938 std::string errtext;
939 switch (error)
940 {
941 case HashSumMismatch:
942 errtext = _("Hash Sum mismatch");
943 break;
944 case SizeMismatch:
945 errtext = _("Size mismatch");
946 Status = StatAuthError;
947 break;
948 case InvalidFormat:
949 errtext = _("Invalid file format");
950 Status = StatError;
951       // do not report as usually it's not the mirror's fault, but Portal/Proxy
952 break;
953 case SignatureError:
954 errtext = _("Signature error");
955 Status = StatError;
956 break;
957 case NotClearsigned:
958 strprintf(errtext, _("Clearsigned file isn't valid, got '%s' (does the network require authentication?)"), "NOSPLIT");
959 Status = StatAuthError;
960 break;
961 case MaximumSizeExceeded:
962 // the method is expected to report a good error for this
963 break;
964 case PDiffError:
965 // no handling here, done by callers
966 break;
967 }
968 if (ErrorText.empty())
969 ErrorText = errtext;
970 return false;
971 }
972 /*}}}*/
973 void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/
974 {
975 ActiveSubprocess = subprocess;
976 APT_IGNORE_DEPRECATED(Mode = ActiveSubprocess.c_str();)
977 }
978 /*}}}*/
979 // Acquire::Item::ReportMirrorFailure /*{{{*/
980 void pkgAcquire::Item::ReportMirrorFailure(std::string const &FailCode)
981 {
982 ReportMirrorFailureToCentral(*this, FailCode, FailCode);
983 }
984 /*}}}*/
985 std::string pkgAcquire::Item::HashSum() const /*{{{*/
986 {
987 HashStringList const hashes = GetExpectedHashes();
988 HashString const * const hs = hashes.find(NULL);
989 return hs != NULL ? hs->toStr() : "";
990 }
991 /*}}}*/
992 bool pkgAcquire::Item::IsRedirectionLoop(std::string const &NewURI) /*{{{*/
993 {
994 // store can fail due to permission errors and the item will "loop" then
995 if (APT::String::Startswith(NewURI, "store:"))
996 return false;
997 if (d->PastRedirections.empty())
998 {
999 d->PastRedirections.push_back(NewURI);
1000 return false;
1001 }
1002 auto const LastURI = std::prev(d->PastRedirections.end());
1003    // redirections to the same file are a way of restarting/rescheduling,
1004 // individual methods will have to make sure that they aren't looping this way
1005 if (*LastURI == NewURI)
1006 return false;
1007 if (std::find(d->PastRedirections.begin(), LastURI, NewURI) != LastURI)
1008 return true;
1009 d->PastRedirections.push_back(NewURI);
1010 return false;
1011 }
1012 /*}}}*/
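// Illustrative sketch (not compiled): how the redirection history is evaluated,
// with made-up redirect targets A and B:
#if 0
   // -> A            : recorded, no loop
   // A -> A          : immediate self-redirect, treated as a restart, no loop
   // A -> B -> A     : the renewed A is found in the earlier history -> loop
   // -> store:…      : never counted, as store can fail on permission errors
#endif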
1013
1014 pkgAcqTransactionItem::pkgAcqTransactionItem(pkgAcquire * const Owner, /*{{{*/
1015 pkgAcqMetaClearSig * const transactionManager, IndexTarget const &target) :
1016 pkgAcquire::Item(Owner), d(NULL), Target(target), TransactionManager(transactionManager)
1017 {
1018 if (TransactionManager != this)
1019 TransactionManager->Add(this);
1020 }
1021 /*}}}*/
1022 pkgAcqTransactionItem::~pkgAcqTransactionItem() /*{{{*/
1023 {
1024 }
1025 /*}}}*/
1026 HashStringList pkgAcqTransactionItem::GetExpectedHashesFor(std::string const &MetaKey) const /*{{{*/
1027 {
1028 return GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, MetaKey);
1029 }
1030 /*}}}*/
1031
1032 static void LoadLastMetaIndexParser(pkgAcqMetaClearSig * const TransactionManager, std::string const &FinalRelease, std::string const &FinalInRelease)/*{{{*/
1033 {
1034 if (TransactionManager->IMSHit == true)
1035 return;
1036 if (RealFileExists(FinalInRelease) || RealFileExists(FinalRelease))
1037 {
1038 TransactionManager->LastMetaIndexParser = TransactionManager->MetaIndexParser->UnloadedClone();
1039 if (TransactionManager->LastMetaIndexParser != NULL)
1040 {
1041 _error->PushToStack();
1042 if (RealFileExists(FinalInRelease))
1043 TransactionManager->LastMetaIndexParser->Load(FinalInRelease, NULL);
1044 else
1045 TransactionManager->LastMetaIndexParser->Load(FinalRelease, NULL);
1046          // it's unlikely to happen, but if what we have is bad, ignore it
1047 if (_error->PendingError())
1048 {
1049 delete TransactionManager->LastMetaIndexParser;
1050 TransactionManager->LastMetaIndexParser = NULL;
1051 }
1052 _error->RevertToStack();
1053 }
1054 }
1055 }
1056 /*}}}*/
1057
1058 // AcqMetaBase - Constructor /*{{{*/
1059 pkgAcqMetaBase::pkgAcqMetaBase(pkgAcquire * const Owner,
1060 pkgAcqMetaClearSig * const TransactionManager,
1061 IndexTarget const &DataTarget)
1062 : pkgAcqTransactionItem(Owner, TransactionManager, DataTarget), d(NULL),
1063 AuthPass(false), IMSHit(false), State(TransactionStarted)
1064 {
1065 }
1066 /*}}}*/
1067 // AcqMetaBase::Add - Add an item to the current Transaction		/*{{{*/
1068 void pkgAcqMetaBase::Add(pkgAcqTransactionItem * const I)
1069 {
1070 Transaction.push_back(I);
1071 }
1072 /*}}}*/
1073 // AcqMetaBase::AbortTransaction - Abort the current Transaction /*{{{*/
1074 void pkgAcqMetaBase::AbortTransaction()
1075 {
1076 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1077 std::clog << "AbortTransaction: " << TransactionManager << std::endl;
1078
1079 switch (TransactionManager->State)
1080 {
1081 case TransactionStarted: break;
1082 case TransactionAbort: _error->Fatal("Transaction %s was already aborted and is aborted again", TransactionManager->Target.URI.c_str()); return;
1083 case TransactionCommit: _error->Fatal("Transaction %s was already aborted and is now committed", TransactionManager->Target.URI.c_str()); return;
1084 }
1085 TransactionManager->State = TransactionAbort;
1086
1087 // ensure the toplevel is in error state too
1088 for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
1089 I != Transaction.end(); ++I)
1090 {
1091 if ((*I)->Status != pkgAcquire::Item::StatFetching)
1092 Owner->Dequeue(*I);
1093 (*I)->TransactionState(TransactionAbort);
1094 }
1095 Transaction.clear();
1096 }
1097 /*}}}*/
1098 // AcqMetaBase::TransactionHasError - Check for errors in Transaction /*{{{*/
1099 APT_PURE bool pkgAcqMetaBase::TransactionHasError() const
1100 {
1101 for (std::vector<pkgAcqTransactionItem*>::const_iterator I = Transaction.begin();
1102 I != Transaction.end(); ++I)
1103 {
1104 switch((*I)->Status) {
1105 case StatDone: break;
1106 case StatIdle: break;
1107 case StatAuthError: return true;
1108 case StatError: return true;
1109 case StatTransientNetworkError: return true;
1110 case StatFetching: break;
1111 }
1112 }
1113 return false;
1114 }
1115 /*}}}*/
1116 // AcqMetaBase::CommitTransaction - Commit a transaction /*{{{*/
1117 void pkgAcqMetaBase::CommitTransaction()
1118 {
1119 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1120 std::clog << "CommitTransaction: " << this << std::endl;
1121
1122 switch (TransactionManager->State)
1123 {
1124 case TransactionStarted: break;
1125 case TransactionAbort: _error->Fatal("Transaction %s was already committed and is now aborted", TransactionManager->Target.URI.c_str()); return;
1126 case TransactionCommit: _error->Fatal("Transaction %s was already committed and is again committed", TransactionManager->Target.URI.c_str()); return;
1127 }
1128 TransactionManager->State = TransactionCommit;
1129
1130 // move new files into place *and* remove files that are not
1131 // part of the transaction but are still on disk
1132 for (std::vector<pkgAcqTransactionItem*>::iterator I = Transaction.begin();
1133 I != Transaction.end(); ++I)
1134 {
1135 (*I)->TransactionState(TransactionCommit);
1136 }
1137 Transaction.clear();
1138 }
1139 /*}}}*/
1140 // AcqMetaBase::TransactionStageCopy - Stage a file for copying /*{{{*/
1141 void pkgAcqMetaBase::TransactionStageCopy(pkgAcqTransactionItem * const I,
1142 const std::string &From,
1143 const std::string &To)
1144 {
1145 I->PartialFile = From;
1146 I->DestFile = To;
1147 }
1148 /*}}}*/
1149 // AcqMetaBase::TransactionStageRemoval - Stage a file for removal /*{{{*/
1150 void pkgAcqMetaBase::TransactionStageRemoval(pkgAcqTransactionItem * const I,
1151 const std::string &FinalFile)
1152 {
1153 I->PartialFile = "";
1154 I->DestFile = FinalFile;
1155 }
1156 /*}}}*/
1157 // AcqMetaBase::GenerateAuthWarning - Check gpg authentication error /*{{{*/
1158 /* This method is called from ::Failed handlers. If it returns true,
1159    no fallback to other files or modes is performed */
1160 bool pkgAcqMetaBase::CheckStopAuthentication(pkgAcquire::Item * const I, const std::string &Message)
1161 {
1162 string const Final = I->GetFinalFilename();
1163 std::string const GPGError = LookupTag(Message, "Message");
1164 if (FileExists(Final))
1165 {
1166 I->Status = StatTransientNetworkError;
1167 _error->Warning(_("An error occurred during the signature verification. "
1168 "The repository is not updated and the previous index files will be used. "
1169 "GPG error: %s: %s"),
1170 Desc.Description.c_str(),
1171 GPGError.c_str());
1172 RunScripts("APT::Update::Auth-Failure");
1173 return true;
1174 } else if (LookupTag(Message,"Message").find("NODATA") != string::npos) {
1175 /* Invalid signature file, reject (LP: #346386) (Closes: #627642) */
1176 _error->Error(_("GPG error: %s: %s"),
1177 Desc.Description.c_str(),
1178 GPGError.c_str());
1179 I->Status = StatAuthError;
1180 return true;
1181 } else {
1182 _error->Warning(_("GPG error: %s: %s"),
1183 Desc.Description.c_str(),
1184 GPGError.c_str());
1185 }
1186 // gpgv method failed
1187 ReportMirrorFailureToCentral(*this, "GPGFailure", GPGError);
1188 return false;
1189 }
1190 /*}}}*/
1191 // AcqMetaBase::Custom600Headers - Get header for AcqMetaBase /*{{{*/
1192 // ---------------------------------------------------------------------
1193 string pkgAcqMetaBase::Custom600Headers() const
1194 {
1195 std::string Header = "\nIndex-File: true";
1196 std::string MaximumSize;
1197 strprintf(MaximumSize, "\nMaximum-Size: %i",
1198 _config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000));
1199 Header += MaximumSize;
1200
1201 string const FinalFile = GetFinalFilename();
1202 struct stat Buf;
1203 if (stat(FinalFile.c_str(),&Buf) == 0)
1204 Header += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime, false);
1205
1206 return Header;
1207 }
1208 /*}}}*/
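// Illustrative sketch (not compiled): the extra header block handed to the method
// for a Release file, assuming the default 10 MB limit and an already existing
// final file from a previous update (timestamp made up):
#if 0
   // Index-File: true
   // Maximum-Size: 10000000
   // Last-Modified: Thu, 01 Jan 2015 00:00:00 GMT
#endif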
1209 // AcqMetaBase::QueueForSignatureVerify /*{{{*/
1210 void pkgAcqMetaBase::QueueForSignatureVerify(pkgAcqTransactionItem * const I, std::string const &File, std::string const &Signature)
1211 {
1212 AuthPass = true;
1213 I->Desc.URI = "gpgv:" + Signature;
1214 I->DestFile = File;
1215 QueueURI(I->Desc);
1216 I->SetActiveSubprocess("gpgv");
1217 }
1218 /*}}}*/
1219 // AcqMetaBase::CheckDownloadDone /*{{{*/
1220 bool pkgAcqMetaBase::CheckDownloadDone(pkgAcqTransactionItem * const I, const std::string &Message, HashStringList const &Hashes) const
1221 {
1222 // We have just finished downloading a Release file (it is not
1223 // verified yet)
1224
1225 // Save the final base URI we got this Release file from
1226 if (I->UsedMirror.empty() == false && _config->FindB("Acquire::SameMirrorForAllIndexes", true))
1227 {
1228 if (APT::String::Endswith(I->Desc.URI, "InRelease"))
1229 {
1230 TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("InRelease"));
1231 TransactionManager->UsedMirror = I->UsedMirror;
1232 }
1233 else if (APT::String::Endswith(I->Desc.URI, "Release"))
1234 {
1235 TransactionManager->BaseURI = I->Desc.URI.substr(0, I->Desc.URI.length() - strlen("Release"));
1236 TransactionManager->UsedMirror = I->UsedMirror;
1237 }
1238 }
1239
1240 std::string const FileName = LookupTag(Message,"Filename");
1241 if (FileName != I->DestFile && RealFileExists(I->DestFile) == false)
1242 {
1243 I->Local = true;
1244 I->Desc.URI = "copy:" + FileName;
1245 I->QueueURI(I->Desc);
1246 return false;
1247 }
1248
1249 // make sure to verify against the right file on I-M-S hit
1250 bool IMSHit = StringToBool(LookupTag(Message,"IMS-Hit"), false);
1251 if (IMSHit == false && Hashes.usable())
1252 {
1253       // detect IMS-Hits the server didn't detect, via hash comparison
1254 std::string const FinalFile = I->GetFinalFilename();
1255 if (RealFileExists(FinalFile) && Hashes.VerifyFile(FinalFile) == true)
1256 {
1257 IMSHit = true;
1258 RemoveFile("CheckDownloadDone", I->DestFile);
1259 }
1260 }
1261
1262 if(IMSHit == true)
1263 {
1264 // for simplicity, the transaction manager is always InRelease
1265 // even if it doesn't exist.
1266 TransactionManager->IMSHit = true;
1267 I->PartialFile = I->DestFile = I->GetFinalFilename();
1268 }
1269
1270 // set Item to complete as the remaining work is all local (verify etc)
1271 I->Complete = true;
1272
1273 return true;
1274 }
1275 /*}}}*/
1276 bool pkgAcqMetaBase::CheckAuthDone(string const &Message) /*{{{*/
1277 {
1278 // At this point, the gpgv method has succeeded, so there is a
1279 // valid signature from a key in the trusted keyring. We
1280 // perform additional verification of its contents, and use them
1281 // to verify the indexes we are about to download
1282 if (_config->FindB("Debug::pkgAcquire::Auth", false))
1283 std::cerr << "Signature verification succeeded: " << DestFile << std::endl;
1284
1285 if (TransactionManager->IMSHit == false)
1286 {
1287 // open the last (In)Release if we have it
1288 std::string const FinalFile = GetFinalFilename();
1289 std::string FinalRelease;
1290 std::string FinalInRelease;
1291 if (APT::String::Endswith(FinalFile, "InRelease"))
1292 {
1293 FinalInRelease = FinalFile;
1294 FinalRelease = FinalFile.substr(0, FinalFile.length() - strlen("InRelease")) + "Release";
1295 }
1296 else
1297 {
1298 FinalInRelease = FinalFile.substr(0, FinalFile.length() - strlen("Release")) + "InRelease";
1299 FinalRelease = FinalFile;
1300 }
1301 LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
1302 }
1303
1304 bool const GoodAuth = TransactionManager->MetaIndexParser->Load(DestFile, &ErrorText);
1305 if (GoodAuth == false && AllowInsecureRepositories(InsecureType::WEAK, Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == false)
1306 {
1307 Status = StatAuthError;
1308 return false;
1309 }
1310
1311 if (!VerifyVendor(Message))
1312 {
1313 Status = StatAuthError;
1314 return false;
1315 }
1316
1317 // Download further indexes with verification
1318 TransactionManager->QueueIndexes(GoodAuth);
1319
1320 return GoodAuth;
1321 }
1322 /*}}}*/
1323 void pkgAcqMetaClearSig::QueueIndexes(bool const verify) /*{{{*/
1324 {
1325 // at this point the real Items are loaded in the fetcher
1326 ExpectedAdditionalItems = 0;
1327
1328 std::set<std::string> targetsSeen;
1329 bool const hasReleaseFile = TransactionManager->MetaIndexParser != NULL;
1330 bool const metaBaseSupportsByHash = hasReleaseFile && TransactionManager->MetaIndexParser->GetSupportsAcquireByHash();
1331 bool hasHashes = true;
1332 auto IndexTargets = TransactionManager->MetaIndexParser->GetIndexTargets();
1333 if (hasReleaseFile && verify == false)
1334 hasHashes = std::any_of(IndexTargets.begin(), IndexTargets.end(),
1335 [&](IndexTarget const &Target) { return TransactionManager->MetaIndexParser->Exists(Target.MetaKey); });
1336 for (auto&& Target: IndexTargets)
1337 {
1338       // if we have already seen a target created by the target this one is declared a
1339       // fallback of, we skip acquiring the fallback (but we make sure we clean up)
1340 if (targetsSeen.find(Target.Option(IndexTarget::FALLBACK_OF)) != targetsSeen.end())
1341 {
1342 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1343 new CleanupItem(Owner, TransactionManager, Target);
1344 continue;
1345 }
1346       // 'all' is an implementation detail. Users shouldn't use this as an arch.
1347 // We need this support trickery here as e.g. Debian has binary-all files already,
1348 // but arch:all packages are still in the arch:any files, so we would waste precious
1349 // download time, bandwidth and diskspace for nothing, BUT Debian doesn't feature all
1350 // in the set of supported architectures, so we can filter based on this property rather
1351 // than invent an entirely new flag we would need to carry for all of eternity.
1352 if (hasReleaseFile && Target.Option(IndexTarget::ARCHITECTURE) == "all")
1353 {
1354 if (TransactionManager->MetaIndexParser->IsArchitectureAllSupportedFor(Target) == false)
1355 {
1356 new CleanupItem(Owner, TransactionManager, Target);
1357 continue;
1358 }
1359 }
1360
1361 bool trypdiff = Target.OptionBool(IndexTarget::PDIFFS);
1362 if (hasReleaseFile == true)
1363 {
1364 if (TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false)
1365 {
1366 // optional targets that we do not have in the Release file are skipped
1367 if (hasHashes == true && Target.IsOptional)
1368 {
1369 new CleanupItem(Owner, TransactionManager, Target);
1370 continue;
1371 }
1372
1373 std::string const &arch = Target.Option(IndexTarget::ARCHITECTURE);
1374 if (arch.empty() == false)
1375 {
1376 if (TransactionManager->MetaIndexParser->IsArchitectureSupported(arch) == false)
1377 {
1378 new CleanupItem(Owner, TransactionManager, Target);
1379 _error->Notice(_("Skipping acquire of configured file '%s' as repository '%s' doesn't support architecture '%s'"),
1380 Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str(), arch.c_str());
1381 continue;
1382 }
1383                // if the architecture is officially supported but currently no packages are available for it,
1384                // ignore silently as this is pretty much the same as just shipping an empty file.
1385                // if we don't know which architectures are supported, we do NOT ignore it, so the user is notified about this
1386 if (hasHashes == true && TransactionManager->MetaIndexParser->IsArchitectureSupported("*undefined*") == false)
1387 {
1388 new CleanupItem(Owner, TransactionManager, Target);
1389 continue;
1390 }
1391 }
1392
1393 if (hasHashes == true)
1394 {
1395 Status = StatAuthError;
1396 strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), Target.MetaKey.c_str());
1397 return;
1398 }
1399 else
1400 {
1401 new pkgAcqIndex(Owner, TransactionManager, Target);
1402 continue;
1403 }
1404 }
1405 else if (verify)
1406 {
1407 auto const hashes = GetExpectedHashesFor(Target.MetaKey);
1408 if (hashes.empty() == false)
1409 {
1410 if (hashes.usable() == false && TargetIsAllowedToBe(TransactionManager->Target, InsecureType::WEAK) == false)
1411 {
1412 new CleanupItem(Owner, TransactionManager, Target);
1413 _error->Warning(_("Skipping acquire of configured file '%s' as repository '%s' provides only weak security information for it"),
1414 Target.MetaKey.c_str(), TransactionManager->Target.Description.c_str());
1415 continue;
1416 }
1417 // empty files are skipped as acquiring the very small compressed files is a waste of time
1418 else if (hashes.FileSize() == 0)
1419 {
1420 new CleanupItem(Owner, TransactionManager, Target);
1421 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1422 continue;
1423 }
1424 }
1425 }
1426
1427 // autoselect the compression method
1428 std::vector<std::string> types = VectorizeString(Target.Option(IndexTarget::COMPRESSIONTYPES), ' ');
1429 types.erase(std::remove_if(types.begin(), types.end(), [&](std::string const &t) {
1430 if (t == "uncompressed")
1431 return TransactionManager->MetaIndexParser->Exists(Target.MetaKey) == false;
1432 std::string const MetaKey = Target.MetaKey + "." + t;
1433 return TransactionManager->MetaIndexParser->Exists(MetaKey) == false;
1434 }), types.end());
1435 if (types.empty() == false)
1436 {
1437 std::ostringstream os;
1438             // add the special compression type by-hash first if supported
1439 std::string const useByHashConf = Target.Option(IndexTarget::BY_HASH);
1440 bool useByHash = false;
1441 if(useByHashConf == "force")
1442 useByHash = true;
1443 else
1444 useByHash = StringToBool(useByHashConf) == true && metaBaseSupportsByHash;
1445 if (useByHash == true)
1446 os << "by-hash ";
1447 std::copy(types.begin(), types.end()-1, std::ostream_iterator<std::string>(os, " "));
1448 os << *types.rbegin();
1449 Target.Options["COMPRESSIONTYPES"] = os.str();
1450 }
1451 else
1452 Target.Options["COMPRESSIONTYPES"].clear();
1453
1454 std::string filename = GetExistingFilename(GetFinalFileNameFromURI(Target.URI));
1455 if (filename.empty() == false)
1456 {
1457 // if the Release file is a hit and we have an index it must be the current one
1458 if (TransactionManager->IMSHit == true)
1459 ;
1460 else if (TransactionManager->LastMetaIndexParser != NULL)
1461 {
1462 // see if the file changed since the last Release file
1463 // we use the uncompressed files as we might compress differently compared to the server,
1464 // so the hashes might not match, even if they contain the same data.
1465 HashStringList const newFile = GetExpectedHashesFromFor(TransactionManager->MetaIndexParser, Target.MetaKey);
1466 HashStringList const oldFile = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
1467 if (newFile != oldFile)
1468 filename.clear();
1469 }
1470 else
1471 filename.clear();
1472 }
1473 else
1474 trypdiff = false; // no file to patch
1475
1476 if (filename.empty() == false)
1477 {
1478 new NoActionItem(Owner, Target, filename);
1479 std::string const idxfilename = GetFinalFileNameFromURI(GetDiffIndexURI(Target));
1480 if (FileExists(idxfilename))
1481 new NoActionItem(Owner, Target, idxfilename);
1482 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1483 continue;
1484 }
1485
1486 // check if we have patches available
1487 trypdiff &= TransactionManager->MetaIndexParser->Exists(GetDiffIndexFileName(Target.MetaKey));
1488 }
1489 else
1490 {
1491 // if we have no file to patch, no point in trying
1492 trypdiff &= (GetExistingFilename(GetFinalFileNameFromURI(Target.URI)).empty() == false);
1493 }
1494
1495 // no point in patching from local sources
1496 if (trypdiff)
1497 {
1498 std::string const proto = Target.URI.substr(0, strlen("file:/"));
1499 if (proto == "file:/" || proto == "copy:/" || proto == "cdrom:")
1500 trypdiff = false;
1501 }
1502
1503 // Queue the Index file (Packages, Sources, Translation-$foo, …)
1504 targetsSeen.emplace(Target.Option(IndexTarget::CREATED_BY));
1505 if (trypdiff)
1506 new pkgAcqDiffIndex(Owner, TransactionManager, Target);
1507 else
1508 new pkgAcqIndex(Owner, TransactionManager, Target);
1509 }
1510 }
1511 /*}}}*/
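// Illustrative sketch (not compiled): how the compression autoselection above
// rewrites the COMPRESSIONTYPES option (Release file contents made up, by-hash
// enabled for the target):
#if 0
   // configured types:  "xz bz2 gz uncompressed"
   // Release lists:     Packages.xz and Packages, advertises Acquire-By-Hash
   //   -> Target.Options["COMPRESSIONTYPES"] = "by-hash xz uncompressed"
   // Release lists only the uncompressed Packages:
   //   -> Target.Options["COMPRESSIONTYPES"] = "by-hash uncompressed"
#endif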
1512 bool pkgAcqMetaBase::VerifyVendor(string const &) /*{{{*/
1513 {
1514 string Transformed = TransactionManager->MetaIndexParser->GetExpectedDist();
1515
1516 if (Transformed == "../project/experimental")
1517 {
1518 Transformed = "experimental";
1519 }
1520
1521 auto pos = Transformed.rfind('/');
1522 if (pos != string::npos)
1523 {
1524 Transformed = Transformed.substr(0, pos);
1525 }
1526
1527 if (Transformed == ".")
1528 {
1529 Transformed = "";
1530 }
1531
1532 if (TransactionManager->MetaIndexParser->GetValidUntil() > 0)
1533 {
1534 time_t const invalid_since = time(NULL) - TransactionManager->MetaIndexParser->GetValidUntil();
1535 if (invalid_since > 0)
1536 {
1537 std::string errmsg;
1538 strprintf(errmsg,
1539 // TRANSLATOR: The first %s is the URL of the bad Release file, the second is
1540 // the time since then the file is invalid - formatted in the same way as in
1541 // the download progress display (e.g. 7d 3h 42min 1s)
1542 _("Release file for %s is expired (invalid since %s). "
1543 "Updates for this repository will not be applied."),
1544 Target.URI.c_str(), TimeToStr(invalid_since).c_str());
1545 if (ErrorText.empty())
1546 ErrorText = errmsg;
1547 return _error->Error("%s", errmsg.c_str());
1548 }
1549 }
1550
1551 /* Did we get a file older than what we have? This is a last minute IMS hit and doubles
1552    as protection against downgrading us to older (still valid) files */
1553 if (TransactionManager->IMSHit == false && TransactionManager->LastMetaIndexParser != NULL &&
1554 TransactionManager->LastMetaIndexParser->GetDate() > TransactionManager->MetaIndexParser->GetDate())
1555 {
1556 TransactionManager->IMSHit = true;
1557 RemoveFile("VerifyVendor", DestFile);
1558 PartialFile = DestFile = GetFinalFilename();
1559       // load the 'old' file into the 'new' one instead of flipping pointers as
1560       // the new one isn't owned by us while the old one is, so cleanup would get confused.
1561 TransactionManager->MetaIndexParser->swapLoad(TransactionManager->LastMetaIndexParser);
1562 delete TransactionManager->LastMetaIndexParser;
1563 TransactionManager->LastMetaIndexParser = NULL;
1564 }
1565
1566 if (_config->FindB("Debug::pkgAcquire::Auth", false))
1567 {
1568 std::cerr << "Got Codename: " << TransactionManager->MetaIndexParser->GetCodename() << std::endl;
1569 std::cerr << "Expecting Dist: " << TransactionManager->MetaIndexParser->GetExpectedDist() << std::endl;
1570 std::cerr << "Transformed Dist: " << Transformed << std::endl;
1571 }
1572
1573 if (TransactionManager->MetaIndexParser->CheckDist(Transformed) == false)
1574 {
1575 // This might become fatal one day
1576 // Status = StatAuthError;
1577 // ErrorText = "Conflicting distribution; expected "
1578 // + MetaIndexParser->GetExpectedDist() + " but got "
1579 // + MetaIndexParser->GetCodename();
1580 // return false;
1581 if (!Transformed.empty())
1582 {
1583 _error->Warning(_("Conflicting distribution: %s (expected %s but got %s)"),
1584 Desc.Description.c_str(),
1585 Transformed.c_str(),
1586 TransactionManager->MetaIndexParser->GetCodename().c_str());
1587 }
1588 }
1589
1590 return true;
1591 }
1592 /*}}}*/
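// Illustrative sketch (not compiled): the distribution string normalisation done
// above before it is compared against the Release file (inputs made up):
#if 0
   // "../project/experimental" -> "experimental"
   // "stable/updates"          -> "stable"   (everything from the last '/' on is cut)
   // "."                       -> ""         (flat repositories)
#endif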
1593 pkgAcqMetaBase::~pkgAcqMetaBase()
1594 {
1595 }
1596
1597 pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire * const Owner, /*{{{*/
1598 IndexTarget const &ClearsignedTarget,
1599 IndexTarget const &DetachedDataTarget, IndexTarget const &DetachedSigTarget,
1600 metaIndex * const MetaIndexParser) :
1601 pkgAcqMetaIndex(Owner, this, ClearsignedTarget, DetachedSigTarget),
1602 d(NULL), DetachedDataTarget(DetachedDataTarget),
1603 MetaIndexParser(MetaIndexParser), LastMetaIndexParser(NULL)
1604 {
1605 // index targets + (worst case:) Release/Release.gpg
1606 ExpectedAdditionalItems = std::numeric_limits<decltype(ExpectedAdditionalItems)>::max();
1607 TransactionManager->Add(this);
1608 }
1609 /*}}}*/
1610 pkgAcqMetaClearSig::~pkgAcqMetaClearSig() /*{{{*/
1611 {
1612 if (LastMetaIndexParser != NULL)
1613 delete LastMetaIndexParser;
1614 }
1615 /*}}}*/
1616 // pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers /*{{{*/
1617 string pkgAcqMetaClearSig::Custom600Headers() const
1618 {
1619 string Header = pkgAcqMetaBase::Custom600Headers();
1620 Header += "\nFail-Ignore: true";
1621 std::string const key = TransactionManager->MetaIndexParser->GetSignedBy();
1622 if (key.empty() == false)
1623 Header += "\nSigned-By: " + key;
1624
1625 return Header;
1626 }
1627 /*}}}*/
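// Sketch of the headers produced above (the Signed-By fingerprint is a
// hypothetical value taken from the Signed-By option of the source):
//   Fail-Ignore: true
//   Signed-By: ABCDEF0123456789ABCDEF0123456789ABCDEF01
// appended to whatever pkgAcqMetaBase::Custom600Headers() already returned.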
1628 void pkgAcqMetaClearSig::Finished() /*{{{*/
1629 {
1630 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1631 std::clog << "Finished: " << DestFile <<std::endl;
1632 if(TransactionManager->State == TransactionStarted &&
1633 TransactionManager->TransactionHasError() == false)
1634 TransactionManager->CommitTransaction();
1635 }
1636 /*}}}*/
1637 bool pkgAcqMetaClearSig::VerifyDone(std::string const &Message, /*{{{*/
1638 pkgAcquire::MethodConfig const * const Cnf)
1639 {
1640 Item::VerifyDone(Message, Cnf);
1641
1642 if (FileExists(DestFile) && !StartsWithGPGClearTextSignature(DestFile))
1643 return RenameOnError(NotClearsigned);
1644
1645 return true;
1646 }
1647 /*}}}*/
1648 // pkgAcqMetaClearSig::Done - We got a file /*{{{*/
1649 void pkgAcqMetaClearSig::Done(std::string const &Message,
1650 HashStringList const &Hashes,
1651 pkgAcquire::MethodConfig const * const Cnf)
1652 {
1653 Item::Done(Message, Hashes, Cnf);
1654
1655 if(AuthPass == false)
1656 {
1657 if(CheckDownloadDone(this, Message, Hashes) == true)
1658 QueueForSignatureVerify(this, DestFile, DestFile);
1659 return;
1660 }
1661 else if(CheckAuthDone(Message) == true)
1662 {
1663 if (TransactionManager->IMSHit == false)
1664 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
1665 else if (RealFileExists(GetFinalFilename()) == false)
1666 {
1667 // We got an IMS hit for the InRelease file, but we don't have one on disk, which means
1668 // a valid Release/Release.gpg combo stepped in instead; we have to 'acquire' those
1669 // files now to ensure list cleanup isn't removing them
1670 new NoActionItem(Owner, DetachedDataTarget);
1671 new NoActionItem(Owner, DetachedSigTarget);
1672 }
1673 }
1674 else if (Status != StatAuthError)
1675 {
1676 string const FinalFile = GetFinalFileNameFromURI(DetachedDataTarget.URI);
1677 string const OldFile = GetFinalFilename();
1678 if (TransactionManager->IMSHit == false)
1679 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
1680 else if (RealFileExists(OldFile) == false)
1681 new NoActionItem(Owner, DetachedDataTarget);
1682 else
1683 TransactionManager->TransactionStageCopy(this, OldFile, FinalFile);
1684 }
1685 }
1686 /*}}}*/
1687 void pkgAcqMetaClearSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf) /*{{{*/
1688 {
1689 Item::Failed(Message, Cnf);
1690
1691 if (AuthPass == false)
1692 {
1693 if (Status == StatAuthError || Status == StatTransientNetworkError)
1694 {
1695 // if we expected a clear-text signature (InRelease) but got a network
1696 // error, or got a file that wasn't valid, we end up here (see VerifyDone).
1697 // As this is usually caused by web-portals we do not try Release/Release.gpg,
1698 // as that is going to fail anyway; instead we abort our attempt (LP#346386)
1699 TransactionManager->AbortTransaction();
1700 return;
1701 }
1702
1703 // Queue the 'old' InRelease file for removal if we try Release.gpg
1704 // as otherwise the file will stay around and give a false-auth
1705 // impression (CVE-2012-0214)
1706 TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
1707 Status = StatDone;
1708
1709 new pkgAcqMetaIndex(Owner, TransactionManager, DetachedDataTarget, DetachedSigTarget);
1710 }
1711 else
1712 {
1713 if(CheckStopAuthentication(this, Message))
1714 return;
1715
1716 if(AllowInsecureRepositories(InsecureType::UNSIGNED, Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
1717 {
1718 Status = StatDone;
1719
1720 /* InRelease files become Release files, otherwise
1721 * they would be considered as trusted later on */
1722 string const FinalRelease = GetFinalFileNameFromURI(DetachedDataTarget.URI);
1723 string const PartialRelease = GetPartialFileNameFromURI(DetachedDataTarget.URI);
1724 string const FinalReleasegpg = GetFinalFileNameFromURI(DetachedSigTarget.URI);
1725 string const FinalInRelease = GetFinalFilename();
1726 Rename(DestFile, PartialRelease);
1727 TransactionManager->TransactionStageCopy(this, PartialRelease, FinalRelease);
1728 LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
1729
1730 // we parse the indexes here because at this point the user wanted
1731 // a repository that may potentially harm him
1732 if (TransactionManager->MetaIndexParser->Load(PartialRelease, &ErrorText) == false || VerifyVendor(Message) == false)
1733 /* expired Release files are still a problem you need extra force for */;
1734 else
1735 TransactionManager->QueueIndexes(true);
1736 }
1737 }
1738 }
1739 /*}}}*/
1740
1741 pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire * const Owner, /*{{{*/
1742 pkgAcqMetaClearSig * const TransactionManager,
1743 IndexTarget const &DataTarget,
1744 IndexTarget const &DetachedSigTarget) :
1745 pkgAcqMetaBase(Owner, TransactionManager, DataTarget), d(NULL),
1746 DetachedSigTarget(DetachedSigTarget)
1747 {
1748 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1749 std::clog << "New pkgAcqMetaIndex with TransactionManager "
1750 << this->TransactionManager << std::endl;
1751
1752 DestFile = GetPartialFileNameFromURI(DataTarget.URI);
1753
1754 // Create the item
1755 Desc.Description = DataTarget.Description;
1756 Desc.Owner = this;
1757 Desc.ShortDesc = DataTarget.ShortDesc;
1758 Desc.URI = DataTarget.URI;
1759 QueueURI(Desc);
1760 }
1761 /*}}}*/
1762 void pkgAcqMetaIndex::Done(string const &Message, /*{{{*/
1763 HashStringList const &Hashes,
1764 pkgAcquire::MethodConfig const * const Cfg)
1765 {
1766 Item::Done(Message,Hashes,Cfg);
1767
1768 if(CheckDownloadDone(this, Message, Hashes))
1769 {
1770 // we have a Release file, now download the Signature, all further
1771 // verify/queue for additional downloads will be done in the
1772 // pkgAcqMetaSig::Done() code
1773 new pkgAcqMetaSig(Owner, TransactionManager, DetachedSigTarget, this);
1774 }
1775 }
1776 /*}}}*/
1777 // pkgAcqMetaIndex::Failed - no Release file present /*{{{*/
1778 void pkgAcqMetaIndex::Failed(string const &Message,
1779 pkgAcquire::MethodConfig const * const Cnf)
1780 {
1781 pkgAcquire::Item::Failed(Message, Cnf);
1782 Status = StatDone;
1783
1784 // No Release file was present so fall
1785 // back to queueing Packages files without verification
1786 // only allow going further if the user explicitly wants it
1787 if(AllowInsecureRepositories(InsecureType::NORELEASE, Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
1788 {
1789 // ensure old Release files are removed
1790 TransactionManager->TransactionStageRemoval(this, GetFinalFilename());
1791
1792 // queue without any kind of hashsum support
1793 TransactionManager->QueueIndexes(false);
1794 }
1795 }
1796 /*}}}*/
1797 std::string pkgAcqMetaIndex::DescURI() const /*{{{*/
1798 {
1799 return Target.URI;
1800 }
1801 /*}}}*/
1802 pkgAcqMetaIndex::~pkgAcqMetaIndex() {}
1803
1804 // AcqMetaSig::AcqMetaSig - Constructor /*{{{*/
1805 pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire * const Owner,
1806 pkgAcqMetaClearSig * const TransactionManager,
1807 IndexTarget const &Target,
1808 pkgAcqMetaIndex * const MetaIndex) :
1809 pkgAcqTransactionItem(Owner, TransactionManager, Target), d(NULL), MetaIndex(MetaIndex)
1810 {
1811 DestFile = GetPartialFileNameFromURI(Target.URI);
1812
1813 // remove any partially downloaded sig-file in partial/.
1814 // it may confuse proxies and is too small to warrant a
1815 // partial download anyway
1816 RemoveFile("pkgAcqMetaSig", DestFile);
1817
1818 // set the TransactionManager
1819 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1820 std::clog << "New pkgAcqMetaSig with TransactionManager "
1821 << TransactionManager << std::endl;
1822
1823 // Create the item
1824 Desc.Description = Target.Description;
1825 Desc.Owner = this;
1826 Desc.ShortDesc = Target.ShortDesc;
1827 Desc.URI = Target.URI;
1828
1829 // If we got a hit for Release, we will get one for Release.gpg too (or obscure errors),
1830 // so we skip the download step and go instantly to verification
1831 if (TransactionManager->IMSHit == true && RealFileExists(GetFinalFilename()))
1832 {
1833 Complete = true;
1834 Status = StatDone;
1835 PartialFile = DestFile = GetFinalFilename();
1836 MetaIndexFileSignature = DestFile;
1837 MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
1838 }
1839 else
1840 QueueURI(Desc);
1841 }
1842 /*}}}*/
1843 pkgAcqMetaSig::~pkgAcqMetaSig() /*{{{*/
1844 {
1845 }
1846 /*}}}*/
1847 // pkgAcqMetaSig::Custom600Headers - Insert custom request headers /*{{{*/
1848 std::string pkgAcqMetaSig::Custom600Headers() const
1849 {
1850 std::string Header = pkgAcqTransactionItem::Custom600Headers();
1851 std::string const key = TransactionManager->MetaIndexParser->GetSignedBy();
1852 if (key.empty() == false)
1853 Header += "\nSigned-By: " + key;
1854 return Header;
1855 }
1856 /*}}}*/
1857 // AcqMetaSig::Done - The signature was downloaded/verified /*{{{*/
1858 void pkgAcqMetaSig::Done(string const &Message, HashStringList const &Hashes,
1859 pkgAcquire::MethodConfig const * const Cfg)
1860 {
1861 if (MetaIndexFileSignature.empty() == false)
1862 {
1863 DestFile = MetaIndexFileSignature;
1864 MetaIndexFileSignature.clear();
1865 }
1866 Item::Done(Message, Hashes, Cfg);
1867
1868 if(MetaIndex->AuthPass == false)
1869 {
1870 if(MetaIndex->CheckDownloadDone(this, Message, Hashes) == true)
1871 {
1872 // destfile will be modified to point to MetaIndexFile for the
1873 // gpgv method, so we need to save it here
1874 MetaIndexFileSignature = DestFile;
1875 MetaIndex->QueueForSignatureVerify(this, MetaIndex->DestFile, DestFile);
1876 }
1877 return;
1878 }
1879 else if(MetaIndex->CheckAuthDone(Message) == true)
1880 {
1881 if (TransactionManager->IMSHit == false)
1882 {
1883 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
1884 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, MetaIndex->GetFinalFilename());
1885 }
1886 }
1887 else if (MetaIndex->Status != StatAuthError)
1888 {
1889 std::string const FinalFile = MetaIndex->GetFinalFilename();
1890 if (TransactionManager->IMSHit == false)
1891 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, FinalFile);
1892 else
1893 TransactionManager->TransactionStageCopy(MetaIndex, FinalFile, FinalFile);
1894 }
1895 }
1896 /*}}}*/
1897 void pkgAcqMetaSig::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
1898 {
1899 Item::Failed(Message,Cnf);
1900
1901 // check if we need to fail at this point
1902 if (MetaIndex->AuthPass == true && MetaIndex->CheckStopAuthentication(this, Message))
1903 return;
1904
1905 // ensures that a Release.gpg file in the lists/ is removed by the transaction
1906 TransactionManager->TransactionStageRemoval(this, DestFile);
1907
1908 // only allow going further if the user explicitly wants it
1909 if (AllowInsecureRepositories(InsecureType::UNSIGNED, MetaIndex->Target.Description, TransactionManager->MetaIndexParser, TransactionManager, this) == true)
1910 {
1911 string const FinalRelease = MetaIndex->GetFinalFilename();
1912 string const FinalInRelease = TransactionManager->GetFinalFilename();
1913 LoadLastMetaIndexParser(TransactionManager, FinalRelease, FinalInRelease);
1914
1915 // we parse the indexes here because at this point the user wanted
1916 // a repository that may potentially harm him
1917 bool const GoodLoad = TransactionManager->MetaIndexParser->Load(MetaIndex->DestFile, &ErrorText);
1918 if (MetaIndex->VerifyVendor(Message) == false)
1919 /* expired Release files are still a problem you need extra force for */;
1920 else
1921 TransactionManager->QueueIndexes(GoodLoad);
1922
1923 TransactionManager->TransactionStageCopy(MetaIndex, MetaIndex->DestFile, FinalRelease);
1924 }
1925 else if (TransactionManager->IMSHit == false)
1926 Rename(MetaIndex->DestFile, MetaIndex->DestFile + ".FAILED");
1927
1928 // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor
1929 if (Cnf->LocalOnly == true ||
1930 StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
1931 {
1932 // Ignore this
1933 Status = StatDone;
1934 }
1935 }
1936 /*}}}*/
1937
1938
1939 // AcqBaseIndex - Constructor /*{{{*/
1940 pkgAcqBaseIndex::pkgAcqBaseIndex(pkgAcquire * const Owner,
1941 pkgAcqMetaClearSig * const TransactionManager,
1942 IndexTarget const &Target)
1943 : pkgAcqTransactionItem(Owner, TransactionManager, Target), d(NULL)
1944 {
1945 }
1946 /*}}}*/
1947 void pkgAcqBaseIndex::Failed(std::string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
1948 {
1949 pkgAcquire::Item::Failed(Message, Cnf);
1950 if (Status != StatAuthError)
1951 return;
1952
1953 ErrorText.append("Release file created at: ");
1954 auto const timespec = TransactionManager->MetaIndexParser->GetDate();
1955 if (timespec == 0)
1956 ErrorText.append("<unknown>");
1957 else
1958 ErrorText.append(TimeRFC1123(timespec, true));
1959 ErrorText.append("\n");
1960 }
1961 /*}}}*/
1962 pkgAcqBaseIndex::~pkgAcqBaseIndex() {}
1963
1964 // AcqDiffIndex::AcqDiffIndex - Constructor /*{{{*/
1965 // ---------------------------------------------------------------------
1966 /* Get the DiffIndex file first and see if there are patches available
1967 * If so, create a pkgAcqIndexDiffs fetcher that will get and apply the
1968 * patches. If anything goes wrong in that process, it will fall back to
1969 * the original packages file
1970 */
1971 pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire * const Owner,
1972 pkgAcqMetaClearSig * const TransactionManager,
1973 IndexTarget const &Target)
1974 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL), diffs(NULL)
1975 {
1976 // FIXME: Magic number as an upper bound on pdiffs we will reasonably acquire
1977 ExpectedAdditionalItems = 40;
1978
1979 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
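   // (Illustrative invocation: this debug output can be enabled with
   //  `apt-get update -o Debug::pkgAcquire::Diffs=true`.)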
1980
1981 Desc.Owner = this;
1982 Desc.Description = GetDiffIndexFileName(Target.Description);
1983 Desc.ShortDesc = Target.ShortDesc;
1984 Desc.URI = GetDiffIndexURI(Target);
1985
1986 DestFile = GetPartialFileNameFromURI(Desc.URI);
1987
1988 if(Debug)
1989 std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
1990
1991 QueueURI(Desc);
1992 }
1993 /*}}}*/
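// Illustrative example for the constructor above (hypothetical URI): for a
// Target.URI of http://deb.example.org/debian/dists/unstable/main/binary-amd64/Packages
// GetDiffIndexURI() typically points at the pdiff index, i.e.
// http://deb.example.org/debian/dists/unstable/main/binary-amd64/Packages.diff/Index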
1994 // AcqDiffIndex::Custom600Headers - Insert custom request headers /*{{{*/
1995 // ---------------------------------------------------------------------
1996 /* The only header we use is the last-modified header. */
1997 string pkgAcqDiffIndex::Custom600Headers() const
1998 {
1999 if (TransactionManager->LastMetaIndexParser != NULL)
2000 return "\nIndex-File: true";
2001
2002 string const Final = GetFinalFilename();
2003
2004 if(Debug)
2005 std::clog << "Custom600Header-IMS: " << Final << std::endl;
2006
2007 struct stat Buf;
2008 if (stat(Final.c_str(),&Buf) != 0)
2009 return "\nIndex-File: true";
2010
2011 return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime, false);
2012 }
2013 /*}}}*/
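// Sketch of the headers returned above, assuming a previously fetched index
// exists on disk (the Last-Modified value is illustrative, taken from its mtime):
//   Index-File: true
//   Last-Modified: Thu, 01 Jan 2015 00:00:00 GMT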
2014 void pkgAcqDiffIndex::QueueOnIMSHit() const /*{{{*/
2015 {
2016 // list cleanup needs to know that this file as well as the already
2017 // present index is ours, so we create an empty diff to save it for us
2018 new pkgAcqIndexDiffs(Owner, TransactionManager, Target);
2019 }
2020 /*}}}*/
2021 static bool RemoveFileForBootstrapLinking(bool const Debug, std::string const &For, std::string const &Boot)/*{{{*/
2022 {
2023 if (FileExists(Boot) && RemoveFile("Bootstrap-linking", Boot) == false)
2024 {
2025 if (Debug)
2026 std::clog << "Bootstrap-linking for patching " << For
2027 << " by removing stale " << Boot << " failed!" << std::endl;
2028 return false;
2029 }
2030 return true;
2031 }
2032 /*}}}*/
2033 bool pkgAcqDiffIndex::ParseDiffIndex(string const &IndexDiffFile) /*{{{*/
2034 {
2035 ExpectedAdditionalItems = 0;
2036 // failing here is fine: our caller will take care of trying to
2037 // get the complete file if patching fails
2038 if(Debug)
2039 std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile
2040 << std::endl;
2041
2042 FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
2043 pkgTagFile TF(&Fd);
2044 if (Fd.IsOpen() == false || Fd.Failed())
2045 return false;
2046
2047 pkgTagSection Tags;
2048 if(unlikely(TF.Step(Tags) == false))
2049 return false;
2050
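   // The pdiff Index file parsed below is a single deb822-style stanza; a rough,
   // abridged example with hypothetical hashes, sizes and patch names:
   //   SHA256-Current: <hash> 12345678
   //   SHA256-History:
   //    <hash> 12300000 2021-01-01-0000.00
   //    <hash> 12340000 2021-01-02-0000.00
   //   SHA256-Patches:
   //    <hash> 2048 2021-01-01-0000.00
   //   SHA256-Download:
   //    <hash> 1024 2021-01-01-0000.00.gz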
2051 HashStringList ServerHashes;
2052 unsigned long long ServerSize = 0;
2053
2054 auto const &posix = std::locale("C.UTF-8");
2055 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
2056 {
2057 std::string tagname = *type;
2058 tagname.append("-Current");
2059 std::string const tmp = Tags.FindS(tagname.c_str());
2060 if (tmp.empty() == true)
2061 continue;
2062
2063 string hash;
2064 unsigned long long size;
2065 std::stringstream ss(tmp);
2066 ss.imbue(posix);
2067 ss >> hash >> size;
2068 if (unlikely(hash.empty() == true))
2069 continue;
2070 if (unlikely(ServerSize != 0 && ServerSize != size))
2071 continue;
2072 ServerHashes.push_back(HashString(*type, hash));
2073 ServerSize = size;
2074 }
2075
2076 if (ServerHashes.usable() == false)
2077 {
2078 if (Debug == true)
2079 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
2080 return false;
2081 }
2082
2083 std::string const CurrentPackagesFile = GetFinalFileNameFromURI(Target.URI);
2084 HashStringList const TargetFileHashes = GetExpectedHashesFor(Target.MetaKey);
2085 if (TargetFileHashes.usable() == false || ServerHashes != TargetFileHashes)
2086 {
2087 if (Debug == true)
2088 {
2089 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl;
2090 printHashSumComparison(CurrentPackagesFile, ServerHashes, TargetFileHashes);
2091 }
2092 return false;
2093 }
2094
2095 HashStringList LocalHashes;
2096 // try to avoid calculating the hash here as this is costly
2097 if (TransactionManager->LastMetaIndexParser != NULL)
2098 LocalHashes = GetExpectedHashesFromFor(TransactionManager->LastMetaIndexParser, Target.MetaKey);
2099 if (LocalHashes.usable() == false)
2100 {
2101 FileFd fd(CurrentPackagesFile, FileFd::ReadOnly, FileFd::Auto);
2102 Hashes LocalHashesCalc(ServerHashes);
2103 LocalHashesCalc.AddFD(fd);
2104 LocalHashes = LocalHashesCalc.GetHashStringList();
2105 }
2106
2107 if (ServerHashes == LocalHashes)
2108 {
2109 // we have the same hashes as the server so we are done here
2110 if(Debug)
2111 std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl;
2112 QueueOnIMSHit();
2113 return true;
2114 }
2115
2116 if(Debug)
2117 std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
2118 << CurrentPackagesFile << " " << LocalHashes.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;
2119
2120 // historically, older hashes have more info than newer ones, so start
2121 // collecting with older ones first to avoid implementing complicated
2122 // information merging techniques… a failure is after all always
2123 // recoverable with a complete file and hashes aren't changed that often.
2124 std::vector<char const *> types;
2125 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
2126 types.push_back(*type);
2127
2128 // parse all of (provided) history
2129 vector<DiffInfo> available_patches;
2130 bool firstAcceptedHashes = true;
2131 for (auto type = types.crbegin(); type != types.crend(); ++type)
2132 {
2133 if (LocalHashes.find(*type) == NULL)
2134 continue;
2135
2136 std::string tagname = *type;
2137 tagname.append("-History");
2138 std::string const tmp = Tags.FindS(tagname.c_str());
2139 if (tmp.empty() == true)
2140 continue;
2141
2142 string hash, filename;
2143 unsigned long long size;
2144 std::stringstream ss(tmp);
2145 ss.imbue(posix);
2146
2147 while (ss >> hash >> size >> filename)
2148 {
2149 if (unlikely(hash.empty() == true || filename.empty() == true))
2150 continue;
2151
2152 // see if we have a record for this file already
2153 std::vector<DiffInfo>::iterator cur = available_patches.begin();
2154 for (; cur != available_patches.end(); ++cur)
2155 {
2156 if (cur->file != filename)
2157 continue;
2158 cur->result_hashes.push_back(HashString(*type, hash));
2159 break;
2160 }
2161 if (cur != available_patches.end())
2162 continue;
2163 if (firstAcceptedHashes == true)
2164 {
2165 DiffInfo next;
2166 next.file = filename;
2167 next.result_hashes.push_back(HashString(*type, hash));
2168 next.result_hashes.FileSize(size);
2169 available_patches.push_back(next);
2170 }
2171 else
2172 {
2173 if (Debug == true)
2174 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
2175 << " wasn't in the list for the first parsed hash! (history)" << std::endl;
2176 break;
2177 }
2178 }
2179 firstAcceptedHashes = false;
2180 }
2181
2182 if (unlikely(available_patches.empty() == true))
2183 {
2184 if (Debug)
2185 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
2186 << "Couldn't find any patches for the patch series." << std::endl;
2187 return false;
2188 }
2189
2190 for (auto type = types.crbegin(); type != types.crend(); ++type)
2191 {
2192 if (LocalHashes.find(*type) == NULL)
2193 continue;
2194
2195 std::string tagname = *type;
2196 tagname.append("-Patches");
2197 std::string const tmp = Tags.FindS(tagname.c_str());
2198 if (tmp.empty() == true)
2199 continue;
2200
2201 string hash, filename;
2202 unsigned long long size;
2203 std::stringstream ss(tmp);
2204 ss.imbue(posix);
2205
2206 while (ss >> hash >> size >> filename)
2207 {
2208 if (unlikely(hash.empty() == true || filename.empty() == true))
2209 continue;
2210
2211 // see if we have a record for this file already
2212 std::vector<DiffInfo>::iterator cur = available_patches.begin();
2213 for (; cur != available_patches.end(); ++cur)
2214 {
2215 if (cur->file != filename)
2216 continue;
2217 if (cur->patch_hashes.empty())
2218 cur->patch_hashes.FileSize(size);
2219 cur->patch_hashes.push_back(HashString(*type, hash));
2220 break;
2221 }
2222 if (cur != available_patches.end())
2223 continue;
2224 if (Debug == true)
2225 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
2226 << " wasn't in the list for the first parsed hash! (patches)" << std::endl;
2227 break;
2228 }
2229 }
2230
2231 for (auto type = types.crbegin(); type != types.crend(); ++type)
2232 {
2233 std::string tagname = *type;
2234 tagname.append("-Download");
2235 std::string const tmp = Tags.FindS(tagname.c_str());
2236 if (tmp.empty() == true)
2237 continue;
2238
2239 string hash, filename;
2240 unsigned long long size;
2241 std::stringstream ss(tmp);
2242 ss.imbue(posix);
2243
2244 // FIXME: all of pdiff supports only .gz compressed patches
2245 while (ss >> hash >> size >> filename)
2246 {
2247 if (unlikely(hash.empty() == true || filename.empty() == true))
2248 continue;
2249 if (unlikely(APT::String::Endswith(filename, ".gz") == false))
2250 continue;
2251 filename.erase(filename.length() - 3);
2252
2253 // see if we have a record for this file already
2254 std::vector<DiffInfo>::iterator cur = available_patches.begin();
2255 for (; cur != available_patches.end(); ++cur)
2256 {
2257 if (cur->file != filename)
2258 continue;
2259 if (cur->download_hashes.empty())
2260 cur->download_hashes.FileSize(size);
2261 cur->download_hashes.push_back(HashString(*type, hash));
2262 break;
2263 }
2264 if (cur != available_patches.end())
2265 continue;
2266 if (Debug == true)
2267 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
2268 << " wasn't in the list for the first parsed hash! (download)" << std::endl;
2269 break;
2270 }
2271 }
2272
2273
2274 bool foundStart = false;
2275 for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
2276 cur != available_patches.end(); ++cur)
2277 {
2278 if (LocalHashes != cur->result_hashes)
2279 continue;
2280
2281 available_patches.erase(available_patches.begin(), cur);
2282 foundStart = true;
2283 break;
2284 }
2285
2286 if (foundStart == false || unlikely(available_patches.empty() == true))
2287 {
2288 if (Debug)
2289 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
2290 << "Couldn't find the start of the patch series." << std::endl;
2291 return false;
2292 }
2293
2294 for (auto const &patch: available_patches)
2295 if (patch.result_hashes.usable() == false ||
2296 patch.patch_hashes.usable() == false ||
2297 patch.download_hashes.usable() == false)
2298 {
2299 if (Debug)
2300 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": provides no usable hashes for " << patch.file
2301 << " so fallback to complete download" << std::endl;
2302 return false;
2303 }
2304
2305 // patching with too many files is rather slow compared to a fast download
2306 unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
2307 if (fileLimit != 0 && fileLimit < available_patches.size())
2308 {
2309 if (Debug)
2310 std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
2311 << ") so fallback to complete download" << std::endl;
2312 return false;
2313 }
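   // Illustrative apt.conf snippet for the knob checked above:
   //   Acquire::PDiffs::FileLimit "20";  // fall back to a full download if more
   //                                     // than 20 patches would be required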
2314
2315 // calculate the size of all patches we have to get
2316 unsigned short const sizeLimitPercent = _config->FindI("Acquire::PDiffs::SizeLimit", 100);
2317 if (sizeLimitPercent > 0)
2318 {
2319 unsigned long long downloadSize = std::accumulate(available_patches.begin(),
2320 available_patches.end(), 0llu, [](unsigned long long const T, DiffInfo const &I) {
2321 return T + I.download_hashes.FileSize();
2322 });
2323 if (downloadSize != 0)
2324 {
2325 unsigned long long downloadSizeIdx = 0;
2326 auto const types = VectorizeString(Target.Option(IndexTarget::COMPRESSIONTYPES), ' ');
2327 for (auto const &t : types)
2328 {
2329 std::string MetaKey = Target.MetaKey;
2330 if (t != "uncompressed")
2331 MetaKey += '.' + t;
2332 HashStringList const hsl = GetExpectedHashesFor(MetaKey);
2333 if (unlikely(hsl.usable() == false))
2334 continue;
2335 downloadSizeIdx = hsl.FileSize();
2336 break;
2337 }
2338 unsigned long long const sizeLimit = downloadSizeIdx * sizeLimitPercent;
2339 if ((sizeLimit/100) < downloadSize)
2340 {
2341 if (Debug)
2342 std::clog << "Need " << downloadSize << " compressed bytes (Limit is " << (sizeLimit/100) << ", "
2343 << "original is " << downloadSizeIdx << ") so fallback to complete download" << std::endl;
2344 return false;
2345 }
2346 }
2347 }
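   // Worked example (hypothetical sizes): with the default SizeLimit of 100 and a
   // compressed index of 30,000,000 bytes, sizeLimit/100 equals 30,000,000, so a
   // patch set larger than the full compressed index triggers the fallback; with
   // Acquire::PDiffs::SizeLimit "50" the patches may total at most 15,000,000 bytes.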
2348
2349 // we have something, queue the diffs
2350 string::size_type const last_space = Description.rfind(" ");
2351 if(last_space != string::npos)
2352 Description.erase(last_space, Description.size()-last_space);
2353
2354 /* decide if we should download patches one by one or in one go:
2355 The first is good if the server merges patches, but many don't, so client-
2356 based merging can be attempted, in which case the second is better.
2357 "bad things" will happen if patches are merged on the server,
2358 but client-side merging is attempted as well */
2359 bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
2360 if (pdiff_merge == true)
2361 {
2362 // reprepro adds this flag if it has merged patches on the server
2363 std::string const precedence = Tags.FindS("X-Patch-Precedence");
2364 pdiff_merge = (precedence != "merged");
2365 }
2366
2367 // clean the plate
2368 {
2369 std::string const Final = GetExistingFilename(CurrentPackagesFile);
2370 if (unlikely(Final.empty())) // because we wouldn't be called in such a case
2371 return false;
2372 std::string const PartialFile = GetPartialFileNameFromURI(Target.URI);
2373 std::string const PatchedFile = GetKeepCompressedFileName(PartialFile + "-patched", Target);
2374 if (RemoveFileForBootstrapLinking(Debug, CurrentPackagesFile, PartialFile) == false ||
2375 RemoveFileForBootstrapLinking(Debug, CurrentPackagesFile, PatchedFile) == false)
2376 return false;
2377 for (auto const &ext : APT::Configuration::getCompressorExtensions())
2378 {
2379 if (RemoveFileForBootstrapLinking(Debug, CurrentPackagesFile, PartialFile + ext) == false ||
2380 RemoveFileForBootstrapLinking(Debug, CurrentPackagesFile, PatchedFile + ext) == false)
2381 return false;
2382 }
2383 std::string const Ext = Final.substr(CurrentPackagesFile.length());
2384 std::string const Partial = PartialFile + Ext;
2385 if (symlink(Final.c_str(), Partial.c_str()) != 0)
2386 {
2387 if (Debug)
2388 std::clog << "Bootstrap-linking for patching " << CurrentPackagesFile
2389 << " by linking " << Final << " to " << Partial << " failed!" << std::endl;
2390 return false;
2391 }
2392 }
2393
2394 if (pdiff_merge == false)
2395 new pkgAcqIndexDiffs(Owner, TransactionManager, Target, available_patches);
2396 else
2397 {
2398 diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
2399 for(size_t i = 0; i < available_patches.size(); ++i)
2400 (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, TransactionManager,
2401 Target,
2402 available_patches[i],
2403 diffs);
2404 }
2405
2406 Complete = false;
2407 Status = StatDone;
2408 Dequeue();
2409 return true;
2410 }
2411 /*}}}*/
2412 void pkgAcqDiffIndex::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
2413 {
2414 pkgAcqBaseIndex::Failed(Message,Cnf);
2415 Status = StatDone;
2416 ExpectedAdditionalItems = 0;
2417
2418 if(Debug)
2419 std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl
2420 << "Falling back to normal index file acquire" << std::endl;
2421
2422 new pkgAcqIndex(Owner, TransactionManager, Target);
2423 }
2424 /*}}}*/
2425 void pkgAcqDiffIndex::Done(string const &Message,HashStringList const &Hashes, /*{{{*/
2426 pkgAcquire::MethodConfig const * const Cnf)
2427 {
2428 if(Debug)
2429 std::clog << "pkgAcqDiffIndex::Done(): " << Desc.URI << std::endl;
2430
2431 Item::Done(Message, Hashes, Cnf);
2432
2433 string const FinalFile = GetFinalFilename();
2434 if(StringToBool(LookupTag(Message,"IMS-Hit"),false))
2435 DestFile = FinalFile;
2436
2437 if(ParseDiffIndex(DestFile) == false)
2438 {
2439 Failed("Message: Couldn't parse pdiff index", Cnf);
2440 // queue for final move - this should happen even if we fail
2441 // while parsing (e.g. on sizelimit) and download the complete file.
2442 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
2443 return;
2444 }
2445
2446 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
2447
2448 Complete = true;
2449 Status = StatDone;
2450 Dequeue();
2451
2452 return;
2453 }
2454 /*}}}*/
2455 pkgAcqDiffIndex::~pkgAcqDiffIndex()
2456 {
2457 if (diffs != NULL)
2458 delete diffs;
2459 }
2460
2461 // AcqIndexDiffs::AcqIndexDiffs - Constructor /*{{{*/
2462 // ---------------------------------------------------------------------
2463 /* The package diff is added to the queue. One object is constructed
2464 * for each diff and the index
2465 */
2466 pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire * const Owner,
2467 pkgAcqMetaClearSig * const TransactionManager,
2468 IndexTarget const &Target,
2469 vector<DiffInfo> const &diffs)
2470 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL),
2471 available_patches(diffs)
2472 {
2473 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
2474
2475 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
2476
2477 Desc.Owner = this;
2478 Description = Target.Description;
2479 Desc.ShortDesc = Target.ShortDesc;
2480
2481 if(available_patches.empty() == true)
2482 {
2483 // we are done (yeah!), check hashes against the final file
2484 DestFile = GetKeepCompressedFileName(GetFinalFileNameFromURI(Target.URI), Target);
2485 Finish(true);
2486 }
2487 else
2488 {
2489 State = StateFetchDiff;
2490 QueueNextDiff();
2491 }
2492 }
2493 /*}}}*/
2494 void pkgAcqIndexDiffs::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
2495 {
2496 pkgAcqBaseIndex::Failed(Message,Cnf);
2497 Status = StatDone;
2498
2499 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
2500 if(Debug)
2501 std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << " with " << Message << std::endl
2502 << "Falling back to normal index file acquire " << std::endl;
2503 RenameOnError(PDiffError);
2504 std::string const patchname = GetDiffsPatchFileName(DestFile);
2505 if (RealFileExists(patchname))
2506 Rename(patchname, patchname + ".FAILED");
2507 std::string const UnpatchedFile = GetExistingFilename(GetPartialFileNameFromURI(Target.URI));
2508 if (UnpatchedFile.empty() == false && FileExists(UnpatchedFile))
2509 Rename(UnpatchedFile, UnpatchedFile + ".FAILED");
2510 new pkgAcqIndex(Owner, TransactionManager, Target);
2511 Finish();
2512 }
2513 /*}}}*/
2514 // Finish - helper that cleans the item out of the fetcher queue /*{{{*/
2515 void pkgAcqIndexDiffs::Finish(bool allDone)
2516 {
2517 if(Debug)
2518 std::clog << "pkgAcqIndexDiffs::Finish(): "
2519 << allDone << " "
2520 << Desc.URI << std::endl;
2521
2522 // we restore the original name, this is required, otherwise
2523 // the file will be cleaned
2524 if(allDone)
2525 {
2526 std::string const Final = GetKeepCompressedFileName(GetFinalFilename(), Target);
2527 TransactionManager->TransactionStageCopy(this, DestFile, Final);
2528
2529 // this is for the "real" finish
2530 Complete = true;
2531 Status = StatDone;
2532 Dequeue();
2533 if(Debug)
2534 std::clog << "\n\nallDone: " << DestFile << "\n" << std::endl;
2535 return;
2536 }
2537 else
2538 DestFile.clear();
2539
2540 if(Debug)
2541 std::clog << "Finishing: " << Desc.URI << std::endl;
2542 Complete = false;
2543 Status = StatDone;
2544 Dequeue();
2545 return;
2546 }
2547 /*}}}*/
2548 bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
2549 {
2550 // calculate the hashes of the just-patched file
2551 std::string const PartialFile = GetExistingFilename(GetPartialFileNameFromURI(Target.URI));
2552 if(unlikely(PartialFile.empty()))
2553 {
2554 Failed("Message: The file " + GetPartialFileNameFromURI(Target.URI) + " isn't available", NULL);
2555 return false;
2556 }
2557
2558 FileFd fd(PartialFile, FileFd::ReadOnly, FileFd::Extension);
2559 Hashes LocalHashesCalc;
2560 LocalHashesCalc.AddFD(fd);
2561 HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
2562
2563 if(Debug)
2564 std::clog << "QueueNextDiff: " << PartialFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl;
2565
2566 HashStringList const TargetFileHashes = GetExpectedHashesFor(Target.MetaKey);
2567 if (unlikely(LocalHashes.usable() == false || TargetFileHashes.usable() == false))
2568 {
2569 Failed("Local/Expected hashes are not usable for " + PartialFile, NULL);
2570 return false;
2571 }
2572
2573 // final file reached before all patches are applied
2574 if(LocalHashes == TargetFileHashes)
2575 {
2576 Finish(true);
2577 return true;
2578 }
2579
2580 // remove all patches until the next matching patch is found
2581 // this requires the Index file to be ordered
2582 available_patches.erase(available_patches.begin(),
2583 std::find_if(available_patches.begin(), available_patches.end(), [&](DiffInfo const &I) {
2584 return I.result_hashes == LocalHashes;
2585 }));
2586
2587 // error checking and falling back if no patch was found
2588 if(available_patches.empty() == true)
2589 {
2590 Failed("No patches left to reach target for " + PartialFile, NULL);
2591 return false;
2592 }
2593
2594 // queue the right diff
2595 Desc.URI = Target.URI + ".diff/" + available_patches[0].file + ".gz";
2596 Desc.Description = Description + " " + available_patches[0].file + string(".pdiff");
2597 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI + ".diff/" + available_patches[0].file), Target);
2598
2599 if(Debug)
2600 std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;
2601
2602 QueueURI(Desc);
2603
2604 return true;
2605 }
2606 /*}}}*/
2607 void pkgAcqIndexDiffs::Done(string const &Message, HashStringList const &Hashes, /*{{{*/
2608 pkgAcquire::MethodConfig const * const Cnf)
2609 {
2610 if (Debug)
2611 std::clog << "pkgAcqIndexDiffs::Done(): " << Desc.URI << std::endl;
2612
2613 Item::Done(Message, Hashes, Cnf);
2614
2615 std::string const UncompressedUnpatchedFile = GetPartialFileNameFromURI(Target.URI);
2616 std::string const UnpatchedFile = GetExistingFilename(UncompressedUnpatchedFile);
2617 std::string const PatchFile = GetDiffsPatchFileName(UnpatchedFile);
2618 std::string const PatchedFile = GetKeepCompressedFileName(UncompressedUnpatchedFile, Target);
2619
2620 switch (State)
2621 {
2622 // success in downloading a diff, enter ApplyDiff state
2623 case StateFetchDiff:
2624 Rename(DestFile, PatchFile);
2625 DestFile = GetKeepCompressedFileName(UncompressedUnpatchedFile + "-patched", Target);
2626 if(Debug)
2627 std::clog << "Sending to rred method: " << UnpatchedFile << std::endl;
2628 State = StateApplyDiff;
2629 Local = true;
2630 Desc.URI = "rred:" + UnpatchedFile;
2631 QueueURI(Desc);
2632 SetActiveSubprocess("rred");
2633 return;
2634 // success in download/apply a diff, queue next (if needed)
2635 case StateApplyDiff:
2636 // remove the just applied patch and base file
2637 available_patches.erase(available_patches.begin());
2638 RemoveFile("pkgAcqIndexDiffs::Done", PatchFile);
2639 RemoveFile("pkgAcqIndexDiffs::Done", UnpatchedFile);
2640 if(Debug)
2641 std::clog << "Moving patched file in place: " << std::endl
2642 << DestFile << " -> " << PatchedFile << std::endl;
2643 Rename(DestFile, PatchedFile);
2644
2645 // see if there is more to download
2646 if(available_patches.empty() == false)
2647 {
2648 new pkgAcqIndexDiffs(Owner, TransactionManager, Target, available_patches);
2649 Finish();
2650 } else {
2651 DestFile = PatchedFile;
2652 Finish(true);
2653 }
2654 return;
2655 }
2656 }
2657 /*}}}*/
2658 std::string pkgAcqIndexDiffs::Custom600Headers() const /*{{{*/
2659 {
2660 if(State != StateApplyDiff)
2661 return pkgAcqBaseIndex::Custom600Headers();
2662 std::ostringstream patchhashes;
2663 for (auto && hs : available_patches[0].result_hashes)
2664 patchhashes << "\nStart-" << hs.HashType() << "-Hash: " << hs.HashValue();
2665 for (auto && hs : available_patches[0].patch_hashes)
2666 patchhashes << "\nPatch-0-" << hs.HashType() << "-Hash: " << hs.HashValue();
2667 patchhashes << pkgAcqBaseIndex::Custom600Headers();
2668 return patchhashes.str();
2669 }
2670 /*}}}*/
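// Sketch of the extra headers handed to the rred method (hypothetical values):
//   Start-SHA256-Hash: <hash of the file the next patch applies to>
//   Patch-0-SHA256-Hash: <hash of the uncompressed patch itself>
// followed by the usual pkgAcqBaseIndex headers.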
2671 pkgAcqIndexDiffs::~pkgAcqIndexDiffs() {}
2672
2673 // AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor /*{{{*/
2674 pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire * const Owner,
2675 pkgAcqMetaClearSig * const TransactionManager,
2676 IndexTarget const &Target,
2677 DiffInfo const &patch,
2678 std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches)
2679 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL),
2680 patch(patch), allPatches(allPatches), State(StateFetchDiff)
2681 {
2682 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
2683
2684 Desc.Owner = this;
2685 Description = Target.Description;
2686 Desc.ShortDesc = Target.ShortDesc;
2687 Desc.URI = Target.URI + ".diff/" + patch.file + ".gz";
2688 Desc.Description = Description + " " + patch.file + ".pdiff";
2689 DestFile = GetPartialFileNameFromURI(Desc.URI);
2690
2691 if(Debug)
2692 std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl;
2693
2694 QueueURI(Desc);
2695 }
2696 /*}}}*/
2697 void pkgAcqIndexMergeDiffs::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)/*{{{*/
2698 {
2699 if(Debug)
2700 std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl;
2701
2702 pkgAcqBaseIndex::Failed(Message,Cnf);
2703 Status = StatDone;
2704
2705 // check if we are the first to fail, otherwise we are done here
2706 State = StateDoneDiff;
2707 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2708 I != allPatches->end(); ++I)
2709 if ((*I)->State == StateErrorDiff)
2710 {
2711 State = StateErrorDiff;
2712 return;
2713 }
2714
2715 // first failure means we should fall back
2716 State = StateErrorDiff;
2717 if (Debug)
2718 std::clog << "Falling back to normal index file acquire" << std::endl;
2719 RenameOnError(PDiffError);
2720 if (RealFileExists(DestFile))
2721 Rename(DestFile, DestFile + ".FAILED");
2722 std::string const UnpatchedFile = GetExistingFilename(GetPartialFileNameFromURI(Target.URI));
2723 if (UnpatchedFile.empty() == false && FileExists(UnpatchedFile))
2724 Rename(UnpatchedFile, UnpatchedFile + ".FAILED");
2725 DestFile.clear();
2726 new pkgAcqIndex(Owner, TransactionManager, Target);
2727 }
2728 /*}}}*/
2729 void pkgAcqIndexMergeDiffs::Done(string const &Message, HashStringList const &Hashes, /*{{{*/
2730 pkgAcquire::MethodConfig const * const Cnf)
2731 {
2732 if(Debug)
2733 std::clog << "pkgAcqIndexMergeDiffs::Done(): " << Desc.URI << std::endl;
2734
2735 Item::Done(Message, Hashes, Cnf);
2736
2737 if (std::any_of(allPatches->begin(), allPatches->end(),
2738 [](pkgAcqIndexMergeDiffs const * const P) { return P->State == StateErrorDiff; }))
2739 {
2740 if(Debug)
2741 std::clog << "Another patch failed already, no point in processing this one." << std::endl;
2742 State = StateErrorDiff;
2743 return;
2744 }
2745
2746 std::string const UncompressedUnpatchedFile = GetPartialFileNameFromURI(Target.URI);
2747 std::string const UnpatchedFile = GetExistingFilename(UncompressedUnpatchedFile);
2748 if (UnpatchedFile.empty())
2749 {
2750 _error->Fatal("Unpatched file %s doesn't exist (anymore)!", UncompressedUnpatchedFile.c_str());
2751 State = StateErrorDiff;
2752 return;
2753 }
2754 std::string const PatchFile = GetMergeDiffsPatchFileName(UnpatchedFile, patch.file);
2755 std::string const PatchedFile = GetKeepCompressedFileName(UncompressedUnpatchedFile, Target);
2756
2757 switch (State)
2758 {
2759 case StateFetchDiff:
2760 Rename(DestFile, PatchFile);
2761
2762 // check if this is the last completed diff
2763 State = StateDoneDiff;
2764 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2765 I != allPatches->end(); ++I)
2766 if ((*I)->State != StateDoneDiff)
2767 {
2768 if(Debug)
2769 std::clog << "Not the last done diff in the batch: " << Desc.URI << std::endl;
2770 return;
2771 }
2772 // this is the last completed diff, so we are ready to apply now
2773 DestFile = GetKeepCompressedFileName(UncompressedUnpatchedFile + "-patched", Target);
2774 if(Debug)
2775 std::clog << "Sending to rred method: " << UnpatchedFile << std::endl;
2776 State = StateApplyDiff;
2777 Local = true;
2778 Desc.URI = "rred:" + UnpatchedFile;
2779 QueueURI(Desc);
2780 SetActiveSubprocess("rred");
2781 return;
2782 case StateApplyDiff:
2783 // success in downloading & applying all diffs, finalize and clean up
2784 if(Debug)
2785 std::clog << "Queue patched file in place: " << std::endl
2786 << DestFile << " -> " << PatchedFile << std::endl;
2787
2788 // queue for copy by the transaction manager
2789 TransactionManager->TransactionStageCopy(this, DestFile, GetKeepCompressedFileName(GetFinalFilename(), Target));
2790
2791 // ensure the ed's are gone regardless of list-cleanup
2792 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2793 I != allPatches->end(); ++I)
2794 RemoveFile("pkgAcqIndexMergeDiffs::Done", GetMergeDiffsPatchFileName(UnpatchedFile, (*I)->patch.file));
2795 RemoveFile("pkgAcqIndexMergeDiffs::Done", UnpatchedFile);
2796
2797 // all set and done
2798 Complete = true;
2799 if(Debug)
2800 std::clog << "allDone: " << DestFile << "\n" << std::endl;
2801 return;
2802 case StateDoneDiff: _error->Fatal("Done called for %s which is in an invalid Done state", PatchFile.c_str()); break;
2803 case StateErrorDiff: _error->Fatal("Done called for %s which is in an invalid Error state", PatchFile.c_str()); break;
2804 }
2805 }
2806 /*}}}*/
2807 std::string pkgAcqIndexMergeDiffs::Custom600Headers() const /*{{{*/
2808 {
2809 if(State != StateApplyDiff)
2810 return pkgAcqBaseIndex::Custom600Headers();
2811 std::ostringstream patchhashes;
2812 unsigned int seen_patches = 0;
2813 for (auto && hs : (*allPatches)[0]->patch.result_hashes)
2814 patchhashes << "\nStart-" << hs.HashType() << "-Hash: " << hs.HashValue();
2815 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
2816 I != allPatches->end(); ++I)
2817 {
2818 HashStringList const ExpectedHashes = (*I)->patch.patch_hashes;
2819 for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
2820 patchhashes << "\nPatch-" << std::to_string(seen_patches) << "-" << hs->HashType() << "-Hash: " << hs->HashValue();
2821 ++seen_patches;
2822 }
2823 patchhashes << pkgAcqBaseIndex::Custom600Headers();
2824 return patchhashes.str();
2825 }
2826 /*}}}*/
2827 pkgAcqIndexMergeDiffs::~pkgAcqIndexMergeDiffs() {}
2828
2829 // AcqIndex::AcqIndex - Constructor /*{{{*/
2830 pkgAcqIndex::pkgAcqIndex(pkgAcquire * const Owner,
2831 pkgAcqMetaClearSig * const TransactionManager,
2832 IndexTarget const &Target)
2833 : pkgAcqBaseIndex(Owner, TransactionManager, Target), d(NULL), Stage(STAGE_DOWNLOAD),
2834 CompressionExtensions(Target.Option(IndexTarget::COMPRESSIONTYPES))
2835 {
2836 Init(Target.URI, Target.Description, Target.ShortDesc);
2837
2838 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
2839 std::clog << "New pkgIndex with TransactionManager "
2840 << TransactionManager << std::endl;
2841 }
2842 /*}}}*/
2843 // AcqIndex::Init - deferred Constructor /*{{{*/
2844 static void NextCompressionExtension(std::string &CurrentCompressionExtension, std::string &CompressionExtensions, bool const preview)
2845 {
2846 size_t const nextExt = CompressionExtensions.find(' ');
2847 if (nextExt == std::string::npos)
2848 {
2849 CurrentCompressionExtension = CompressionExtensions;
2850 if (preview == false)
2851 CompressionExtensions.clear();
2852 }
2853 else
2854 {
2855 CurrentCompressionExtension = CompressionExtensions.substr(0, nextExt);
2856 if (preview == false)
2857 CompressionExtensions = CompressionExtensions.substr(nextExt+1);
2858 }
2859 }
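// Worked example: with CompressionExtensions = "xz bz2 gz uncompressed" and
// preview == false, the first call sets CurrentCompressionExtension to "xz" and
// shrinks the list to "bz2 gz uncompressed"; with preview == true the list is
// left untouched so the next candidate is merely peeked at.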
2860 void pkgAcqIndex::Init(string const &URI, string const &URIDesc,
2861 string const &ShortDesc)
2862 {
2863 Stage = STAGE_DOWNLOAD;
2864
2865 DestFile = GetPartialFileNameFromURI(URI);
2866 NextCompressionExtension(CurrentCompressionExtension, CompressionExtensions, false);
2867
2868 if (CurrentCompressionExtension == "uncompressed")
2869 {
2870 Desc.URI = URI;
2871 }
2872 else if (CurrentCompressionExtension == "by-hash")
2873 {
2874 NextCompressionExtension(CurrentCompressionExtension, CompressionExtensions, true);
2875 if(unlikely(CurrentCompressionExtension.empty()))
2876 return;
2877 if (CurrentCompressionExtension != "uncompressed")
2878 {
2879 Desc.URI = URI + '.' + CurrentCompressionExtension;
2880 DestFile = DestFile + '.' + CurrentCompressionExtension;
2881 }
2882 else
2883 Desc.URI = URI;
2884
2885 HashStringList const Hashes = GetExpectedHashes();
2886 HashString const * const TargetHash = Hashes.find(NULL);
2887 if (unlikely(TargetHash == nullptr))
2888 return;
2889 std::string const ByHash = "/by-hash/" + TargetHash->HashType() + "/" + TargetHash->HashValue();
2890 size_t const trailing_slash = Desc.URI.find_last_of("/");
2891 if (unlikely(trailing_slash == std::string::npos))
2892 return;
2893 Desc.URI = Desc.URI.replace(
2894 trailing_slash,
2895 Desc.URI.substr(trailing_slash+1).size()+1,
2896 ByHash);
2897 }
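   // Illustrative by-hash rewrite (hypothetical URI and digest): a Desc.URI of
   //   http://deb.example.org/debian/dists/unstable/main/binary-amd64/Packages.xz
   // with a SHA256 target hash of aabbcc... becomes
   //   http://deb.example.org/debian/dists/unstable/main/binary-amd64/by-hash/SHA256/aabbcc...
   // i.e. the last path component is replaced by the by-hash path.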
2898 else if (unlikely(CurrentCompressionExtension.empty()))
2899 return;
2900 else
2901 {
2902 Desc.URI = URI + '.' + CurrentCompressionExtension;
2903 DestFile = DestFile + '.' + CurrentCompressionExtension;
2904 }
2905
2906 // store file size of the download to ensure the fetcher gives
2907 // accurate progress reporting
2908 FileSize = GetExpectedHashes().FileSize();
2909
2910 Desc.Description = URIDesc;
2911 Desc.Owner = this;
2912 Desc.ShortDesc = ShortDesc;
2913
2914 QueueURI(Desc);
2915 }
2916 /*}}}*/
2917 // AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/
2918 // ---------------------------------------------------------------------
2919 /* The only header we use is the last-modified header. */
2920 string pkgAcqIndex::Custom600Headers() const
2921 {
2922
2923 string msg = "\nIndex-File: true";
2924
2925 if (TransactionManager->LastMetaIndexParser == NULL)
2926 {
2927 std::string const Final = GetFinalFilename();
2928
2929 struct stat Buf;
2930 if (stat(Final.c_str(),&Buf) == 0)
2931 msg += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime, false);
2932 }
2933
2934 if(Target.IsOptional)
2935 msg += "\nFail-Ignore: true";
2936
2937 return msg;
2938 }
2939 /*}}}*/
2940 // AcqIndex::Failed - getting the indexfile failed /*{{{*/
2941 void pkgAcqIndex::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
2942 {
2943 pkgAcqBaseIndex::Failed(Message,Cnf);
2944
2945 if (UsedMirror.empty() == false && UsedMirror != "DIRECT" &&
2946 LookupTag(Message, "FailReason") == "HttpError404")
2947 {
2948 UsedMirror = "DIRECT";
2949 if (Desc.URI.find("/by-hash/") != std::string::npos)
2950 CompressionExtensions = "by-hash " + CompressionExtensions;
2951 else
2952 CompressionExtensions = CurrentCompressionExtension + ' ' + CompressionExtensions;
2953 Desc.Description = Target.Description;
2954 Init(Target.URI, Desc.Description, Desc.ShortDesc);
2955 Status = StatIdle;
2956 return;
2957 }
2958
2959 // authorisation failures will not be fixed by other compression types
2960 if (Status != StatAuthError)
2961 {
2962 if (CompressionExtensions.empty() == false)
2963 {
2964 Init(Target.URI, Desc.Description, Desc.ShortDesc);
2965 Status = StatIdle;
2966 return;
2967 }
2968 }
2969
2970 if(Target.IsOptional && GetExpectedHashes().empty() && Stage == STAGE_DOWNLOAD)
2971 Status = StatDone;
2972 else
2973 TransactionManager->AbortTransaction();
2974 }
2975 /*}}}*/
2976 // AcqIndex::Done - Finished a fetch /*{{{*/
2977 // ---------------------------------------------------------------------
2978 /* This goes through a number of states.. On the initial fetch the
2979 method could possibly return an alternate filename which points
2980 to the uncompressed version of the file. If this is so the file
2981 is copied into the partial directory. In all other cases the file
2982 is decompressed with a compressed uri. */
2983 void pkgAcqIndex::Done(string const &Message,
2984 HashStringList const &Hashes,
2985 pkgAcquire::MethodConfig const * const Cfg)
2986 {
2987 Item::Done(Message,Hashes,Cfg);
2988
2989 switch(Stage)
2990 {
2991 case STAGE_DOWNLOAD:
2992 StageDownloadDone(Message);
2993 break;
2994 case STAGE_DECOMPRESS_AND_VERIFY:
2995 StageDecompressDone();
2996 break;
2997 }
2998 }
2999 /*}}}*/
3000 // AcqIndex::StageDownloadDone - Queue for decompress and verify /*{{{*/
3001 void pkgAcqIndex::StageDownloadDone(string const &Message)
3002 {
3003 Local = true;
3004 Complete = true;
3005
3006 std::string const AltFilename = LookupTag(Message,"Alt-Filename");
3007 std::string Filename = LookupTag(Message,"Filename");
3008
3009 // we need to verify the file against the current Release file again
3010 // on an if-modified-since hit to avoid a stale attack against us
3011 if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
3012 {
3013 // link FinalFile into partial/ so that we check the hash again
3014 string const FinalFile = GetExistingFilename(GetFinalFileNameFromURI(Target.URI));
3015 if (symlink(FinalFile.c_str(), DestFile.c_str()) != 0)
3016 _error->WarningE("pkgAcqIndex::StageDownloadDone", "Symlinking final file %s back to %s failed", FinalFile.c_str(), DestFile.c_str());
3017 else
3018 {
3019 EraseFileName = DestFile;
3020 Filename = DestFile;
3021 }
3022 Stage = STAGE_DECOMPRESS_AND_VERIFY;
3023 Desc.URI = "store:" + Filename;
3024 QueueURI(Desc);
3025 SetActiveSubprocess(::URI(Desc.URI).Access);
3026 return;
3027 }
3028 // methods like file:// give us an alternative (uncompressed) file
3029 else if (Target.KeepCompressed == false && AltFilename.empty() == false)
3030 {
3031 Filename = AltFilename;
3032 EraseFileName.clear();
3033 }
3034 // Methods like e.g. "file:" will give us a (compressed) FileName that is
3035 // not the "DestFile" we set, in this case we uncompress from the local file
3036 else if (Filename != DestFile && RealFileExists(DestFile) == false)
3037 {
3038 // symlinking ensures that the filename can be used for compression detection,
3039 // which is e.g. needed for by-hash where the filename carries no compression extension
3040 if (symlink(Filename.c_str(),DestFile.c_str()) != 0)
3041 _error->WarningE("pkgAcqIndex::StageDownloadDone", "Symlinking file %s to %s failed", Filename.c_str(), DestFile.c_str());
3042 else
3043 {
3044 EraseFileName = DestFile;
3045 Filename = DestFile;
3046 }
3047 }
3048
3049 Stage = STAGE_DECOMPRESS_AND_VERIFY;
3050 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
3051 if (Filename != DestFile && flExtension(Filename) == flExtension(DestFile))
3052 Desc.URI = "copy:" + Filename;
3053 else
3054 Desc.URI = "store:" + Filename;
3055 if (DestFile == Filename)
3056 {
3057 if (CurrentCompressionExtension == "uncompressed")
3058 return StageDecompressDone();
3059 DestFile = "/dev/null";
3060 }
3061
3062 if (EraseFileName.empty() && Filename != AltFilename)
3063 EraseFileName = Filename;
3064
3065 // queue uri for the next stage
3066 QueueURI(Desc);
3067 SetActiveSubprocess(::URI(Desc.URI).Access);
3068 }
3069 /*}}}*/
3070 // AcqIndex::StageDecompressDone - Final verification /*{{{*/
3071 void pkgAcqIndex::StageDecompressDone()
3072 {
3073 if (DestFile == "/dev/null")
3074 DestFile = GetKeepCompressedFileName(GetPartialFileNameFromURI(Target.URI), Target);
3075
3076 // Done, queue for rename on transaction finished
3077 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
3078 }
3079 /*}}}*/
3080 pkgAcqIndex::~pkgAcqIndex() {}
3081
3082
3083 // AcqArchive::AcqArchive - Constructor /*{{{*/
3084 // ---------------------------------------------------------------------
3085 /* This just sets up the initial fetch environment and queues the first
3086 possibility */
3087 pkgAcqArchive::pkgAcqArchive(pkgAcquire * const Owner,pkgSourceList * const Sources,
3088 pkgRecords * const Recs,pkgCache::VerIterator const &Version,
3089 string &StoreFilename) :
3090 Item(Owner), d(NULL), LocalSource(false), Version(Version), Sources(Sources), Recs(Recs),
3091 StoreFilename(StoreFilename), Vf(Version.FileList()),
3092 Trusted(false)
3093 {
3094 Retries = _config->FindI("Acquire::Retries",0);
3095
3096 if (Version.Arch() == 0)
3097 {
3098 _error->Error(_("I wasn't able to locate a file for the %s package. "
3099 "This might mean you need to manually fix this package. "
3100 "(due to missing arch)"),
3101 Version.ParentPkg().FullName().c_str());
3102 return;
3103 }
3104
3105 /* We need to find a filename to determine the extension. We make the
3106 assumption here that all the available sources for this version share
3107 the same extension.. */
3108 // Skip not source sources, they do not have file fields.
3109 for (; Vf.end() == false; ++Vf)
3110 {
3111 if (Vf.File().Flagged(pkgCache::Flag::NotSource))
3112 continue;
3113 break;
3114 }
3115
3116 // Does not really matter here.. we are going to fail out below
3117 if (Vf.end() != true)
3118 {
3119 // If this fails to get a file name we will bomb out below.
3120 pkgRecords::Parser &Parse = Recs->Lookup(Vf);
3121 if (_error->PendingError() == true)
3122 return;
3123
3124 // Generate the final file name as: package_version_arch.foo
3125 StoreFilename = QuoteString(Version.ParentPkg().Name(),"_:") + '_' +
3126 QuoteString(Version.VerStr(),"_:") + '_' +
3127 QuoteString(Version.Arch(),"_:.") +
3128 "." + flExtension(Parse.FileName());
3129 }
3130
3131 // Check if we have at least one trusted source for the package. If so, switch
3132 // to "TrustedOnly" mode, but only if not in AllowUnauthenticated mode.
3133 bool const allowUnauth = _config->FindB("APT::Get::AllowUnauthenticated", false);
3134 bool const debugAuth = _config->FindB("Debug::pkgAcquire::Auth", false);
3135 bool seenUntrusted = false;
3136 for (pkgCache::VerFileIterator i = Version.FileList(); i.end() == false; ++i)
3137 {
3138 pkgIndexFile *Index;
3139 if (Sources->FindIndex(i.File(),Index) == false)
3140 continue;
3141
3142 if (debugAuth == true)
3143 std::cerr << "Checking index: " << Index->Describe()
3144 << "(Trusted=" << Index->IsTrusted() << ")" << std::endl;
3145
3146 if (Index->IsTrusted() == true)
3147 {
3148 Trusted = true;
3149 if (allowUnauth == false)
3150 break;
3151 }
3152 else
3153 seenUntrusted = true;
3154 }
3155
3156 // "allow-unauthenticated" restores apts old fetching behaviour
3157 // that means that e.g. unauthenticated file:// uris are higher
3158 // priority than authenticated http:// uris
3159 if (allowUnauth == true && seenUntrusted == true)
3160 Trusted = false;
3161
3162 // Select a source
3163 if (QueueNext() == false && _error->PendingError() == false)
3164 _error->Error(_("Can't find a source to download version '%s' of '%s'"),
3165 Version.VerStr(), Version.ParentPkg().FullName(false).c_str());
3166 }
3167 /*}}}*/
3168 // AcqArchive::QueueNext - Queue the next file source /*{{{*/
3169 // ---------------------------------------------------------------------
3170 /* This queues the next available file version for download. It checks if
3171 the archive is already available in the cache and stashes the expected
3172 hashes for checking later. */
3173 bool pkgAcqArchive::QueueNext()
3174 {
3175 for (; Vf.end() == false; ++Vf)
3176 {
3177 pkgCache::PkgFileIterator const PkgF = Vf.File();
3178 // Ignore sources flagged NotSource
3179 if (PkgF.Flagged(pkgCache::Flag::NotSource))
3180 continue;
3181
3182 // Try to cross match against the source list
3183 pkgIndexFile *Index;
3184 if (Sources->FindIndex(PkgF, Index) == false)
3185 continue;
3186 LocalSource = PkgF.Flagged(pkgCache::Flag::LocalSource);
3187
3188 // only try to get a trusted package from another source if that source
3189 // is also trusted
3190 if(Trusted && !Index->IsTrusted())
3191 continue;
3192
3193 // Grab the text package record
3194 pkgRecords::Parser &Parse = Recs->Lookup(Vf);
3195 if (_error->PendingError() == true)
3196 return false;
3197
3198 string PkgFile = Parse.FileName();
3199 ExpectedHashes = Parse.Hashes();
3200
3201 if (PkgFile.empty() == true)
3202 return _error->Error(_("The package index files are corrupted. No Filename: "
3203 "field for package %s."),
3204 Version.ParentPkg().Name());
3205
3206 Desc.URI = Index->ArchiveURI(PkgFile);
3207 Desc.Description = Index->ArchiveInfo(Version);
3208 Desc.Owner = this;
3209 Desc.ShortDesc = Version.ParentPkg().FullName(true);
3210
3211 // See if we already have the file. (Legacy filenames)
3212 FileSize = Version->Size;
3213 string FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(PkgFile);
3214 struct stat Buf;
3215 if (stat(FinalFile.c_str(),&Buf) == 0)
3216 {
3217 // Make sure the size matches
3218 if ((unsigned long long)Buf.st_size == Version->Size)
3219 {
3220 Complete = true;
3221 Local = true;
3222 Status = StatDone;
3223 StoreFilename = DestFile = FinalFile;
3224 return true;
3225 }
3226
3227 /* Hmm, we have a file but its size does not match; with the legacy
3228 naming scheme this usually means a collision with a different arch */
3229 RemoveFile("pkgAcqArchive::QueueNext", FinalFile);
3230 }
3231
3232 // Check it again using the new style output filenames
3233 FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename);
3234 if (stat(FinalFile.c_str(),&Buf) == 0)
3235 {
3236 // Make sure the size matches
3237 if ((unsigned long long)Buf.st_size == Version->Size)
3238 {
3239 Complete = true;
3240 Local = true;
3241 Status = StatDone;
3242 StoreFilename = DestFile = FinalFile;
3243 return true;
3244 }
3245
3246 /* Hmm, we have a file but its size does not match; this shouldn't
3247 happen. */
3248 RemoveFile("pkgAcqArchive::QueueNext", FinalFile);
3249 }
3250
3251 DestFile = _config->FindDir("Dir::Cache::Archives") + "partial/" + flNotDir(StoreFilename);
3252
3253 // Check the destination file
3254 if (stat(DestFile.c_str(),&Buf) == 0)
3255 {
3256 // Hmm, the partial file is too big, erase it
3257 if ((unsigned long long)Buf.st_size > Version->Size)
3258 RemoveFile("pkgAcqArchive::QueueNext", DestFile);
3259 else
3260 PartialSize = Buf.st_size;
3261 }
3262
3263 // Disables the download of archives: useful if no real installation follows,
3264 // e.g. if we are only interested in the proposed installation order
3265 if (_config->FindB("Debug::pkgAcqArchive::NoQueue", false) == true)
3266 {
3267 Complete = true;
3268 Local = true;
3269 Status = StatDone;
3270 StoreFilename = DestFile = FinalFile;
3271 return true;
3272 }
3273
3274 // Create the item
3275 Local = false;
3276 QueueURI(Desc);
3277
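// advance to the next version file before returning so a later
// Failed() -> QueueNext() call tries the next source instead of
// requeueing this one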
3278 ++Vf;
3279 return true;
3280 }
3281 return false;
3282 }
3283 /*}}}*/
3284 // AcqArchive::Done - Finished fetching /*{{{*/
3285 // ---------------------------------------------------------------------
3286 /* */
3287 void pkgAcqArchive::Done(string const &Message, HashStringList const &Hashes,
3288 pkgAcquire::MethodConfig const * const Cfg)
3289 {
3290 Item::Done(Message, Hashes, Cfg);
3291
3292 // Grab the output filename
3293 std::string const FileName = LookupTag(Message,"Filename");
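// the method reported a filename other than the one we asked for and nothing
// was written to DestFile: the archive is served from a local location
// (e.g. a file:// source), so use it in place instead of moving it into the cache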
3294 if (DestFile != FileName && RealFileExists(DestFile) == false)
3295 {
3296 StoreFilename = DestFile = FileName;
3297 Local = true;
3298 Complete = true;
3299 return;
3300 }
3301
3302 // Done, move it into position
3303 string const FinalFile = GetFinalFilename();
3304 Rename(DestFile,FinalFile);
3305 StoreFilename = DestFile = FinalFile;
3306 Complete = true;
3307 }
3308 /*}}}*/
3309 // AcqArchive::Failed - Failure handler /*{{{*/
3310 // ---------------------------------------------------------------------
3311 /* Here we try other sources */
3312 void pkgAcqArchive::Failed(string const &Message,pkgAcquire::MethodConfig const * const Cnf)
3313 {
3314 Item::Failed(Message,Cnf);
3315
3316 /* We don't really want to retry on failed media swaps; this prevents
3317 that. An interesting observation is that permanent failures are not
3318 recorded. */
3319 if (Cnf->Removable == true &&
3320 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
3321 {
3322 // Vf = Version.FileList();
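// exhaust the version file list so no further sources will be tried for this item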
3323 while (Vf.end() == false) ++Vf;
3324 StoreFilename = string();
3325 return;
3326 }
3327
3328 Status = StatIdle;
3329 if (QueueNext() == false)
3330 {
3331 // This is the retry counter
3332 if (Retries != 0 &&
3333 Cnf->LocalOnly == false &&
3334 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
3335 {
3336 Retries--;
3337 Vf = Version.FileList();
3338 if (QueueNext() == true)
3339 return;
3340 }
3341
3342 StoreFilename = string();
3343 Status = StatError;
3344 }
3345 }
3346 /*}}}*/
3347 APT_PURE bool pkgAcqArchive::IsTrusted() const /*{{{*/
3348 {
3349 return Trusted;
3350 }
3351 /*}}}*/
3352 void pkgAcqArchive::Finished() /*{{{*/
3353 {
3354 if (Status == pkgAcquire::Item::StatDone &&
3355 Complete == true)
3356 return;
3357 StoreFilename = string();
3358 }
3359 /*}}}*/
3360 std::string pkgAcqArchive::DescURI() const /*{{{*/
3361 {
3362 return Desc.URI;
3363 }
3364 /*}}}*/
3365 std::string pkgAcqArchive::ShortDesc() const /*{{{*/
3366 {
3367 return Desc.ShortDesc;
3368 }
3369 /*}}}*/
3370 pkgAcqArchive::~pkgAcqArchive() {}
3371
3372 // AcqChangelog::pkgAcqChangelog - Constructors /*{{{*/
3373 class pkgAcqChangelog::Private
3374 {
3375 public:
3376 std::string FinalFile;
3377 };
3378 pkgAcqChangelog::pkgAcqChangelog(pkgAcquire * const Owner, pkgCache::VerIterator const &Ver,
3379 std::string const &DestDir, std::string const &DestFilename) :
3380 pkgAcquire::Item(Owner), d(new pkgAcqChangelog::Private()), SrcName(Ver.SourcePkgName()), SrcVersion(Ver.SourceVerStr())
3381 {
3382 Desc.URI = URI(Ver);
3383 Init(DestDir, DestFilename);
3384 }
3385 // some parameters are char* here as they likely come from char* interfaces, which can also return NULL
3386 pkgAcqChangelog::pkgAcqChangelog(pkgAcquire * const Owner, pkgCache::RlsFileIterator const &RlsFile,
3387 char const * const Component, char const * const SrcName, char const * const SrcVersion,
3388 const string &DestDir, const string &DestFilename) :
3389 pkgAcquire::Item(Owner), d(new pkgAcqChangelog::Private()), SrcName(SrcName), SrcVersion(SrcVersion)
3390 {
3391 Desc.URI = URI(RlsFile, Component, SrcName, SrcVersion);
3392 Init(DestDir, DestFilename);
3393 }
3394 pkgAcqChangelog::pkgAcqChangelog(pkgAcquire * const Owner,
3395 std::string const &URI, char const * const SrcName, char const * const SrcVersion,
3396 const string &DestDir, const string &DestFilename) :
3397 pkgAcquire::Item(Owner), d(new pkgAcqChangelog::Private()), SrcName(SrcName), SrcVersion(SrcVersion)
3398 {
3399 Desc.URI = URI;
3400 Init(DestDir, DestFilename);
3401 }
3402 void pkgAcqChangelog::Init(std::string const &DestDir, std::string const &DestFilename)
3403 {
3404 if (Desc.URI.empty())
3405 {
3406 Status = StatError;
3407 // TRANSLATOR: %s=%s is sourcename=sourceversion, e.g. apt=1.1
3408 strprintf(ErrorText, _("Changelog unavailable for %s=%s"), SrcName.c_str(), SrcVersion.c_str());
3409 // Let the error message print something sensible rather than "Failed to fetch /"
3410 if (DestFilename.empty())
3411 DestFile = SrcName + ".changelog";
3412 else
3413 DestFile = DestFilename;
3414 Desc.URI = "changelog:/" + DestFile;
3415 return;
3416 }
3417
3418 std::string DestFileName;
3419 if (DestFilename.empty())
3420 DestFileName = flCombine(DestFile, SrcName + ".changelog");
3421 else
3422 DestFileName = flCombine(DestFile, DestFilename);
3423
3424 std::string const SandboxUser = _config->Find("APT::Sandbox::User");
3425 std::string const systemTemp = GetTempDir(SandboxUser);
3426 char tmpname[1000];
3427 snprintf(tmpname, sizeof(tmpname), "%s/apt-changelog-XXXXXX", systemTemp.c_str());
3428 if (NULL == mkdtemp(tmpname))
3429 {
3430 _error->Errno("mkdtemp", "mkdtemp failed in changelog acquire of %s %s", SrcName.c_str(), SrcVersion.c_str());
3431 Status = StatError;
3432 return;
3433 }
3434 TemporaryDirectory = tmpname;
3435
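// hand the temporary directory over to the sandbox user so the unprivileged
// acquire method is able to write the changelog into it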
3436 ChangeOwnerAndPermissionOfFile("Item::QueueURI", TemporaryDirectory.c_str(),
3437 SandboxUser.c_str(), "root", 0700);
3438
3439 DestFile = flCombine(TemporaryDirectory, DestFileName);
3440 if (DestDir.empty() == false)
3441 {
3442 d->FinalFile = flCombine(DestDir, DestFileName);
3443 if (RealFileExists(d->FinalFile))
3444 {
3445 FileFd file1, file2;
3446 if (file1.Open(DestFile, FileFd::WriteOnly | FileFd::Create | FileFd::Exclusive) &&
3447 file2.Open(d->FinalFile, FileFd::ReadOnly) && CopyFile(file2, file1))
3448 {
3449 struct timeval times[2];
3450 times[0].tv_sec = times[1].tv_sec = file2.ModificationTime();
3451 times[0].tv_usec = times[1].tv_usec = 0;
3452 utimes(DestFile.c_str(), times);
3453 }
3454 }
3455 }
3456
3457 Desc.ShortDesc = "Changelog";
3458 strprintf(Desc.Description, "%s %s %s Changelog", URI::SiteOnly(Desc.URI).c_str(), SrcName.c_str(), SrcVersion.c_str());
3459 Desc.Owner = this;
3460 QueueURI(Desc);
3461 }
3462 /*}}}*/
3463 std::string pkgAcqChangelog::URI(pkgCache::VerIterator const &Ver) /*{{{*/
3464 {
3465 std::string const confOnline = "Acquire::Changelogs::AlwaysOnline";
3466 bool AlwaysOnline = _config->FindB(confOnline, false);
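// besides the global switch, single origins can be forced online via
// Acquire::Changelogs::AlwaysOnline::Origin::<origin> (checked in the loop below)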
3467 if (AlwaysOnline == false)
3468 for (pkgCache::VerFileIterator VF = Ver.FileList(); VF.end() == false; ++VF)
3469 {
3470 pkgCache::PkgFileIterator const PF = VF.File();
3471 if (PF.Flagged(pkgCache::Flag::NotSource) || PF->Release == 0)
3472 continue;
3473 pkgCache::RlsFileIterator const RF = PF.ReleaseFile();
3474 if (RF->Origin != 0 && _config->FindB(confOnline + "::Origin::" + RF.Origin(), false))
3475 {
3476 AlwaysOnline = true;
3477 break;
3478 }
3479 }
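// if the requested version is the installed one we can serve the changelog the
// package itself shipped under /usr/share/doc instead of going online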
3480 if (AlwaysOnline == false)
3481 {
3482 pkgCache::PkgIterator const Pkg = Ver.ParentPkg();
3483 if (Pkg->CurrentVer != 0 && Pkg.CurrentVer() == Ver)
3484 {
3485 std::string const basename = std::string("/usr/share/doc/") + Pkg.Name() + "/changelog";
3486 std::string const debianname = basename + ".Debian";
3487 if (FileExists(debianname))
3488 return "copy://" + debianname;
3489 else if (FileExists(debianname + ".gz"))
3490 return "gzip://" + debianname + ".gz";
3491 else if (FileExists(basename))
3492 return "copy://" + basename;
3493 else if (FileExists(basename + ".gz"))
3494 return "gzip://" + basename + ".gz";
3495 }
3496 }
3497
3498 char const * const SrcName = Ver.SourcePkgName();
3499 char const * const SrcVersion = Ver.SourceVerStr();
3500 // find the first source for this version which promises a changelog
3501 for (pkgCache::VerFileIterator VF = Ver.FileList(); VF.end() == false; ++VF)
3502 {
3503 pkgCache::PkgFileIterator const PF = VF.File();
3504 if (PF.Flagged(pkgCache::Flag::NotSource) || PF->Release == 0)
3505 continue;
3506 pkgCache::RlsFileIterator const RF = PF.ReleaseFile();
3507 std::string const uri = URI(RF, PF.Component(), SrcName, SrcVersion);
3508 if (uri.empty())
3509 continue;
3510 return uri;
3511 }
3512 return "";
3513 }
3514 std::string pkgAcqChangelog::URITemplate(pkgCache::RlsFileIterator const &Rls)
3515 {
3516 if (Rls.end() == true || (Rls->Label == 0 && Rls->Origin == 0))
3517 return "";
3518 std::string const serverConfig = "Acquire::Changelogs::URI";
3519 std::string server;
3520 #define APT_EMPTY_SERVER \
3521 if (server.empty() == false) \
3522 { \
3523 if (server != "no") \
3524 return server; \
3525 return ""; \
3526 }
3527 #define APT_CHECK_SERVER(X, Y) \
3528 if (Rls->X != 0) \
3529 { \
3530 std::string const specialServerConfig = serverConfig + "::" + Y + #X + "::" + Rls.X(); \
3531 server = _config->Find(specialServerConfig); \
3532 APT_EMPTY_SERVER \
3533 }
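// APT_CHECK_SERVER looks up keys of the form
// Acquire::Changelogs::URI[::Override]::Label::<label> or ...::Origin::<origin>
// (e.g. Acquire::Changelogs::URI::Override::Origin::Debian); a value of "no"
// disables changelog fetching for that source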
3534 // this way e.g. Debian-Security can fall back to Debian
3535 APT_CHECK_SERVER(Label, "Override::")
3536 APT_CHECK_SERVER(Origin, "Override::")
3537
3538 if (RealFileExists(Rls.FileName()))
3539 {
3540 _error->PushToStack();
3541 FileFd rf;
3542 /* This can be costly. A caller wanting to get millions of URIs might
3543 want to do this on its own once and use Override settings.
3544 We don't cache the result here as Origin/Label are not as unique as they
3545 should be, so caching could produce request-order-dependent anomalies */
3546 if (OpenMaybeClearSignedFile(Rls.FileName(), rf) == true)
3547 {
3548 pkgTagFile TagFile(&rf, rf.Size());
3549 pkgTagSection Section;
3550 if (TagFile.Step(Section) == true)
3551 server = Section.FindS("Changelogs");
3552 }
3553 _error->RevertToStack();
3554 APT_EMPTY_SERVER
3555 }
3556
3557 APT_CHECK_SERVER(Label, "")
3558 APT_CHECK_SERVER(Origin, "")
3559 #undef APT_CHECK_SERVER
3560 #undef APT_EMPTY_SERVER
3561 return "";
3562 }
3563 std::string pkgAcqChangelog::URI(pkgCache::RlsFileIterator const &Rls,
3564 char const * const Component, char const * const SrcName,
3565 char const * const SrcVersion)
3566 {
3567 return URI(URITemplate(Rls), Component, SrcName, SrcVersion);
3568 }
3569 std::string pkgAcqChangelog::URI(std::string const &Template,
3570 char const * const Component, char const * const SrcName,
3571 char const * const SrcVersion)
3572 {
3573 if (Template.find("@CHANGEPATH@") == std::string::npos)
3574 return "";
3575
3576 // the path is: COMPONENT/SRC/SRCNAME/SRCNAME_SRCVER, e.g. main/a/apt/apt_1.1 or contrib/liba/libapt/libapt_2.0
3577 std::string Src = SrcName;
3578 std::string path = APT::String::Startswith(SrcName, "lib") ? Src.substr(0, 4) : Src.substr(0,1);
3579 path.append("/").append(Src).append("/");
3580 path.append(Src).append("_").append(StripEpoch(SrcVersion));
3581 // we omit component for releases without one (= flat-style repositories)
3582 if (Component != NULL && strlen(Component) != 0)
3583 path = std::string(Component) + "/" + path;
3584
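// a hypothetical template like http://changelogs.example.org/@CHANGEPATH@_changelog
// would thus expand to http://changelogs.example.org/main/a/apt/apt_1.1_changelog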
3585 return SubstVar(Template, "@CHANGEPATH@", path);
3586 }
3587 /*}}}*/
3588 // AcqChangelog::Failed - Failure handler /*{{{*/
3589 void pkgAcqChangelog::Failed(string const &Message, pkgAcquire::MethodConfig const * const Cnf)
3590 {
3591 Item::Failed(Message,Cnf);
3592
3593 std::string errText;
3594 // TRANSLATOR: %s=%s is sourcename=sourceversion, e.g. apt=1.1
3595 strprintf(errText, _("Changelog unavailable for %s=%s"), SrcName.c_str(), SrcVersion.c_str());
3596
3597 // Error is probably something techy like 404 Not Found
3598 if (ErrorText.empty())
3599 ErrorText = errText;
3600 else
3601 ErrorText = errText + " (" + ErrorText + ")";
3602 }
3603 /*}}}*/
3604 // AcqChangelog::Done - Item downloaded OK /*{{{*/
3605 void pkgAcqChangelog::Done(string const &Message,HashStringList const &CalcHashes,
3606 pkgAcquire::MethodConfig const * const Cnf)
3607 {
3608 Item::Done(Message,CalcHashes,Cnf);
3609 if (d->FinalFile.empty() == false)
3610 {
3611 if (RemoveFile("pkgAcqChangelog::Done", d->FinalFile) == false ||
3612 Rename(DestFile, d->FinalFile) == false)
3613 Status = StatError;
3614 }
3615
3616 Complete = true;
3617 }
3618 /*}}}*/
3619 pkgAcqChangelog::~pkgAcqChangelog() /*{{{*/
3620 {
3621 if (TemporaryDirectory.empty() == false)
3622 {
3623 RemoveFile("~pkgAcqChangelog", DestFile);
3624 rmdir(TemporaryDirectory.c_str());
3625 }
3626 delete d;
3627 }
3628 /*}}}*/
3629
3630 // AcqFile::pkgAcqFile - Constructor /*{{{*/
3631 pkgAcqFile::pkgAcqFile(pkgAcquire * const Owner,string const &URI, HashStringList const &Hashes,
3632 unsigned long long const Size,string const &Dsc,string const &ShortDesc,
3633 const string &DestDir, const string &DestFilename,
3634 bool const IsIndexFile) :
3635 Item(Owner), d(NULL), IsIndexFile(IsIndexFile), ExpectedHashes(Hashes)
3636 {
3637 Retries = _config->FindI("Acquire::Retries",0);
3638
3639 if(!DestFilename.empty())
3640 DestFile = DestFilename;
3641 else if(!DestDir.empty())
3642 DestFile = DestDir + "/" + flNotDir(URI);
3643 else
3644 DestFile = flNotDir(URI);
3645
3646 // Create the item
3647 Desc.URI = URI;
3648 Desc.Description = Dsc;
3649 Desc.Owner = this;
3650
3651 // Set the short description to the archive component
3652 Desc.ShortDesc = ShortDesc;
3653
3654 // Get the transfer sizes
3655 FileSize = Size;
3656 struct stat Buf;
3657 if (stat(DestFile.c_str(),&Buf) == 0)
3658 {
3659 // Hmm, the partial file is too big, erase it
3660 if ((Size > 0) && (unsigned long long)Buf.st_size > Size)
3661 RemoveFile("pkgAcqFile", DestFile);
3662 else
3663 PartialSize = Buf.st_size;
3664 }
3665
3666 QueueURI(Desc);
3667 }
3668 /*}}}*/
3669 // AcqFile::Done - Item downloaded OK /*{{{*/
3670 void pkgAcqFile::Done(string const &Message,HashStringList const &CalcHashes,
3671 pkgAcquire::MethodConfig const * const Cnf)
3672 {
3673 Item::Done(Message,CalcHashes,Cnf);
3674
3675 std::string const FileName = LookupTag(Message,"Filename");
3676 Complete = true;
3677
3678 // The file's timestamp matches
3679 if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
3680 return;
3681
3682 // We have to copy it into place
3683 if (RealFileExists(DestFile.c_str()) == false)
3684 {
3685 Local = true;
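// the data is still sitting at the method-reported location (e.g. a file://
// source): either copy it over (symlinks disabled or removable media) or
// symlink it into place below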
3686 if (_config->FindB("Acquire::Source-Symlinks",true) == false ||
3687 Cnf->Removable == true)
3688 {
3689 Desc.URI = "copy:" + FileName;
3690 QueueURI(Desc);
3691 return;
3692 }
3693
3694 // Erase the file if it is a symlink so we can overwrite it
3695 struct stat St;
3696 if (lstat(DestFile.c_str(),&St) == 0)
3697 {
3698 if (S_ISLNK(St.st_mode) != 0)
3699 RemoveFile("pkgAcqFile::Done", DestFile);
3700 }
3701
3702 // Symlink the file
3703 if (symlink(FileName.c_str(),DestFile.c_str()) != 0)
3704 {
3705 _error->PushToStack();
3706 _error->Errno("pkgAcqFile::Done", "Symlinking file %s failed", DestFile.c_str());
3707 std::stringstream msg;
3708 _error->DumpErrors(msg, GlobalError::DEBUG, false);
3709 _error->RevertToStack();
3710 ErrorText = msg.str();
3711 Status = StatError;
3712 Complete = false;
3713 }
3714 }
3715 }
3716 /*}}}*/
3717 // AcqFile::Failed - Failure handler /*{{{*/
3718 // ---------------------------------------------------------------------
3719 /* Here we try other sources */
3720 void pkgAcqFile::Failed(string const &Message, pkgAcquire::MethodConfig const * const Cnf)
3721 {
3722 Item::Failed(Message,Cnf);
3723
3724 // This is the retry counter
3725 if (Retries != 0 &&
3726 Cnf->LocalOnly == false &&
3727 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
3728 {
3729 --Retries;
3730 QueueURI(Desc);
3731 Status = StatIdle;
3732 return;
3733 }
3734
3735 }
3736 /*}}}*/
3737 string pkgAcqFile::Custom600Headers() const /*{{{*/
3738 {
3739 if (IsIndexFile)
3740 return "\nIndex-File: true";
3741 return "";
3742 }
3743 /*}}}*/
3744 pkgAcqFile::~pkgAcqFile() {}