1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 // $Id: acquire-item.cc,v 1.46.2.9 2004/01/16 18:51:11 mdz Exp $
4 /* ######################################################################
5
6 Acquire Item - Item to acquire
7
8 Each item can download to exactly one file at a time. This means you
9    cannot create an item that fetches two URIs to two files at the same
10 time. The pkgAcqIndex class creates a second class upon instantiation
11 to fetch the other index files because of this.
12
13 ##################################################################### */
14 /*}}}*/
15 // Include Files /*{{{*/
16 #include <config.h>
17
18 #include <apt-pkg/acquire-item.h>
19 #include <apt-pkg/configuration.h>
20 #include <apt-pkg/aptconfiguration.h>
21 #include <apt-pkg/sourcelist.h>
22 #include <apt-pkg/error.h>
23 #include <apt-pkg/strutl.h>
24 #include <apt-pkg/fileutl.h>
25 #include <apt-pkg/sha1.h>
26 #include <apt-pkg/tagfile.h>
27 #include <apt-pkg/indexrecords.h>
28 #include <apt-pkg/acquire.h>
29 #include <apt-pkg/hashes.h>
30 #include <apt-pkg/indexfile.h>
31 #include <apt-pkg/pkgcache.h>
32 #include <apt-pkg/cacheiterators.h>
33 #include <apt-pkg/pkgrecords.h>
34
35 #include <stddef.h>
36 #include <stdlib.h>
37 #include <string.h>
38 #include <iostream>
39 #include <vector>
40 #include <sys/stat.h>
41 #include <unistd.h>
42 #include <errno.h>
43 #include <string>
44 #include <sstream>
45 #include <stdio.h>
46 #include <ctime>
47
48 #include <apti18n.h>
49 /*}}}*/
50
51 using namespace std;
52
53 static void printHashSumComparision(std::string const &URI, HashStringList const &Expected, HashStringList const &Actual) /*{{{*/
54 {
55 if (_config->FindB("Debug::Acquire::HashSumMismatch", false) == false)
56 return;
57 std::cerr << std::endl << URI << ":" << std::endl << " Expected Hash: " << std::endl;
58 for (HashStringList::const_iterator hs = Expected.begin(); hs != Expected.end(); ++hs)
59 std::cerr << "\t- " << hs->toStr() << std::endl;
60 std::cerr << " Actual Hash: " << std::endl;
61 for (HashStringList::const_iterator hs = Actual.begin(); hs != Actual.end(); ++hs)
62 std::cerr << "\t- " << hs->toStr() << std::endl;
63 }
64 /*}}}*/
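// Usage note: the comparison output above only appears when the debug option it
// checks is enabled, e.g. (illustrative invocation):
//   apt-get update -o Debug::Acquire::HashSumMismatch=true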
65 static std::string GetPartialFileName(std::string const &file) /*{{{*/
66 {
67 std::string DestFile = _config->FindDir("Dir::State::lists") + "partial/";
68 DestFile += file;
69 return DestFile;
70 }
71 /*}}}*/
72 static std::string GetPartialFileNameFromURI(std::string const &uri) /*{{{*/
73 {
74 return GetPartialFileName(URItoFileName(uri));
75 }
76 /*}}}*/
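// Illustration (hypothetical URI): GetPartialFileNameFromURI("http://deb.example.org/dists/stable/Release")
// returns roughly "<Dir::State::lists>/partial/deb.example.org_dists_stable_Release";
// URItoFileName() flattens the URI and the result lives in the partial/ staging area.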
77 static std::string GetCompressedFileName(std::string const &URI, std::string const &Name, std::string const &Ext) /*{{{*/
78 {
79 if (Ext.empty() || Ext == "uncompressed")
80 return Name;
81
82 // do not reverify cdrom sources as apt-cdrom may rewrite the Packages
83 // file when it's doing the indexcopy
84 if (URI.substr(0,6) == "cdrom:")
85 return Name;
86
87 // adjust DestFile if it's compressed on disk
88 if (_config->FindB("Acquire::GzipIndexes",false) == true)
89 return Name + '.' + Ext;
90 return Name;
91 }
92 /*}}}*/
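// In short: with Acquire::GzipIndexes enabled a compressed index keeps its
// extension on disk (e.g. Packages.xz); otherwise (cdrom: sources, uncompressed
// targets, or the option disabled) the plain name is used, per the branches above.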
93 static bool AllowInsecureRepositories(indexRecords const * const MetaIndexParser, pkgAcqMetaBase * const TransactionManager, pkgAcquire::Item * const I) /*{{{*/
94 {
95 if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true)
96 return true;
97
98 _error->Error(_("Use --allow-insecure-repositories to force the update"));
99 TransactionManager->AbortTransaction();
100 I->Status = pkgAcquire::Item::StatError;
101 return false;
102 }
103 /*}}}*/
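// A repository passes this check either because it is marked always-trusted or
// because the user opted in, e.g. (illustrative command line):
//   apt-get update -o Acquire::AllowInsecureRepositories=true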
104
105
106 // Acquire::Item::Item - Constructor /*{{{*/
107 #if __GNUC__ >= 4
108 #pragma GCC diagnostic push
109 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
110 #endif
111 pkgAcquire::Item::Item(pkgAcquire *Owner,
112 HashStringList const &ExpectedHashes,
113 pkgAcqMetaBase *TransactionManager)
114 : Owner(Owner), FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false),
115 Local(false), QueueCounter(0), TransactionManager(TransactionManager),
116 ExpectedAdditionalItems(0), ExpectedHashes(ExpectedHashes)
117 {
118 Owner->Add(this);
119 Status = StatIdle;
120 if(TransactionManager != NULL)
121 TransactionManager->Add(this);
122 }
123 #if __GNUC__ >= 4
124 #pragma GCC diagnostic pop
125 #endif
126 /*}}}*/
127 // Acquire::Item::~Item - Destructor /*{{{*/
128 // ---------------------------------------------------------------------
129 /* */
130 pkgAcquire::Item::~Item()
131 {
132 Owner->Remove(this);
133 }
134 /*}}}*/
135 // Acquire::Item::Failed - Item failed to download /*{{{*/
136 // ---------------------------------------------------------------------
137 /* We return to an idle state if there are still other queues that could
138 fetch this object */
139 void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
140 {
141 if(ErrorText.empty())
142 ErrorText = LookupTag(Message,"Message");
143 UsedMirror = LookupTag(Message,"UsedMirror");
144 if (QueueCounter <= 1)
145 {
146       /* This indicates that the file is not available right now but might
147          be available sometime later. If we do a retry cycle then this should be
148          retried [CDROMs] */
149 if (Cnf != NULL && Cnf->LocalOnly == true &&
150 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
151 {
152 Status = StatIdle;
153 Dequeue();
154 return;
155 }
156
157 Status = StatError;
158 Complete = false;
159 Dequeue();
160 }
161 else
162 Status = StatIdle;
163
164 // check fail reason
165 string const FailReason = LookupTag(Message, "FailReason");
166 if(FailReason == "MaximumSizeExceeded")
167 RenameOnError(MaximumSizeExceeded);
168
169 // report mirror failure back to LP if we actually use a mirror
170 if(FailReason.size() != 0)
171 ReportMirrorFailure(FailReason);
172 else
173 ReportMirrorFailure(ErrorText);
174 }
175 /*}}}*/
176 // Acquire::Item::Start - Item has begun to download /*{{{*/
177 // ---------------------------------------------------------------------
178 /* Stash status and the file size. Note that setting Complete means
179    sub-phases of the acquire process such as decompression are operating */
180 void pkgAcquire::Item::Start(string /*Message*/,unsigned long long Size)
181 {
182 Status = StatFetching;
183 ErrorText.clear();
184 if (FileSize == 0 && Complete == false)
185 FileSize = Size;
186 }
187 /*}}}*/
188 // Acquire::Item::Done - Item downloaded OK /*{{{*/
189 // ---------------------------------------------------------------------
190 /* */
191 void pkgAcquire::Item::Done(string Message,unsigned long long Size,HashStringList const &/*Hash*/,
192 pkgAcquire::MethodConfig * /*Cnf*/)
193 {
194 // We just downloaded something..
195 string FileName = LookupTag(Message,"Filename");
196 UsedMirror = LookupTag(Message,"UsedMirror");
197 if (Complete == false && !Local && FileName == DestFile)
198 {
199 if (Owner->Log != 0)
200 Owner->Log->Fetched(Size,atoi(LookupTag(Message,"Resume-Point","0").c_str()));
201 }
202
203 if (FileSize == 0)
204 FileSize= Size;
205 Status = StatDone;
206 ErrorText = string();
207 Owner->Dequeue(this);
208 }
209 /*}}}*/
210 // Acquire::Item::Rename - Rename a file /*{{{*/
211 // ---------------------------------------------------------------------
212 /* This helper function is used by a lot of item methods as their final
213 step */
214 bool pkgAcquire::Item::Rename(string From,string To)
215 {
216 if (rename(From.c_str(),To.c_str()) == 0)
217 return true;
218
219 std::string S;
220 strprintf(S, _("rename failed, %s (%s -> %s)."), strerror(errno),
221 From.c_str(),To.c_str());
222 Status = StatError;
223 ErrorText += S;
224 return false;
225 }
226 /*}}}*/
227 void pkgAcquire::Item::QueueURI(ItemDesc &Item) /*{{{*/
228 {
229 Owner->Enqueue(Item);
230 }
231 /*}}}*/
232 void pkgAcquire::Item::Dequeue() /*{{{*/
233 {
234 Owner->Dequeue(this);
235 }
236 /*}}}*/
237 bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/
238 {
239 if (RealFileExists(DestFile))
240 Rename(DestFile, DestFile + ".FAILED");
241
242 switch (error)
243 {
244 case HashSumMismatch:
245 ErrorText = _("Hash Sum mismatch");
246 Status = StatAuthError;
247 ReportMirrorFailure("HashChecksumFailure");
248 break;
249 case SizeMismatch:
250 ErrorText = _("Size mismatch");
251 Status = StatAuthError;
252 ReportMirrorFailure("SizeFailure");
253 break;
254 case InvalidFormat:
255 ErrorText = _("Invalid file format");
256 Status = StatError;
257          // do not report it, as usually it's not the mirror's fault but a portal/proxy
258 break;
259 case SignatureError:
260 ErrorText = _("Signature error");
261 Status = StatError;
262 break;
263 case NotClearsigned:
264 ErrorText = _("Does not start with a cleartext signature");
265 Status = StatError;
266 break;
267 case MaximumSizeExceeded:
268 // the method is expected to report a good error for this
269 Status = StatError;
270 break;
271 }
272 return false;
273 }
274 /*}}}*/
275 void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/
276 {
277 ActiveSubprocess = subprocess;
278 #if __GNUC__ >= 4
279 #pragma GCC diagnostic push
280 #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
281 #endif
282 Mode = ActiveSubprocess.c_str();
283 #if __GNUC__ >= 4
284 #pragma GCC diagnostic pop
285 #endif
286 }
287 /*}}}*/
288 // Acquire::Item::ReportMirrorFailure /*{{{*/
289 // ---------------------------------------------------------------------
290 void pkgAcquire::Item::ReportMirrorFailure(string FailCode)
291 {
292 // we only act if a mirror was used at all
293 if(UsedMirror.empty())
294 return;
295 #if 0
296 std::cerr << "\nReportMirrorFailure: "
297 << UsedMirror
298 << " Uri: " << DescURI()
299 << " FailCode: "
300 << FailCode << std::endl;
301 #endif
302 string report = _config->Find("Methods::Mirror::ProblemReporting",
303 "/usr/lib/apt/apt-report-mirror-failure");
304 if(!FileExists(report))
305 return;
306
307 std::vector<char const*> Args;
308 Args.push_back(report.c_str());
309 Args.push_back(UsedMirror.c_str());
310 Args.push_back(DescURI().c_str());
311 Args.push_back(FailCode.c_str());
312 Args.push_back(NULL);
313
314 pid_t pid = ExecFork();
315 if(pid < 0)
316 {
317 _error->Error("ReportMirrorFailure Fork failed");
318 return;
319 }
320 else if(pid == 0)
321 {
322 execvp(Args[0], (char**)Args.data());
323 std::cerr << "Could not exec " << Args[0] << std::endl;
324 _exit(100);
325 }
326 if(!ExecWait(pid, "report-mirror-failure"))
327 {
328 _error->Warning("Couldn't report problem to '%s'",
329 _config->Find("Methods::Mirror::ProblemReporting").c_str());
330 }
331 }
332 /*}}}*/
333 // AcqDiffIndex::AcqDiffIndex - Constructor /*{{{*/
334 // ---------------------------------------------------------------------
335 /* Get the DiffIndex file first and see if there are patches available.
336  * If so, create a pkgAcqIndexDiffs fetcher that will get and apply the
337  * patches. If anything goes wrong in that process, it will fall back to
338  * the original Packages file
339 */
340 pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner,
341 pkgAcqMetaBase *TransactionManager,
342 IndexTarget const * const Target,
343 HashStringList const &ExpectedHashes,
344 indexRecords *MetaIndexParser)
345 : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes,
346 MetaIndexParser), PackagesFileReadyInPartial(false)
347 {
348
349 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
350
351 RealURI = Target->URI;
352 Desc.Owner = this;
353 Desc.Description = Target->Description + ".diff/Index";
354 Desc.ShortDesc = Target->ShortDesc;
355 Desc.URI = Target->URI + ".diff/Index";
356
357 DestFile = GetPartialFileNameFromURI(Desc.URI);
358
359 if(Debug)
360 std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl;
361
362 // look for the current package file
363 CurrentPackagesFile = _config->FindDir("Dir::State::lists");
364 CurrentPackagesFile += URItoFileName(RealURI);
365
366 // FIXME: this file:/ check is a hack to prevent fetching
367    // from local sources. This is really silly, and
368 // should be fixed cleanly as soon as possible
369 if(!FileExists(CurrentPackagesFile) ||
370 Desc.URI.substr(0,strlen("file:/")) == "file:/")
371 {
372 // we don't have a pkg file or we don't want to queue
373 Failed("No index file, local or canceld by user", NULL);
374 return;
375 }
376
377 if(Debug)
378 std::clog << "pkgAcqDiffIndex::pkgAcqDiffIndex(): "
379 << CurrentPackagesFile << std::endl;
380
381 QueueURI(Desc);
382
383 }
384 /*}}}*/
385 // AcqDiffIndex::Custom600Headers - Insert custom request headers	/*{{{*/
386 // ---------------------------------------------------------------------
387 /* The only header we use is the last-modified header. */
388 string pkgAcqDiffIndex::Custom600Headers() const
389 {
390 string Final = _config->FindDir("Dir::State::lists");
391 Final += URItoFileName(Desc.URI);
392
393 if(Debug)
394 std::clog << "Custom600Header-IMS: " << Final << std::endl;
395
396 struct stat Buf;
397 if (stat(Final.c_str(),&Buf) != 0)
398 return "\nIndex-File: true";
399
400 return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
401 }
402 /*}}}*/
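// The returned header block looks like the following (date purely illustrative):
//   Index-File: true
//   Last-Modified: Thu, 01 Jan 2015 12:00:00 GMT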
403 bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/
404 {
405 // failing here is fine: our caller will take care of trying to
406 // get the complete file if patching fails
407 if(Debug)
408       std::clog << "pkgAcqDiffIndex::ParseDiffIndex() " << IndexDiffFile
409 << std::endl;
410
411 FileFd Fd(IndexDiffFile,FileFd::ReadOnly);
412 pkgTagFile TF(&Fd);
413 if (_error->PendingError() == true)
414 return false;
415
416 pkgTagSection Tags;
417 if(unlikely(TF.Step(Tags) == false))
418 return false;
419
420 HashStringList ServerHashes;
421 unsigned long long ServerSize = 0;
422
423 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
424 {
425 std::string tagname = *type;
426 tagname.append("-Current");
427 std::string const tmp = Tags.FindS(tagname.c_str());
428 if (tmp.empty() == true)
429 continue;
430
431 string hash;
432 unsigned long long size;
433 std::stringstream ss(tmp);
434 ss >> hash >> size;
435 if (unlikely(hash.empty() == true))
436 continue;
437 if (unlikely(ServerSize != 0 && ServerSize != size))
438 continue;
439 ServerHashes.push_back(HashString(*type, hash));
440 ServerSize = size;
441 }
442
443 if (ServerHashes.usable() == false)
444 {
445 if (Debug == true)
446 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl;
447 return false;
448 }
449
450 if (ServerHashes != HashSums())
451 {
452 if (Debug == true)
453 {
454 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl;
455 printHashSumComparision(CurrentPackagesFile, ServerHashes, HashSums());
456 }
457 return false;
458 }
459
460 if (ServerHashes.VerifyFile(CurrentPackagesFile) == true)
461 {
462       // we have the same hashes as the server so we are done here
463 if(Debug)
464 std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl;
465
466 // list cleanup needs to know that this file as well as the already
467 // present index is ours, so we create an empty diff to save it for us
468 new pkgAcqIndexDiffs(Owner, TransactionManager, Target,
469 ExpectedHashes, MetaIndexParser);
470 return true;
471 }
472
473 FileFd fd(CurrentPackagesFile, FileFd::ReadOnly);
474 Hashes LocalHashesCalc;
475 LocalHashesCalc.AddFD(fd);
476 HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
477
478 if(Debug)
479 std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at "
480 << fd.Name() << " " << fd.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl;
481
482 // parse all of (provided) history
483 vector<DiffInfo> available_patches;
484 bool firstAcceptedHashes = true;
485 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
486 {
487 if (LocalHashes.find(*type) == NULL)
488 continue;
489
490 std::string tagname = *type;
491 tagname.append("-History");
492 std::string const tmp = Tags.FindS(tagname.c_str());
493 if (tmp.empty() == true)
494 continue;
495
496 string hash, filename;
497 unsigned long long size;
498 std::stringstream ss(tmp);
499
500 while (ss >> hash >> size >> filename)
501 {
502 if (unlikely(hash.empty() == true || filename.empty() == true))
503 continue;
504
505 // see if we have a record for this file already
506 std::vector<DiffInfo>::iterator cur = available_patches.begin();
507 for (; cur != available_patches.end(); ++cur)
508 {
509 if (cur->file != filename || unlikely(cur->result_size != size))
510 continue;
511 cur->result_hashes.push_back(HashString(*type, hash));
512 break;
513 }
514 if (cur != available_patches.end())
515 continue;
516 if (firstAcceptedHashes == true)
517 {
518 DiffInfo next;
519 next.file = filename;
520 next.result_hashes.push_back(HashString(*type, hash));
521 next.result_size = size;
522 next.patch_size = 0;
523 available_patches.push_back(next);
524 }
525 else
526 {
527 if (Debug == true)
528 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
529 << " wasn't in the list for the first parsed hash! (history)" << std::endl;
530 break;
531 }
532 }
533 firstAcceptedHashes = false;
534 }
535
536 if (unlikely(available_patches.empty() == true))
537 {
538 if (Debug)
539 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
540 << "Couldn't find any patches for the patch series." << std::endl;
541 return false;
542 }
543
544 for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type)
545 {
546 if (LocalHashes.find(*type) == NULL)
547 continue;
548
549 std::string tagname = *type;
550 tagname.append("-Patches");
551 std::string const tmp = Tags.FindS(tagname.c_str());
552 if (tmp.empty() == true)
553 continue;
554
555 string hash, filename;
556 unsigned long long size;
557 std::stringstream ss(tmp);
558
559 while (ss >> hash >> size >> filename)
560 {
561 if (unlikely(hash.empty() == true || filename.empty() == true))
562 continue;
563
564 // see if we have a record for this file already
565 std::vector<DiffInfo>::iterator cur = available_patches.begin();
566 for (; cur != available_patches.end(); ++cur)
567 {
568 if (cur->file != filename)
569 continue;
570 if (unlikely(cur->patch_size != 0 && cur->patch_size != size))
571 continue;
572 cur->patch_hashes.push_back(HashString(*type, hash));
573 cur->patch_size = size;
574 break;
575 }
576 if (cur != available_patches.end())
577 continue;
578 if (Debug == true)
579 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename
580 << " wasn't in the list for the first parsed hash! (patches)" << std::endl;
581 break;
582 }
583 }
584
585 bool foundStart = false;
586 for (std::vector<DiffInfo>::iterator cur = available_patches.begin();
587 cur != available_patches.end(); ++cur)
588 {
589 if (LocalHashes != cur->result_hashes)
590 continue;
591
592 available_patches.erase(available_patches.begin(), cur);
593 foundStart = true;
594 break;
595 }
596
597 if (foundStart == false || unlikely(available_patches.empty() == true))
598 {
599 if (Debug)
600 std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": "
601 << "Couldn't find the start of the patch series." << std::endl;
602 return false;
603 }
604
605 // patching with too many files is rather slow compared to a fast download
606 unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0);
607 if (fileLimit != 0 && fileLimit < available_patches.size())
608 {
609 if (Debug)
610 std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit
611 << ") so fallback to complete download" << std::endl;
612 return false;
613 }
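// Example apt.conf snippet for the knob above (value illustrative; the default of
// 0 used in the lookup means "no limit"):
//   Acquire::PDiffs::FileLimit "20";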
614
615 // calculate the size of all patches we have to get
616 // note that all sizes are uncompressed, while we download compressed files
617 unsigned long long patchesSize = 0;
618 for (std::vector<DiffInfo>::const_iterator cur = available_patches.begin();
619 cur != available_patches.end(); ++cur)
620 patchesSize += cur->patch_size;
621 unsigned long long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100);
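// note: the "false &&" below short-circuits the size check, so it never triggers
// and Acquire::PDiffs::SizeLimit is effectively ignored at this point (presumably
// because of the uncompressed-vs-compressed mismatch noted above)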
622 if (false && sizeLimit > 0 && (sizeLimit/100) < patchesSize)
623 {
624 if (Debug)
625 std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100
626 << ") so fallback to complete download" << std::endl;
627 return false;
628 }
629
630 // FIXME: make this use the method
631 PackagesFileReadyInPartial = true;
632 std::string const Partial = GetPartialFileNameFromURI(RealURI);
633
634 FileFd From(CurrentPackagesFile, FileFd::ReadOnly);
635 FileFd To(Partial, FileFd::WriteEmpty);
636 if(CopyFile(From, To) == false)
637 return _error->Errno("CopyFile", "failed to copy");
638
639 if(Debug)
640 std::cerr << "Done copying " << CurrentPackagesFile
641 << " -> " << Partial
642 << std::endl;
643
644 // we have something, queue the diffs
645 string::size_type const last_space = Description.rfind(" ");
646 if(last_space != string::npos)
647 Description.erase(last_space, Description.size()-last_space);
648
649    /* decide if we should download patches one by one or in one go:
650       The first is good if the server merges patches, but many don't, so
651       client-based merging can be attempted, in which case the second is better.
652       "bad things" will happen if patches are merged on the server,
653       but client-side merging is attempted as well */
654 bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true);
655 if (pdiff_merge == true)
656 {
657 // reprepro adds this flag if it has merged patches on the server
658 std::string const precedence = Tags.FindS("X-Patch-Precedence");
659 pdiff_merge = (precedence != "merged");
660 }
661
662 if (pdiff_merge == false)
663 {
664 new pkgAcqIndexDiffs(Owner, TransactionManager, Target, ExpectedHashes,
665 MetaIndexParser, available_patches);
666 }
667 else
668 {
669 std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size());
670 for(size_t i = 0; i < available_patches.size(); ++i)
671 (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, TransactionManager,
672 Target,
673 ExpectedHashes,
674 MetaIndexParser,
675 available_patches[i],
676 diffs);
677 }
678
679 Complete = false;
680 Status = StatDone;
681 Dequeue();
682 return true;
683 }
684 /*}}}*/
685 void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/
686 {
687 Item::Failed(Message,Cnf);
688 Status = StatDone;
689
690 if(Debug)
691 std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl
692 << "Falling back to normal index file acquire" << std::endl;
693
694 new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser);
695 }
696 /*}}}*/
697 void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/
698 pkgAcquire::MethodConfig *Cnf)
699 {
700 if(Debug)
701 std::clog << "pkgAcqDiffIndex::Done(): " << Desc.URI << std::endl;
702
703 Item::Done(Message, Size, Hashes, Cnf);
704
705 // verify the index target
706 if(Target && Target->MetaKey != "" && MetaIndexParser && Hashes.usable())
707 {
708 std::string IndexMetaKey = Target->MetaKey + ".diff/Index";
709 indexRecords::checkSum *Record = MetaIndexParser->Lookup(IndexMetaKey);
710 if(Record && Record->Hashes.usable() && Hashes != Record->Hashes)
711 {
712 RenameOnError(HashSumMismatch);
713 printHashSumComparision(RealURI, Record->Hashes, Hashes);
714 Failed(Message, Cnf);
715 return;
716 }
717
718 }
719
720 string FinalFile;
721 FinalFile = _config->FindDir("Dir::State::lists");
722 FinalFile += URItoFileName(Desc.URI);
723
724 if(StringToBool(LookupTag(Message,"IMS-Hit"),false))
725 DestFile = FinalFile;
726
727 if(!ParseDiffIndex(DestFile))
728 return Failed("Message: Couldn't parse pdiff index", Cnf);
729
730 // queue for final move
731 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
732
733 Complete = true;
734 Status = StatDone;
735 Dequeue();
736 return;
737 }
738 /*}}}*/
739 // AcqIndexDiffs::AcqIndexDiffs - Constructor /*{{{*/
740 // ---------------------------------------------------------------------
741 /* The package diff is added to the queue. One object is constructed
742 * for each diff and the index
743 */
744 pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner,
745 pkgAcqMetaBase *TransactionManager,
746 struct IndexTarget const * const Target,
747 HashStringList const &ExpectedHashes,
748 indexRecords *MetaIndexParser,
749 vector<DiffInfo> diffs)
750 : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser),
751 available_patches(diffs)
752 {
753 DestFile = GetPartialFileNameFromURI(Target->URI);
754
755 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
756
757 RealURI = Target->URI;
758 Desc.Owner = this;
759 Description = Target->Description;
760 Desc.ShortDesc = Target->ShortDesc;
761
762 if(available_patches.empty() == true)
763 {
764 // we are done (yeah!), check hashes against the final file
765 DestFile = _config->FindDir("Dir::State::lists");
766 DestFile += URItoFileName(Target->URI);
767 Finish(true);
768 }
769 else
770 {
771 // get the next diff
772 State = StateFetchDiff;
773 QueueNextDiff();
774 }
775 }
776 /*}}}*/
777 void pkgAcqIndexDiffs::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/
778 {
779 Item::Failed(Message,Cnf);
780 Status = StatDone;
781
782 if(Debug)
783 std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << " with " << Message << std::endl
784 << "Falling back to normal index file acquire" << std::endl;
785 new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser);
786 Finish();
787 }
788 /*}}}*/
789 // Finish - helper that cleans the item out of the fetcher queue /*{{{*/
790 void pkgAcqIndexDiffs::Finish(bool allDone)
791 {
792 if(Debug)
793 std::clog << "pkgAcqIndexDiffs::Finish(): "
794 << allDone << " "
795 << Desc.URI << std::endl;
796
797    // we restore the original name; this is required, otherwise
798    // the file will be cleaned up
799 if(allDone)
800 {
801 if(HashSums().usable() && !HashSums().VerifyFile(DestFile))
802 {
803 RenameOnError(HashSumMismatch);
804 Dequeue();
805 return;
806 }
807
808 // queue for copy
809 std::string FinalFile = _config->FindDir("Dir::State::lists");
810 FinalFile += URItoFileName(RealURI);
811 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
812
813 // this is for the "real" finish
814 Complete = true;
815 Status = StatDone;
816 Dequeue();
817 if(Debug)
818 std::clog << "\n\nallDone: " << DestFile << "\n" << std::endl;
819 return;
820 }
821
822 if(Debug)
823 std::clog << "Finishing: " << Desc.URI << std::endl;
824 Complete = false;
825 Status = StatDone;
826 Dequeue();
827 return;
828 }
829 /*}}}*/
830 bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/
831 {
832    // calculate the hashes of the just-patched file
833 std::string const FinalFile = GetPartialFileNameFromURI(RealURI);
834
835 if(!FileExists(FinalFile))
836 {
837 Failed("Message: No FinalFile " + FinalFile + " available", NULL);
838 return false;
839 }
840
841 FileFd fd(FinalFile, FileFd::ReadOnly);
842 Hashes LocalHashesCalc;
843 LocalHashesCalc.AddFD(fd);
844 HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
845
846 if(Debug)
847 std::clog << "QueueNextDiff: " << FinalFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl;
848
849 if (unlikely(LocalHashes.usable() == false || ExpectedHashes.usable() == false))
850 {
851 Failed("Local/Expected hashes are not usable", NULL);
852 return false;
853 }
854
855
856 // final file reached before all patches are applied
857 if(LocalHashes == ExpectedHashes)
858 {
859 Finish(true);
860 return true;
861 }
862
863 // remove all patches until the next matching patch is found
864 // this requires the Index file to be ordered
865    for(vector<DiffInfo>::iterator I = available_patches.begin();
866        available_patches.empty() == false &&
867        I != available_patches.end() &&
868        I->result_hashes != LocalHashes;
869        /* no increment: erase() already returns the iterator to the next element */)
870    {
871       I = available_patches.erase(I);
872    }
873
874 // error checking and falling back if no patch was found
875 if(available_patches.empty() == true)
876 {
877 Failed("No patches left to reach target", NULL);
878 return false;
879 }
880
881 // queue the right diff
882 Desc.URI = RealURI + ".diff/" + available_patches[0].file + ".gz";
883 Desc.Description = Description + " " + available_patches[0].file + string(".pdiff");
884 DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + available_patches[0].file);
885
886 if(Debug)
887 std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl;
888
889 QueueURI(Desc);
890
891 return true;
892 }
893 /*}}}*/
894 void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringList const &Hashes, /*{{{*/
895 pkgAcquire::MethodConfig *Cnf)
896 {
897 if(Debug)
898 std::clog << "pkgAcqIndexDiffs::Done(): " << Desc.URI << std::endl;
899
900 Item::Done(Message, Size, Hashes, Cnf);
901
902 // FIXME: verify this download too before feeding it to rred
903 std::string const FinalFile = GetPartialFileNameFromURI(RealURI);
904
905 // success in downloading a diff, enter ApplyDiff state
906 if(State == StateFetchDiff)
907 {
908 FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
909 class Hashes LocalHashesCalc;
910 LocalHashesCalc.AddFD(fd);
911 HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
912
913 if (fd.Size() != available_patches[0].patch_size ||
914 available_patches[0].patch_hashes != LocalHashes)
915 {
916 Failed("Patch has Size/Hashsum mismatch", NULL);
917 return;
918 }
919
920       // rred expects the patch as $FinalFile.ed
921 Rename(DestFile,FinalFile+".ed");
922
923 if(Debug)
924 std::clog << "Sending to rred method: " << FinalFile << std::endl;
925
926 State = StateApplyDiff;
927 Local = true;
928 Desc.URI = "rred:" + FinalFile;
929 QueueURI(Desc);
930 SetActiveSubprocess("rred");
931 return;
932 }
933
934
935 // success in download/apply a diff, queue next (if needed)
936 if(State == StateApplyDiff)
937 {
938 // remove the just applied patch
939 available_patches.erase(available_patches.begin());
940 unlink((FinalFile + ".ed").c_str());
941
942 // move into place
943 if(Debug)
944 {
945 std::clog << "Moving patched file in place: " << std::endl
946 << DestFile << " -> " << FinalFile << std::endl;
947 }
948 Rename(DestFile,FinalFile);
949 chmod(FinalFile.c_str(),0644);
950
951 // see if there is more to download
952 if(available_patches.empty() == false) {
953 new pkgAcqIndexDiffs(Owner, TransactionManager, Target,
954 ExpectedHashes, MetaIndexParser,
955 available_patches);
956 return Finish();
957 } else
958 // update
959 DestFile = FinalFile;
960 return Finish(true);
961 }
962 }
963 /*}}}*/
964 // AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor /*{{{*/
965 pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner,
966 pkgAcqMetaBase *TransactionManager,
967 struct IndexTarget const * const Target,
968 HashStringList const &ExpectedHashes,
969 indexRecords *MetaIndexParser,
970 DiffInfo const &patch,
971 std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches)
972 : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser),
973 patch(patch), allPatches(allPatches), State(StateFetchDiff)
974 {
975 Debug = _config->FindB("Debug::pkgAcquire::Diffs",false);
976
977 RealURI = Target->URI;
978 Desc.Owner = this;
979 Description = Target->Description;
980 Desc.ShortDesc = Target->ShortDesc;
981
982 Desc.URI = RealURI + ".diff/" + patch.file + ".gz";
983 Desc.Description = Description + " " + patch.file + string(".pdiff");
984
985 DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + patch.file);
986
987 if(Debug)
988 std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl;
989
990 QueueURI(Desc);
991 }
992 /*}}}*/
993 void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/
994 {
995 if(Debug)
996 std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl;
997
998 Item::Failed(Message,Cnf);
999 Status = StatDone;
1000
1001 // check if we are the first to fail, otherwise we are done here
1002 State = StateDoneDiff;
1003 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
1004 I != allPatches->end(); ++I)
1005 if ((*I)->State == StateErrorDiff)
1006 return;
1007
1008 // first failure means we should fallback
1009 State = StateErrorDiff;
1010 std::clog << "Falling back to normal index file acquire" << std::endl;
1011 new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser);
1012 }
1013 /*}}}*/
1014 void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/
1015 pkgAcquire::MethodConfig *Cnf)
1016 {
1017 if(Debug)
1018 std::clog << "pkgAcqIndexMergeDiffs::Done(): " << Desc.URI << std::endl;
1019
1020 Item::Done(Message,Size,Hashes,Cnf);
1021
1022 // FIXME: verify download before feeding it to rred
1023 string const FinalFile = GetPartialFileNameFromURI(RealURI);
1024
1025 if (State == StateFetchDiff)
1026 {
1027 FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip);
1028 class Hashes LocalHashesCalc;
1029 LocalHashesCalc.AddFD(fd);
1030 HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList();
1031
1032 if (fd.Size() != patch.patch_size || patch.patch_hashes != LocalHashes)
1033 {
1034 Failed("Patch has Size/Hashsum mismatch", NULL);
1035 return;
1036 }
1037
1038 // rred expects the patch as $FinalFile.ed.$patchname.gz
1039 Rename(DestFile, FinalFile + ".ed." + patch.file + ".gz");
1040
1041 // check if this is the last completed diff
1042 State = StateDoneDiff;
1043 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
1044 I != allPatches->end(); ++I)
1045 if ((*I)->State != StateDoneDiff)
1046 {
1047 if(Debug)
1048 std::clog << "Not the last done diff in the batch: " << Desc.URI << std::endl;
1049 return;
1050 }
1051
1052 // this is the last completed diff, so we are ready to apply now
1053 State = StateApplyDiff;
1054
1055 if(Debug)
1056 std::clog << "Sending to rred method: " << FinalFile << std::endl;
1057
1058 Local = true;
1059 Desc.URI = "rred:" + FinalFile;
1060 QueueURI(Desc);
1061 SetActiveSubprocess("rred");
1062 return;
1063 }
1064 // success in download/apply all diffs, clean up
1065 else if (State == StateApplyDiff)
1066 {
1067 // see if we really got the expected file
1068 if(ExpectedHashes.usable() && !ExpectedHashes.VerifyFile(DestFile))
1069 {
1070 RenameOnError(HashSumMismatch);
1071 return;
1072 }
1073
1074
1075 std::string FinalFile = _config->FindDir("Dir::State::lists");
1076 FinalFile += URItoFileName(RealURI);
1077
1078 // move the result into place
1079 if(Debug)
1080 std::clog << "Queue patched file in place: " << std::endl
1081 << DestFile << " -> " << FinalFile << std::endl;
1082
1083 // queue for copy by the transaction manager
1084 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
1085
1086 // ensure the ed's are gone regardless of list-cleanup
1087 for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin();
1088 I != allPatches->end(); ++I)
1089 {
1090 std::string const PartialFile = GetPartialFileNameFromURI(RealURI);
1091 std::string patch = PartialFile + ".ed." + (*I)->patch.file + ".gz";
1092 unlink(patch.c_str());
1093 }
1094
1095 // all set and done
1096 Complete = true;
1097 if(Debug)
1098 std::clog << "allDone: " << DestFile << "\n" << std::endl;
1099 }
1100 }
1101 /*}}}*/
1102 // AcqBaseIndex::VerifyHashByMetaKey - verify hash for the given metakey /*{{{*/
1103 bool pkgAcqBaseIndex::VerifyHashByMetaKey(HashStringList const &Hashes)
1104 {
1105 if(MetaKey != "" && Hashes.usable())
1106 {
1107 indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey);
1108 if(Record && Record->Hashes.usable() && Hashes != Record->Hashes)
1109 {
1110 printHashSumComparision(RealURI, Record->Hashes, Hashes);
1111 return false;
1112 }
1113 }
1114 return true;
1115 }
1116 /*}}}*/
1117 // AcqIndex::AcqIndex - Constructor /*{{{*/
1118 // ---------------------------------------------------------------------
1119 /* The package file is added to the queue and a second class is
1120 instantiated to fetch the revision file */
1121 pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner,
1122 string URI,string URIDesc,string ShortDesc,
1123 HashStringList const &ExpectedHash)
1124 : pkgAcqBaseIndex(Owner, 0, NULL, ExpectedHash, NULL)
1125 {
1126 RealURI = URI;
1127
1128 AutoSelectCompression();
1129 Init(URI, URIDesc, ShortDesc);
1130
1131 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1132 std::clog << "New pkgIndex with TransactionManager "
1133 << TransactionManager << std::endl;
1134 }
1135 /*}}}*/
1136 // AcqIndex::AcqIndex - Constructor /*{{{*/
1137 pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner,
1138 pkgAcqMetaBase *TransactionManager,
1139 IndexTarget const *Target,
1140 HashStringList const &ExpectedHash,
1141 indexRecords *MetaIndexParser)
1142 : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHash,
1143 MetaIndexParser)
1144 {
1145 RealURI = Target->URI;
1146
1147 // autoselect the compression method
1148 AutoSelectCompression();
1149 Init(Target->URI, Target->Description, Target->ShortDesc);
1150
1151 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1152 std::clog << "New pkgIndex with TransactionManager "
1153 << TransactionManager << std::endl;
1154 }
1155 /*}}}*/
1156 // AcqIndex::AutoSelectCompression - Select compression /*{{{*/
1157 void pkgAcqIndex::AutoSelectCompression()
1158 {
1159 std::vector<std::string> types = APT::Configuration::getCompressionTypes();
1160 CompressionExtensions = "";
1161 if (ExpectedHashes.usable())
1162 {
1163 for (std::vector<std::string>::const_iterator t = types.begin();
1164 t != types.end(); ++t)
1165 {
1166 std::string CompressedMetaKey = string(Target->MetaKey).append(".").append(*t);
1167 if (*t == "uncompressed" ||
1168 MetaIndexParser->Exists(CompressedMetaKey) == true)
1169 CompressionExtensions.append(*t).append(" ");
1170 }
1171 }
1172 else
1173 {
1174 for (std::vector<std::string>::const_iterator t = types.begin(); t != types.end(); ++t)
1175 CompressionExtensions.append(*t).append(" ");
1176 }
1177 if (CompressionExtensions.empty() == false)
1178 CompressionExtensions.erase(CompressionExtensions.end()-1);
1179 }
1180 /*}}}*/
1181 // AcqIndex::Init - deferred Constructor					/*{{{*/
1182 void pkgAcqIndex::Init(string const &URI, string const &URIDesc,
1183 string const &ShortDesc)
1184 {
1185 Stage = STAGE_DOWNLOAD;
1186
1187 DestFile = GetPartialFileNameFromURI(URI);
1188
1189 CurrentCompressionExtension = CompressionExtensions.substr(0, CompressionExtensions.find(' '));
1190 if (CurrentCompressionExtension == "uncompressed")
1191 {
1192 Desc.URI = URI;
1193 if(Target)
1194 MetaKey = string(Target->MetaKey);
1195 }
1196 else
1197 {
1198 Desc.URI = URI + '.' + CurrentCompressionExtension;
1199 DestFile = DestFile + '.' + CurrentCompressionExtension;
1200 if(Target)
1201 MetaKey = string(Target->MetaKey) + '.' + CurrentCompressionExtension;
1202 }
1203
1204 // load the filesize
1205 if(MetaIndexParser)
1206 {
1207 indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey);
1208 if(Record)
1209 FileSize = Record->Size;
1210
1211 InitByHashIfNeeded(MetaKey);
1212 }
1213
1214 Desc.Description = URIDesc;
1215 Desc.Owner = this;
1216 Desc.ShortDesc = ShortDesc;
1217
1218 QueueURI(Desc);
1219 }
1220 /*}}}*/
1221 // AcqIndex::InitByHashIfNeeded - modify URI for by-hash support	/*{{{*/
1222 void pkgAcqIndex::InitByHashIfNeeded(const std::string MetaKey)
1223 {
1224 // TODO:
1225 // - (maybe?) add support for by-hash into the sources.list as flag
1226 // - make apt-ftparchive generate the hashes (and expire?)
1227 std::string HostKnob = "APT::Acquire::" + ::URI(Desc.URI).Host + "::By-Hash";
1228 if(_config->FindB("APT::Acquire::By-Hash", false) == true ||
1229 _config->FindB(HostKnob, false) == true ||
1230 MetaIndexParser->GetSupportsAcquireByHash())
1231 {
1232 indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey);
1233 if(Record)
1234 {
1235 // FIXME: should we really use the best hash here? or a fixed one?
1236 const HashString *TargetHash = Record->Hashes.find("");
1237 std::string ByHash = "/by-hash/" + TargetHash->HashType() + "/" + TargetHash->HashValue();
1238 size_t trailing_slash = Desc.URI.find_last_of("/");
1239 Desc.URI = Desc.URI.replace(
1240 trailing_slash,
1241 Desc.URI.substr(trailing_slash+1).size()+1,
1242 ByHash);
1243 } else {
1244 _error->Warning(
1245 "Fetching ByHash requested but can not find record for %s",
1246 MetaKey.c_str());
1247 }
1248 }
1249 }
1250 /*}}}*/
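// Illustrative rewrite (hypothetical archive URI): a Desc.URI of
//   http://deb.example.org/dists/stable/main/binary-amd64/Packages.xz
// becomes
//   http://deb.example.org/dists/stable/main/binary-amd64/by-hash/<HashType>/<hexdigest>
// with hash type (e.g. SHA256) and digest taken from the Release file record looked up above.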
1251 // AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/
1252 // ---------------------------------------------------------------------
1253 /* The only header we use is the last-modified header. */
1254 string pkgAcqIndex::Custom600Headers() const
1255 {
1256 string Final = GetFinalFilename();
1257
1258 string msg = "\nIndex-File: true";
1259 struct stat Buf;
1260 if (stat(Final.c_str(),&Buf) == 0)
1261 msg += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
1262
1263 if(Target->IsOptional())
1264 msg += "\nFail-Ignore: true";
1265
1266 return msg;
1267 }
1268 /*}}}*/
1269 // pkgAcqIndex::Failed - getting the indexfile failed /*{{{*/
1270 void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
1271 {
1272 Item::Failed(Message,Cnf);
1273
1274 size_t const nextExt = CompressionExtensions.find(' ');
1275 if (nextExt != std::string::npos)
1276 {
1277 CompressionExtensions = CompressionExtensions.substr(nextExt+1);
1278 Init(RealURI, Desc.Description, Desc.ShortDesc);
1279 Status = StatIdle;
1280 return;
1281 }
1282
1283 // on decompression failure, remove bad versions in partial/
1284 if (Stage == STAGE_DECOMPRESS_AND_VERIFY)
1285 {
1286 unlink(EraseFileName.c_str());
1287 }
1288
1290
1291 if(Target->IsOptional() && ExpectedHashes.empty() && Stage == STAGE_DOWNLOAD)
1292 Status = StatDone;
1293 else
1294 TransactionManager->AbortTransaction();
1295 }
1296 /*}}}*/
1297 // pkgAcqIndex::GetFinalFilename - Return the full final file path /*{{{*/
1298 std::string pkgAcqIndex::GetFinalFilename() const
1299 {
1300 std::string FinalFile = _config->FindDir("Dir::State::lists");
1301 FinalFile += URItoFileName(RealURI);
1302 return GetCompressedFileName(RealURI, FinalFile, CurrentCompressionExtension);
1303 }
1304 /*}}}*/
1305 // AcqIndex::ReverifyAfterIMS - Reverify index after an ims-hit /*{{{*/
1306 void pkgAcqIndex::ReverifyAfterIMS()
1307 {
1308 // update destfile to *not* include the compression extension when doing
1309    // a reverify (as it's uncompressed on disk already)
1310 DestFile = GetCompressedFileName(RealURI, GetPartialFileNameFromURI(RealURI), CurrentCompressionExtension);
1311
1312 // copy FinalFile into partial/ so that we check the hash again
1313 string FinalFile = GetFinalFilename();
1314 Stage = STAGE_DECOMPRESS_AND_VERIFY;
1315 Desc.URI = "copy:" + FinalFile;
1316 QueueURI(Desc);
1317 }
1318 /*}}}*/
1319 // AcqIndex::ValidateFile - Validate the content of the downloaded file /*{{{*/
1320 bool pkgAcqIndex::ValidateFile(const std::string &FileName)
1321 {
1322 // FIXME: this can go away once we only ever download stuff that
1323 // has a valid hash and we never do GET based probing
1324    // FIXME2: this also leaks debian-isms into the code and should therefore go away
1325
1326 /* Always validate the index file for correctness (all indexes must
1327 * have a Package field) (LP: #346386) (Closes: #627642)
1328 */
1329 FileFd fd(FileName, FileFd::ReadOnly, FileFd::Extension);
1330 // Only test for correctness if the content of the file is not empty
1331 // (empty is ok)
1332 if (fd.Size() > 0)
1333 {
1334 pkgTagSection sec;
1335 pkgTagFile tag(&fd);
1336
1337 // all our current indexes have a field 'Package' in each section
1338 if (_error->PendingError() == true ||
1339 tag.Step(sec) == false ||
1340 sec.Exists("Package") == false)
1341 return false;
1342 }
1343 return true;
1344 }
1345 /*}}}*/
1346 // AcqIndex::Done - Finished a fetch /*{{{*/
1347 // ---------------------------------------------------------------------
1348 /* This goes through a number of states.. On the initial fetch the
1349 method could possibly return an alternate filename which points
1350 to the uncompressed version of the file. If this is so the file
1351 is copied into the partial directory. In all other cases the file
1352 is decompressed with a compressed uri. */
1353 void pkgAcqIndex::Done(string Message,
1354 unsigned long long Size,
1355 HashStringList const &Hashes,
1356 pkgAcquire::MethodConfig *Cfg)
1357 {
1358 Item::Done(Message,Size,Hashes,Cfg);
1359
1360 switch(Stage)
1361 {
1362 case STAGE_DOWNLOAD:
1363 StageDownloadDone(Message, Hashes, Cfg);
1364 break;
1365 case STAGE_DECOMPRESS_AND_VERIFY:
1366 StageDecompressDone(Message, Hashes, Cfg);
1367 break;
1368 }
1369 }
1370 /*}}}*/
1371 // AcqIndex::StageDownloadDone - Queue for decompress and verify /*{{{*/
1372 void pkgAcqIndex::StageDownloadDone(string Message,
1373 HashStringList const &Hashes,
1374 pkgAcquire::MethodConfig *Cfg)
1375 {
1376    // First check if the calculated Hash of the (compressed) downloaded
1377 // file matches the hash we have in the MetaIndexRecords for this file
1378 if(VerifyHashByMetaKey(Hashes) == false)
1379 {
1380 RenameOnError(HashSumMismatch);
1381 Failed(Message, Cfg);
1382 return;
1383 }
1384
1385 Complete = true;
1386
1387    // Handle the unzipped case
1388 string FileName = LookupTag(Message,"Alt-Filename");
1389 if (FileName.empty() == false)
1390 {
1391 Stage = STAGE_DECOMPRESS_AND_VERIFY;
1392 Local = true;
1393 DestFile += ".decomp";
1394 Desc.URI = "copy:" + FileName;
1395 QueueURI(Desc);
1396 SetActiveSubprocess("copy");
1397 return;
1398 }
1399
1400 FileName = LookupTag(Message,"Filename");
1401 if (FileName.empty() == true)
1402 {
1403 Status = StatError;
1404 ErrorText = "Method gave a blank filename";
1405 }
1406
1407 // Methods like e.g. "file:" will give us a (compressed) FileName that is
1408    // not the "DestFile" we set; in this case we uncompress from the local file
1409 if (FileName != DestFile)
1410 Local = true;
1411 else
1412 EraseFileName = FileName;
1413
1414 // we need to verify the file against the current Release file again
1415    // on if-modified-since hit to avoid a stale attack against us
1416 if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
1417 {
1418 // The files timestamp matches, reverify by copy into partial/
1419 EraseFileName = "";
1420 ReverifyAfterIMS();
1421 return;
1422 }
1423
1424 // If we have compressed indexes enabled, queue for hash verification
1425 if (_config->FindB("Acquire::GzipIndexes",false))
1426 {
1427 DestFile = GetPartialFileNameFromURI(RealURI + '.' + CurrentCompressionExtension);
1428 EraseFileName = "";
1429 Stage = STAGE_DECOMPRESS_AND_VERIFY;
1430 Desc.URI = "copy:" + FileName;
1431 QueueURI(Desc);
1432 SetActiveSubprocess("copy");
1433 return;
1434 }
1435
1436 // get the binary name for your used compression type
1437 string decompProg;
1438 if(CurrentCompressionExtension == "uncompressed")
1439 decompProg = "copy";
1440 else
1441 decompProg = _config->Find(string("Acquire::CompressionTypes::").append(CurrentCompressionExtension),"");
1442 if(decompProg.empty() == true)
1443 {
1444 _error->Error("Unsupported extension: %s", CurrentCompressionExtension.c_str());
1445 return;
1446 }
1447
1448 // queue uri for the next stage
1449 Stage = STAGE_DECOMPRESS_AND_VERIFY;
1450 DestFile += ".decomp";
1451 Desc.URI = decompProg + ":" + FileName;
1452 QueueURI(Desc);
1453 SetActiveSubprocess(decompProg);
1454 }
1455 /*}}}*/
1456 // pkgAcqIndex::StageDecompressDone - Final verification /*{{{*/
1457 void pkgAcqIndex::StageDecompressDone(string Message,
1458 HashStringList const &Hashes,
1459 pkgAcquire::MethodConfig *Cfg)
1460 {
1461 if (ExpectedHashes.usable() && ExpectedHashes != Hashes)
1462 {
1463 Desc.URI = RealURI;
1464 RenameOnError(HashSumMismatch);
1465 printHashSumComparision(RealURI, ExpectedHashes, Hashes);
1466 Failed(Message, Cfg);
1467 return;
1468 }
1469
1470 if(!ValidateFile(DestFile))
1471 {
1472 RenameOnError(InvalidFormat);
1473 Failed(Message, Cfg);
1474 return;
1475 }
1476
1477 // remove the compressed version of the file
1478 unlink(EraseFileName.c_str());
1479
1480 // Done, queue for rename on transaction finished
1481 TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename());
1482
1483 return;
1484 }
1485 /*}}}*/
1486 // AcqMetaBase::Add - Add a item to the current Transaction /*{{{*/
1487 void pkgAcqMetaBase::Add(Item *I)
1488 {
1489 Transaction.push_back(I);
1490 }
1491 /*}}}*/
1492 // AcqMetaBase::AbortTransaction - Abort the current Transaction /*{{{*/
1493 void pkgAcqMetaBase::AbortTransaction()
1494 {
1495 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1496 std::clog << "AbortTransaction: " << TransactionManager << std::endl;
1497
1498 // ensure the toplevel is in error state too
1499 for (std::vector<Item*>::iterator I = Transaction.begin();
1500 I != Transaction.end(); ++I)
1501 {
1502 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1503 std::clog << " Cancel: " << (*I)->DestFile << std::endl;
1504 // the transaction will abort, so stop anything that is idle
1505 if ((*I)->Status == pkgAcquire::Item::StatIdle)
1506 {
1507 (*I)->Status = pkgAcquire::Item::StatDone;
1508 (*I)->Dequeue();
1509 }
1510 }
1511 Transaction.clear();
1512 }
1513 /*}}}*/
1514 // AcqMetaBase::TransactionHasError - Check for errors in Transaction /*{{{*/
1515 bool pkgAcqMetaBase::TransactionHasError()
1516 {
1517 for (pkgAcquire::ItemIterator I = Transaction.begin();
1518 I != Transaction.end(); ++I)
1519 if((*I)->Status != pkgAcquire::Item::StatDone &&
1520 (*I)->Status != pkgAcquire::Item::StatIdle)
1521 return true;
1522
1523 return false;
1524 }
1525 /*}}}*/
1526 // AcqMetaBase::CommitTransaction - Commit a transaction /*{{{*/
1527 void pkgAcqMetaBase::CommitTransaction()
1528 {
1529 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1530 std::clog << "CommitTransaction: " << this << std::endl;
1531
1532 // move new files into place *and* remove files that are not
1533 // part of the transaction but are still on disk
1534 for (std::vector<Item*>::iterator I = Transaction.begin();
1535 I != Transaction.end(); ++I)
1536 {
1537 if((*I)->PartialFile != "")
1538 {
1539 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1540 std::clog << "mv " << (*I)->PartialFile << " -> "<< (*I)->DestFile << " "
1541 << (*I)->DescURI() << std::endl;
1542
1543 Rename((*I)->PartialFile, (*I)->DestFile);
1544 } else {
1545 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1546 std::clog << "rm "
1547 << (*I)->DestFile
1548 << " "
1549 << (*I)->DescURI()
1550 << std::endl;
1551 unlink((*I)->DestFile.c_str());
1552 }
1553 // mark that this transaction is finished
1554 (*I)->TransactionManager = 0;
1555 }
1556 Transaction.clear();
1557 }
1558 /*}}}*/
1559 // AcqMetaBase::TransactionStageCopy - Stage a file for copying /*{{{*/
1560 void pkgAcqMetaBase::TransactionStageCopy(Item *I,
1561 const std::string &From,
1562 const std::string &To)
1563 {
1564 I->PartialFile = From;
1565 I->DestFile = To;
1566 }
1567 /*}}}*/
1568 // AcqMetaBase::TransactionStageRemoval - Stage a file for removal	/*{{{*/
1569 void pkgAcqMetaBase::TransactionStageRemoval(Item *I,
1570 const std::string &FinalFile)
1571 {
1572 I->PartialFile = "";
1573 I->DestFile = FinalFile;
1574 }
1575 /*}}}*/
1576 // AcqMetaBase::GenerateAuthWarning - Check gpg authentication error /*{{{*/
1577 bool pkgAcqMetaBase::CheckStopAuthentication(const std::string &RealURI,
1578 const std::string &Message)
1579 {
1580    // FIXME: this entire function can probably go away now that we disallow
1581    // going to an unauthenticated state and can cleanly roll back
1582
1583 string Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
1584
1585 if(FileExists(Final))
1586 {
1587 Status = StatTransientNetworkError;
1588 _error->Warning(_("An error occurred during the signature "
1589 "verification. The repository is not updated "
1590 "and the previous index files will be used. "
1591 "GPG error: %s: %s\n"),
1592 Desc.Description.c_str(),
1593 LookupTag(Message,"Message").c_str());
1594 RunScripts("APT::Update::Auth-Failure");
1595 return true;
1596 } else if (LookupTag(Message,"Message").find("NODATA") != string::npos) {
1597 /* Invalid signature file, reject (LP: #346386) (Closes: #627642) */
1598 _error->Error(_("GPG error: %s: %s"),
1599 Desc.Description.c_str(),
1600 LookupTag(Message,"Message").c_str());
1601 Status = StatError;
1602 return true;
1603 } else {
1604 _error->Warning(_("GPG error: %s: %s"),
1605 Desc.Description.c_str(),
1606 LookupTag(Message,"Message").c_str());
1607 }
1608 // gpgv method failed
1609 ReportMirrorFailure("GPGFailure");
1610 return false;
1611 }
1612 /*}}}*/
1613 // AcqMetaSig::AcqMetaSig - Constructor /*{{{*/
1614 pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner,
1615 pkgAcqMetaBase *TransactionManager,
1616 string URI,string URIDesc,string ShortDesc,
1617 string MetaIndexFile,
1618 const vector<IndexTarget*>* IndexTargets,
1619 indexRecords* MetaIndexParser) :
1620 pkgAcqMetaBase(Owner, IndexTargets, MetaIndexParser,
1621 HashStringList(), TransactionManager),
1622 RealURI(URI), MetaIndexFile(MetaIndexFile), URIDesc(URIDesc),
1623 ShortDesc(ShortDesc)
1624 {
1625 DestFile = _config->FindDir("Dir::State::lists") + "partial/";
1626 DestFile += URItoFileName(RealURI);
1627
1628    // remove any partially downloaded sig-file in partial/.
1629 // it may confuse proxies and is too small to warrant a
1630 // partial download anyway
1631 unlink(DestFile.c_str());
1632
1633 // set the TransactionManager
1634 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1635 std::clog << "New pkgAcqMetaSig with TransactionManager "
1636 << TransactionManager << std::endl;
1637
1638 // Create the item
1639 Desc.Description = URIDesc;
1640 Desc.Owner = this;
1641 Desc.ShortDesc = ShortDesc;
1642 Desc.URI = URI;
1643
1644 QueueURI(Desc);
1645 }
1646 /*}}}*/
1647 pkgAcqMetaSig::~pkgAcqMetaSig() /*{{{*/
1648 {
1649 }
1650 /*}}}*/
1651 // pkgAcqMetaSig::Custom600Headers - Insert custom request headers /*{{{*/
1652 // ---------------------------------------------------------------------
1653 string pkgAcqMetaSig::Custom600Headers() const
1654 {
1655 std::string Header = GetCustom600Headers(RealURI);
1656 return Header;
1657 }
1658 /*}}}*/
1659 // pkgAcqMetaSig::Done - The signature was downloaded/verified /*{{{*/
1660 // ---------------------------------------------------------------------
1661 /* */
1662 void pkgAcqMetaSig::Done(string Message,unsigned long long Size,
1663 HashStringList const &Hashes,
1664 pkgAcquire::MethodConfig *Cfg)
1665 {
1666 Item::Done(Message, Size, Hashes, Cfg);
1667
1668 if(AuthPass == false)
1669 {
1670 if(CheckDownloadDone(Message, RealURI) == true)
1671 {
1672 // destfile will be modified to point to MetaIndexFile for the
1673 // gpgv method, so we need to save it here
1674 MetaIndexFileSignature = DestFile;
1675 QueueForSignatureVerify(MetaIndexFile, MetaIndexFileSignature);
1676 }
1677 return;
1678 }
1679 else
1680 {
1681 if(CheckAuthDone(Message, RealURI) == true)
1682 {
1683 std::string FinalFile = _config->FindDir("Dir::State::lists");
1684 FinalFile += URItoFileName(RealURI);
1685 TransactionManager->TransactionStageCopy(this, MetaIndexFileSignature, FinalFile);
1686 }
1687 }
1688 }
1689 /*}}}*/
1690 void pkgAcqMetaSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf)/*{{{*/
1691 {
1692 Item::Failed(Message,Cnf);
1693
1694 // check if we need to fail at this point
1695 if (AuthPass == true && CheckStopAuthentication(RealURI, Message))
1696 return;
1697
1698 // FIXME: meh, this is not really elegant
1699 string const Final = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
1700 string const InReleaseURI = RealURI.replace(RealURI.rfind("Release.gpg"), 12,
1701 "InRelease");
1702 string const FinalInRelease = _config->FindDir("Dir::State::lists") + URItoFileName(InReleaseURI);
1703
1704 if (RealFileExists(Final) || RealFileExists(FinalInRelease))
1705 {
1706 std::string downgrade_msg;
1707 strprintf(downgrade_msg, _("The repository '%s' is no longer signed."),
1708 URIDesc.c_str());
1709 if(_config->FindB("Acquire::AllowDowngradeToInsecureRepositories"))
1710 {
1711 // meh, the user wants to take risks (we still mark the packages
1712 // from this repository as unauthenticated)
1713 _error->Warning("%s", downgrade_msg.c_str());
1714 _error->Warning(_("This is normally not allowed, but the option "
1715 "Acquire::AllowDowngradeToInsecureRepositories was "
1716 "given to override it."));
1717 Status = StatDone;
1718 } else {
1719 _error->Error("%s", downgrade_msg.c_str());
1720 Rename(MetaIndexFile, MetaIndexFile+".FAILED");
1721 Item::Failed("Message: " + downgrade_msg, Cnf);
1722 TransactionManager->AbortTransaction();
1723 return;
1724 }
1725 }
1726 else
1727 _error->Warning(_("The data from '%s' is not signed. Packages "
1728 "from that repository can not be authenticated."),
1729 URIDesc.c_str());
1730
1731 // this ensures that any file in the lists/ dir is removed by the
1732 // transaction
1733 DestFile = GetPartialFileNameFromURI(RealURI);
1734 TransactionManager->TransactionStageRemoval(this, DestFile);
1735
1736 // only allow going further if the user explicitly wants it
1737 if(AllowInsecureRepositories(MetaIndexParser, TransactionManager, this) == true)
1738 {
1739 // we parse the indexes here because at this point the user explicitly
1740 // asked for a repository that is potentially harmful
1741 MetaIndexParser->Load(MetaIndexFile);
1742 QueueIndexes(true);
1743 }
1744
1745 // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor
1746 if (Cnf->LocalOnly == true ||
1747 StringToBool(LookupTag(Message,"Transient-Failure"),false) == false)
1748 {
1749 // Ignore this
1750 Status = StatDone;
1751 }
1752 }
1753 /*}}}*/
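// Note on the two escape hatches used above (illustrative, no behaviour
// change implied): both are deliberately opt-in, so a user who accepts the
// risk has to pass something like
//   apt-get update -o Acquire::AllowDowngradeToInsecureRepositories=true
// or -o Acquire::AllowInsecureRepositories=true explicitly.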
1754 pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner, /*{{{*/
1755 pkgAcqMetaBase *TransactionManager,
1756 string URI,string URIDesc,string ShortDesc,
1757 string MetaIndexSigURI,string MetaIndexSigURIDesc, string MetaIndexSigShortDesc,
1758 const vector<IndexTarget*>* IndexTargets,
1759 indexRecords* MetaIndexParser) :
1760 pkgAcqMetaBase(Owner, IndexTargets, MetaIndexParser, HashStringList(),
1761 TransactionManager),
1762 RealURI(URI), URIDesc(URIDesc), ShortDesc(ShortDesc),
1763 MetaIndexSigURI(MetaIndexSigURI), MetaIndexSigURIDesc(MetaIndexSigURIDesc),
1764 MetaIndexSigShortDesc(MetaIndexSigShortDesc)
1765 {
1766 if(TransactionManager == NULL)
1767 {
1768 this->TransactionManager = this;
1769 this->TransactionManager->Add(this);
1770 }
1771
1772 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
1773 std::clog << "New pkgAcqMetaIndex with TransactionManager "
1774 << this->TransactionManager << std::endl;
1775
1776
1777 Init(URIDesc, ShortDesc);
1778 }
1779 /*}}}*/
1780 // pkgAcqMetaIndex::Init - Delayed constructor /*{{{*/
1781 void pkgAcqMetaIndex::Init(std::string URIDesc, std::string ShortDesc)
1782 {
1783 DestFile = GetPartialFileNameFromURI(RealURI);
1784
1785 // Create the item
1786 Desc.Description = URIDesc;
1787 Desc.Owner = this;
1788 Desc.ShortDesc = ShortDesc;
1789 Desc.URI = RealURI;
1790
1791 // we expect more items
1792 ExpectedAdditionalItems = IndexTargets->size();
1793 QueueURI(Desc);
1794 }
1795 /*}}}*/
1796 // pkgAcqMetaIndex::Custom600Headers - Insert custom request headers /*{{{*/
1797 // ---------------------------------------------------------------------
1798 string pkgAcqMetaIndex::Custom600Headers() const
1799 {
1800 return GetCustom600Headers(RealURI);
1801 }
1802 /*}}}*/
1803 void pkgAcqMetaIndex::Done(string Message,unsigned long long Size, /*{{{*/
1804 HashStringList const &Hashes,
1805 pkgAcquire::MethodConfig *Cfg)
1806 {
1807 Item::Done(Message,Size,Hashes,Cfg);
1808
1809 if(CheckDownloadDone(Message, RealURI))
1810 {
1811 // we have a Release file, now download the Signature; all further
1812 // verification and queueing of additional downloads is done in
1813 // pkgAcqMetaSig::Done()
1814 std::string MetaIndexFile = DestFile;
1815 new pkgAcqMetaSig(Owner, TransactionManager,
1816 MetaIndexSigURI, MetaIndexSigURIDesc,
1817 MetaIndexSigShortDesc, MetaIndexFile, IndexTargets,
1818 MetaIndexParser);
1819
1820 string FinalFile = _config->FindDir("Dir::State::lists");
1821 FinalFile += URItoFileName(RealURI);
1822 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
1823 }
1824 }
1825 /*}}}*/
1826 bool pkgAcqMetaBase::CheckAuthDone(string Message, const string &RealURI) /*{{{*/
1827 {
1828 // At this point, the gpgv method has succeeded, so there is a
1829 // valid signature from a key in the trusted keyring. We
1830 // perform additional verification of its contents, and use them
1831 // to verify the indexes we are about to download
1832
1833 if (!MetaIndexParser->Load(DestFile))
1834 {
1835 Status = StatAuthError;
1836 ErrorText = MetaIndexParser->ErrorText;
1837 return false;
1838 }
1839
1840 if (!VerifyVendor(Message, RealURI))
1841 {
1842 return false;
1843 }
1844
1845 if (_config->FindB("Debug::pkgAcquire::Auth", false))
1846 std::cerr << "Signature verification succeeded: "
1847 << DestFile << std::endl;
1848
1849 // Download further indexes with verification
1850 //
1851 // it would be really nice if we could simply do
1852 // if (IMSHit == false) QueueIndexes(true)
1853 // and skip the download if the Release file has not changed
1854 // - but right now the list cleaner would need to be tricked
1855 // to not delete all our packages/source indexes in this case
1856 QueueIndexes(true);
1857
1858 return true;
1859 }
1860 /*}}}*/
1861 // pkgAcqMetaBase::GetCustom600Headers - Get header for AcqMetaBase /*{{{*/
1862 // ---------------------------------------------------------------------
1863 string pkgAcqMetaBase::GetCustom600Headers(const string &RealURI) const
1864 {
1865 std::string Header = "\nIndex-File: true";
1866 std::string MaximumSize;
1867 strprintf(MaximumSize, "\nMaximum-Size: %i",
1868 _config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000));
1869 Header += MaximumSize;
1870
1871 string FinalFile = _config->FindDir("Dir::State::lists");
1872 FinalFile += URItoFileName(RealURI);
1873
1874 struct stat Buf;
1875 if (stat(FinalFile.c_str(),&Buf) == 0)
1876 Header += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime);
1877
1878 return Header;
1879 }
1880 /*}}}*/
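// For illustration (example values only), the headers assembled above end
// up looking roughly like:
//   Index-File: true
//   Maximum-Size: 10000000
//   Last-Modified: Sat, 14 Jun 2014 08:00:00 GMT
// allowing the transport method to issue an If-Modified-Since request.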
1881 // pkgAcqMetaBase::QueueForSignatureVerify /*{{{*/
1882 void pkgAcqMetaBase::QueueForSignatureVerify(const std::string &MetaIndexFile,
1883 const std::string &MetaIndexFileSignature)
1884 {
1885 AuthPass = true;
1886 Desc.URI = "gpgv:" + MetaIndexFileSignature;
1887 DestFile = MetaIndexFile;
1888 QueueURI(Desc);
1889 SetActiveSubprocess("gpgv");
1890 }
1891 /*}}}*/
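// Illustrative example of the hand-off above (paths shortened): after the
// detached signature has been downloaded the item is requeued with
//   Desc.URI = "gpgv:/var/lib/apt/lists/partial/..._Release.gpg"
//   DestFile = "/var/lib/apt/lists/partial/..._Release"
// so the gpgv method checks the signature against the Release file.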
1892 // pkgAcqMetaBase::CheckDownloadDone /*{{{*/
1893 bool pkgAcqMetaBase::CheckDownloadDone(const std::string &Message,
1894 const std::string &RealURI)
1895 {
1896 // We have just finished downloading a Release file (it is not
1897 // verified yet)
1898
1899 string FileName = LookupTag(Message,"Filename");
1900 if (FileName.empty() == true)
1901 {
1902 Status = StatError;
1903 ErrorText = "Method gave a blank filename";
1904 return false;
1905 }
1906
1907 if (FileName != DestFile)
1908 {
1909 Local = true;
1910 Desc.URI = "copy:" + FileName;
1911 QueueURI(Desc);
1912 return false;
1913 }
1914
1915 // make sure to verify against the right file on I-M-S hit
1916 IMSHit = StringToBool(LookupTag(Message,"IMS-Hit"),false);
1917 if(IMSHit)
1918 {
1919 string FinalFile = _config->FindDir("Dir::State::lists");
1920 FinalFile += URItoFileName(RealURI);
1921 DestFile = FinalFile;
1922 }
1923
1924 // set Item to complete as the remaining work is all local (verify etc)
1925 Complete = true;
1926
1927 return true;
1928 }
1929 /*}}}*/
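// For reference, the Message parsed above is the status report sent back by
// the transport method; the tags used here look roughly like (example values):
//   Filename: /var/lib/apt/lists/partial/..._dists_stable_Release
//   IMS-Hit: true
// On an IMS hit the server reported the file as unchanged, so DestFile is
// switched to the already stored copy in lists/ for verification.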
1930 void pkgAcqMetaBase::QueueIndexes(bool verify) /*{{{*/
1931 {
1932 // at this point the real Items are loaded in the fetcher
1933 ExpectedAdditionalItems = 0;
1934
1935 vector <struct IndexTarget*>::const_iterator Target;
1936 for (Target = IndexTargets->begin();
1937 Target != IndexTargets->end();
1938 ++Target)
1939 {
1940 HashStringList ExpectedIndexHashes;
1941 const indexRecords::checkSum *Record = MetaIndexParser->Lookup((*Target)->MetaKey);
1942
1943 // optional targets that we do not have in the Release file are
1944 // skipped
1945 if (verify == true && Record == NULL && (*Target)->IsOptional())
1946 continue;
1947
1948 // targets without a hash record are an error when verify is required
1949 if (verify == true && Record == NULL)
1950 {
1951 Status = StatAuthError;
1952 strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), (*Target)->MetaKey.c_str());
1953 return;
1954 }
1955
1956 if (Record)
1957 ExpectedIndexHashes = Record->Hashes;
1958
1959 if (_config->FindB("Debug::pkgAcquire::Auth", false))
1960 {
1961 std::cerr << "Queueing: " << (*Target)->URI << std::endl
1962 << "Expected Hash:" << std::endl;
1963 for (HashStringList::const_iterator hs = ExpectedIndexHashes.begin(); hs != ExpectedIndexHashes.end(); ++hs)
1964 std::cerr << "\t- " << hs->toStr() << std::endl;
1965 std::cerr << "For: " << Record->MetaKeyFilename << std::endl;
1966
1967 }
1968 if (verify == true && ExpectedIndexHashes.empty() == true)
1969 {
1970 Status = StatAuthError;
1971 strprintf(ErrorText, _("Unable to find hash sum for '%s' in Release file"), (*Target)->MetaKey.c_str());
1972 return;
1973 }
1974
1975 /* Queue the Index file (Packages, Sources, Translation-$foo),
1976 either as a diff or as the full packages file, depending
1977 on the user's option - we also check if the PDiff Index file is listed
1978 in the Meta-Index file. Ideally pkgAcqDiffIndex would test this
1979 instead, but passing the required info to it is too much hassle */
1980 if(_config->FindB("Acquire::PDiffs",true) == true && (verify == false ||
1981 MetaIndexParser->Exists((*Target)->MetaKey + ".diff/Index") == true))
1982 new pkgAcqDiffIndex(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser);
1983 else
1984 new pkgAcqIndex(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser);
1985 }
1986 }
1987 /*}}}*/
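// Note: the choice between pkgAcqDiffIndex and pkgAcqIndex above can be
// steered by the user, e.g. (illustrative command line)
//   apt-get update -o Acquire::PDiffs=false
// always fetches the full index files instead of the .diff/Index patches.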
1988 bool pkgAcqMetaBase::VerifyVendor(string Message, const string &RealURI)/*{{{*/
1989 {
1990 string::size_type pos;
1991
1992 // check for missing sigs (that were not fatal because otherwise we
1993 // would have bombed out earlier)
1994 string missingkeys;
1995 string msg = _("There is no public key available for the "
1996 "following key IDs:\n");
1997 pos = Message.find("NO_PUBKEY ");
1998 if (pos != std::string::npos)
1999 {
2000 string::size_type start = pos+strlen("NO_PUBKEY ");
2001 string Fingerprint = Message.substr(start, Message.find("\n")-start);
2002 missingkeys += (Fingerprint);
2003 }
2004 if(!missingkeys.empty())
2005 _error->Warning("%s", (msg + missingkeys).c_str());
2006
2007 string Transformed = MetaIndexParser->GetExpectedDist();
2008
2009 if (Transformed == "../project/experimental")
2010 {
2011 Transformed = "experimental";
2012 }
2013
2014 pos = Transformed.rfind('/');
2015 if (pos != string::npos)
2016 {
2017 Transformed = Transformed.substr(0, pos);
2018 }
2019
2020 if (Transformed == ".")
2021 {
2022 Transformed = "";
2023 }
2024
2025 if (_config->FindB("Acquire::Check-Valid-Until", true) == true &&
2026 MetaIndexParser->GetValidUntil() > 0) {
2027 time_t const invalid_since = time(NULL) - MetaIndexParser->GetValidUntil();
2028 if (invalid_since > 0)
2029 // TRANSLATOR: The first %s is the URL of the bad Release file, the second is
2030 // how long the file has been invalid - formatted in the same way as in
2031 // the download progress display (e.g. 7d 3h 42min 1s)
2032 return _error->Error(
2033 _("Release file for %s is expired (invalid since %s). "
2034 "Updates for this repository will not be applied."),
2035 RealURI.c_str(), TimeToStr(invalid_since).c_str());
2036 }
2037
2038 if (_config->FindB("Debug::pkgAcquire::Auth", false))
2039 {
2040 std::cerr << "Got Codename: " << MetaIndexParser->GetDist() << std::endl;
2041 std::cerr << "Expecting Dist: " << MetaIndexParser->GetExpectedDist() << std::endl;
2042 std::cerr << "Transformed Dist: " << Transformed << std::endl;
2043 }
2044
2045 if (MetaIndexParser->CheckDist(Transformed) == false)
2046 {
2047 // This might become fatal one day
2048 // Status = StatAuthError;
2049 // ErrorText = "Conflicting distribution; expected "
2050 // + MetaIndexParser->GetExpectedDist() + " but got "
2051 // + MetaIndexParser->GetDist();
2052 // return false;
2053 if (!Transformed.empty())
2054 {
2055 _error->Warning(_("Conflicting distribution: %s (expected %s but got %s)"),
2056 Desc.Description.c_str(),
2057 Transformed.c_str(),
2058 MetaIndexParser->GetDist().c_str());
2059 }
2060 }
2061
2062 return true;
2063 }
2064 /*}}}*/
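// Note: the Valid-Until handling above can be relaxed by the user, e.g.
//   apt-get update -o Acquire::Check-Valid-Until=false
// (illustrative only) skips the expiry check, which can be useful for
// archives that are intentionally frozen, such as snapshot mirrors.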
2065 // pkgAcqMetaIndex::Failed - no Release file present /*{{{*/
2066 void pkgAcqMetaIndex::Failed(string Message,
2067 pkgAcquire::MethodConfig * Cnf)
2068 {
2069 pkgAcquire::Item::Failed(Message, Cnf);
2070 Status = StatDone;
2071
2072 string FinalFile = _config->FindDir("Dir::State::lists") + URItoFileName(RealURI);
2073
2074 _error->Warning(_("The repository '%s' does not have a Release file. "
2075 "This is deprecated, please contact the owner of the "
2076 "repository."), URIDesc.c_str());
2077
2078 // No Release file was present so fall
2079 // back to queueing Packages files without verification
2080 // only allow going further if the user explicitly wants it
2081 if(AllowInsecureRepositories(MetaIndexParser, TransactionManager, this) == true)
2082 {
2083 // Done, queue for rename on transaction finished
2084 if (FileExists(DestFile))
2085 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
2086
2087 // queue without any kind of hashsum support
2088 QueueIndexes(false);
2089 }
2090 }
2091 /*}}}*/
2092 void pkgAcqMetaIndex::Finished() /*{{{*/
2093 {
2094 if(_config->FindB("Debug::Acquire::Transaction", false) == true)
2095 std::clog << "Finished: " << DestFile <<std::endl;
2096 if(TransactionManager != NULL &&
2097 TransactionManager->TransactionHasError() == false)
2098 TransactionManager->CommitTransaction();
2099 }
2100 /*}}}*/
2101 pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire *Owner, /*{{{*/
2102 string const &URI, string const &URIDesc, string const &ShortDesc,
2103 string const &MetaIndexURI, string const &MetaIndexURIDesc, string const &MetaIndexShortDesc,
2104 string const &MetaSigURI, string const &MetaSigURIDesc, string const &MetaSigShortDesc,
2105 const vector<IndexTarget*>* IndexTargets,
2106 indexRecords* MetaIndexParser) :
2107 pkgAcqMetaIndex(Owner, NULL, URI, URIDesc, ShortDesc, MetaSigURI, MetaSigURIDesc,MetaSigShortDesc, IndexTargets, MetaIndexParser),
2108 MetaIndexURI(MetaIndexURI), MetaIndexURIDesc(MetaIndexURIDesc), MetaIndexShortDesc(MetaIndexShortDesc),
2109 MetaSigURI(MetaSigURI), MetaSigURIDesc(MetaSigURIDesc), MetaSigShortDesc(MetaSigShortDesc)
2110 {
2111 // index targets + (worst case:) Release/Release.gpg
2112 ExpectedAdditionalItems = IndexTargets->size() + 2;
2113
2114 }
2115 /*}}}*/
2116 pkgAcqMetaClearSig::~pkgAcqMetaClearSig() /*{{{*/
2117 {
2118 }
2119 /*}}}*/
2120 // pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers /*{{{*/
2121 // ---------------------------------------------------------------------
2122 string pkgAcqMetaClearSig::Custom600Headers() const
2123 {
2124 string Header = GetCustom600Headers(RealURI);
2125 Header += "\nFail-Ignore: true";
2126 return Header;
2127 }
2128 /*}}}*/
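// The extra "Fail-Ignore: true" header presumably tells the worker that a
// failed InRelease download is not fatal, since Failed() below falls back
// to fetching Release and Release.gpg separately.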
2129 // pkgAcqMetaClearSig::Done - We got a file /*{{{*/
2130 // ---------------------------------------------------------------------
2131 void pkgAcqMetaClearSig::Done(std::string Message,unsigned long long Size,
2132 HashStringList const &Hashes,
2133 pkgAcquire::MethodConfig *Cnf)
2134 {
2135 Item::Done(Message, Size, Hashes, Cnf);
2136
2137 // if we expect a ClearTextSignature (InRelease), ensure that
2138 // this is what we get and if not fail to queue a
2139 // Release/Release.gpg, see #346386
2140 if (FileExists(DestFile) && !StartsWithGPGClearTextSignature(DestFile))
2141 {
2142 pkgAcquire::Item::Failed(Message, Cnf);
2143 RenameOnError(NotClearsigned);
2144 TransactionManager->AbortTransaction();
2145 return;
2146 }
2147
2148 if(AuthPass == false)
2149 {
2150 if(CheckDownloadDone(Message, RealURI) == true)
2151 QueueForSignatureVerify(DestFile, DestFile);
2152 return;
2153 }
2154 else
2155 {
2156 if(CheckAuthDone(Message, RealURI) == true)
2157 {
2158 string FinalFile = _config->FindDir("Dir::State::lists");
2159 FinalFile += URItoFileName(RealURI);
2160
2161 // queue for copy in place
2162 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
2163 }
2164 }
2165 }
2166 /*}}}*/
2167 void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/
2168 {
2169 Item::Failed(Message, Cnf);
2170
2171 // we failed, we will not get additional items from this method
2172 ExpectedAdditionalItems = 0;
2173
2174 if (AuthPass == false)
2175 {
2176 // Queue the 'old' InRelease file for removal if we try Release.gpg,
2177 // as otherwise the file would stay around and give a false-auth
2178 // impression (CVE-2012-0214)
2179 string FinalFile = _config->FindDir("Dir::State::lists");
2180 FinalFile.append(URItoFileName(RealURI));
2181 TransactionManager->TransactionStageRemoval(this, FinalFile);
2182 Status = StatDone;
2183
2184 new pkgAcqMetaIndex(Owner, TransactionManager,
2185 MetaIndexURI, MetaIndexURIDesc, MetaIndexShortDesc,
2186 MetaSigURI, MetaSigURIDesc, MetaSigShortDesc,
2187 IndexTargets, MetaIndexParser);
2188 }
2189 else
2190 {
2191 if(CheckStopAuthentication(RealURI, Message))
2192 return;
2193
2194 _error->Warning(_("The data from '%s' is not signed. Packages "
2195 "from that repository can not be authenticated."),
2196 URIDesc.c_str());
2197
2198 // No Release file was present, or verification failed, so fall
2199 // back to queueing Packages files without verification
2200 // only allow going further if the user explicitly wants it
2201 if(AllowInsecureRepositories(MetaIndexParser, TransactionManager, this) == true)
2202 {
2203 Status = StatDone;
2204
2205 /* Always move the meta index, even if gpgv failed. This ensures
2206 * that PackageFile objects are correctly filled in */
2207 if (FileExists(DestFile))
2208 {
2209 string FinalFile = _config->FindDir("Dir::State::lists");
2210 FinalFile += URItoFileName(RealURI);
2211 /* InRelease files become Release files, otherwise
2212 * they would be considered as trusted later on */
2213 RealURI = RealURI.replace(RealURI.rfind("InRelease"), 9,
2214 "Release");
2215 FinalFile = FinalFile.replace(FinalFile.rfind("InRelease"), 9,
2216 "Release");
2217
2218 // Done, queue for rename on transaction finished
2219 TransactionManager->TransactionStageCopy(this, DestFile, FinalFile);
2220 }
2221 QueueIndexes(false);
2222 }
2223 }
2224 }
2225 /*}}}*/
2226 // AcqArchive::AcqArchive - Constructor /*{{{*/
2227 // ---------------------------------------------------------------------
2228 /* This just sets up the initial fetch environment and queues the first
2229 possibility */
2230 pkgAcqArchive::pkgAcqArchive(pkgAcquire *Owner,pkgSourceList *Sources,
2231 pkgRecords *Recs,pkgCache::VerIterator const &Version,
2232 string &StoreFilename) :
2233 Item(Owner, HashStringList()), Version(Version), Sources(Sources), Recs(Recs),
2234 StoreFilename(StoreFilename), Vf(Version.FileList()),
2235 Trusted(false)
2236 {
2237 Retries = _config->FindI("Acquire::Retries",0);
2238
2239 if (Version.Arch() == 0)
2240 {
2241 _error->Error(_("I wasn't able to locate a file for the %s package. "
2242 "This might mean you need to manually fix this package. "
2243 "(due to missing arch)"),
2244 Version.ParentPkg().FullName().c_str());
2245 return;
2246 }
2247
2248 /* We need to find a filename to determine the extension. We make the
2249 assumption here that all the available sources for this version share
2250 the same extension. */
2251 // Skip non-source entries; they do not have file fields.
2252 for (; Vf.end() == false; ++Vf)
2253 {
2254 if ((Vf.File()->Flags & pkgCache::Flag::NotSource) != 0)
2255 continue;
2256 break;
2257 }
2258
2259 // If Vf is already at the end it does not matter here: we will fail out below anyway
2260 if (Vf.end() != true)
2261 {
2262 // If this fails to get a file name we will bomb out below.
2263 pkgRecords::Parser &Parse = Recs->Lookup(Vf);
2264 if (_error->PendingError() == true)
2265 return;
2266
2267 // Generate the final file name as: package_version_arch.foo
2268 StoreFilename = QuoteString(Version.ParentPkg().Name(),"_:") + '_' +
2269 QuoteString(Version.VerStr(),"_:") + '_' +
2270 QuoteString(Version.Arch(),"_:.") +
2271 "." + flExtension(Parse.FileName());
2272 }
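// Illustrative result of the naming scheme above: fetching apt 1.0.9.8 for
// amd64 over http would store e.g. "apt_1.0.9.8_amd64.deb"; characters like
// the ':' of an epoch are escaped by QuoteString.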
2273
2274 // check if we have one trusted source for the package. if so, switch
2275 // to "TrustedOnly" mode - but only if not in AllowUnauthenticated mode
2276 bool const allowUnauth = _config->FindB("APT::Get::AllowUnauthenticated", false);
2277 bool const debugAuth = _config->FindB("Debug::pkgAcquire::Auth", false);
2278 bool seenUntrusted = false;
2279 for (pkgCache::VerFileIterator i = Version.FileList(); i.end() == false; ++i)
2280 {
2281 pkgIndexFile *Index;
2282 if (Sources->FindIndex(i.File(),Index) == false)
2283 continue;
2284
2285 if (debugAuth == true)
2286 std::cerr << "Checking index: " << Index->Describe()
2287 << "(Trusted=" << Index->IsTrusted() << ")" << std::endl;
2288
2289 if (Index->IsTrusted() == true)
2290 {
2291 Trusted = true;
2292 if (allowUnauth == false)
2293 break;
2294 }
2295 else
2296 seenUntrusted = true;
2297 }
2298
2299 // "allow-unauthenticated" restores apts old fetching behaviour
2300 // that means that e.g. unauthenticated file:// uris are higher
2301 // priority than authenticated http:// uris
2302 if (allowUnauth == true && seenUntrusted == true)
2303 Trusted = false;
2304
2305 // Select a source
2306 if (QueueNext() == false && _error->PendingError() == false)
2307 _error->Error(_("Can't find a source to download version '%s' of '%s'"),
2308 Version.VerStr(), Version.ParentPkg().FullName(false).c_str());
2309 }
2310 /*}}}*/
2311 // AcqArchive::QueueNext - Queue the next file source /*{{{*/
2312 // ---------------------------------------------------------------------
2313 /* This queues the next available file version for download. It checks if
2314 the archive is already available in the cache and stashes the expected
2315 hashes for checking later. */
2316 bool pkgAcqArchive::QueueNext()
2317 {
2318 for (; Vf.end() == false; ++Vf)
2319 {
2320 // Ignore non-source entries
2321 if ((Vf.File()->Flags & pkgCache::Flag::NotSource) != 0)
2322 continue;
2323
2324 // Try to cross match against the source list
2325 pkgIndexFile *Index;
2326 if (Sources->FindIndex(Vf.File(),Index) == false)
2327 continue;
2328
2329 // only try to get a trusted package from another source if that source
2330 // is also trusted
2331 if(Trusted && !Index->IsTrusted())
2332 continue;
2333
2334 // Grab the text package record
2335 pkgRecords::Parser &Parse = Recs->Lookup(Vf);
2336 if (_error->PendingError() == true)
2337 return false;
2338
2339 string PkgFile = Parse.FileName();
2340 ExpectedHashes = Parse.Hashes();
2341
2342 if (PkgFile.empty() == true)
2343 return _error->Error(_("The package index files are corrupted. No Filename: "
2344 "field for package %s."),
2345 Version.ParentPkg().Name());
2346
2347 Desc.URI = Index->ArchiveURI(PkgFile);
2348 Desc.Description = Index->ArchiveInfo(Version);
2349 Desc.Owner = this;
2350 Desc.ShortDesc = Version.ParentPkg().FullName(true);
2351
2352 // See if we already have the file. (Legacy filenames)
2353 FileSize = Version->Size;
2354 string FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(PkgFile);
2355 struct stat Buf;
2356 if (stat(FinalFile.c_str(),&Buf) == 0)
2357 {
2358 // Make sure the size matches
2359 if ((unsigned long long)Buf.st_size == Version->Size)
2360 {
2361 Complete = true;
2362 Local = true;
2363 Status = StatDone;
2364 StoreFilename = DestFile = FinalFile;
2365 return true;
2366 }
2367
2368 /* Hmm, we have a file and its size does not match, this means it is
2369 an old style mismatched arch */
2370 unlink(FinalFile.c_str());
2371 }
2372
2373 // Check it again using the new style output filenames
2374 FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename);
2375 if (stat(FinalFile.c_str(),&Buf) == 0)
2376 {
2377 // Make sure the size matches
2378 if ((unsigned long long)Buf.st_size == Version->Size)
2379 {
2380 Complete = true;
2381 Local = true;
2382 Status = StatDone;
2383 StoreFilename = DestFile = FinalFile;
2384 return true;
2385 }
2386
2387 /* Hmm, we have a file and its size does not match, this shouldn't
2388 happen.. */
2389 unlink(FinalFile.c_str());
2390 }
2391
2392 DestFile = _config->FindDir("Dir::Cache::Archives") + "partial/" + flNotDir(StoreFilename);
2393
2394 // Check the destination file
2395 if (stat(DestFile.c_str(),&Buf) == 0)
2396 {
2397 // Hmm, the partial file is too big, erase it
2398 if ((unsigned long long)Buf.st_size > Version->Size)
2399 unlink(DestFile.c_str());
2400 else
2401 PartialSize = Buf.st_size;
2402 }
2403
2404 // Disables download of archives - useful if no real installation follows,
2405 // e.g. if we are just interested in proposed installation order
2406 if (_config->FindB("Debug::pkgAcqArchive::NoQueue", false) == true)
2407 {
2408 Complete = true;
2409 Local = true;
2410 Status = StatDone;
2411 StoreFilename = DestFile = FinalFile;
2412 return true;
2413 }
2414
2415 // Create the item
2416 Local = false;
2417 QueueURI(Desc);
2418
2419 ++Vf;
2420 return true;
2421 }
2422 return false;
2423 }
2424 /*}}}*/
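// Default layout used by the checks above (for illustration):
//   Dir::Cache::Archives -> /var/cache/apt/archives/
//   partial downloads    -> /var/cache/apt/archives/partial/<StoreFilename>
// A completed archive is moved from partial/ into archives/ by Done() below.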
2425 // AcqArchive::Done - Finished fetching /*{{{*/
2426 // ---------------------------------------------------------------------
2427 /* */
2428 void pkgAcqArchive::Done(string Message,unsigned long long Size, HashStringList const &CalcHashes,
2429 pkgAcquire::MethodConfig *Cfg)
2430 {
2431 Item::Done(Message, Size, CalcHashes, Cfg);
2432
2433 // Check the size
2434 if (Size != Version->Size)
2435 {
2436 RenameOnError(SizeMismatch);
2437 return;
2438 }
2439
2440 // FIXME: could this empty() check impose *any* sort of security issue?
2441 if(ExpectedHashes.usable() && ExpectedHashes != CalcHashes)
2442 {
2443 RenameOnError(HashSumMismatch);
2444 printHashSumComparision(DestFile, ExpectedHashes, CalcHashes);
2445 return;
2446 }
2447
2448 // Grab the output filename
2449 string FileName = LookupTag(Message,"Filename");
2450 if (FileName.empty() == true)
2451 {
2452 Status = StatError;
2453 ErrorText = "Method gave a blank filename";
2454 return;
2455 }
2456
2457 // Reference filename
2458 if (FileName != DestFile)
2459 {
2460 StoreFilename = DestFile = FileName;
2461 Local = true;
2462 Complete = true;
2463 return;
2464 }
2465
2466 // Done, move it into position
2467 string FinalFile = _config->FindDir("Dir::Cache::Archives");
2468 FinalFile += flNotDir(StoreFilename);
2469 Rename(DestFile,FinalFile);
2470 StoreFilename = DestFile = FinalFile;
2471 Complete = true;
2472 }
2473 /*}}}*/
2474 // AcqArchive::Failed - Failure handler /*{{{*/
2475 // ---------------------------------------------------------------------
2476 /* Here we try other sources */
2477 void pkgAcqArchive::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
2478 {
2479 Item::Failed(Message,Cnf);
2480
2481 /* We don't really want to retry on failed media swaps, this prevents
2482 that. An interesting observation is that permanent failures are not
2483 recorded. */
2484 if (Cnf->Removable == true &&
2485 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
2486 {
2487 // Vf = Version.FileList();
2488 while (Vf.end() == false) ++Vf;
2489 StoreFilename = string();
2490 return;
2491 }
2492
2493 Status = StatIdle;
2494 if (QueueNext() == false)
2495 {
2496 // This is the retry counter
2497 if (Retries != 0 &&
2498 Cnf->LocalOnly == false &&
2499 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
2500 {
2501 Retries--;
2502 Vf = Version.FileList();
2503 if (QueueNext() == true)
2504 return;
2505 }
2506
2507 StoreFilename = string();
2508 Status = StatError;
2509 }
2510 }
2511 /*}}}*/
2512 // AcqArchive::IsTrusted - Determine whether this archive comes from a trusted source /*{{{*/
2513 // ---------------------------------------------------------------------
2514 APT_PURE bool pkgAcqArchive::IsTrusted() const
2515 {
2516 return Trusted;
2517 }
2518 /*}}}*/
2519 // AcqArchive::Finished - Fetching has finished, tidy up /*{{{*/
2520 // ---------------------------------------------------------------------
2521 /* */
2522 void pkgAcqArchive::Finished()
2523 {
2524 if (Status == pkgAcquire::Item::StatDone &&
2525 Complete == true)
2526 return;
2527 StoreFilename = string();
2528 }
2529 /*}}}*/
2530 // AcqFile::pkgAcqFile - Constructor /*{{{*/
2531 // ---------------------------------------------------------------------
2532 /* The file is added to the queue */
2533 pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI, HashStringList const &Hashes,
2534 unsigned long long Size,string Dsc,string ShortDesc,
2535 const string &DestDir, const string &DestFilename,
2536 bool IsIndexFile) :
2537 Item(Owner, Hashes), IsIndexFile(IsIndexFile)
2538 {
2539 Retries = _config->FindI("Acquire::Retries",0);
2540
2541 if(!DestFilename.empty())
2542 DestFile = DestFilename;
2543 else if(!DestDir.empty())
2544 DestFile = DestDir + "/" + flNotDir(URI);
2545 else
2546 DestFile = flNotDir(URI);
2547
2548 // Create the item
2549 Desc.URI = URI;
2550 Desc.Description = Dsc;
2551 Desc.Owner = this;
2552
2553 // Set the short description to the archive component
2554 Desc.ShortDesc = ShortDesc;
2555
2556 // Get the transfer sizes
2557 FileSize = Size;
2558 struct stat Buf;
2559 if (stat(DestFile.c_str(),&Buf) == 0)
2560 {
2561 // Hmm, the partial file is too big, erase it
2562 if ((Size > 0) && (unsigned long long)Buf.st_size > Size)
2563 unlink(DestFile.c_str());
2564 else
2565 PartialSize = Buf.st_size;
2566 }
2567
2568 QueueURI(Desc);
2569 }
2570 /*}}}*/
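// Usage sketch (illustrative only, not taken from this file): a caller with
// a fetcher can queue an arbitrary download roughly like
//   pkgAcquire Fetcher;
//   new pkgAcqFile(&Fetcher, "http://example.org/foo_1.0.tar.gz",
//                  HashStringList(), 0, "foo source", "foo_1.0.tar.gz");
//   Fetcher.Run();
// where the fetcher tracks the item and cleans it up afterwards.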
2571 // AcqFile::Done - Item downloaded OK /*{{{*/
2572 // ---------------------------------------------------------------------
2573 /* */
2574 void pkgAcqFile::Done(string Message,unsigned long long Size,HashStringList const &CalcHashes,
2575 pkgAcquire::MethodConfig *Cnf)
2576 {
2577 Item::Done(Message,Size,CalcHashes,Cnf);
2578
2579 // Check the hash
2580 if(ExpectedHashes.usable() && ExpectedHashes != CalcHashes)
2581 {
2582 RenameOnError(HashSumMismatch);
2583 printHashSumComparision(DestFile, ExpectedHashes, CalcHashes);
2584 return;
2585 }
2586
2587 string FileName = LookupTag(Message,"Filename");
2588 if (FileName.empty() == true)
2589 {
2590 Status = StatError;
2591 ErrorText = "Method gave a blank filename";
2592 return;
2593 }
2594
2595 Complete = true;
2596
2597 // The file's timestamp matches
2598 if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true)
2599 return;
2600
2601 // We have to copy it into place
2602 if (FileName != DestFile)
2603 {
2604 Local = true;
2605 if (_config->FindB("Acquire::Source-Symlinks",true) == false ||
2606 Cnf->Removable == true)
2607 {
2608 Desc.URI = "copy:" + FileName;
2609 QueueURI(Desc);
2610 return;
2611 }
2612
2613 // Erase the file if it is a symlink so we can overwrite it
2614 struct stat St;
2615 if (lstat(DestFile.c_str(),&St) == 0)
2616 {
2617 if (S_ISLNK(St.st_mode) != 0)
2618 unlink(DestFile.c_str());
2619 }
2620
2621 // Symlink the file
2622 if (symlink(FileName.c_str(),DestFile.c_str()) != 0)
2623 {
2624 _error->PushToStack();
2625 _error->Errno("pkgAcqFile::Done", "Symlinking file %s failed", DestFile.c_str());
2626 std::stringstream msg;
2627 _error->DumpErrors(msg);
2628 _error->RevertToStack();
2629 ErrorText = msg.str();
2630 Status = StatError;
2631 Complete = false;
2632 }
2633 }
2634 }
2635 /*}}}*/
2636 // AcqFile::Failed - Failure handler /*{{{*/
2637 // ---------------------------------------------------------------------
2638 /* Here we try other sources */
2639 void pkgAcqFile::Failed(string Message,pkgAcquire::MethodConfig *Cnf)
2640 {
2641 Item::Failed(Message,Cnf);
2642
2643 // This is the retry counter
2644 if (Retries != 0 &&
2645 Cnf->LocalOnly == false &&
2646 StringToBool(LookupTag(Message,"Transient-Failure"),false) == true)
2647 {
2648 --Retries;
2649 QueueURI(Desc);
2650 Status = StatIdle;
2651 return;
2652 }
2653
2654 }
2655 /*}}}*/
2656 // AcqFile::Custom600Headers - Insert custom request headers
2657 // ---------------------------------------------------------------------
2658 /* The only header we use is the Index-File flag. */
2659 string pkgAcqFile::Custom600Headers() const
2660 {
2661 if (IsIndexFile)
2662 return "\nIndex-File: true";
2663 return "";
2664 }
2665 /*}}}*/