Commit | Line | Data |
---|---|---|
1 | // -*- mode: cpp; mode: fold -*- | |
2 | // Description /*{{{*/ | |
3 | // $Id: acquire-item.cc,v 1.46.2.9 2004/01/16 18:51:11 mdz Exp $ | |
4 | /* ###################################################################### | |
5 | ||
6 | Acquire Item - Item to acquire | |
7 | ||
8 | Each item can download to exactly one file at a time. This means you | |
9 | cannot create an item that fetches two URIs to two files at the same | |
10 | time. The pkgAcqIndex class creates a second class upon instantiation | |
11 | to fetch the other index files because of this. | |
12 | ||
13 | ##################################################################### */ | |
14 | /*}}}*/ | |
15 | // Include Files /*{{{*/ | |
16 | #include <config.h> | |
17 | ||
18 | #include <apt-pkg/acquire-item.h> | |
19 | #include <apt-pkg/configuration.h> | |
20 | #include <apt-pkg/aptconfiguration.h> | |
21 | #include <apt-pkg/sourcelist.h> | |
22 | #include <apt-pkg/error.h> | |
23 | #include <apt-pkg/strutl.h> | |
24 | #include <apt-pkg/fileutl.h> | |
25 | #include <apt-pkg/sha1.h> | |
26 | #include <apt-pkg/tagfile.h> | |
27 | #include <apt-pkg/indexrecords.h> | |
28 | #include <apt-pkg/acquire.h> | |
29 | #include <apt-pkg/hashes.h> | |
30 | #include <apt-pkg/indexfile.h> | |
31 | #include <apt-pkg/pkgcache.h> | |
32 | #include <apt-pkg/cacheiterators.h> | |
33 | #include <apt-pkg/pkgrecords.h> | |
34 | ||
35 | #include <stddef.h> | |
36 | #include <stdlib.h> | |
37 | #include <string.h> | |
38 | #include <iostream> | |
39 | #include <vector> | |
40 | #include <sys/stat.h> | |
41 | #include <unistd.h> | |
42 | #include <errno.h> | |
43 | #include <string> | |
44 | #include <sstream> | |
45 | #include <stdio.h> | |
46 | #include <ctime> | |
47 | ||
48 | #include <apti18n.h> | |
49 | /*}}}*/ | |
50 | ||
51 | using namespace std; | |
52 | ||
53 | static void printHashSumComparision(std::string const &URI, HashStringList const &Expected, HashStringList const &Actual) /*{{{*/ | |
54 | { | |
55 | if (_config->FindB("Debug::Acquire::HashSumMismatch", false) == false) | |
56 | return; | |
57 | std::cerr << std::endl << URI << ":" << std::endl << " Expected Hash: " << std::endl; | |
58 | for (HashStringList::const_iterator hs = Expected.begin(); hs != Expected.end(); ++hs) | |
59 | std::cerr << "\t- " << hs->toStr() << std::endl; | |
60 | std::cerr << " Actual Hash: " << std::endl; | |
61 | for (HashStringList::const_iterator hs = Actual.begin(); hs != Actual.end(); ++hs) | |
62 | std::cerr << "\t- " << hs->toStr() << std::endl; | |
63 | } | |
64 | /*}}}*/ | |
65 | static std::string GetPartialFileName(std::string const &file) /*{{{*/ | |
66 | { | |
67 | std::string DestFile = _config->FindDir("Dir::State::lists") + "partial/"; | |
68 | DestFile += file; | |
69 | return DestFile; | |
70 | } | |
71 | /*}}}*/ | |
72 | static std::string GetPartialFileNameFromURI(std::string const &uri) /*{{{*/ | |
73 | { | |
74 | return GetPartialFileName(URItoFileName(uri)); | |
75 | } | |
76 | /*}}}*/ | |
77 | static std::string GetFinalFileNameFromURI(std::string const &uri) /*{{{*/ | |
78 | { | |
79 | return _config->FindDir("Dir::State::lists") + URItoFileName(uri); | |
80 | } | |
81 | /*}}}*/ | |
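| // These helpers map a URI to on-disk paths below Dir::State::lists (typically | |
| // /var/lib/apt/lists/): files are downloaded into the partial/ subdirectory | |
| // and only moved to their final name once the owning transaction commits. | |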
82 | static std::string GetCompressedFileName(std::string const &URI, std::string const &Name, std::string const &Ext) /*{{{*/ | |
83 | { | |
84 | if (Ext.empty() || Ext == "uncompressed") | |
85 | return Name; | |
86 | ||
87 | // do not reverify cdrom sources as apt-cdrom may rewrite the Packages | |
88 | // file when it's doing the indexcopy | |
89 | if (URI.substr(0,6) == "cdrom:") | |
90 | return Name; | |
91 | ||
92 | // adjust DestFile if it's compressed on disk | |
93 | if (_config->FindB("Acquire::GzipIndexes",false) == true) | |
94 | return Name + '.' + Ext; | |
95 | return Name; | |
96 | } | |
97 | /*}}}*/ | |
98 | static bool AllowInsecureRepositories(indexRecords const * const MetaIndexParser, pkgAcqMetaBase * const TransactionManager, pkgAcquire::Item * const I) /*{{{*/ | |
99 | { | |
100 | if(MetaIndexParser->IsAlwaysTrusted() || _config->FindB("Acquire::AllowInsecureRepositories") == true) | |
101 | return true; | |
102 | ||
103 | _error->Error(_("Use --allow-insecure-repositories to force the update")); | |
104 | TransactionManager->AbortTransaction(); | |
105 | I->Status = pkgAcquire::Item::StatError; | |
106 | return false; | |
107 | } | |
108 | /*}}}*/ | |
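| // Callers invoke this gate before acting on an unsigned or unverifiable | |
| // repository; when it returns false the transaction has already been aborted | |
| // and the item marked StatError, so they merely need to bail out. | |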
109 | ||
110 | ||
111 | // Acquire::Item::Item - Constructor /*{{{*/ | |
112 | APT_IGNORE_DEPRECATED_PUSH | |
113 | pkgAcquire::Item::Item(pkgAcquire *Owner, | |
114 | HashStringList const &ExpectedHashes, | |
115 | pkgAcqMetaBase *TransactionManager) | |
116 | : Owner(Owner), FileSize(0), PartialSize(0), Mode(0), ID(0), Complete(false), | |
117 | Local(false), QueueCounter(0), TransactionManager(TransactionManager), | |
118 | ExpectedAdditionalItems(0), ExpectedHashes(ExpectedHashes) | |
119 | { | |
120 | Owner->Add(this); | |
121 | Status = StatIdle; | |
122 | if(TransactionManager != NULL) | |
123 | TransactionManager->Add(this); | |
124 | } | |
125 | APT_IGNORE_DEPRECATED_POP | |
126 | /*}}}*/ | |
127 | // Acquire::Item::~Item - Destructor /*{{{*/ | |
128 | // --------------------------------------------------------------------- | |
129 | /* */ | |
130 | pkgAcquire::Item::~Item() | |
131 | { | |
132 | Owner->Remove(this); | |
133 | } | |
134 | /*}}}*/ | |
135 | // Acquire::Item::Failed - Item failed to download /*{{{*/ | |
136 | // --------------------------------------------------------------------- | |
137 | /* We return to an idle state if there are still other queues that could | |
138 | fetch this object */ | |
139 | void pkgAcquire::Item::Failed(string Message,pkgAcquire::MethodConfig *Cnf) | |
140 | { | |
141 | if(ErrorText.empty()) | |
142 | ErrorText = LookupTag(Message,"Message"); | |
143 | UsedMirror = LookupTag(Message,"UsedMirror"); | |
144 | if (QueueCounter <= 1) | |
145 | { | |
146 | /* This indicates that the file is not available right now but might | |
147 | be available sometime later. If we do a retry cycle then this should be | |
148 | retried [CDROMs] */ | |
149 | if (Cnf != NULL && Cnf->LocalOnly == true && | |
150 | StringToBool(LookupTag(Message,"Transient-Failure"),false) == true) | |
151 | { | |
152 | Status = StatIdle; | |
153 | Dequeue(); | |
154 | return; | |
155 | } | |
156 | ||
157 | Status = StatError; | |
158 | Complete = false; | |
159 | Dequeue(); | |
160 | } | |
161 | else | |
162 | Status = StatIdle; | |
163 | ||
164 | // check fail reason | |
165 | string const FailReason = LookupTag(Message, "FailReason"); | |
166 | if(FailReason == "MaximumSizeExceeded") | |
167 | RenameOnError(MaximumSizeExceeded); | |
168 | ||
169 | // report mirror failure back to LP if we actually use a mirror | |
170 | if(FailReason.size() != 0) | |
171 | ReportMirrorFailure(FailReason); | |
172 | else | |
173 | ReportMirrorFailure(ErrorText); | |
174 | } | |
175 | /*}}}*/ | |
176 | bool pkgAcquire::Item::TransactionState(TransactionStates const state) /*{{{*/ | |
177 | { | |
178 | bool const Debug = _config->FindB("Debug::Acquire::Transaction", false); | |
179 | switch(state) | |
180 | { | |
181 | case TransactionAbort: | |
182 | if(Debug == true) | |
183 | std::clog << " Cancel: " << DestFile << std::endl; | |
184 | if (Status == pkgAcquire::Item::StatIdle) | |
185 | { | |
186 | Status = pkgAcquire::Item::StatDone; | |
187 | Dequeue(); | |
188 | } | |
189 | break; | |
190 | case TransactionCommit: | |
191 | if(PartialFile != "") | |
192 | { | |
193 | if(Debug == true) | |
194 | std::clog << "mv " << PartialFile << " -> "<< DestFile << " # " << DescURI() << std::endl; | |
195 | ||
196 | Rename(PartialFile, DestFile); | |
197 | } else { | |
198 | if(Debug == true) | |
199 | std::clog << "rm " << DestFile << " # " << DescURI() << std::endl; | |
200 | unlink(DestFile.c_str()); | |
201 | } | |
202 | // mark that this transaction is finished | |
203 | TransactionManager = 0; | |
204 | break; | |
205 | } | |
206 | return true; | |
207 | } | |
208 | /*}}}*/ | |
209 | // Acquire::Item::Start - Item has begun to download /*{{{*/ | |
210 | // --------------------------------------------------------------------- | |
211 | /* Stash status and the file size. Note that setting Complete means | |
212 | sub-phases of the acquire process such as decompression are operating */ | |
213 | void pkgAcquire::Item::Start(string /*Message*/,unsigned long long Size) | |
214 | { | |
215 | Status = StatFetching; | |
216 | ErrorText.clear(); | |
217 | if (FileSize == 0 && Complete == false) | |
218 | FileSize = Size; | |
219 | } | |
220 | /*}}}*/ | |
221 | // Acquire::Item::Done - Item downloaded OK /*{{{*/ | |
222 | // --------------------------------------------------------------------- | |
223 | /* */ | |
224 | void pkgAcquire::Item::Done(string Message,unsigned long long Size,HashStringList const &/*Hash*/, | |
225 | pkgAcquire::MethodConfig * /*Cnf*/) | |
226 | { | |
227 | // We just downloaded something.. | |
228 | string FileName = LookupTag(Message,"Filename"); | |
229 | UsedMirror = LookupTag(Message,"UsedMirror"); | |
230 | if (Complete == false && !Local && FileName == DestFile) | |
231 | { | |
232 | if (Owner->Log != 0) | |
233 | Owner->Log->Fetched(Size,atoi(LookupTag(Message,"Resume-Point","0").c_str())); | |
234 | } | |
235 | ||
236 | if (FileSize == 0) | |
237 | FileSize= Size; | |
238 | Status = StatDone; | |
239 | ErrorText = string(); | |
240 | Owner->Dequeue(this); | |
241 | } | |
242 | /*}}}*/ | |
243 | // Acquire::Item::Rename - Rename a file /*{{{*/ | |
244 | // --------------------------------------------------------------------- | |
245 | /* This helper function is used by a lot of item methods as their final | |
246 | step */ | |
247 | bool pkgAcquire::Item::Rename(string From,string To) | |
248 | { | |
249 | if (From == To || rename(From.c_str(),To.c_str()) == 0) | |
250 | return true; | |
251 | ||
252 | std::string S; | |
253 | strprintf(S, _("rename failed, %s (%s -> %s)."), strerror(errno), | |
254 | From.c_str(),To.c_str()); | |
255 | Status = StatError; | |
256 | ErrorText += S; | |
257 | return false; | |
258 | } | |
259 | /*}}}*/ | |
260 | // Acquire::Item::QueueURI and specialisations from child classes /*{{{*/ | |
261 | /* The idea here is that an item isn't queued if it already exists on disk and | |
262 | the transaction manager was an IMS hit, as this means that the files it | |
263 | contains the checksums for can't have been updated either (or they were, and | |
264 | we are asking for a hash sum mismatch to happen, which helps nobody) */ | |
265 | bool pkgAcquire::Item::QueueURI(ItemDesc &Item) | |
266 | { | |
267 | std::string const FinalFile = GetFinalFilename(); | |
268 | if (TransactionManager != NULL && TransactionManager->IMSHit == true && | |
269 | FileExists(FinalFile) == true) | |
270 | { | |
271 | PartialFile = DestFile = FinalFile; | |
272 | Status = StatDone; | |
273 | return false; | |
274 | } | |
275 | ||
276 | Owner->Enqueue(Item); | |
277 | return true; | |
278 | } | |
279 | /* The transaction manager InRelease itself (or its older sisters-in-law | |
280 | Release & Release.gpg) is always queued as this allows us to rerun gpgv | |
281 | on it to verify that we aren't stalled with old files */ | |
282 | bool pkgAcqMetaBase::QueueURI(pkgAcquire::ItemDesc &Item) | |
283 | { | |
284 | Owner->Enqueue(Item); | |
285 | return true; | |
286 | } | |
287 | /* the Diff/Index also needs to queue the up-to-date complete index file | |
288 | to ensure that the list cleaner isn't eating it */ | |
289 | bool pkgAcqDiffIndex::QueueURI(pkgAcquire::ItemDesc &Item) | |
290 | { | |
291 | if (pkgAcquire::Item::QueueURI(Item) == true) | |
292 | return true; | |
293 | QueueOnIMSHit(); | |
294 | return false; | |
295 | } | |
296 | /*}}}*/ | |
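| // Summary of the QueueURI policies above: the base class skips queueing when | |
| // the transaction was an IMS hit and the final file already exists; the meta | |
| // index (pkgAcqMetaBase) is always fetched so gpgv can be re-run on it; and | |
| // pkgAcqDiffIndex reacts to an IMS hit by creating an empty pkgAcqIndexDiffs | |
| // (QueueOnIMSHit) so list cleanup keeps the existing index around. | |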
297 | void pkgAcquire::Item::Dequeue() /*{{{*/ | |
298 | { | |
299 | Owner->Dequeue(this); | |
300 | } | |
301 | /*}}}*/ | |
302 | bool pkgAcquire::Item::RenameOnError(pkgAcquire::Item::RenameOnErrorState const error)/*{{{*/ | |
303 | { | |
304 | if (RealFileExists(DestFile)) | |
305 | Rename(DestFile, DestFile + ".FAILED"); | |
306 | ||
307 | switch (error) | |
308 | { | |
309 | case HashSumMismatch: | |
310 | ErrorText = _("Hash Sum mismatch"); | |
311 | Status = StatAuthError; | |
312 | ReportMirrorFailure("HashChecksumFailure"); | |
313 | break; | |
314 | case SizeMismatch: | |
315 | ErrorText = _("Size mismatch"); | |
316 | Status = StatAuthError; | |
317 | ReportMirrorFailure("SizeFailure"); | |
318 | break; | |
319 | case InvalidFormat: | |
320 | ErrorText = _("Invalid file format"); | |
321 | Status = StatError; | |
322 | // do not report as usually it's not the mirror's fault, but the Portal/Proxy | |
323 | break; | |
324 | case SignatureError: | |
325 | ErrorText = _("Signature error"); | |
326 | Status = StatError; | |
327 | break; | |
328 | case NotClearsigned: | |
329 | ErrorText = _("Does not start with a cleartext signature"); | |
330 | Status = StatError; | |
331 | break; | |
332 | case MaximumSizeExceeded: | |
333 | // the method is expected to report a good error for this | |
334 | Status = StatError; | |
335 | break; | |
336 | case PDiffError: | |
337 | // no handling here, done by callers | |
338 | break; | |
339 | } | |
340 | return false; | |
341 | } | |
342 | /*}}}*/ | |
343 | void pkgAcquire::Item::SetActiveSubprocess(const std::string &subprocess)/*{{{*/ | |
344 | { | |
345 | ActiveSubprocess = subprocess; | |
346 | APT_IGNORE_DEPRECATED(Mode = ActiveSubprocess.c_str();) | |
347 | } | |
348 | /*}}}*/ | |
349 | // Acquire::Item::GetFinalFilename - Return the full final file path /*{{{*/ | |
350 | std::string pkgAcquire::Item::GetFinalFilename() const | |
351 | { | |
352 | return GetFinalFileNameFromURI(Desc.URI); | |
353 | } | |
354 | /*}}}*/ | |
355 | // Acquire::Item::ReportMirrorFailure /*{{{*/ | |
356 | // --------------------------------------------------------------------- | |
357 | void pkgAcquire::Item::ReportMirrorFailure(string FailCode) | |
358 | { | |
359 | // we only act if a mirror was used at all | |
360 | if(UsedMirror.empty()) | |
361 | return; | |
362 | #if 0 | |
363 | std::cerr << "\nReportMirrorFailure: " | |
364 | << UsedMirror | |
365 | << " Uri: " << DescURI() | |
366 | << " FailCode: " | |
367 | << FailCode << std::endl; | |
368 | #endif | |
369 | string report = _config->Find("Methods::Mirror::ProblemReporting", | |
370 | "/usr/lib/apt/apt-report-mirror-failure"); | |
371 | if(!FileExists(report)) | |
372 | return; | |
373 | ||
374 | std::vector<char const*> Args; | |
375 | Args.push_back(report.c_str()); | |
376 | Args.push_back(UsedMirror.c_str()); | |
377 | Args.push_back(DescURI().c_str()); | |
378 | Args.push_back(FailCode.c_str()); | |
379 | Args.push_back(NULL); | |
380 | ||
381 | pid_t pid = ExecFork(); | |
382 | if(pid < 0) | |
383 | { | |
384 | _error->Error("ReportMirrorFailure Fork failed"); | |
385 | return; | |
386 | } | |
387 | else if(pid == 0) | |
388 | { | |
389 | execvp(Args[0], (char**)Args.data()); | |
390 | std::cerr << "Could not exec " << Args[0] << std::endl; | |
391 | _exit(100); | |
392 | } | |
393 | if(!ExecWait(pid, "report-mirror-failure")) | |
394 | { | |
395 | _error->Warning("Couldn't report problem to '%s'", | |
396 | _config->Find("Methods::Mirror::ProblemReporting").c_str()); | |
397 | } | |
398 | } | |
399 | /*}}}*/ | |
400 | // AcqDiffIndex::AcqDiffIndex - Constructor /*{{{*/ | |
401 | // --------------------------------------------------------------------- | |
402 | /* Get the DiffIndex file first and see if there are patches available | |
403 | * If so, create a pkgAcqIndexDiffs fetcher that will get and apply the | |
404 | * patches. If anything goes wrong in that process, it will fall back to | |
405 | * the original packages file | |
406 | */ | |
407 | pkgAcqDiffIndex::pkgAcqDiffIndex(pkgAcquire *Owner, | |
408 | pkgAcqMetaBase *TransactionManager, | |
409 | IndexTarget const * const Target, | |
410 | HashStringList const &ExpectedHashes, | |
411 | indexRecords *MetaIndexParser) | |
412 | : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, | |
413 | MetaIndexParser) | |
414 | { | |
415 | ||
416 | Debug = _config->FindB("Debug::pkgAcquire::Diffs",false); | |
417 | ||
418 | RealURI = Target->URI; | |
419 | Desc.Owner = this; | |
420 | Desc.Description = Target->Description + ".diff/Index"; | |
421 | Desc.ShortDesc = Target->ShortDesc; | |
422 | Desc.URI = Target->URI + ".diff/Index"; | |
423 | ||
424 | DestFile = GetPartialFileNameFromURI(Desc.URI); | |
425 | ||
426 | if(Debug) | |
427 | std::clog << "pkgAcqDiffIndex: " << Desc.URI << std::endl; | |
428 | ||
429 | // look for the current package file | |
430 | CurrentPackagesFile = GetFinalFileNameFromURI(RealURI); | |
431 | ||
432 | // FIXME: this file:/ check is a hack to prevent fetching | |
433 | // from local sources. This is really silly, and | |
434 | // should be fixed cleanly as soon as possible | |
435 | if(!FileExists(CurrentPackagesFile) || | |
436 | Desc.URI.substr(0,strlen("file:/")) == "file:/") | |
437 | { | |
438 | // we don't have a pkg file or we don't want to queue | |
439 | Failed("No index file, local or canceld by user", NULL); | |
440 | return; | |
441 | } | |
442 | ||
443 | if(Debug) | |
444 | std::clog << "pkgAcqDiffIndex::pkgAcqDiffIndex(): " | |
445 | << CurrentPackagesFile << std::endl; | |
446 | ||
447 | QueueURI(Desc); | |
448 | ||
449 | } | |
450 | /*}}}*/ | |
451 | // AcqDiffIndex::GetFinalFilename - Return the full final file path /*{{{*/ | |
452 | std::string pkgAcqDiffIndex::GetFinalFilename() const | |
453 | { | |
454 | // the logic we inherit from pkgAcqBaseIndex isn't what we need here | |
455 | return pkgAcquire::Item::GetFinalFilename(); | |
456 | } | |
457 | /*}}}*/ | |
458 | // AcqDiffIndex::Custom600Headers - Insert custom request headers /*{{{*/ | |
459 | // --------------------------------------------------------------------- | |
460 | /* The only header we use is the last-modified header. */ | |
461 | #if APT_PKG_ABI >= 413 | |
462 | string pkgAcqDiffIndex::Custom600Headers() const | |
463 | #else | |
464 | string pkgAcqDiffIndex::Custom600Headers() | |
465 | #endif | |
466 | { | |
467 | string const Final = GetFinalFilename(); | |
468 | ||
469 | if(Debug) | |
470 | std::clog << "Custom600Header-IMS: " << Final << std::endl; | |
471 | ||
472 | struct stat Buf; | |
473 | if (stat(Final.c_str(),&Buf) != 0) | |
474 | return "\nIndex-File: true"; | |
475 | ||
476 | return "\nIndex-File: true\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); | |
477 | } | |
478 | /*}}}*/ | |
479 | void pkgAcqDiffIndex::QueueOnIMSHit() const /*{{{*/ | |
480 | { | |
481 | // list cleanup needs to know that this file as well as the already | |
482 | // present index is ours, so we create an empty diff to save it for us | |
483 | new pkgAcqIndexDiffs(Owner, TransactionManager, Target, | |
484 | ExpectedHashes, MetaIndexParser); | |
485 | } | |
486 | /*}}}*/ | |
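| // ParseDiffIndex (below) interprets the downloaded .diff/Index file: the | |
| // *-Current fields give hash and size of the target index, *-History is | |
| // scanned for an entry matching the local file (our starting point) and | |
| // *-Patches provides hash/size of each pdiff. If no starting point is found, | |
| // or the patch chain exceeds the configured file/size limits, it returns | |
| // false and the caller falls back to downloading the complete index. | |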
487 | bool pkgAcqDiffIndex::ParseDiffIndex(string IndexDiffFile) /*{{{*/ | |
488 | { | |
489 | // failing here is fine: our caller will take care of trying to | |
490 | // get the complete file if patching fails | |
491 | if(Debug) | |
492 | std::clog << "pkgAcqDiffIndex::ParseIndexDiff() " << IndexDiffFile | |
493 | << std::endl; | |
494 | ||
495 | FileFd Fd(IndexDiffFile,FileFd::ReadOnly); | |
496 | pkgTagFile TF(&Fd); | |
497 | if (_error->PendingError() == true) | |
498 | return false; | |
499 | ||
500 | pkgTagSection Tags; | |
501 | if(unlikely(TF.Step(Tags) == false)) | |
502 | return false; | |
503 | ||
504 | HashStringList ServerHashes; | |
505 | unsigned long long ServerSize = 0; | |
506 | ||
507 | for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type) | |
508 | { | |
509 | std::string tagname = *type; | |
510 | tagname.append("-Current"); | |
511 | std::string const tmp = Tags.FindS(tagname.c_str()); | |
512 | if (tmp.empty() == true) | |
513 | continue; | |
514 | ||
515 | string hash; | |
516 | unsigned long long size; | |
517 | std::stringstream ss(tmp); | |
518 | ss >> hash >> size; | |
519 | if (unlikely(hash.empty() == true)) | |
520 | continue; | |
521 | if (unlikely(ServerSize != 0 && ServerSize != size)) | |
522 | continue; | |
523 | ServerHashes.push_back(HashString(*type, hash)); | |
524 | ServerSize = size; | |
525 | } | |
526 | ||
527 | if (ServerHashes.usable() == false) | |
528 | { | |
529 | if (Debug == true) | |
530 | std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Did not find a good hashsum in the index" << std::endl; | |
531 | return false; | |
532 | } | |
533 | ||
534 | if (ServerHashes != HashSums()) | |
535 | { | |
536 | if (Debug == true) | |
537 | { | |
538 | std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": Index has different hashes than parser, probably older, so fail pdiffing" << std::endl; | |
539 | printHashSumComparision(CurrentPackagesFile, ServerHashes, HashSums()); | |
540 | } | |
541 | return false; | |
542 | } | |
543 | ||
544 | if (ServerHashes.VerifyFile(CurrentPackagesFile) == true) | |
545 | { | |
546 | // we have the same sha1 as the server so we are done here | |
547 | if(Debug) | |
548 | std::clog << "pkgAcqDiffIndex: Package file " << CurrentPackagesFile << " is up-to-date" << std::endl; | |
549 | QueueOnIMSHit(); | |
550 | return true; | |
551 | } | |
552 | ||
553 | FileFd fd(CurrentPackagesFile, FileFd::ReadOnly); | |
554 | Hashes LocalHashesCalc; | |
555 | LocalHashesCalc.AddFD(fd); | |
556 | HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); | |
557 | ||
558 | if(Debug) | |
559 | std::clog << "Server-Current: " << ServerHashes.find(NULL)->toStr() << " and we start at " | |
560 | << fd.Name() << " " << fd.FileSize() << " " << LocalHashes.find(NULL)->toStr() << std::endl; | |
561 | ||
562 | // parse all of (provided) history | |
563 | vector<DiffInfo> available_patches; | |
564 | bool firstAcceptedHashes = true; | |
565 | for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type) | |
566 | { | |
567 | if (LocalHashes.find(*type) == NULL) | |
568 | continue; | |
569 | ||
570 | std::string tagname = *type; | |
571 | tagname.append("-History"); | |
572 | std::string const tmp = Tags.FindS(tagname.c_str()); | |
573 | if (tmp.empty() == true) | |
574 | continue; | |
575 | ||
576 | string hash, filename; | |
577 | unsigned long long size; | |
578 | std::stringstream ss(tmp); | |
579 | ||
580 | while (ss >> hash >> size >> filename) | |
581 | { | |
582 | if (unlikely(hash.empty() == true || filename.empty() == true)) | |
583 | continue; | |
584 | ||
585 | // see if we have a record for this file already | |
586 | std::vector<DiffInfo>::iterator cur = available_patches.begin(); | |
587 | for (; cur != available_patches.end(); ++cur) | |
588 | { | |
589 | if (cur->file != filename || unlikely(cur->result_size != size)) | |
590 | continue; | |
591 | cur->result_hashes.push_back(HashString(*type, hash)); | |
592 | break; | |
593 | } | |
594 | if (cur != available_patches.end()) | |
595 | continue; | |
596 | if (firstAcceptedHashes == true) | |
597 | { | |
598 | DiffInfo next; | |
599 | next.file = filename; | |
600 | next.result_hashes.push_back(HashString(*type, hash)); | |
601 | next.result_size = size; | |
602 | next.patch_size = 0; | |
603 | available_patches.push_back(next); | |
604 | } | |
605 | else | |
606 | { | |
607 | if (Debug == true) | |
608 | std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename | |
609 | << " wasn't in the list for the first parsed hash! (history)" << std::endl; | |
610 | break; | |
611 | } | |
612 | } | |
613 | firstAcceptedHashes = false; | |
614 | } | |
615 | ||
616 | if (unlikely(available_patches.empty() == true)) | |
617 | { | |
618 | if (Debug) | |
619 | std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": " | |
620 | << "Couldn't find any patches for the patch series." << std::endl; | |
621 | return false; | |
622 | } | |
623 | ||
624 | for (char const * const * type = HashString::SupportedHashes(); *type != NULL; ++type) | |
625 | { | |
626 | if (LocalHashes.find(*type) == NULL) | |
627 | continue; | |
628 | ||
629 | std::string tagname = *type; | |
630 | tagname.append("-Patches"); | |
631 | std::string const tmp = Tags.FindS(tagname.c_str()); | |
632 | if (tmp.empty() == true) | |
633 | continue; | |
634 | ||
635 | string hash, filename; | |
636 | unsigned long long size; | |
637 | std::stringstream ss(tmp); | |
638 | ||
639 | while (ss >> hash >> size >> filename) | |
640 | { | |
641 | if (unlikely(hash.empty() == true || filename.empty() == true)) | |
642 | continue; | |
643 | ||
644 | // see if we have a record for this file already | |
645 | std::vector<DiffInfo>::iterator cur = available_patches.begin(); | |
646 | for (; cur != available_patches.end(); ++cur) | |
647 | { | |
648 | if (cur->file != filename) | |
649 | continue; | |
650 | if (unlikely(cur->patch_size != 0 && cur->patch_size != size)) | |
651 | continue; | |
652 | cur->patch_hashes.push_back(HashString(*type, hash)); | |
653 | cur->patch_size = size; | |
654 | break; | |
655 | } | |
656 | if (cur != available_patches.end()) | |
657 | continue; | |
658 | if (Debug == true) | |
659 | std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": File " << filename | |
660 | << " wasn't in the list for the first parsed hash! (patches)" << std::endl; | |
661 | break; | |
662 | } | |
663 | } | |
664 | ||
665 | bool foundStart = false; | |
666 | for (std::vector<DiffInfo>::iterator cur = available_patches.begin(); | |
667 | cur != available_patches.end(); ++cur) | |
668 | { | |
669 | if (LocalHashes != cur->result_hashes) | |
670 | continue; | |
671 | ||
672 | available_patches.erase(available_patches.begin(), cur); | |
673 | foundStart = true; | |
674 | break; | |
675 | } | |
676 | ||
677 | if (foundStart == false || unlikely(available_patches.empty() == true)) | |
678 | { | |
679 | if (Debug) | |
680 | std::clog << "pkgAcqDiffIndex: " << IndexDiffFile << ": " | |
681 | << "Couldn't find the start of the patch series." << std::endl; | |
682 | return false; | |
683 | } | |
684 | ||
685 | // patching with too many files is rather slow compared to a fast download | |
686 | unsigned long const fileLimit = _config->FindI("Acquire::PDiffs::FileLimit", 0); | |
687 | if (fileLimit != 0 && fileLimit < available_patches.size()) | |
688 | { | |
689 | if (Debug) | |
690 | std::clog << "Need " << available_patches.size() << " diffs (Limit is " << fileLimit | |
691 | << ") so fallback to complete download" << std::endl; | |
692 | return false; | |
693 | } | |
694 | ||
695 | // calculate the size of all patches we have to get | |
696 | // note that all sizes are uncompressed, while we download compressed files | |
697 | unsigned long long patchesSize = 0; | |
698 | for (std::vector<DiffInfo>::const_iterator cur = available_patches.begin(); | |
699 | cur != available_patches.end(); ++cur) | |
700 | patchesSize += cur->patch_size; | |
701 | unsigned long long const sizeLimit = ServerSize * _config->FindI("Acquire::PDiffs::SizeLimit", 100); | |
702 | if (sizeLimit > 0 && (sizeLimit/100) < patchesSize) | |
703 | { | |
704 | if (Debug) | |
705 | std::clog << "Need " << patchesSize << " bytes (Limit is " << sizeLimit/100 | |
706 | << ") so fallback to complete download" << std::endl; | |
707 | return false; | |
708 | } | |
709 | ||
710 | // we have something, queue the diffs | |
711 | string::size_type const last_space = Description.rfind(" "); | |
712 | if(last_space != string::npos) | |
713 | Description.erase(last_space, Description.size()-last_space); | |
714 | ||
715 | /* decide if we should download patches one by one or in one go: | |
716 | The first is good if the server merges patches, but many don't, so | |
717 | client-based merging can be attempted, in which case the second is better. | |
718 | "bad things" will happen if patches are merged on the server, | |
719 | but client-side merging is attempted as well */ | |
720 | bool pdiff_merge = _config->FindB("Acquire::PDiffs::Merge", true); | |
721 | if (pdiff_merge == true) | |
722 | { | |
723 | // reprepro adds this flag if it has merged patches on the server | |
724 | std::string const precedence = Tags.FindS("X-Patch-Precedence"); | |
725 | pdiff_merge = (precedence != "merged"); | |
726 | } | |
727 | ||
728 | if (pdiff_merge == false) | |
729 | { | |
730 | new pkgAcqIndexDiffs(Owner, TransactionManager, Target, ExpectedHashes, | |
731 | MetaIndexParser, available_patches); | |
732 | } | |
733 | else | |
734 | { | |
735 | std::vector<pkgAcqIndexMergeDiffs*> *diffs = new std::vector<pkgAcqIndexMergeDiffs*>(available_patches.size()); | |
736 | for(size_t i = 0; i < available_patches.size(); ++i) | |
737 | (*diffs)[i] = new pkgAcqIndexMergeDiffs(Owner, TransactionManager, | |
738 | Target, | |
739 | ExpectedHashes, | |
740 | MetaIndexParser, | |
741 | available_patches[i], | |
742 | diffs); | |
743 | } | |
744 | ||
745 | Complete = false; | |
746 | Status = StatDone; | |
747 | Dequeue(); | |
748 | return true; | |
749 | } | |
750 | /*}}}*/ | |
751 | void pkgAcqDiffIndex::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/ | |
752 | { | |
753 | Item::Failed(Message,Cnf); | |
754 | Status = StatDone; | |
755 | ||
756 | if(Debug) | |
757 | std::clog << "pkgAcqDiffIndex failed: " << Desc.URI << " with " << Message << std::endl | |
758 | << "Falling back to normal index file acquire" << std::endl; | |
759 | ||
760 | new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser); | |
761 | } | |
762 | /*}}}*/ | |
763 | bool pkgAcqDiffIndex::TransactionState(TransactionStates const state) /*{{{*/ | |
764 | { | |
765 | if (pkgAcquire::Item::TransactionState(state) == false) | |
766 | return false; | |
767 | ||
768 | switch (state) | |
769 | { | |
770 | case TransactionCommit: | |
771 | break; | |
772 | case TransactionAbort: | |
773 | std::string const Partial = GetPartialFileNameFromURI(RealURI); | |
774 | unlink(Partial.c_str()); | |
775 | break; | |
776 | } | |
777 | ||
778 | return true; | |
779 | } | |
780 | /*}}}*/ | |
781 | void pkgAcqDiffIndex::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/ | |
782 | pkgAcquire::MethodConfig *Cnf) | |
783 | { | |
784 | if(Debug) | |
785 | std::clog << "pkgAcqDiffIndex::Done(): " << Desc.URI << std::endl; | |
786 | ||
787 | Item::Done(Message, Size, Hashes, Cnf); | |
788 | ||
789 | // verify the index target | |
790 | if(Target && Target->MetaKey != "" && MetaIndexParser && Hashes.usable()) | |
791 | { | |
792 | std::string IndexMetaKey = Target->MetaKey + ".diff/Index"; | |
793 | indexRecords::checkSum *Record = MetaIndexParser->Lookup(IndexMetaKey); | |
794 | if(Record && Record->Hashes.usable() && Hashes != Record->Hashes) | |
795 | { | |
796 | RenameOnError(HashSumMismatch); | |
797 | printHashSumComparision(RealURI, Record->Hashes, Hashes); | |
798 | Failed(Message, Cnf); | |
799 | return; | |
800 | } | |
801 | ||
802 | } | |
803 | ||
804 | string const FinalFile = GetFinalFilename(); | |
805 | if(StringToBool(LookupTag(Message,"IMS-Hit"),false)) | |
806 | DestFile = FinalFile; | |
807 | ||
808 | if(ParseDiffIndex(DestFile) == false) | |
809 | { | |
810 | Failed("Message: Couldn't parse pdiff index", Cnf); | |
811 | // queue for final move - this should happen even if we fail | |
812 | // while parsing (e.g. on sizelimit) and download the complete file. | |
813 | TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); | |
814 | return; | |
815 | } | |
816 | ||
817 | TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); | |
818 | ||
819 | Complete = true; | |
820 | Status = StatDone; | |
821 | Dequeue(); | |
822 | ||
823 | return; | |
824 | } | |
825 | /*}}}*/ | |
826 | // AcqIndexDiffs::AcqIndexDiffs - Constructor /*{{{*/ | |
827 | // --------------------------------------------------------------------- | |
828 | /* The package diff is added to the queue. One object is constructed | |
829 | * for each diff and the index | |
830 | */ | |
831 | pkgAcqIndexDiffs::pkgAcqIndexDiffs(pkgAcquire *Owner, | |
832 | pkgAcqMetaBase *TransactionManager, | |
833 | struct IndexTarget const * const Target, | |
834 | HashStringList const &ExpectedHashes, | |
835 | indexRecords *MetaIndexParser, | |
836 | vector<DiffInfo> diffs) | |
837 | : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser), | |
838 | available_patches(diffs) | |
839 | { | |
840 | DestFile = GetPartialFileNameFromURI(Target->URI); | |
841 | ||
842 | Debug = _config->FindB("Debug::pkgAcquire::Diffs",false); | |
843 | ||
844 | RealURI = Target->URI; | |
845 | Desc.Owner = this; | |
846 | Description = Target->Description; | |
847 | Desc.ShortDesc = Target->ShortDesc; | |
848 | ||
849 | if(available_patches.empty() == true) | |
850 | { | |
851 | // we are done (yeah!), check hashes against the final file | |
852 | DestFile = GetFinalFileNameFromURI(Target->URI); | |
853 | Finish(true); | |
854 | } | |
855 | else | |
856 | { | |
857 | // patching needs to be bootstrapped with the 'old' version | |
858 | std::string const PartialFile = GetPartialFileNameFromURI(RealURI); | |
859 | if (RealFileExists(PartialFile) == false) | |
860 | { | |
861 | if (symlink(GetFinalFilename().c_str(), PartialFile.c_str()) != 0) | |
862 | { | |
863 | Failed("Link creation of " + PartialFile + " to " + GetFinalFilename() + " failed", NULL); | |
864 | return; | |
865 | } | |
866 | } | |
867 | ||
868 | // get the next diff | |
869 | State = StateFetchDiff; | |
870 | QueueNextDiff(); | |
871 | } | |
872 | } | |
873 | /*}}}*/ | |
874 | void pkgAcqIndexDiffs::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/ | |
875 | { | |
876 | Item::Failed(Message,Cnf); | |
877 | Status = StatDone; | |
878 | ||
879 | if(Debug) | |
880 | std::clog << "pkgAcqIndexDiffs failed: " << Desc.URI << " with " << Message << std::endl | |
881 | << "Falling back to normal index file acquire" << std::endl; | |
882 | DestFile = GetPartialFileNameFromURI(Target->URI); | |
883 | RenameOnError(PDiffError); | |
884 | new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser); | |
885 | Finish(); | |
886 | } | |
887 | /*}}}*/ | |
888 | // Finish - helper that cleans the item out of the fetcher queue /*{{{*/ | |
889 | void pkgAcqIndexDiffs::Finish(bool allDone) | |
890 | { | |
891 | if(Debug) | |
892 | std::clog << "pkgAcqIndexDiffs::Finish(): " | |
893 | << allDone << " " | |
894 | << Desc.URI << std::endl; | |
895 | ||
896 | // we restore the original name, this is required, otherwise | |
897 | // the file will be cleaned | |
898 | if(allDone) | |
899 | { | |
900 | if(HashSums().usable() && !HashSums().VerifyFile(DestFile)) | |
901 | { | |
902 | RenameOnError(HashSumMismatch); | |
903 | Dequeue(); | |
904 | return; | |
905 | } | |
906 | ||
907 | TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename()); | |
908 | ||
909 | // this is for the "real" finish | |
910 | Complete = true; | |
911 | Status = StatDone; | |
912 | Dequeue(); | |
913 | if(Debug) | |
914 | std::clog << "\n\nallDone: " << DestFile << "\n" << std::endl; | |
915 | return; | |
916 | } | |
917 | ||
918 | if(Debug) | |
919 | std::clog << "Finishing: " << Desc.URI << std::endl; | |
920 | Complete = false; | |
921 | Status = StatDone; | |
922 | Dequeue(); | |
923 | return; | |
924 | } | |
925 | /*}}}*/ | |
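| // QueueNextDiff (below) hashes the partially patched file in partial/, drops | |
| // leading patches whose result hash doesn't match (the Index is expected to | |
| // list patches in order), finishes early if the file already matches | |
| // ExpectedHashes, and otherwise queues the next patch as <URI>.diff/<name>.gz. | |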
926 | bool pkgAcqIndexDiffs::QueueNextDiff() /*{{{*/ | |
927 | { | |
928 | // calc sha1 of the just patched file | |
929 | std::string const FinalFile = GetPartialFileNameFromURI(RealURI); | |
930 | ||
931 | if(!FileExists(FinalFile)) | |
932 | { | |
933 | Failed("Message: No FinalFile " + FinalFile + " available", NULL); | |
934 | return false; | |
935 | } | |
936 | ||
937 | FileFd fd(FinalFile, FileFd::ReadOnly); | |
938 | Hashes LocalHashesCalc; | |
939 | LocalHashesCalc.AddFD(fd); | |
940 | HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); | |
941 | ||
942 | if(Debug) | |
943 | std::clog << "QueueNextDiff: " << FinalFile << " (" << LocalHashes.find(NULL)->toStr() << ")" << std::endl; | |
944 | ||
945 | if (unlikely(LocalHashes.usable() == false || ExpectedHashes.usable() == false)) | |
946 | { | |
947 | Failed("Local/Expected hashes are not usable", NULL); | |
948 | return false; | |
949 | } | |
950 | ||
951 | ||
952 | // final file reached before all patches are applied | |
953 | if(LocalHashes == ExpectedHashes) | |
954 | { | |
955 | Finish(true); | |
956 | return true; | |
957 | } | |
958 | ||
959 | // remove all patches until the next matching patch is found | |
960 | // this requires the Index file to be ordered | |
961 | for(vector<DiffInfo>::iterator I = available_patches.begin(); | |
962 | available_patches.empty() == false && | |
963 | I != available_patches.end() && | |
964 | I->result_hashes != LocalHashes; | |
965 | ++I) | |
966 | { | |
967 | available_patches.erase(I); | |
968 | } | |
969 | ||
970 | // error checking and falling back if no patch was found | |
971 | if(available_patches.empty() == true) | |
972 | { | |
973 | Failed("No patches left to reach target", NULL); | |
974 | return false; | |
975 | } | |
976 | ||
977 | // queue the right diff | |
978 | Desc.URI = RealURI + ".diff/" + available_patches[0].file + ".gz"; | |
979 | Desc.Description = Description + " " + available_patches[0].file + string(".pdiff"); | |
980 | DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + available_patches[0].file); | |
981 | ||
982 | if(Debug) | |
983 | std::clog << "pkgAcqIndexDiffs::QueueNextDiff(): " << Desc.URI << std::endl; | |
984 | ||
985 | QueueURI(Desc); | |
986 | ||
987 | return true; | |
988 | } | |
989 | /*}}}*/ | |
990 | void pkgAcqIndexDiffs::Done(string Message,unsigned long long Size, HashStringList const &Hashes, /*{{{*/ | |
991 | pkgAcquire::MethodConfig *Cnf) | |
992 | { | |
993 | if(Debug) | |
994 | std::clog << "pkgAcqIndexDiffs::Done(): " << Desc.URI << std::endl; | |
995 | ||
996 | Item::Done(Message, Size, Hashes, Cnf); | |
997 | ||
998 | // FIXME: verify this download too before feeding it to rred | |
999 | std::string const FinalFile = GetPartialFileNameFromURI(RealURI); | |
1000 | ||
1001 | // success in downloading a diff, enter ApplyDiff state | |
1002 | if(State == StateFetchDiff) | |
1003 | { | |
1004 | FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip); | |
1005 | class Hashes LocalHashesCalc; | |
1006 | LocalHashesCalc.AddFD(fd); | |
1007 | HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); | |
1008 | ||
1009 | if (fd.Size() != available_patches[0].patch_size || | |
1010 | available_patches[0].patch_hashes != LocalHashes) | |
1011 | { | |
1012 | // patch files are dated, so a mismatch indicates a bad download; kill it | |
1013 | unlink(DestFile.c_str()); | |
1014 | Failed("Patch has Size/Hashsum mismatch", NULL); | |
1015 | return; | |
1016 | } | |
1017 | ||
1018 | // rred expects the patch as $FinalFile.ed | |
1019 | Rename(DestFile,FinalFile+".ed"); | |
1020 | ||
1021 | if(Debug) | |
1022 | std::clog << "Sending to rred method: " << FinalFile << std::endl; | |
1023 | ||
1024 | State = StateApplyDiff; | |
1025 | Local = true; | |
1026 | Desc.URI = "rred:" + FinalFile; | |
1027 | QueueURI(Desc); | |
1028 | SetActiveSubprocess("rred"); | |
1029 | return; | |
1030 | } | |
1031 | ||
1032 | ||
1033 | // success in download/apply a diff, queue next (if needed) | |
1034 | if(State == StateApplyDiff) | |
1035 | { | |
1036 | // remove the just applied patch | |
1037 | available_patches.erase(available_patches.begin()); | |
1038 | unlink((FinalFile + ".ed").c_str()); | |
1039 | ||
1040 | // move into place | |
1041 | if(Debug) | |
1042 | { | |
1043 | std::clog << "Moving patched file in place: " << std::endl | |
1044 | << DestFile << " -> " << FinalFile << std::endl; | |
1045 | } | |
1046 | Rename(DestFile,FinalFile); | |
1047 | chmod(FinalFile.c_str(),0644); | |
1048 | ||
1049 | // see if there is more to download | |
1050 | if(available_patches.empty() == false) { | |
1051 | new pkgAcqIndexDiffs(Owner, TransactionManager, Target, | |
1052 | ExpectedHashes, MetaIndexParser, | |
1053 | available_patches); | |
1054 | return Finish(); | |
1055 | } else | |
1056 | // update DestFile to point at the fully patched file | |
1057 | DestFile = FinalFile; | |
1058 | return Finish(true); | |
1059 | } | |
1060 | } | |
1061 | /*}}}*/ | |
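| // pkgAcqIndexMergeDiffs fetches all pdiffs in parallel: each instance gets | |
| // one patch, and the last one to complete symlinks the current index into | |
| // partial/ and hands the whole batch to rred in a single pass. The first | |
| // failing instance makes all siblings fall back to a full pkgAcqIndex fetch. | |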
1062 | // AcqIndexMergeDiffs::AcqIndexMergeDiffs - Constructor /*{{{*/ | |
1063 | pkgAcqIndexMergeDiffs::pkgAcqIndexMergeDiffs(pkgAcquire *Owner, | |
1064 | pkgAcqMetaBase *TransactionManager, | |
1065 | struct IndexTarget const * const Target, | |
1066 | HashStringList const &ExpectedHashes, | |
1067 | indexRecords *MetaIndexParser, | |
1068 | DiffInfo const &patch, | |
1069 | std::vector<pkgAcqIndexMergeDiffs*> const * const allPatches) | |
1070 | : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser), | |
1071 | patch(patch), allPatches(allPatches), State(StateFetchDiff) | |
1072 | { | |
1073 | Debug = _config->FindB("Debug::pkgAcquire::Diffs",false); | |
1074 | ||
1075 | RealURI = Target->URI; | |
1076 | Desc.Owner = this; | |
1077 | Description = Target->Description; | |
1078 | Desc.ShortDesc = Target->ShortDesc; | |
1079 | ||
1080 | Desc.URI = RealURI + ".diff/" + patch.file + ".gz"; | |
1081 | Desc.Description = Description + " " + patch.file + string(".pdiff"); | |
1082 | ||
1083 | DestFile = GetPartialFileNameFromURI(RealURI + ".diff/" + patch.file); | |
1084 | ||
1085 | if(Debug) | |
1086 | std::clog << "pkgAcqIndexMergeDiffs: " << Desc.URI << std::endl; | |
1087 | ||
1088 | QueueURI(Desc); | |
1089 | } | |
1090 | /*}}}*/ | |
1091 | void pkgAcqIndexMergeDiffs::Failed(string Message,pkgAcquire::MethodConfig * Cnf)/*{{{*/ | |
1092 | { | |
1093 | if(Debug) | |
1094 | std::clog << "pkgAcqIndexMergeDiffs failed: " << Desc.URI << " with " << Message << std::endl; | |
1095 | ||
1096 | Item::Failed(Message,Cnf); | |
1097 | Status = StatDone; | |
1098 | ||
1099 | // check if we are the first to fail, otherwise we are done here | |
1100 | State = StateDoneDiff; | |
1101 | for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin(); | |
1102 | I != allPatches->end(); ++I) | |
1103 | if ((*I)->State == StateErrorDiff) | |
1104 | return; | |
1105 | ||
1106 | // first failure means we should fallback | |
1107 | State = StateErrorDiff; | |
1108 | if (Debug) | |
1109 | std::clog << "Falling back to normal index file acquire" << std::endl; | |
1110 | DestFile = GetPartialFileNameFromURI(Target->URI); | |
1111 | RenameOnError(PDiffError); | |
1112 | new pkgAcqIndex(Owner, TransactionManager, Target, ExpectedHashes, MetaIndexParser); | |
1113 | } | |
1114 | /*}}}*/ | |
1115 | void pkgAcqIndexMergeDiffs::Done(string Message,unsigned long long Size,HashStringList const &Hashes, /*{{{*/ | |
1116 | pkgAcquire::MethodConfig *Cnf) | |
1117 | { | |
1118 | if(Debug) | |
1119 | std::clog << "pkgAcqIndexMergeDiffs::Done(): " << Desc.URI << std::endl; | |
1120 | ||
1121 | Item::Done(Message,Size,Hashes,Cnf); | |
1122 | ||
1123 | // FIXME: verify download before feeding it to rred | |
1124 | string const FinalFile = GetPartialFileNameFromURI(RealURI); | |
1125 | ||
1126 | if (State == StateFetchDiff) | |
1127 | { | |
1128 | FileFd fd(DestFile, FileFd::ReadOnly, FileFd::Gzip); | |
1129 | class Hashes LocalHashesCalc; | |
1130 | LocalHashesCalc.AddFD(fd); | |
1131 | HashStringList const LocalHashes = LocalHashesCalc.GetHashStringList(); | |
1132 | ||
1133 | if (fd.Size() != patch.patch_size || patch.patch_hashes != LocalHashes) | |
1134 | { | |
1135 | // patch files are dated, so a mismatch indicates a bad download; kill it | |
1136 | unlink(DestFile.c_str()); | |
1137 | Failed("Patch has Size/Hashsum mismatch", NULL); | |
1138 | return; | |
1139 | } | |
1140 | ||
1141 | // rred expects the patch as $FinalFile.ed.$patchname.gz | |
1142 | Rename(DestFile, FinalFile + ".ed." + patch.file + ".gz"); | |
1143 | ||
1144 | // check if this is the last completed diff | |
1145 | State = StateDoneDiff; | |
1146 | for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin(); | |
1147 | I != allPatches->end(); ++I) | |
1148 | if ((*I)->State != StateDoneDiff) | |
1149 | { | |
1150 | if(Debug) | |
1151 | std::clog << "Not the last done diff in the batch: " << Desc.URI << std::endl; | |
1152 | return; | |
1153 | } | |
1154 | ||
1155 | // this is the last completed diff, so we are ready to apply now | |
1156 | State = StateApplyDiff; | |
1157 | ||
1158 | // patching needs to be bootstrapped with the 'old' version | |
1159 | if (symlink(GetFinalFilename().c_str(), FinalFile.c_str()) != 0) | |
1160 | { | |
1161 | Failed("Link creation of " + FinalFile + " to " + GetFinalFilename() + " failed", NULL); | |
1162 | return; | |
1163 | } | |
1164 | ||
1165 | if(Debug) | |
1166 | std::clog << "Sending to rred method: " << FinalFile << std::endl; | |
1167 | ||
1168 | Local = true; | |
1169 | Desc.URI = "rred:" + FinalFile; | |
1170 | QueueURI(Desc); | |
1171 | SetActiveSubprocess("rred"); | |
1172 | return; | |
1173 | } | |
1174 | // success in download/apply all diffs, clean up | |
1175 | else if (State == StateApplyDiff) | |
1176 | { | |
1177 | // see if we really got the expected file | |
1178 | if(ExpectedHashes.usable() && !ExpectedHashes.VerifyFile(DestFile)) | |
1179 | { | |
1180 | RenameOnError(HashSumMismatch); | |
1181 | return; | |
1182 | } | |
1183 | ||
1184 | // move the result into place | |
1185 | std::string const Final = GetFinalFilename(); | |
1186 | if(Debug) | |
1187 | std::clog << "Queue patched file in place: " << std::endl | |
1188 | << DestFile << " -> " << Final << std::endl; | |
1189 | ||
1190 | // queue for copy by the transaction manager | |
1191 | TransactionManager->TransactionStageCopy(this, DestFile, Final); | |
1192 | ||
1193 | // ensure the ed's are gone regardless of list-cleanup | |
1194 | for (std::vector<pkgAcqIndexMergeDiffs *>::const_iterator I = allPatches->begin(); | |
1195 | I != allPatches->end(); ++I) | |
1196 | { | |
1197 | std::string const PartialFile = GetPartialFileNameFromURI(RealURI); | |
1198 | std::string patch = PartialFile + ".ed." + (*I)->patch.file + ".gz"; | |
1199 | unlink(patch.c_str()); | |
1200 | } | |
1201 | unlink(FinalFile.c_str()); | |
1202 | ||
1203 | // all set and done | |
1204 | Complete = true; | |
1205 | if(Debug) | |
1206 | std::clog << "allDone: " << DestFile << "\n" << std::endl; | |
1207 | } | |
1208 | } | |
1209 | /*}}}*/ | |
1210 | // AcqBaseIndex - Constructor /*{{{*/ | |
1211 | pkgAcqBaseIndex::pkgAcqBaseIndex(pkgAcquire *Owner, | |
1212 | pkgAcqMetaBase *TransactionManager, | |
1213 | struct IndexTarget const * const Target, | |
1214 | HashStringList const &ExpectedHashes, | |
1215 | indexRecords *MetaIndexParser) | |
1216 | : Item(Owner, ExpectedHashes, TransactionManager), Target(Target), | |
1217 | MetaIndexParser(MetaIndexParser) | |
1218 | { | |
1219 | } | |
1220 | /*}}}*/ | |
1221 | // AcqBaseIndex::VerifyHashByMetaKey - verify hash for the given metakey /*{{{*/ | |
1222 | bool pkgAcqBaseIndex::VerifyHashByMetaKey(HashStringList const &Hashes) | |
1223 | { | |
1224 | if(MetaKey != "" && Hashes.usable()) | |
1225 | { | |
1226 | indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey); | |
1227 | if(Record && Record->Hashes.usable() && Hashes != Record->Hashes) | |
1228 | { | |
1229 | printHashSumComparision(RealURI, Record->Hashes, Hashes); | |
1230 | return false; | |
1231 | } | |
1232 | } | |
1233 | return true; | |
1234 | } | |
1235 | /*}}}*/ | |
1236 | // AcqBaseIndex::GetFinalFilename - Return the full final file path /*{{{*/ | |
1237 | std::string pkgAcqBaseIndex::GetFinalFilename() const | |
1238 | { | |
1239 | return GetFinalFileNameFromURI(RealURI); | |
1240 | } | |
1241 | /*}}}*/ | |
1242 | // AcqIndex::AcqIndex - Constructor /*{{{*/ | |
1243 | // --------------------------------------------------------------------- | |
1244 | /* The package file is added to the queue and a second class is | |
1245 | instantiated to fetch the revision file */ | |
1246 | pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner, | |
1247 | string URI,string URIDesc,string ShortDesc, | |
1248 | HashStringList const &ExpectedHash) | |
1249 | : pkgAcqBaseIndex(Owner, 0, NULL, ExpectedHash, NULL) | |
1250 | { | |
1251 | RealURI = URI; | |
1252 | ||
1253 | AutoSelectCompression(); | |
1254 | Init(URI, URIDesc, ShortDesc); | |
1255 | ||
1256 | if(_config->FindB("Debug::Acquire::Transaction", false) == true) | |
1257 | std::clog << "New pkgIndex with TransactionManager " | |
1258 | << TransactionManager << std::endl; | |
1259 | } | |
1260 | /*}}}*/ | |
1261 | // AcqIndex::AcqIndex - Constructor /*{{{*/ | |
1262 | pkgAcqIndex::pkgAcqIndex(pkgAcquire *Owner, | |
1263 | pkgAcqMetaBase *TransactionManager, | |
1264 | IndexTarget const *Target, | |
1265 | HashStringList const &ExpectedHash, | |
1266 | indexRecords *MetaIndexParser) | |
1267 | : pkgAcqBaseIndex(Owner, TransactionManager, Target, ExpectedHash, | |
1268 | MetaIndexParser) | |
1269 | { | |
1270 | RealURI = Target->URI; | |
1271 | ||
1272 | // autoselect the compression method | |
1273 | AutoSelectCompression(); | |
1274 | Init(Target->URI, Target->Description, Target->ShortDesc); | |
1275 | ||
1276 | if(_config->FindB("Debug::Acquire::Transaction", false) == true) | |
1277 | std::clog << "New pkgIndex with TransactionManager " | |
1278 | << TransactionManager << std::endl; | |
1279 | } | |
1280 | /*}}}*/ | |
1281 | // AcqIndex::AutoSelectCompression - Select compression /*{{{*/ | |
1282 | void pkgAcqIndex::AutoSelectCompression() | |
1283 | { | |
1284 | std::vector<std::string> types = APT::Configuration::getCompressionTypes(); | |
1285 | CompressionExtensions = ""; | |
1286 | if (ExpectedHashes.usable()) | |
1287 | { | |
1288 | for (std::vector<std::string>::const_iterator t = types.begin(); | |
1289 | t != types.end(); ++t) | |
1290 | { | |
1291 | std::string CompressedMetaKey = string(Target->MetaKey).append(".").append(*t); | |
1292 | if (*t == "uncompressed" || | |
1293 | MetaIndexParser->Exists(CompressedMetaKey) == true) | |
1294 | CompressionExtensions.append(*t).append(" "); | |
1295 | } | |
1296 | } | |
1297 | else | |
1298 | { | |
1299 | for (std::vector<std::string>::const_iterator t = types.begin(); t != types.end(); ++t) | |
1300 | CompressionExtensions.append(*t).append(" "); | |
1301 | } | |
1302 | if (CompressionExtensions.empty() == false) | |
1303 | CompressionExtensions.erase(CompressionExtensions.end()-1); | |
1304 | } | |
1305 | /*}}}*/ | |
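| // The result is a space-separated preference list such as "xz bz2 gz | |
| // uncompressed" (order follows APT::Configuration::getCompressionTypes()); | |
| // Init() tries the first entry and Failed() advances to the next one. | |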
1306 | // AcqIndex::Init - deferred Constructor /*{{{*/ | |
1307 | void pkgAcqIndex::Init(string const &URI, string const &URIDesc, | |
1308 | string const &ShortDesc) | |
1309 | { | |
1310 | Stage = STAGE_DOWNLOAD; | |
1311 | ||
1312 | DestFile = GetPartialFileNameFromURI(URI); | |
1313 | ||
1314 | CurrentCompressionExtension = CompressionExtensions.substr(0, CompressionExtensions.find(' ')); | |
1315 | if (CurrentCompressionExtension == "uncompressed") | |
1316 | { | |
1317 | Desc.URI = URI; | |
1318 | if(Target) | |
1319 | MetaKey = string(Target->MetaKey); | |
1320 | } | |
1321 | else | |
1322 | { | |
1323 | Desc.URI = URI + '.' + CurrentCompressionExtension; | |
1324 | DestFile = DestFile + '.' + CurrentCompressionExtension; | |
1325 | if(Target) | |
1326 | MetaKey = string(Target->MetaKey) + '.' + CurrentCompressionExtension; | |
1327 | } | |
1328 | ||
1329 | // load the filesize | |
1330 | if(MetaIndexParser) | |
1331 | { | |
1332 | indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey); | |
1333 | if(Record) | |
1334 | FileSize = Record->Size; | |
1335 | ||
1336 | InitByHashIfNeeded(MetaKey); | |
1337 | } | |
1338 | ||
1339 | Desc.Description = URIDesc; | |
1340 | Desc.Owner = this; | |
1341 | Desc.ShortDesc = ShortDesc; | |
1342 | ||
1343 | QueueURI(Desc); | |
1344 | } | |
1345 | /*}}}*/ | |
1346 | // AcqIndex::InitByHashIfNeeded - modify URI for by-hash support /*{{{*/ | |
1347 | void pkgAcqIndex::InitByHashIfNeeded(const std::string MetaKey) | |
1348 | { | |
1349 | // TODO: | |
1350 | // - (maybe?) add support for by-hash into the sources.list as flag | |
1351 | // - make apt-ftparchive generate the hashes (and expire?) | |
1352 | std::string HostKnob = "APT::Acquire::" + ::URI(Desc.URI).Host + "::By-Hash"; | |
1353 | if(_config->FindB("APT::Acquire::By-Hash", false) == true || | |
1354 | _config->FindB(HostKnob, false) == true || | |
1355 | MetaIndexParser->GetSupportsAcquireByHash()) | |
1356 | { | |
1357 | indexRecords::checkSum *Record = MetaIndexParser->Lookup(MetaKey); | |
1358 | if(Record) | |
1359 | { | |
1360 | // FIXME: should we really use the best hash here? or a fixed one? | |
1361 | const HashString *TargetHash = Record->Hashes.find(""); | |
1362 | std::string ByHash = "/by-hash/" + TargetHash->HashType() + "/" + TargetHash->HashValue(); | |
1363 | size_t trailing_slash = Desc.URI.find_last_of("/"); | |
1364 | Desc.URI = Desc.URI.replace( | |
1365 | trailing_slash, | |
1366 | Desc.URI.substr(trailing_slash+1).size()+1, | |
1367 | ByHash); | |
1368 | } else { | |
1369 | _error->Warning( | |
1370 | "Fetching ByHash requested but can not find record for %s", | |
1371 | MetaKey.c_str()); | |
1372 | } | |
1373 | } | |
1374 | } | |
1375 | /*}}}*/ | |
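| // With by-hash enabled the last URI component is replaced, so e.g. | |
| // ".../binary-amd64/Packages.xz" might become | |
| // ".../binary-amd64/by-hash/SHA256/<hexdigest>", using the best hash listed | |
| // for this MetaKey in the Release records. | |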
1376 | // AcqIndex::Custom600Headers - Insert custom request headers /*{{{*/ | |
1377 | // --------------------------------------------------------------------- | |
1378 | /* The only header we use is the last-modified header. */ | |
1379 | #if APT_PKG_ABI >= 413 | |
1380 | string pkgAcqIndex::Custom600Headers() const | |
1381 | #else | |
1382 | string pkgAcqIndex::Custom600Headers() | |
1383 | #endif | |
1384 | { | |
1385 | string Final = GetFinalFilename(); | |
1386 | ||
1387 | string msg = "\nIndex-File: true"; | |
1388 | struct stat Buf; | |
1389 | if (stat(Final.c_str(),&Buf) == 0) | |
1390 | msg += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); | |
1391 | ||
1392 | if(Target->IsOptional()) | |
1393 | msg += "\nFail-Ignore: true"; | |
1394 | ||
1395 | return msg; | |
1396 | } | |
1397 | /*}}}*/ | |
1398 | // pkgAcqIndex::Failed - getting the indexfile failed /*{{{*/ | |
1399 | void pkgAcqIndex::Failed(string Message,pkgAcquire::MethodConfig *Cnf) | |
1400 | { | |
1401 | Item::Failed(Message,Cnf); | |
1402 | ||
1403 | size_t const nextExt = CompressionExtensions.find(' '); | |
1404 | if (nextExt != std::string::npos) | |
1405 | { | |
1406 | CompressionExtensions = CompressionExtensions.substr(nextExt+1); | |
1407 | Init(RealURI, Desc.Description, Desc.ShortDesc); | |
1408 | Status = StatIdle; | |
1409 | return; | |
1410 | } | |
1411 | ||
1412 | Item::Failed(Message,Cnf); | |
1413 | ||
1414 | if(Target->IsOptional() && ExpectedHashes.empty() && Stage == STAGE_DOWNLOAD) | |
1415 | Status = StatDone; | |
1416 | else | |
1417 | TransactionManager->AbortTransaction(); | |
1418 | } | |
1419 | /*}}}*/ | |
1420 | bool pkgAcqIndex::TransactionState(TransactionStates const state) /*{{{*/ | |
1421 | { | |
1422 | if (pkgAcquire::Item::TransactionState(state) == false) | |
1423 | return false; | |
1424 | ||
1425 | switch (state) | |
1426 | { | |
1427 | case TransactionAbort: | |
1428 | if (Stage == STAGE_DECOMPRESS_AND_VERIFY) | |
1429 | { | |
1430 | // keep the compressed file, but drop the decompressed | |
1431 | EraseFileName.clear(); | |
1432 | if (PartialFile.empty() == false && flExtension(PartialFile) == "decomp") | |
1433 | unlink(PartialFile.c_str()); | |
1434 | } | |
1435 | break; | |
1436 | case TransactionCommit: | |
1437 | if (EraseFileName.empty() == false) | |
1438 | unlink(EraseFileName.c_str()); | |
1439 | break; | |
1440 | } | |
1441 | return true; | |
1442 | } | |
1443 | /*}}}*/ | |
1444 | // pkgAcqIndex::GetFinalFilename - Return the full final file path /*{{{*/ | |
1445 | std::string pkgAcqIndex::GetFinalFilename() const | |
1446 | { | |
1447 | std::string const FinalFile = GetFinalFileNameFromURI(RealURI); | |
1448 | return GetCompressedFileName(RealURI, FinalFile, CurrentCompressionExtension); | |
1449 | } | |
1450 | /*}}}*/ | |
1451 | // AcqIndex::ReverifyAfterIMS - Reverify index after an ims-hit /*{{{*/ | |
1452 | void pkgAcqIndex::ReverifyAfterIMS() | |
1453 | { | |
1454 | // update destfile to *not* include the compression extension when doing | |
1455 | // a reverify (as it's uncompressed on disk already) | |
1456 | DestFile = GetCompressedFileName(RealURI, GetPartialFileNameFromURI(RealURI), CurrentCompressionExtension); | |
1457 | ||
1458 | // copy FinalFile into partial/ so that we check the hash again | |
1459 | string FinalFile = GetFinalFilename(); | |
1460 | Stage = STAGE_DECOMPRESS_AND_VERIFY; | |
1461 | Desc.URI = "copy:" + FinalFile; | |
1462 | QueueURI(Desc); | |
1463 | } | |
1464 | /*}}}*/ | |
1465 | // AcqIndex::ValidateFile - Validate the content of the downloaded file /*{{{*/ | |
1466 | bool pkgAcqIndex::ValidateFile(const std::string &FileName) | |
1467 | { | |
1468 | // FIXME: this can go away once we only ever download stuff that | |
1469 | // has a valid hash and we never do GET based probing | |
1470 | // FIXME2: this also leaks debian-isms into the code and should go therefore | |
1471 | ||
1472 | /* Always validate the index file for correctness (all indexes must | |
1473 | * have a Package field) (LP: #346386) (Closes: #627642) | |
1474 | */ | |
1475 | FileFd fd(FileName, FileFd::ReadOnly, FileFd::Extension); | |
1476 | // Only test for correctness if the content of the file is not empty | |
1477 | // (empty is ok) | |
1478 | if (fd.Size() > 0) | |
1479 | { | |
1480 | pkgTagSection sec; | |
1481 | pkgTagFile tag(&fd); | |
1482 | ||
1483 | // all our current indexes have a field 'Package' in each section | |
1484 | if (_error->PendingError() == true || | |
1485 | tag.Step(sec) == false || | |
1486 | sec.Exists("Package") == false) | |
1487 | return false; | |
1488 | } | |
1489 | return true; | |
1490 | } | |
1491 | /*}}}*/ | |
1492 | // AcqIndex::Done - Finished a fetch /*{{{*/ | |
1493 | // --------------------------------------------------------------------- | |
1494 | /* This goes through a number of states. On the initial fetch the | |
1495 | method could possibly return an alternate filename which points | |
1496 | to the uncompressed version of the file. If this is so the file | |
1497 | is copied into the partial directory. In all other cases the file | |
1498 | is queued again with the matching decompressor for the next stage. */ | |
1499 | void pkgAcqIndex::Done(string Message, | |
1500 | unsigned long long Size, | |
1501 | HashStringList const &Hashes, | |
1502 | pkgAcquire::MethodConfig *Cfg) | |
1503 | { | |
1504 | Item::Done(Message,Size,Hashes,Cfg); | |
1505 | ||
1506 | switch(Stage) | |
1507 | { | |
1508 | case STAGE_DOWNLOAD: | |
1509 | StageDownloadDone(Message, Hashes, Cfg); | |
1510 | break; | |
1511 | case STAGE_DECOMPRESS_AND_VERIFY: | |
1512 | StageDecompressDone(Message, Hashes, Cfg); | |
1513 | break; | |
1514 | } | |
1515 | } | |
1516 | /*}}}*/ | |
1517 | // AcqIndex::StageDownloadDone - Queue for decompress and verify /*{{{*/ | |
1518 | void pkgAcqIndex::StageDownloadDone(string Message, | |
1519 | HashStringList const &Hashes, | |
1520 | pkgAcquire::MethodConfig *Cfg) | |
1521 | { | |
1522 | // First check if the calculated Hash of the (compressed) downloaded | |
1523 | // file matches the hash we have in the MetaIndexRecords for this file | |
1524 | if(VerifyHashByMetaKey(Hashes) == false) | |
1525 | { | |
1526 | RenameOnError(HashSumMismatch); | |
1527 | Failed(Message, Cfg); | |
1528 | return; | |
1529 | } | |
1530 | ||
1531 | Complete = true; | |
1532 | ||
1533 | // Handle the case where the method returned an already uncompressed file | |
1534 | string FileName = LookupTag(Message,"Alt-Filename"); | |
1535 | if (FileName.empty() == false) | |
1536 | { | |
1537 | Stage = STAGE_DECOMPRESS_AND_VERIFY; | |
1538 | Local = true; | |
1539 | DestFile += ".decomp"; | |
1540 | Desc.URI = "copy:" + FileName; | |
1541 | QueueURI(Desc); | |
1542 | SetActiveSubprocess("copy"); | |
1543 | return; | |
1544 | } | |
1545 | ||
1546 | FileName = LookupTag(Message,"Filename"); | |
1547 | if (FileName.empty() == true) | |
1548 | { | |
1549 | Status = StatError; | |
1550 | ErrorText = "Method gave a blank filename"; | |
1551 | } | |
1552 | ||
1553 | // Methods like e.g. "file:" will give us a (compressed) FileName that is | |
1554 | // not the "DestFile" we set; in this case we uncompress from the local file | |
1555 | if (FileName != DestFile) | |
1556 | Local = true; | |
1557 | else | |
1558 | EraseFileName = FileName; | |
1559 | ||
1560 | // we need to verify the file against the current Release file again | |
1561 | // on an if-modified-since hit to avoid a stale-data attack against us | |
1562 | if(StringToBool(LookupTag(Message,"IMS-Hit"),false) == true) | |
1563 | { | |
1564 | // The file's timestamp matches; reverify it by copying it into partial/ | |
1565 | EraseFileName = ""; | |
1566 | ReverifyAfterIMS(); | |
1567 | return; | |
1568 | } | |
1569 | ||
1570 | // If we have compressed indexes enabled, queue for hash verification | |
1571 | if (_config->FindB("Acquire::GzipIndexes",false)) | |
1572 | { | |
1573 | DestFile = GetPartialFileNameFromURI(RealURI + '.' + CurrentCompressionExtension); | |
1574 | EraseFileName = ""; | |
1575 | Stage = STAGE_DECOMPRESS_AND_VERIFY; | |
1576 | Desc.URI = "copy:" + FileName; | |
1577 | QueueURI(Desc); | |
1578 | SetActiveSubprocess("copy"); | |
1579 | return; | |
1580 | } | |
1581 | ||
1582 | // get the binary name for the compression type in use | |
1583 | string decompProg; | |
1584 | if(CurrentCompressionExtension == "uncompressed") | |
1585 | decompProg = "copy"; | |
1586 | else | |
1587 | decompProg = _config->Find(string("Acquire::CompressionTypes::").append(CurrentCompressionExtension),""); | |
1588 | if(decompProg.empty() == true) | |
1589 | { | |
1590 | _error->Error("Unsupported extension: %s", CurrentCompressionExtension.c_str()); | |
1591 | return; | |
1592 | } | |
1593 | ||
1594 | // queue uri for the next stage | |
1595 | Stage = STAGE_DECOMPRESS_AND_VERIFY; | |
1596 | DestFile += ".decomp"; | |
1597 | Desc.URI = decompProg + ":" + FileName; | |
1598 | QueueURI(Desc); | |
1599 | SetActiveSubprocess(decompProg); | |
1600 | } | |
1601 | /*}}}*/ | |
1602 | // pkgAcqIndex::StageDecompressDone - Final verification /*{{{*/ | |
1603 | void pkgAcqIndex::StageDecompressDone(string Message, | |
1604 | HashStringList const &Hashes, | |
1605 | pkgAcquire::MethodConfig *Cfg) | |
1606 | { | |
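| // compare the hashes of the decompressed file with the expected values | |
| // from the Release file | |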
1607 | if (ExpectedHashes.usable() && ExpectedHashes != Hashes) | |
1608 | { | |
1609 | Desc.URI = RealURI; | |
1610 | RenameOnError(HashSumMismatch); | |
1611 | printHashSumComparision(RealURI, ExpectedHashes, Hashes); | |
1612 | Failed(Message, Cfg); | |
1613 | return; | |
1614 | } | |
1615 | ||
1616 | if(!ValidateFile(DestFile)) | |
1617 | { | |
1618 | RenameOnError(InvalidFormat); | |
1619 | Failed(Message, Cfg); | |
1620 | return; | |
1621 | } | |
1622 | ||
1623 | // Done, queue for rename on transaction finished | |
1624 | TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename()); | |
1625 | ||
1626 | return; | |
1627 | } | |
1628 | /*}}}*/ | |
1629 | // AcqMetaBase - Constructor /*{{{*/ | |
1630 | pkgAcqMetaBase::pkgAcqMetaBase(pkgAcquire *Owner, | |
1631 | const std::vector<IndexTarget*>* IndexTargets, | |
1632 | indexRecords* MetaIndexParser, | |
1633 | std::string const &RealURI, | |
1634 | HashStringList const &ExpectedHashes, | |
1635 | pkgAcqMetaBase *TransactionManager) | |
1636 | : Item(Owner, ExpectedHashes, TransactionManager), | |
1637 | MetaIndexParser(MetaIndexParser), IndexTargets(IndexTargets), | |
1638 | AuthPass(false), RealURI(RealURI), IMSHit(false) | |
1639 | { | |
1640 | } | |
1641 | /*}}}*/ | |
1642 | // AcqMetaBase::Add - Add an item to the current Transaction /*{{{*/ | |
1643 | void pkgAcqMetaBase::Add(Item *I) | |
1644 | { | |
1645 | Transaction.push_back(I); | |
1646 | } | |
1647 | /*}}}*/ | |
1648 | // AcqMetaBase::AbortTransaction - Abort the current Transaction /*{{{*/ | |
1649 | void pkgAcqMetaBase::AbortTransaction() | |
1650 | { | |
1651 | if(_config->FindB("Debug::Acquire::Transaction", false) == true) | |
1652 | std::clog << "AbortTransaction: " << TransactionManager << std::endl; | |
1653 | ||
1654 | // ensure the toplevel is in error state too | |
1655 | for (std::vector<Item*>::iterator I = Transaction.begin(); | |
1656 | I != Transaction.end(); ++I) | |
1657 | { | |
1658 | (*I)->TransactionState(TransactionAbort); | |
1659 | } | |
1660 | Transaction.clear(); | |
1661 | } | |
1662 | /*}}}*/ | |
1663 | // AcqMetaBase::TransactionHasError - Check for errors in Transaction /*{{{*/ | |
1664 | bool pkgAcqMetaBase::TransactionHasError() | |
1665 | { | |
1666 | for (pkgAcquire::ItemIterator I = Transaction.begin(); | |
1667 | I != Transaction.end(); ++I) | |
1668 | { | |
1669 | switch((*I)->Status) { | |
1670 | case StatDone: break; | |
1671 | case StatIdle: break; | |
1672 | case StatAuthError: return true; | |
1673 | case StatError: return true; | |
1674 | case StatTransientNetworkError: return true; | |
1675 | case StatFetching: break; | |
1676 | } | |
1677 | } | |
1678 | return false; | |
1679 | } | |
1680 | /*}}}*/ | |
1681 | // AcqMetaBase::CommitTransaction - Commit a transaction /*{{{*/ | |
1682 | void pkgAcqMetaBase::CommitTransaction() | |
1683 | { | |
1684 | if(_config->FindB("Debug::Acquire::Transaction", false) == true) | |
1685 | std::clog << "CommitTransaction: " << this << std::endl; | |
1686 | ||
1687 | // move new files into place *and* remove files that are not | |
1688 | // part of the transaction but are still on disk | |
1689 | for (std::vector<Item*>::iterator I = Transaction.begin(); | |
1690 | I != Transaction.end(); ++I) | |
1691 | { | |
1692 | (*I)->TransactionState(TransactionCommit); | |
1693 | } | |
1694 | Transaction.clear(); | |
1695 | } | |
1696 | /*}}}*/ | |
1697 | // AcqMetaBase::TransactionStageCopy - Stage a file for copying /*{{{*/ | |
1698 | void pkgAcqMetaBase::TransactionStageCopy(Item *I, | |
1699 | const std::string &From, | |
1700 | const std::string &To) | |
1701 | { | |
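| // record source and destination; the actual rename is performed when the | |
| // transaction is committed | |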
1702 | I->PartialFile = From; | |
1703 | I->DestFile = To; | |
1704 | } | |
1705 | /*}}}*/ | |
1706 | // AcqMetaBase::TransactionStageRemoval - Stage a file for removal /*{{{*/ | |
1707 | void pkgAcqMetaBase::TransactionStageRemoval(Item *I, | |
1708 | const std::string &FinalFile) | |
1709 | { | |
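| // an empty PartialFile marks FinalFile for removal when the transaction | |
| // is committed | |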
1710 | I->PartialFile = ""; | |
1711 | I->DestFile = FinalFile; | |
1712 | } | |
1713 | /*}}}*/ | |
1714 | // AcqMetaBase::GenerateAuthWarning - Check gpg authentication error /*{{{*/ | |
1715 | bool pkgAcqMetaBase::CheckStopAuthentication(const std::string &Message) | |
1716 | { | |
1717 | // FIXME: this entire function can go now that we disallow going to | |
1718 | // an unauthenticated state and can cleanly roll back | |
1719 | ||
1720 | string const Final = GetFinalFilename(); | |
1721 | if(FileExists(Final)) | |
1722 | { | |
1723 | Status = StatTransientNetworkError; | |
1724 | _error->Warning(_("An error occurred during the signature " | |
1725 | "verification. The repository is not updated " | |
1726 | "and the previous index files will be used. " | |
1727 | "GPG error: %s: %s\n"), | |
1728 | Desc.Description.c_str(), | |
1729 | LookupTag(Message,"Message").c_str()); | |
1730 | RunScripts("APT::Update::Auth-Failure"); | |
1731 | return true; | |
1732 | } else if (LookupTag(Message,"Message").find("NODATA") != string::npos) { | |
1733 | /* Invalid signature file, reject (LP: #346386) (Closes: #627642) */ | |
1734 | _error->Error(_("GPG error: %s: %s"), | |
1735 | Desc.Description.c_str(), | |
1736 | LookupTag(Message,"Message").c_str()); | |
1737 | Status = StatError; | |
1738 | return true; | |
1739 | } else { | |
1740 | _error->Warning(_("GPG error: %s: %s"), | |
1741 | Desc.Description.c_str(), | |
1742 | LookupTag(Message,"Message").c_str()); | |
1743 | } | |
1744 | // gpgv method failed | |
1745 | ReportMirrorFailure("GPGFailure"); | |
1746 | return false; | |
1747 | } | |
1748 | /*}}}*/ | |
1749 | // AcqMetaSig::AcqMetaSig - Constructor /*{{{*/ | |
1750 | pkgAcqMetaSig::pkgAcqMetaSig(pkgAcquire *Owner, | |
1751 | pkgAcqMetaBase *TransactionManager, | |
1752 | string URI,string URIDesc,string ShortDesc, | |
1753 | string MetaIndexFile, | |
1754 | const vector<IndexTarget*>* IndexTargets, | |
1755 | indexRecords* MetaIndexParser) : | |
1756 | pkgAcqMetaBase(Owner, IndexTargets, MetaIndexParser, URI, | |
1757 | HashStringList(), TransactionManager), | |
1758 | MetaIndexFile(MetaIndexFile), URIDesc(URIDesc), | |
1759 | ShortDesc(ShortDesc) | |
1760 | { | |
1761 | DestFile = GetPartialFileNameFromURI(RealURI); | |
1762 | ||
1763 | // remove any partially downloaded sig-file in partial/. | |
1764 | // it may confuse proxies and is too small to warrant a | |
1765 | // partial download anyway | |
1766 | unlink(DestFile.c_str()); | |
1767 | ||
1768 | // set the TransactionManager | |
1769 | if(_config->FindB("Debug::Acquire::Transaction", false) == true) | |
1770 | std::clog << "New pkgAcqMetaSig with TransactionManager " | |
1771 | << TransactionManager << std::endl; | |
1772 | ||
1773 | // Create the item | |
1774 | Desc.Description = URIDesc; | |
1775 | Desc.Owner = this; | |
1776 | Desc.ShortDesc = ShortDesc; | |
1777 | Desc.URI = URI; | |
1778 | ||
1779 | QueueURI(Desc); | |
1780 | } | |
1781 | /*}}}*/ | |
1782 | pkgAcqMetaSig::~pkgAcqMetaSig() /*{{{*/ | |
1783 | { | |
1784 | } | |
1785 | /*}}}*/ | |
1786 | // pkgAcqMetaSig::Done - The signature was downloaded/verified /*{{{*/ | |
1787 | // --------------------------------------------------------------------- | |
1788 | /* Queue the downloaded signature for gpgv verification, or stage the verified file once authentication has succeeded. */ | |
1789 | void pkgAcqMetaSig::Done(string Message,unsigned long long Size, | |
1790 | HashStringList const &Hashes, | |
1791 | pkgAcquire::MethodConfig *Cfg) | |
1792 | { | |
1793 | Item::Done(Message, Size, Hashes, Cfg); | |
1794 | ||
1795 | if(AuthPass == false) | |
1796 | { | |
1797 | if(CheckDownloadDone(Message) == true) | |
1798 | { | |
1799 | // destfile will be modified to point to MetaIndexFile for the | |
1800 | // gpgv method, so we need to save it here | |
1801 | MetaIndexFileSignature = DestFile; | |
1802 | QueueForSignatureVerify(MetaIndexFile, MetaIndexFileSignature); | |
1803 | } | |
1804 | return; | |
1805 | } | |
1806 | else if(CheckAuthDone(Message) == true) | |
1807 | TransactionManager->TransactionStageCopy(this, MetaIndexFileSignature, GetFinalFilename()); | |
1808 | } | |
1809 | /*}}}*/ | |
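| // pkgAcqMetaSig::Failed - The signature could not be fetched or verified | |
| // --------------------------------------------------------------------- | |
| /* Refuse to downgrade a previously signed repository unless explicitly | |
|    allowed, and otherwise only continue without authentication if the | |
|    user permits insecure repositories. */ | |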
1810 | void pkgAcqMetaSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf)/*{{{*/ | |
1811 | { | |
1812 | Item::Failed(Message,Cnf); | |
1813 | ||
1814 | // check if we need to fail at this point | |
1815 | if (AuthPass == true && CheckStopAuthentication(Message)) | |
1816 | return; | |
1817 | ||
1818 | // FIXME: meh, this is not really elegant | |
1819 | string const Final = GetFinalFileNameFromURI(RealURI); | |
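| // note that replace() modifies RealURI in place, so from here on it | |
| // refers to the InRelease URI | |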
1820 | string const InReleaseURI = RealURI.replace(RealURI.rfind("Release.gpg"), 12, | |
1821 | "InRelease"); | |
1822 | string const FinalInRelease = GetFinalFileNameFromURI(InReleaseURI); | |
1823 | ||
1824 | if (RealFileExists(Final) || RealFileExists(FinalInRelease)) | |
1825 | { | |
1826 | std::string downgrade_msg; | |
1827 | strprintf(downgrade_msg, _("The repository '%s' is no longer signed."), | |
1828 | URIDesc.c_str()); | |
1829 | if(_config->FindB("Acquire::AllowDowngradeToInsecureRepositories")) | |
1830 | { | |
1831 | // meh, the user wants to take risks (we still mark the packages | |
1832 | // from this repository as unauthenticated) | |
1833 | _error->Warning("%s", downgrade_msg.c_str()); | |
1834 | _error->Warning(_("This is normally not allowed, but the option " | |
1835 | "Acquire::AllowDowngradeToInsecureRepositories was " | |
1836 | "given to override it.")); | |
1837 | Status = StatDone; | |
1838 | } else { | |
1839 | _error->Error("%s", downgrade_msg.c_str()); | |
1840 | Rename(MetaIndexFile, MetaIndexFile+".FAILED"); | |
1841 | Item::Failed("Message: " + downgrade_msg, Cnf); | |
1842 | TransactionManager->AbortTransaction(); | |
1843 | return; | |
1844 | } | |
1845 | } | |
1846 | else | |
1847 | _error->Warning(_("The data from '%s' is not signed. Packages " | |
1848 | "from that repository can not be authenticated."), | |
1849 | URIDesc.c_str()); | |
1850 | ||
1851 | // this ensures that any file in the lists/ dir is removed by the | |
1852 | // transaction | |
1853 | DestFile = GetPartialFileNameFromURI(RealURI); | |
1854 | TransactionManager->TransactionStageRemoval(this, DestFile); | |
1855 | ||
1856 | // only allow going further if the user explicitly wants it | |
1857 | if(AllowInsecureRepositories(MetaIndexParser, TransactionManager, this) == true) | |
1858 | { | |
1859 | // we parse the indexes here because at this point the user wanted | |
1860 | // a repository that may potentially be harmful | |
1861 | MetaIndexParser->Load(MetaIndexFile); | |
1862 | if (!VerifyVendor(Message)) | |
1863 | /* expired Release files are still a problem you need extra force for */; | |
1864 | else | |
1865 | QueueIndexes(true); | |
1866 | } | |
1867 | ||
1868 | // FIXME: this is used often (e.g. in pkgAcqIndexTrans) so refactor | |
1869 | if (Cnf->LocalOnly == true || | |
1870 | StringToBool(LookupTag(Message,"Transient-Failure"),false) == false) | |
1871 | { | |
1872 | // Ignore this | |
1873 | Status = StatDone; | |
1874 | } | |
1875 | } | |
1876 | /*}}}*/ | |
1877 | pkgAcqMetaIndex::pkgAcqMetaIndex(pkgAcquire *Owner, /*{{{*/ | |
1878 | pkgAcqMetaBase *TransactionManager, | |
1879 | string URI,string URIDesc,string ShortDesc, | |
1880 | string MetaIndexSigURI,string MetaIndexSigURIDesc, string MetaIndexSigShortDesc, | |
1881 | const vector<IndexTarget*>* IndexTargets, | |
1882 | indexRecords* MetaIndexParser) : | |
1883 | pkgAcqMetaBase(Owner, IndexTargets, MetaIndexParser, URI, HashStringList(), | |
1884 | TransactionManager), | |
1885 | URIDesc(URIDesc), ShortDesc(ShortDesc), | |
1886 | MetaIndexSigURI(MetaIndexSigURI), MetaIndexSigURIDesc(MetaIndexSigURIDesc), | |
1887 | MetaIndexSigShortDesc(MetaIndexSigShortDesc) | |
1888 | { | |
1889 | if(TransactionManager == NULL) | |
1890 | { | |
1891 | this->TransactionManager = this; | |
1892 | this->TransactionManager->Add(this); | |
1893 | } | |
1894 | ||
1895 | if(_config->FindB("Debug::Acquire::Transaction", false) == true) | |
1896 | std::clog << "New pkgAcqMetaIndex with TransactionManager " | |
1897 | << this->TransactionManager << std::endl; | |
1898 | ||
1899 | ||
1900 | Init(URIDesc, ShortDesc); | |
1901 | } | |
1902 | /*}}}*/ | |
1903 | // pkgAcqMetaIndex::Init - Delayed constructor /*{{{*/ | |
1904 | void pkgAcqMetaIndex::Init(std::string URIDesc, std::string ShortDesc) | |
1905 | { | |
1906 | DestFile = GetPartialFileNameFromURI(RealURI); | |
1907 | ||
1908 | // Create the item | |
1909 | Desc.Description = URIDesc; | |
1910 | Desc.Owner = this; | |
1911 | Desc.ShortDesc = ShortDesc; | |
1912 | Desc.URI = RealURI; | |
1913 | ||
1914 | // we expect more items | |
1915 | ExpectedAdditionalItems = IndexTargets->size(); | |
1916 | QueueURI(Desc); | |
1917 | } | |
1918 | /*}}}*/ | |
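| // pkgAcqMetaIndex::Done - The Release file has been downloaded | |
| // --------------------------------------------------------------------- | |
| /* Queue the matching Release.gpg; verification and the queueing of the | |
|    index files continue in pkgAcqMetaSig::Done(). */ | |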
1919 | void pkgAcqMetaIndex::Done(string Message,unsigned long long Size, /*{{{*/ | |
1920 | HashStringList const &Hashes, | |
1921 | pkgAcquire::MethodConfig *Cfg) | |
1922 | { | |
1923 | Item::Done(Message,Size,Hashes,Cfg); | |
1924 | ||
1925 | if(CheckDownloadDone(Message)) | |
1926 | { | |
1927 | // we have a Release file; now download the Signature. All further | |
1928 | // verification and queueing of additional downloads is done in the | |
1929 | // pkgAcqMetaSig::Done() code | |
1930 | std::string MetaIndexFile = DestFile; | |
1931 | new pkgAcqMetaSig(Owner, TransactionManager, | |
1932 | MetaIndexSigURI, MetaIndexSigURIDesc, | |
1933 | MetaIndexSigShortDesc, MetaIndexFile, IndexTargets, | |
1934 | MetaIndexParser); | |
1935 | ||
1936 | TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename()); | |
1937 | } | |
1938 | } | |
1939 | /*}}}*/ | |
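| // pkgAcqMetaBase::CheckAuthDone - Handle a successful gpgv verification | |
| // --------------------------------------------------------------------- | |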
1940 | bool pkgAcqMetaBase::CheckAuthDone(string Message) /*{{{*/ | |
1941 | { | |
1942 | // At this point, the gpgv method has succeeded, so there is a | |
1943 | // valid signature from a key in the trusted keyring. We | |
1944 | // perform additional verification of its contents, and use them | |
1945 | // to verify the indexes we are about to download | |
1946 | ||
1947 | if (!MetaIndexParser->Load(DestFile)) | |
1948 | { | |
1949 | Status = StatAuthError; | |
1950 | ErrorText = MetaIndexParser->ErrorText; | |
1951 | return false; | |
1952 | } | |
1953 | ||
1954 | if (!VerifyVendor(Message)) | |
1955 | { | |
1956 | Status = StatAuthError; | |
1957 | return false; | |
1958 | } | |
1959 | ||
1960 | if (_config->FindB("Debug::pkgAcquire::Auth", false)) | |
1961 | std::cerr << "Signature verification succeeded: " | |
1962 | << DestFile << std::endl; | |
1963 | ||
1964 | // Download further indexes with verification | |
1965 | QueueIndexes(true); | |
1966 | ||
1967 | return true; | |
1968 | } | |
1969 | /*}}}*/ | |
1970 | // pkgAcqMetaBase::Custom600Headers - Get header for AcqMetaBase /*{{{*/ | |
1971 | // --------------------------------------------------------------------- | |
1972 | #if APT_PKG_ABI >= 413 | |
1973 | string pkgAcqMetaBase::Custom600Headers() const | |
1974 | #else | |
1975 | string pkgAcqMetaBase::Custom600Headers() | |
1976 | #endif | |
1977 | { | |
1978 | std::string Header = "\nIndex-File: true"; | |
1979 | std::string MaximumSize; | |
1980 | strprintf(MaximumSize, "\nMaximum-Size: %i", | |
1981 | _config->FindI("Acquire::MaxReleaseFileSize", 10*1000*1000)); | |
1982 | Header += MaximumSize; | |
1983 | ||
1984 | string const FinalFile = GetFinalFilename(); | |
1985 | ||
1986 | struct stat Buf; | |
1987 | if (stat(FinalFile.c_str(),&Buf) == 0) | |
1988 | Header += "\nLast-Modified: " + TimeRFC1123(Buf.st_mtime); | |
1989 | ||
1990 | return Header; | |
1991 | } | |
1992 | /*}}}*/ | |
1993 | // pkgAcqMetaBase::GetFinalFilename - Return the full final file path /*{{{*/ | |
1994 | std::string pkgAcqMetaBase::GetFinalFilename() const | |
1995 | { | |
1996 | return GetFinalFileNameFromURI(RealURI); | |
1997 | } | |
1998 | /*}}}*/ | |
1999 | // pkgAcqMetaBase::QueueForSignatureVerify /*{{{*/ | |
2000 | void pkgAcqMetaBase::QueueForSignatureVerify(const std::string &MetaIndexFile, | |
2001 | const std::string &MetaIndexFileSignature) | |
2002 | { | |
2003 | AuthPass = true; | |
2004 | Desc.URI = "gpgv:" + MetaIndexFileSignature; | |
2005 | DestFile = MetaIndexFile; | |
2006 | QueueURI(Desc); | |
2007 | SetActiveSubprocess("gpgv"); | |
2008 | } | |
2009 | /*}}}*/ | |
2010 | // pkgAcqMetaBase::CheckDownloadDone /*{{{*/ | |
2011 | bool pkgAcqMetaBase::CheckDownloadDone(const std::string &Message) | |
2012 | { | |
2013 | // We have just finished downloading a Release file (it is not | |
2014 | // verified yet) | |
2015 | ||
2016 | string FileName = LookupTag(Message,"Filename"); | |
2017 | if (FileName.empty() == true) | |
2018 | { | |
2019 | Status = StatError; | |
2020 | ErrorText = "Method gave a blank filename"; | |
2021 | return false; | |
2022 | } | |
2023 | ||
2024 | if (FileName != DestFile) | |
2025 | { | |
2026 | Local = true; | |
2027 | Desc.URI = "copy:" + FileName; | |
2028 | QueueURI(Desc); | |
2029 | return false; | |
2030 | } | |
2031 | ||
2032 | // make sure to verify against the right file on I-M-S hit | |
2033 | IMSHit = StringToBool(LookupTag(Message,"IMS-Hit"),false); | |
2034 | if(IMSHit) | |
2035 | { | |
2036 | // for simplicity, the transaction manager is always the InRelease | |
2037 | // item, even if it doesn't exist on the server. | |
2038 | if (TransactionManager != NULL) | |
2039 | TransactionManager->IMSHit = true; | |
2040 | DestFile = GetFinalFilename(); | |
2041 | } | |
2042 | ||
2043 | // set Item to complete as the remaining work is all local (verify etc) | |
2044 | Complete = true; | |
2045 | ||
2046 | return true; | |
2047 | } | |
2048 | /*}}}*/ | |
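| // pkgAcqMetaBase::QueueIndexes - Queue the index targets for download | |
| // --------------------------------------------------------------------- | |
| /* One item is queued per index target. With verify enabled, a target | |
|    without a Release file entry or without hashes is an authentication | |
|    error, while optional targets without an entry are simply skipped. */ | |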
2049 | void pkgAcqMetaBase::QueueIndexes(bool verify) /*{{{*/ | |
2050 | { | |
2051 | // at this point the real Items are loaded in the fetcher | |
2052 | ExpectedAdditionalItems = 0; | |
2053 | ||
2054 | vector <struct IndexTarget*>::const_iterator Target; | |
2055 | for (Target = IndexTargets->begin(); | |
2056 | Target != IndexTargets->end(); | |
2057 | ++Target) | |
2058 | { | |
2059 | HashStringList ExpectedIndexHashes; | |
2060 | const indexRecords::checkSum *Record = MetaIndexParser->Lookup((*Target)->MetaKey); | |
2061 | ||
2062 | // optional targets that we do not have in the Release file are | |
2063 | // skipped | |
2064 | if (verify == true && Record == NULL && (*Target)->IsOptional()) | |
2065 | continue; | |
2066 | ||
2067 | // targets without a hash record are an error when verify is required | |
2068 | if (verify == true && Record == NULL) | |
2069 | { | |
2070 | Status = StatAuthError; | |
2071 | strprintf(ErrorText, _("Unable to find expected entry '%s' in Release file (Wrong sources.list entry or malformed file)"), (*Target)->MetaKey.c_str()); | |
2072 | return; | |
2073 | } | |
2074 | ||
2075 | if (Record) | |
2076 | ExpectedIndexHashes = Record->Hashes; | |
2077 | ||
2078 | if (_config->FindB("Debug::pkgAcquire::Auth", false)) | |
2079 | { | |
2080 | std::cerr << "Queueing: " << (*Target)->URI << std::endl | |
2081 | << "Expected Hash:" << std::endl; | |
2082 | for (HashStringList::const_iterator hs = ExpectedIndexHashes.begin(); hs != ExpectedIndexHashes.end(); ++hs) | |
2083 | std::cerr << "\t- " << hs->toStr() << std::endl; | |
2084 | std::cerr << "For: " << ((Record == NULL) ? "<NULL>" : Record->MetaKeyFilename) << std::endl; | |
2085 | ||
2086 | } | |
2087 | if (verify == true && ExpectedIndexHashes.empty() == true) | |
2088 | { | |
2089 | Status = StatAuthError; | |
2090 | strprintf(ErrorText, _("Unable to find hash sum for '%s' in Release file"), (*Target)->MetaKey.c_str()); | |
2091 | return; | |
2092 | } | |
2093 | ||
2094 | /* Queue the Index files (Packages, Sources, Translation-$foo) - either | |
2095 | diff or full packages files, depending on the user's option. We also | |
2096 | check if the PDiff Index file is listed in the Meta-Index file. | |
2097 | Ideally pkgAcqDiffIndex would test this instead, but passing the | |
2098 | required info to it is too much hassle */ | |
2099 | if(_config->FindB("Acquire::PDiffs",true) == true && (verify == false || | |
2100 | MetaIndexParser->Exists((*Target)->MetaKey + ".diff/Index") == true)) | |
2101 | new pkgAcqDiffIndex(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser); | |
2102 | else | |
2103 | new pkgAcqIndex(Owner, TransactionManager, *Target, ExpectedIndexHashes, MetaIndexParser); | |
2104 | } | |
2105 | } | |
2106 | /*}}}*/ | |
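| // pkgAcqMetaBase::VerifyVendor - Sanity-check the verified Release file | |
| // --------------------------------------------------------------------- | |
| /* Warn about missing public keys, reject an expired Release file and warn | |
|    when the distribution it names does not match the expected one. */ | |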
2107 | bool pkgAcqMetaBase::VerifyVendor(string Message) /*{{{*/ | |
2108 | { | |
2109 | string::size_type pos; | |
2110 | ||
2111 | // check for missing sigs (they were not fatal because otherwise we | |
2112 | // would have bombed earlier) | |
2113 | string missingkeys; | |
2114 | string msg = _("There is no public key available for the " | |
2115 | "following key IDs:\n"); | |
2116 | pos = Message.find("NO_PUBKEY "); | |
2117 | if (pos != std::string::npos) | |
2118 | { | |
2119 | string::size_type start = pos+strlen("NO_PUBKEY "); | |
2120 | string Fingerprint = Message.substr(start, Message.find("\n")-start); | |
2121 | missingkeys += (Fingerprint); | |
2122 | } | |
2123 | if(!missingkeys.empty()) | |
2124 | _error->Warning("%s", (msg + missingkeys).c_str()); | |
2125 | ||
2126 | string Transformed = MetaIndexParser->GetExpectedDist(); | |
2127 | ||
2128 | if (Transformed == "../project/experimental") | |
2129 | { | |
2130 | Transformed = "experimental"; | |
2131 | } | |
2132 | ||
2133 | pos = Transformed.rfind('/'); | |
2134 | if (pos != string::npos) | |
2135 | { | |
2136 | Transformed = Transformed.substr(0, pos); | |
2137 | } | |
2138 | ||
2139 | if (Transformed == ".") | |
2140 | { | |
2141 | Transformed = ""; | |
2142 | } | |
2143 | ||
2144 | if (_config->FindB("Acquire::Check-Valid-Until", true) == true && | |
2145 | MetaIndexParser->GetValidUntil() > 0) { | |
2146 | time_t const invalid_since = time(NULL) - MetaIndexParser->GetValidUntil(); | |
2147 | if (invalid_since > 0) | |
2148 | { | |
2149 | std::string errmsg; | |
2150 | strprintf(errmsg, | |
2151 | // TRANSLATOR: The first %s is the URL of the bad Release file, the second is | |
2152 | // the time since which the file has been invalid - formatted in the same way as in | |
2153 | // the download progress display (e.g. 7d 3h 42min 1s) | |
2154 | _("Release file for %s is expired (invalid since %s). " | |
2155 | "Updates for this repository will not be applied."), | |
2156 | RealURI.c_str(), TimeToStr(invalid_since).c_str()); | |
2157 | if (ErrorText.empty()) | |
2158 | ErrorText = errmsg; | |
2159 | return _error->Error("%s", errmsg.c_str()); | |
2160 | } | |
2161 | } | |
2162 | ||
2163 | if (_config->FindB("Debug::pkgAcquire::Auth", false)) | |
2164 | { | |
2165 | std::cerr << "Got Codename: " << MetaIndexParser->GetDist() << std::endl; | |
2166 | std::cerr << "Expecting Dist: " << MetaIndexParser->GetExpectedDist() << std::endl; | |
2167 | std::cerr << "Transformed Dist: " << Transformed << std::endl; | |
2168 | } | |
2169 | ||
2170 | if (MetaIndexParser->CheckDist(Transformed) == false) | |
2171 | { | |
2172 | // This might become fatal one day | |
2173 | // Status = StatAuthError; | |
2174 | // ErrorText = "Conflicting distribution; expected " | |
2175 | // + MetaIndexParser->GetExpectedDist() + " but got " | |
2176 | // + MetaIndexParser->GetDist(); | |
2177 | // return false; | |
2178 | if (!Transformed.empty()) | |
2179 | { | |
2180 | _error->Warning(_("Conflicting distribution: %s (expected %s but got %s)"), | |
2181 | Desc.Description.c_str(), | |
2182 | Transformed.c_str(), | |
2183 | MetaIndexParser->GetDist().c_str()); | |
2184 | } | |
2185 | } | |
2186 | ||
2187 | return true; | |
2188 | } | |
2189 | /*}}}*/ | |
2190 | // pkgAcqMetaIndex::Failed - no Release file present /*{{{*/ | |
2191 | void pkgAcqMetaIndex::Failed(string Message, | |
2192 | pkgAcquire::MethodConfig * Cnf) | |
2193 | { | |
2194 | pkgAcquire::Item::Failed(Message, Cnf); | |
2195 | Status = StatDone; | |
2196 | ||
2197 | _error->Warning(_("The repository '%s' does not have a Release file. " | |
2198 | "This is deprecated, please contact the owner of the " | |
2199 | "repository."), URIDesc.c_str()); | |
2200 | ||
2201 | // No Release file was present, so fall | |
2202 | // back to queueing Packages files without verification; | |
2203 | // only allow going further if the user explicitly wants it | |
2204 | if(AllowInsecureRepositories(MetaIndexParser, TransactionManager, this) == true) | |
2205 | { | |
2206 | // Done, queue for rename on transaction finished | |
2207 | if (FileExists(DestFile)) | |
2208 | TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename()); | |
2209 | ||
2210 | // queue without any kind of hashsum support | |
2211 | QueueIndexes(false); | |
2212 | } | |
2213 | } | |
2214 | /*}}}*/ | |
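| // pkgAcqMetaIndex::Finished - All queued items have been processed | |
| // --------------------------------------------------------------------- | |
| /* Commit the transaction, but only if none of its items ended in an | |
|    error state. */ | |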
2215 | void pkgAcqMetaIndex::Finished() /*{{{*/ | |
2216 | { | |
2217 | if(_config->FindB("Debug::Acquire::Transaction", false) == true) | |
2218 | std::clog << "Finished: " << DestFile <<std::endl; | |
2219 | if(TransactionManager != NULL && | |
2220 | TransactionManager->TransactionHasError() == false) | |
2221 | TransactionManager->CommitTransaction(); | |
2222 | } | |
2223 | /*}}}*/ | |
2224 | pkgAcqMetaClearSig::pkgAcqMetaClearSig(pkgAcquire *Owner, /*{{{*/ | |
2225 | string const &URI, string const &URIDesc, string const &ShortDesc, | |
2226 | string const &MetaIndexURI, string const &MetaIndexURIDesc, string const &MetaIndexShortDesc, | |
2227 | string const &MetaSigURI, string const &MetaSigURIDesc, string const &MetaSigShortDesc, | |
2228 | const vector<IndexTarget*>* IndexTargets, | |
2229 | indexRecords* MetaIndexParser) : | |
2230 | pkgAcqMetaIndex(Owner, NULL, URI, URIDesc, ShortDesc, MetaSigURI, MetaSigURIDesc,MetaSigShortDesc, IndexTargets, MetaIndexParser), | |
2231 | MetaIndexURI(MetaIndexURI), MetaIndexURIDesc(MetaIndexURIDesc), MetaIndexShortDesc(MetaIndexShortDesc), | |
2232 | MetaSigURI(MetaSigURI), MetaSigURIDesc(MetaSigURIDesc), MetaSigShortDesc(MetaSigShortDesc) | |
2233 | { | |
2234 | // index targets + (worst case:) Release/Release.gpg | |
2235 | ExpectedAdditionalItems = IndexTargets->size() + 2; | |
2236 | ||
2237 | } | |
2238 | /*}}}*/ | |
2239 | pkgAcqMetaClearSig::~pkgAcqMetaClearSig() /*{{{*/ | |
2240 | { | |
2241 | } | |
2242 | /*}}}*/ | |
2243 | // pkgAcqMetaClearSig::Custom600Headers - Insert custom request headers /*{{{*/ | |
2244 | #if APT_PKG_ABI >= 413 | |
2245 | string pkgAcqMetaClearSig::Custom600Headers() const | |
2246 | #else | |
2247 | string pkgAcqMetaClearSig::Custom600Headers() | |
2248 | #endif | |
2249 | { | |
2250 | string Header = pkgAcqMetaBase::Custom600Headers(); | |
2251 | Header += "\nFail-Ignore: true"; | |
2252 | return Header; | |
2253 | } | |
2254 | /*}}}*/ | |
2255 | // pkgAcqMetaClearSig::Done - We got a file /*{{{*/ | |
2256 | // --------------------------------------------------------------------- | |
2257 | void pkgAcqMetaClearSig::Done(std::string Message,unsigned long long Size, | |
2258 | HashStringList const &Hashes, | |
2259 | pkgAcquire::MethodConfig *Cnf) | |
2260 | { | |
2261 | Item::Done(Message, Size, Hashes, Cnf); | |
2262 | ||
2263 | // if we expect a ClearTextSignature (InRelease), ensure that | |
2264 | // this is what we get and if not fail to queue a | |
2265 | // Release/Release.gpg, see #346386 | |
2266 | if (FileExists(DestFile) && !StartsWithGPGClearTextSignature(DestFile)) | |
2267 | { | |
2268 | pkgAcquire::Item::Failed(Message, Cnf); | |
2269 | RenameOnError(NotClearsigned); | |
2270 | TransactionManager->AbortTransaction(); | |
2271 | return; | |
2272 | } | |
2273 | ||
2274 | if(AuthPass == false) | |
2275 | { | |
2276 | if(CheckDownloadDone(Message) == true) | |
2277 | QueueForSignatureVerify(DestFile, DestFile); | |
2278 | return; | |
2279 | } | |
2280 | else if(CheckAuthDone(Message) == true) | |
2281 | TransactionManager->TransactionStageCopy(this, DestFile, GetFinalFilename()); | |
2282 | } | |
2283 | /*}}}*/ | |
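| // pkgAcqMetaClearSig::Failed - The InRelease fetch or verification failed | |
| // --------------------------------------------------------------------- | |
| /* Before authentication fall back to a separate Release/Release.gpg pair; | |
|    afterwards continue unauthenticated only if the user explicitly allows | |
|    insecure repositories. */ | |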
2284 | void pkgAcqMetaClearSig::Failed(string Message,pkgAcquire::MethodConfig *Cnf) /*{{{*/ | |
2285 | { | |
2286 | Item::Failed(Message, Cnf); | |
2287 | ||
2288 | // we failed, we will not get additional items from this method | |
2289 | ExpectedAdditionalItems = 0; | |
2290 | ||
2291 | if (AuthPass == false) | |
2292 | { | |
2293 | // Queue the 'old' InRelease file for removal if we try Release.gpg | |
2294 | // as otherwise the file will stay around and give a false-auth | |
2295 | // impression (CVE-2012-0214) | |
2296 | TransactionManager->TransactionStageRemoval(this, GetFinalFilename()); | |
2297 | Status = StatDone; | |
2298 | ||
2299 | new pkgAcqMetaIndex(Owner, TransactionManager, | |
2300 | MetaIndexURI, MetaIndexURIDesc, MetaIndexShortDesc, | |
2301 | MetaSigURI, MetaSigURIDesc, MetaSigShortDesc, | |
2302 | IndexTargets, MetaIndexParser); | |
2303 | } | |
2304 | else | |
2305 | { | |
2306 | if(CheckStopAuthentication(Message)) | |
2307 | return; | |
2308 | ||
2309 | _error->Warning(_("The data from '%s' is not signed. Packages " | |
2310 | "from that repository can not be authenticated."), | |
2311 | URIDesc.c_str()); | |
2312 | ||
2313 | // No Release file was present, or verification failed, so fall | |
2314 | // back to queueing Packages files without verification | |
2315 | // only allow going further if the user explicitly wants it | |
2316 | if(AllowInsecureRepositories(MetaIndexParser, TransactionManager, this) == true) | |
2317 | { | |
2318 | Status = StatDone; | |
2319 | ||
2320 | /* Always move the meta index, even if gpgv failed. This ensures | |
2321 | * that PackageFile objects are correctly filled in */ | |
2322 | if (FileExists(DestFile)) | |
2323 | { | |
2324 | string FinalFile = GetFinalFilename(); | |
2325 | /* InRelease files become Release files, otherwise | |
2326 | * they would be considered as trusted later on */ | |
2327 | RealURI = RealURI.replace(RealURI.rfind("InRelease"), 9, | |
2328 | "Release"); | |
2329 | FinalFile = FinalFile.replace(FinalFile.rfind("InRelease"), 9, | |
2330 | "Release"); | |
2331 | ||
2332 | // Done, queue for rename on transaction finished | |
2333 | TransactionManager->TransactionStageCopy(this, DestFile, FinalFile); | |
2334 | } | |
2335 | QueueIndexes(false); | |
2336 | } | |
2337 | } | |
2338 | } | |
2339 | /*}}}*/ | |
2340 | // AcqArchive::AcqArchive - Constructor /*{{{*/ | |
2341 | // --------------------------------------------------------------------- | |
2342 | /* This just sets up the initial fetch environment and queues the first | |
2343 | possibility */ | |
2344 | pkgAcqArchive::pkgAcqArchive(pkgAcquire *Owner,pkgSourceList *Sources, | |
2345 | pkgRecords *Recs,pkgCache::VerIterator const &Version, | |
2346 | string &StoreFilename) : | |
2347 | Item(Owner, HashStringList()), Version(Version), Sources(Sources), Recs(Recs), | |
2348 | StoreFilename(StoreFilename), Vf(Version.FileList()), | |
2349 | Trusted(false) | |
2350 | { | |
2351 | Retries = _config->FindI("Acquire::Retries",0); | |
2352 | ||
2353 | if (Version.Arch() == 0) | |
2354 | { | |
2355 | _error->Error(_("I wasn't able to locate a file for the %s package. " | |
2356 | "This might mean you need to manually fix this package. " | |
2357 | "(due to missing arch)"), | |
2358 | Version.ParentPkg().FullName().c_str()); | |
2359 | return; | |
2360 | } | |
2361 | ||
2362 | /* We need to find a filename to determine the extension. We make the | |
2363 | assumption here that all the available sources for this version share | |
2364 | the same extension. */ | |
2365 | // Skip non-source entries (NotSource flag); they do not have file fields. | |
2366 | for (; Vf.end() == false; ++Vf) | |
2367 | { | |
2368 | if ((Vf.File()->Flags & pkgCache::Flag::NotSource) != 0) | |
2369 | continue; | |
2370 | break; | |
2371 | } | |
2372 | ||
2373 | // Does not really matter here; we are going to fail out below | |
2374 | if (Vf.end() != true) | |
2375 | { | |
2376 | // If this fails to get a file name we will bomb out below. | |
2377 | pkgRecords::Parser &Parse = Recs->Lookup(Vf); | |
2378 | if (_error->PendingError() == true) | |
2379 | return; | |
2380 | ||
2381 | // Generate the final file name as: package_version_arch.foo | |
2382 | StoreFilename = QuoteString(Version.ParentPkg().Name(),"_:") + '_' + | |
2383 | QuoteString(Version.VerStr(),"_:") + '_' + | |
2384 | QuoteString(Version.Arch(),"_:.") + | |
2385 | "." + flExtension(Parse.FileName()); | |
2386 | } | |
2387 | ||
2388 | // check if we have one trusted source for the package. if so, switch | |
2389 | // to "TrustedOnly" mode - but only if not in AllowUnauthenticated mode | |
2390 | bool const allowUnauth = _config->FindB("APT::Get::AllowUnauthenticated", false); | |
2391 | bool const debugAuth = _config->FindB("Debug::pkgAcquire::Auth", false); | |
2392 | bool seenUntrusted = false; | |
2393 | for (pkgCache::VerFileIterator i = Version.FileList(); i.end() == false; ++i) | |
2394 | { | |
2395 | pkgIndexFile *Index; | |
2396 | if (Sources->FindIndex(i.File(),Index) == false) | |
2397 | continue; | |
2398 | ||
2399 | if (debugAuth == true) | |
2400 | std::cerr << "Checking index: " << Index->Describe() | |
2401 | << "(Trusted=" << Index->IsTrusted() << ")" << std::endl; | |
2402 | ||
2403 | if (Index->IsTrusted() == true) | |
2404 | { | |
2405 | Trusted = true; | |
2406 | if (allowUnauth == false) | |
2407 | break; | |
2408 | } | |
2409 | else | |
2410 | seenUntrusted = true; | |
2411 | } | |
2412 | ||
2413 | // "allow-unauthenticated" restores apts old fetching behaviour | |
2414 | // that means that e.g. unauthenticated file:// uris are higher | |
2415 | // priority than authenticated http:// uris | |
2416 | if (allowUnauth == true && seenUntrusted == true) | |
2417 | Trusted = false; | |
2418 | ||
2419 | // Select a source | |
2420 | if (QueueNext() == false && _error->PendingError() == false) | |
2421 | _error->Error(_("Can't find a source to download version '%s' of '%s'"), | |
2422 | Version.VerStr(), Version.ParentPkg().FullName(false).c_str()); | |
2423 | } | |
2424 | /*}}}*/ | |
2425 | // AcqArchive::QueueNext - Queue the next file source /*{{{*/ | |
2426 | // --------------------------------------------------------------------- | |
2427 | /* This queues the next available file version for download. It checks if | |
2428 | the archive is already available in the cache and stashes the expected hashes for | |
2429 | checking later. */ | |
2430 | bool pkgAcqArchive::QueueNext() | |
2431 | { | |
2432 | for (; Vf.end() == false; ++Vf) | |
2433 | { | |
2434 | // Ignore non-source entries | |
2435 | if ((Vf.File()->Flags & pkgCache::Flag::NotSource) != 0) | |
2436 | continue; | |
2437 | ||
2438 | // Try to cross match against the source list | |
2439 | pkgIndexFile *Index; | |
2440 | if (Sources->FindIndex(Vf.File(),Index) == false) | |
2441 | continue; | |
2442 | ||
2443 | // only try to get a trusted package from another source if that source | |
2444 | // is also trusted | |
2445 | if(Trusted && !Index->IsTrusted()) | |
2446 | continue; | |
2447 | ||
2448 | // Grab the text package record | |
2449 | pkgRecords::Parser &Parse = Recs->Lookup(Vf); | |
2450 | if (_error->PendingError() == true) | |
2451 | return false; | |
2452 | ||
2453 | string PkgFile = Parse.FileName(); | |
2454 | ExpectedHashes = Parse.Hashes(); | |
2455 | ||
2456 | if (PkgFile.empty() == true) | |
2457 | return _error->Error(_("The package index files are corrupted. No Filename: " | |
2458 | "field for package %s."), | |
2459 | Version.ParentPkg().Name()); | |
2460 | ||
2461 | Desc.URI = Index->ArchiveURI(PkgFile); | |
2462 | Desc.Description = Index->ArchiveInfo(Version); | |
2463 | Desc.Owner = this; | |
2464 | Desc.ShortDesc = Version.ParentPkg().FullName(true); | |
2465 | ||
2466 | // See if we already have the file. (Legacy filenames) | |
2467 | FileSize = Version->Size; | |
2468 | string FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(PkgFile); | |
2469 | struct stat Buf; | |
2470 | if (stat(FinalFile.c_str(),&Buf) == 0) | |
2471 | { | |
2472 | // Make sure the size matches | |
2473 | if ((unsigned long long)Buf.st_size == Version->Size) | |
2474 | { | |
2475 | Complete = true; | |
2476 | Local = true; | |
2477 | Status = StatDone; | |
2478 | StoreFilename = DestFile = FinalFile; | |
2479 | return true; | |
2480 | } | |
2481 | ||
2482 | /* Hmm, we have a file and its size does not match, this means it is | |
2483 | an old style mismatched arch */ | |
2484 | unlink(FinalFile.c_str()); | |
2485 | } | |
2486 | ||
2487 | // Check it again using the new style output filenames | |
2488 | FinalFile = _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename); | |
2489 | if (stat(FinalFile.c_str(),&Buf) == 0) | |
2490 | { | |
2491 | // Make sure the size matches | |
2492 | if ((unsigned long long)Buf.st_size == Version->Size) | |
2493 | { | |
2494 | Complete = true; | |
2495 | Local = true; | |
2496 | Status = StatDone; | |
2497 | StoreFilename = DestFile = FinalFile; | |
2498 | return true; | |
2499 | } | |
2500 | ||
2501 | /* Hmm, we have a file and its size does not match, this shouldn't | |
2502 | happen.. */ | |
2503 | unlink(FinalFile.c_str()); | |
2504 | } | |
2505 | ||
2506 | DestFile = _config->FindDir("Dir::Cache::Archives") + "partial/" + flNotDir(StoreFilename); | |
2507 | ||
2508 | // Check the destination file | |
2509 | if (stat(DestFile.c_str(),&Buf) == 0) | |
2510 | { | |
2511 | // Hmm, the partial file is too big, erase it | |
2512 | if ((unsigned long long)Buf.st_size > Version->Size) | |
2513 | unlink(DestFile.c_str()); | |
2514 | else | |
2515 | PartialSize = Buf.st_size; | |
2516 | } | |
2517 | ||
2518 | // Disables download of archives - useful if no real installation follows, | |
2519 | // e.g. if we are just interested in proposed installation order | |
2520 | if (_config->FindB("Debug::pkgAcqArchive::NoQueue", false) == true) | |
2521 | { | |
2522 | Complete = true; | |
2523 | Local = true; | |
2524 | Status = StatDone; | |
2525 | StoreFilename = DestFile = FinalFile; | |
2526 | return true; | |
2527 | } | |
2528 | ||
2529 | // Create the item | |
2530 | Local = false; | |
2531 | QueueURI(Desc); | |
2532 | ||
2533 | ++Vf; | |
2534 | return true; | |
2535 | } | |
2536 | return false; | |
2537 | } | |
2538 | /*}}}*/ | |
2539 | // AcqArchive::Done - Finished fetching /*{{{*/ | |
2540 | // --------------------------------------------------------------------- | |
2541 | /* */ | |
2542 | void pkgAcqArchive::Done(string Message,unsigned long long Size, HashStringList const &CalcHashes, | |
2543 | pkgAcquire::MethodConfig *Cfg) | |
2544 | { | |
2545 | Item::Done(Message, Size, CalcHashes, Cfg); | |
2546 | ||
2547 | // Check the size | |
2548 | if (Size != Version->Size) | |
2549 | { | |
2550 | RenameOnError(SizeMismatch); | |
2551 | return; | |
2552 | } | |
2553 | ||
2554 | // FIXME: could this empty() check impose *any* sort of security issue? | |
2555 | if(ExpectedHashes.usable() && ExpectedHashes != CalcHashes) | |
2556 | { | |
2557 | RenameOnError(HashSumMismatch); | |
2558 | printHashSumComparision(DestFile, ExpectedHashes, CalcHashes); | |
2559 | return; | |
2560 | } | |
2561 | ||
2562 | // Grab the output filename | |
2563 | string FileName = LookupTag(Message,"Filename"); | |
2564 | if (FileName.empty() == true) | |
2565 | { | |
2566 | Status = StatError; | |
2567 | ErrorText = "Method gave a blank filename"; | |
2568 | return; | |
2569 | } | |
2570 | ||
2571 | // Reference filename | |
2572 | if (FileName != DestFile) | |
2573 | { | |
2574 | StoreFilename = DestFile = FileName; | |
2575 | Local = true; | |
2576 | Complete = true; | |
2577 | return; | |
2578 | } | |
2579 | ||
2580 | // Done, move it into position | |
2581 | string const FinalFile = GetFinalFilename(); | |
2582 | Rename(DestFile,FinalFile); | |
2583 | StoreFilename = DestFile = FinalFile; | |
2584 | Complete = true; | |
2585 | } | |
2586 | /*}}}*/ | |
2587 | // Acquire::Item::GetFinalFilename - Return the full final file path /*{{{*/ | |
2588 | std::string pkgAcqArchive::GetFinalFilename() const | |
2589 | { | |
2590 | return _config->FindDir("Dir::Cache::Archives") + flNotDir(StoreFilename); | |
2591 | } | |
2592 | /*}}}*/ | |
2593 | // AcqArchive::Failed - Failure handler /*{{{*/ | |
2594 | // --------------------------------------------------------------------- | |
2595 | /* Here we try other sources */ | |
2596 | void pkgAcqArchive::Failed(string Message,pkgAcquire::MethodConfig *Cnf) | |
2597 | { | |
2598 | Item::Failed(Message,Cnf); | |
2599 | ||
2600 | /* We don't really want to retry on failed media swaps; this prevents | |
2601 | that. An interesting observation is that permanent failures are not | |
2602 | recorded. */ | |
2603 | if (Cnf->Removable == true && | |
2604 | StringToBool(LookupTag(Message,"Transient-Failure"),false) == true) | |
2605 | { | |
2606 | // Vf = Version.FileList(); | |
2607 | while (Vf.end() == false) ++Vf; | |
2608 | StoreFilename = string(); | |
2609 | return; | |
2610 | } | |
2611 | ||
2612 | Status = StatIdle; | |
2613 | if (QueueNext() == false) | |
2614 | { | |
2615 | // This is the retry counter | |
2616 | if (Retries != 0 && | |
2617 | Cnf->LocalOnly == false && | |
2618 | StringToBool(LookupTag(Message,"Transient-Failure"),false) == true) | |
2619 | { | |
2620 | Retries--; | |
2621 | Vf = Version.FileList(); | |
2622 | if (QueueNext() == true) | |
2623 | return; | |
2624 | } | |
2625 | ||
2626 | StoreFilename = string(); | |
2627 | Status = StatError; | |
2628 | } | |
2629 | } | |
2630 | /*}}}*/ | |
2631 | // AcqArchive::IsTrusted - Determine whether this archive comes from a trusted source /*{{{*/ | |
2632 | // --------------------------------------------------------------------- | |
2633 | #if APT_PKG_ABI >= 413 | |
2634 | APT_PURE bool pkgAcqArchive::IsTrusted() const | |
2635 | #else | |
2636 | APT_PURE bool pkgAcqArchive::IsTrusted() | |
2637 | #endif | |
2638 | { | |
2639 | return Trusted; | |
2640 | } | |
2641 | /*}}}*/ | |
2642 | // AcqArchive::Finished - Fetching has finished, tidy up /*{{{*/ | |
2643 | // --------------------------------------------------------------------- | |
2644 | /* */ | |
2645 | void pkgAcqArchive::Finished() | |
2646 | { | |
2647 | if (Status == pkgAcquire::Item::StatDone && | |
2648 | Complete == true) | |
2649 | return; | |
2650 | StoreFilename = string(); | |
2651 | } | |
2652 | /*}}}*/ | |
2653 | // AcqFile::pkgAcqFile - Constructor /*{{{*/ | |
2654 | // --------------------------------------------------------------------- | |
2655 | /* The file is added to the queue */ | |
2656 | pkgAcqFile::pkgAcqFile(pkgAcquire *Owner,string URI, HashStringList const &Hashes, | |
2657 | unsigned long long Size,string Dsc,string ShortDesc, | |
2658 | const string &DestDir, const string &DestFilename, | |
2659 | bool IsIndexFile) : | |
2660 | Item(Owner, Hashes), IsIndexFile(IsIndexFile) | |
2661 | { | |
2662 | Retries = _config->FindI("Acquire::Retries",0); | |
2663 | ||
2664 | if(!DestFilename.empty()) | |
2665 | DestFile = DestFilename; | |
2666 | else if(!DestDir.empty()) | |
2667 | DestFile = DestDir + "/" + flNotDir(URI); | |
2668 | else | |
2669 | DestFile = flNotDir(URI); | |
2670 | ||
2671 | // Create the item | |
2672 | Desc.URI = URI; | |
2673 | Desc.Description = Dsc; | |
2674 | Desc.Owner = this; | |
2675 | ||
2676 | // Set the short description to the archive component | |
2677 | Desc.ShortDesc = ShortDesc; | |
2678 | ||
2679 | // Get the transfer sizes | |
2680 | FileSize = Size; | |
2681 | struct stat Buf; | |
2682 | if (stat(DestFile.c_str(),&Buf) == 0) | |
2683 | { | |
2684 | // Hmm, the partial file is too big, erase it | |
2685 | if ((Size > 0) && (unsigned long long)Buf.st_size > Size) | |
2686 | unlink(DestFile.c_str()); | |
2687 | else | |
2688 | PartialSize = Buf.st_size; | |
2689 | } | |
2690 | ||
2691 | QueueURI(Desc); | |
2692 | } | |
2693 | /*}}}*/ | |
2694 | // AcqFile::Done - Item downloaded OK /*{{{*/ | |
2695 | // --------------------------------------------------------------------- | |
2696 | /* */ | |
2697 | void pkgAcqFile::Done(string Message,unsigned long long Size,HashStringList const &CalcHashes, | |
2698 | pkgAcquire::MethodConfig *Cnf) | |
2699 | { | |
2700 | Item::Done(Message,Size,CalcHashes,Cnf); | |
2701 | ||
2702 | // Check the hash | |
2703 | if(ExpectedHashes.usable() && ExpectedHashes != CalcHashes) | |
2704 | { | |
2705 | RenameOnError(HashSumMismatch); | |
2706 | printHashSumComparision(DestFile, ExpectedHashes, CalcHashes); | |
2707 | return; | |
2708 | } | |
2709 | ||
2710 | string FileName = LookupTag(Message,"Filename"); | |
2711 | if (FileName.empty() == true) | |
2712 | { | |
2713 | Status = StatError; | |
2714 | ErrorText = "Method gave a blank filename"; | |
2715 | return; | |
2716 | } | |
2717 | ||
2718 | Complete = true; | |
2719 | ||
2720 | // The file's timestamp matches | |
2721 | if (StringToBool(LookupTag(Message,"IMS-Hit"),false) == true) | |
2722 | return; | |
2723 | ||
2724 | // We have to copy it into place | |
2725 | if (FileName != DestFile) | |
2726 | { | |
2727 | Local = true; | |
2728 | if (_config->FindB("Acquire::Source-Symlinks",true) == false || | |
2729 | Cnf->Removable == true) | |
2730 | { | |
2731 | Desc.URI = "copy:" + FileName; | |
2732 | QueueURI(Desc); | |
2733 | return; | |
2734 | } | |
2735 | ||
2736 | // Erase the file if it is a symlink so we can overwrite it | |
2737 | struct stat St; | |
2738 | if (lstat(DestFile.c_str(),&St) == 0) | |
2739 | { | |
2740 | if (S_ISLNK(St.st_mode) != 0) | |
2741 | unlink(DestFile.c_str()); | |
2742 | } | |
2743 | ||
2744 | // Symlink the file | |
2745 | if (symlink(FileName.c_str(),DestFile.c_str()) != 0) | |
2746 | { | |
2747 | _error->PushToStack(); | |
2748 | _error->Errno("pkgAcqFile::Done", "Symlinking file %s failed", DestFile.c_str()); | |
2749 | std::stringstream msg; | |
2750 | _error->DumpErrors(msg); | |
2751 | _error->RevertToStack(); | |
2752 | ErrorText = msg.str(); | |
2753 | Status = StatError; | |
2754 | Complete = false; | |
2755 | } | |
2756 | } | |
2757 | } | |
2758 | /*}}}*/ | |
2759 | // AcqFile::Failed - Failure handler /*{{{*/ | |
2760 | // --------------------------------------------------------------------- | |
2761 | /* Here we try other sources */ | |
2762 | void pkgAcqFile::Failed(string Message,pkgAcquire::MethodConfig *Cnf) | |
2763 | { | |
2764 | Item::Failed(Message,Cnf); | |
2765 | ||
2766 | // This is the retry counter | |
2767 | if (Retries != 0 && | |
2768 | Cnf->LocalOnly == false && | |
2769 | StringToBool(LookupTag(Message,"Transient-Failure"),false) == true) | |
2770 | { | |
2771 | --Retries; | |
2772 | QueueURI(Desc); | |
2773 | Status = StatIdle; | |
2774 | return; | |
2775 | } | |
2776 | ||
2777 | } | |
2778 | /*}}}*/ | |
2779 | // AcqFile::Custom600Headers - Insert custom request headers /*{{{*/ | |
2780 | // --------------------------------------------------------------------- | |
2781 | /* The only header we use is the Index-File flag. */ | |
2782 | #if APT_PKG_ABI >= 413 | |
2783 | string pkgAcqFile::Custom600Headers() const | |
2784 | #else | |
2785 | string pkgAcqFile::Custom600Headers() | |
2786 | #endif | |
2787 | { | |
2788 | if (IsIndexFile) | |
2789 | return "\nIndex-File: true"; | |
2790 | return ""; | |
2791 | } | |
2792 | /*}}}*/ |