// Source: apt.git — methods/basehttp.cc
// (from commit: rename ServerMethod to BaseHttpMethod)
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 /* ######################################################################
4
5 HTTP and HTTPS share a lot of common code and these classes are
6 exactly the dumping ground for this common code
7
8 ##################################################################### */
9 /*}}}*/
10 // Include Files /*{{{*/
#include <config.h>

#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>

#include <ctype.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <algorithm>
#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <utility>
#include <vector>

#include "basehttp.h"

#include <apti18n.h>
35 /*}}}*/
36 using namespace std;
37
// Bookkeeping for the SIGTERM/SIGINT handler (SigTerm below): the file
// currently being written, its descriptor (-1 when none is open) and
// the modification time to stamp onto it before exiting.
string BaseHttpMethod::FailFile;
int BaseHttpMethod::FailFd = -1;
time_t BaseHttpMethod::FailTime = 0;
41
// ServerState::RunHeaders - Get the headers before the data		/*{{{*/
// ---------------------------------------------------------------------
/* Feeds the response header block line by line into Req.HeaderLine().
   Returns RUN_HEADERS_OK if a usable (non-100) response was parsed,
   RUN_HEADERS_IO_ERROR if the connection died while reading and
   RUN_HEADERS_PARSE_ERROR if a header line could not be parsed. */
ServerState::RunHeadersResult ServerState::RunHeaders(RequestState &Req,
                                                      const std::string &Uri)
{
   Owner->Status(_("Waiting for headers"));
   // Note: 'continue' jumps to the loop condition and therefore attempts
   // to pull the next response off the wire via LoadNextResponse().
   do
   {
      string Data;
      if (ReadHeaderLines(Data) == false)
	 continue;

      if (Owner->Debug == true)
	 clog << "Answer for: " << Uri << endl << Data;

      // Split the block into lines (terminated by CR, LF or CRLF) and
      // hand each one to the header parser.
      for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
      {
	 string::const_iterator J = I;
	 for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
	 if (Req.HeaderLine(string(I,J)) == false)
	    return RUN_HEADERS_PARSE_ERROR;
	 I = J;
      }

      // 100 Continue is a Nop...
      if (Req.Result == 100)
	 continue;

      // Tidy up the connection persistence state.
      if (Req.Encoding == RequestState::Closes && Req.HaveContent == true)
	 Persistent = false;

      return RUN_HEADERS_OK;
   }
   while (LoadNextResponse(false, Req) == true);

   return RUN_HEADERS_IO_ERROR;
}
									/*}}}*/
// RequestState::HeaderLine - Process a single response header line
/* Parses either the Status-Line or one "Name: value" field and records
   the result in this request/server state. Returns false (with a
   pending _error) only on malformed lines; unknown fields are ignored. */
bool RequestState::HeaderLine(string const &Line)			/*{{{*/
{
   // The empty line terminating the header block carries no data.
   if (Line.empty() == true)
      return true;

   // Status-Line? ("HTTP/1.1 200 OK" — or "HTTP 200 OK" from old servers)
   if (Line.size() > 4 && stringcasecmp(Line.data(), Line.data()+4, "HTTP") == 0)
   {
      // Evil servers return no version
      if (Line[4] == '/')
      {
	 int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code);
	 if (elements == 3)
	 {
	    // Reason-Phrase missing: tolerate it, just record an empty one
	    Code[0] = '\0';
	    if (Owner != NULL && Owner->Debug == true)
	       clog << "HTTP server doesn't give Reason-Phrase for " << std::to_string(Result) << std::endl;
	 }
	 else if (elements != 4)
	    return _error->Error(_("The HTTP server sent an invalid reply header"));
      }
      else
      {
	 // No version field at all: treat the reply as HTTP/0.9
	 Major = 0;
	 Minor = 9;
	 if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2)
	    return _error->Error(_("The HTTP server sent an invalid reply header"));
      }

      /* Check the HTTP response header to get the default persistence
         state: <1.0 never persists, 1.0 defaults to close, 1.1 and
         later default to keep-alive (and may be pipelined if allowed). */
      if (Major < 1)
	 Server->Persistent = false;
      else
      {
	 if (Major == 1 && Minor == 0)
	 {
	    Server->Persistent = false;
	 }
	 else
	 {
	    Server->Persistent = true;
	    if (Server->PipelineAllowed)
	       Server->Pipeline = true;
	 }
      }

      return true;
   }

   // Blah, some servers use "connection:closes", evil.
   // and some even send empty header fields…
   string::size_type Pos = Line.find(':');
   if (Pos == string::npos)
      return _error->Error(_("Bad header line"));
   ++Pos;

   // Parse off any trailing spaces between the : and the next word.
   string::size_type Pos2 = Pos;
   while (Pos2 < Line.length() && isspace_ascii(Line[Pos2]) != 0)
      Pos2++;

   // Tag keeps the trailing colon so the comparisons below can match
   // "Name:" exactly; Val is the field value with leading space stripped.
   string const Tag(Line,0,Pos);
   string const Val(Line,Pos2);

   if (stringcasecmp(Tag,"Content-Length:") == 0)
   {
      if (Encoding == Closes)
	 Encoding = Stream;
      HaveContent = true;

      // A length on an error/redirect response only tells us how much
      // junk to skip, not how big the requested file is.
      unsigned long long * DownloadSizePtr = &DownloadSize;
      if (Result == 416 || (Result >= 300 && Result < 400))
	 DownloadSizePtr = &JunkSize;

      // strtoull saturates at ULLONG_MAX on overflow, which we reject
      *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10);
      if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
	 return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
      else if (*DownloadSizePtr == 0)
	 HaveContent = false;

      // On partial content (206) the Content-Length less than the real
      // size, so do not set it here but leave that to the Content-Range
      // header instead
      if(Result != 206 && TotalFileSize == 0)
	 TotalFileSize = DownloadSize;

      return true;
   }

   if (stringcasecmp(Tag,"Content-Type:") == 0)
   {
      HaveContent = true;
      return true;
   }

   if (stringcasecmp(Tag,"Content-Range:") == 0)
   {
      HaveContent = true;

      // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
      if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
	 ; // we got the expected filesize which is all we wanted
      else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
	 return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
      if ((unsigned long long)StartPos > TotalFileSize)
	 return _error->Error(_("This HTTP server has broken range support"));

      // figure out what we will download
      DownloadSize = TotalFileSize - StartPos;
      return true;
   }

   if (stringcasecmp(Tag,"Transfer-Encoding:") == 0)
   {
      HaveContent = true;
      if (stringcasecmp(Val,"chunked") == 0)
	 Encoding = Chunked;
      return true;
   }

   if (stringcasecmp(Tag,"Connection:") == 0)
   {
      if (stringcasecmp(Val,"close") == 0)
      {
	 Server->Persistent = false;
	 Server->Pipeline = false;
	 /* Some servers send error pages (as they are dynamically generated)
	    for simplicity via a connection close instead of e.g. chunked,
	    so assuming an always closing server only if we get a file + close */
	 if (Result >= 200 && Result < 300)
	    Server->PipelineAllowed = false;
      }
      else if (stringcasecmp(Val,"keep-alive") == 0)
	 Server->Persistent = true;
      return true;
   }

   if (stringcasecmp(Tag,"Last-Modified:") == 0)
   {
      if (RFC1123StrToTime(Val.c_str(), Date) == false)
	 return _error->Error(_("Unknown date format"));
      return true;
   }

   if (stringcasecmp(Tag,"Location:") == 0)
   {
      Location = Val;
      return true;
   }

   if (stringcasecmp(Tag, "Accept-Ranges:") == 0)
   {
      // Only trust ranges if "bytes" appears as a complete token in the
      // (comma separated, space stripped) list the server sent.
      std::string ranges = ',' + Val + ',';
      ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
      if (ranges.find(",bytes,") == std::string::npos)
	 Server->RangesAllowed = false;
      return true;
   }

   // Unknown header fields are silently ignored.
   return true;
}
									/*}}}*/
// ServerState::ServerState - Constructor				/*{{{*/
// ---------------------------------------------------------------------
/* Binds this state to the given server and owning method (TimeOut
   defaults to 120 seconds) and initializes the per-connection flags
   via Reset(). */
ServerState::ServerState(URI Srv, BaseHttpMethod *Owner) :
   ServerName(Srv), TimeOut(120), Owner(Owner)
{
   Reset();
}
									/*}}}*/
252 bool RequestState::AddPartialFileToHashes(FileFd &File) /*{{{*/
253 {
254 File.Truncate(StartPos);
255 return Server->GetHashes()->AddFD(File, StartPos);
256 }
257 /*}}}*/
258 void ServerState::Reset() /*{{{*/
259 {
260 Persistent = false;
261 Pipeline = false;
262 PipelineAllowed = true;
263 RangesAllowed = true;
264 }
265 /*}}}*/
266
// BaseHttpMethod::DealWithHeaders - Handle the retrieved header data	/*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
   to do. Returns DealWithHeadersResult (see http.h for details).
*/
BaseHttpMethod::DealWithHeadersResult
BaseHttpMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)
{
   // Not Modified: our cached copy is still valid, drop the destination
   // file created for this transfer and report the IMS hit.
   if (Req.Result == 304)
   {
      RemoveFile("server", Queue->DestFile);
      Res.IMSHit = true;
      Res.LastModified = Queue->LastModified;
      Res.Size = 0;
      return IMS_HIT;
   }

   /* Redirect
    *
    * Note that it is only OK for us to treat all redirection the same
    * because we *always* use GET, not other HTTP methods. There are
    * three redirection codes for which it is not appropriate that we
    * redirect. Pass on those codes so the error handling kicks in.
    */
   if (AllowRedirect
       && (Req.Result > 300 && Req.Result < 400)
       && (Req.Result != 300 // Multiple Choices
	   && Req.Result != 304 // Not Modified
	   && Req.Result != 306)) // (Not part of HTTP/1.1, reserved)
   {
      if (Req.Location.empty() == true)
	 ;
      else if (Req.Location[0] == '/' && Queue->Uri.empty() == false)
      {
	 // relative redirect: resolve against the site of the request URI
	 URI Uri = Queue->Uri;
	 if (Uri.Host.empty() == false)
	    NextURI = URI::SiteOnly(Uri);
	 else
	    NextURI.clear();
	 NextURI.append(DeQuoteString(Req.Location));
	 if (Queue->Uri == NextURI)
	 {
	    SetFailReason("RedirectionLoop");
	    _error->Error("Redirection loop encountered");
	    if (Req.HaveContent == true)
	       return ERROR_WITH_CONTENT_PAGE;
	    return ERROR_UNRECOVERABLE;
	 }
	 return TRY_AGAIN_OR_REDIRECT;
      }
      else
      {
	 NextURI = DeQuoteString(Req.Location);
	 URI tmpURI = NextURI;
	 // never let the server pick a transport implementation for us
	 // by redirecting to a compound scheme like "foo+https://"
	 if (tmpURI.Access.find('+') != std::string::npos)
	 {
	    _error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
	    if (Req.HaveContent == true)
	       return ERROR_WITH_CONTENT_PAGE;
	    return ERROR_UNRECOVERABLE;
	 }
	 URI Uri = Queue->Uri;
	 // if we run as a compound method ("foo+http"), re-apply our
	 // "foo+" prefix to the redirect target where it belongs
	 if (Binary.find('+') != std::string::npos)
	 {
	    auto base = Binary.substr(0, Binary.find('+'));
	    if (base != tmpURI.Access)
	    {
	       tmpURI.Access = base + '+' + tmpURI.Access;
	       if (tmpURI.Access == Binary)
	       {
		  std::string tmpAccess = Uri.Access;
		  std::swap(tmpURI.Access, Uri.Access);
		  NextURI = tmpURI;
		  std::swap(tmpURI.Access, Uri.Access);
	       }
	       else
		  NextURI = tmpURI;
	    }
	 }
	 if (Queue->Uri == NextURI)
	 {
	    SetFailReason("RedirectionLoop");
	    _error->Error("Redirection loop encountered");
	    if (Req.HaveContent == true)
	       return ERROR_WITH_CONTENT_PAGE;
	    return ERROR_UNRECOVERABLE;
	 }
	 Uri.Access = Binary;
	 // same protocol redirects are okay
	 if (tmpURI.Access == Uri.Access)
	    return TRY_AGAIN_OR_REDIRECT;
	 // as well as http to https
	 else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https")
	    return TRY_AGAIN_OR_REDIRECT;
	 else
	 {
	    // and upgrades of a compound method to its https flavour
	    auto const tmpplus = tmpURI.Access.find('+');
	    if (tmpplus != std::string::npos && tmpURI.Access.substr(tmpplus + 1) == "https")
	    {
	       auto const uriplus = Uri.Access.find('+');
	       if (uriplus == std::string::npos)
	       {
		  if (Uri.Access == tmpURI.Access.substr(0, tmpplus)) // foo -> foo+https
		     return TRY_AGAIN_OR_REDIRECT;
	       }
	       else if (Uri.Access.substr(uriplus + 1) == "http" &&
		     Uri.Access.substr(0, uriplus) == tmpURI.Access.substr(0, tmpplus)) // foo+http -> foo+https
		  return TRY_AGAIN_OR_REDIRECT;
	    }
	 }
	 _error->Error("Redirection from %s to '%s' is forbidden", Uri.Access.c_str(), NextURI.c_str());
      }
      /* else pass through for error message */
   }
   // retry after an invalid range response without partial data
   else if (Req.Result == 416)
   {
      struct stat SBuf;
      if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
      {
	 // check whether the file on disk is in fact already complete
	 bool partialHit = false;
	 if (Queue->ExpectedHashes.usable() == true)
	 {
	    Hashes resultHashes(Queue->ExpectedHashes);
	    FileFd file(Queue->DestFile, FileFd::ReadOnly);
	    Req.TotalFileSize = file.FileSize();
	    Req.Date = file.ModificationTime();
	    resultHashes.AddFD(file);
	    HashStringList const hashList = resultHashes.GetHashStringList();
	    partialHit = (Queue->ExpectedHashes == hashList);
	 }
	 else if ((unsigned long long)SBuf.st_size == Req.TotalFileSize)
	    partialHit = true;
	 if (partialHit == true)
	 {
	    // the file is completely downloaded, but was not moved
	    if (Req.HaveContent == true)
	    {
	       // nuke the sent error page
	       Server->RunDataToDevNull(Req);
	       Req.HaveContent = false;
	    }
	    // pretend a successful download so the caller finalizes it
	    Req.StartPos = Req.TotalFileSize;
	    Req.Result = 200;
	 }
	 else if (RemoveFile("server", Queue->DestFile))
	 {
	    // the partial data was bogus: retry the request from scratch
	    NextURI = Queue->Uri;
	    return TRY_AGAIN_OR_REDIRECT;
	 }
      }
   }

   /* We have a reply we don't handle. This should indicate a perm server
      failure */
   if (Req.Result < 200 || Req.Result >= 300)
   {
      if (_error->PendingError() == false)
      {
	 std::string err;
	 strprintf(err, "HttpError%u", Req.Result);
	 SetFailReason(err);
	 _error->Error("%u %s", Req.Result, Req.Code);
      }
      if (Req.HaveContent == true)
	 return ERROR_WITH_CONTENT_PAGE;
      return ERROR_UNRECOVERABLE;
   }

   // This is some sort of 2xx 'data follows' reply
   Res.LastModified = Req.Date;
   Res.Size = Req.TotalFileSize;
   return FILE_IS_OPEN;
}
									/*}}}*/
443 // BaseHttpMethod::SigTerm - Handle a fatal signal /*{{{*/
444 // ---------------------------------------------------------------------
445 /* This closes and timestamps the open file. This is necessary to get
446 resume behavoir on user abort */
447 void BaseHttpMethod::SigTerm(int)
448 {
449 if (FailFd == -1)
450 _exit(100);
451
452 struct timeval times[2];
453 times[0].tv_sec = FailTime;
454 times[1].tv_sec = FailTime;
455 times[0].tv_usec = times[1].tv_usec = 0;
456 utimes(FailFile.c_str(), times);
457 close(FailFd);
458
459 _exit(100);
460 }
461 /*}}}*/
// BaseHttpMethod::Fetch - Fetch an item					/*{{{*/
// ---------------------------------------------------------------------
/* This adds an item to the pipeline. We keep the pipeline at a fixed
   depth: requests between Queue and QueueBack are in flight, QueueBack
   onwards still need to be sent. */
bool BaseHttpMethod::Fetch(FetchItem *)
{
   if (Server == nullptr || QueueBack == nullptr)
      return true;

   // If pipelining is disabled, we only queue 1 request
   auto const AllowedDepth = Server->Pipeline ? PipelineDepth : 0;
   // how deep is our pipeline currently?
   decltype(PipelineDepth) CurrentDepth = 0;
   for (FetchItem const *I = Queue; I != QueueBack; I = I->Next)
      ++CurrentDepth;
   if (CurrentDepth > AllowedDepth)
      return true;

   do {
      // Make sure we stick with the same server
      if (Server->Comp(QueueBack->Uri) == false)
	 break;

      bool const UsableHashes = QueueBack->ExpectedHashes.usable();
      // if we have no hashes, do at most one such request
      // as we can't fixup pipeling misbehaviors otherwise
      if (CurrentDepth != 0 && UsableHashes == false)
	 break;

      // If a file of the expected size exists on disk already, check if
      // it is in fact the wanted file: then no request is needed at all.
      if (UsableHashes && FileExists(QueueBack->DestFile))
      {
	 FileFd partial(QueueBack->DestFile, FileFd::ReadOnly);
	 Hashes wehave(QueueBack->ExpectedHashes);
	 if (QueueBack->ExpectedHashes.FileSize() == partial.FileSize())
	 {
	    if (wehave.AddFD(partial) &&
		  wehave.GetHashStringList() == QueueBack->ExpectedHashes)
	    {
	       FetchResult Res;
	       Res.Filename = QueueBack->DestFile;
	       Res.ResumePoint = QueueBack->ExpectedHashes.FileSize();
	       URIStart(Res);
	       // move item to the start of the queue as URIDone will
	       // always dequeued the first item in the queue
	       if (Queue != QueueBack)
	       {
		  FetchItem *Prev = Queue;
		  for (; Prev->Next != QueueBack; Prev = Prev->Next)
		     /* look for the previous queue item */;
		  Prev->Next = QueueBack->Next;
		  QueueBack->Next = Queue;
		  Queue = QueueBack;
		  QueueBack = Prev->Next;
	       }
	       Res.TakeHashes(wehave);
	       URIDone(Res);
	       continue;
	    }
	    else
	       RemoveFile("Fetch-Partial", QueueBack->DestFile);
	 }
      }
      // send the request and advance the not-yet-sent marker
      auto const Tmp = QueueBack;
      QueueBack = QueueBack->Next;
      SendReq(Tmp);
      ++CurrentDepth;
   } while (CurrentDepth <= AllowedDepth && QueueBack != nullptr);

   return true;
}
									/*}}}*/
// BaseHttpMethod::Loop - Main loop					/*{{{*/
// ---------------------------------------------------------------------
/* Event loop of the method: reads requests from APT on stdin, keeps the
   request pipeline filled, parses the server response for the front of
   the queue and reports the result back. Returns 0 on a clean shutdown
   and 100 on failure. */
int BaseHttpMethod::Loop()
{
   signal(SIGTERM,SigTerm);
   signal(SIGINT,SigTerm);

   Server = 0;

   int FailCounter = 0;
   while (1)
   {
      // We have no commands, wait for some to arrive
      if (Queue == 0)
      {
	 if (WaitFd(STDIN_FILENO) == false)
	    return 0;
      }

      /* Run messages, we can accept 0 (no message) if we didn't
         do a WaitFd above.. Otherwise the FD is closed. */
      int Result = Run(true);
      if (Result != -1 && (Result != 0 || Queue == 0))
      {
	 if(FailReason.empty() == false ||
	    ConfigFindB("DependOnSTDIN", true) == true)
	    return 100;
	 else
	    return 0;
      }

      if (Queue == 0)
	 continue;

      // Set up fresh server state (and re-read config) when the target
      // server changes.
      if (Server == 0 || Server->Comp(Queue->Uri) == false)
      {
	 Server = CreateServerState(Queue->Uri);
	 setPostfixForMethodNames(::URI(Queue->Uri).Host.c_str());
	 AllowRedirect = ConfigFindB("AllowRedirect", true);
	 PipelineDepth = ConfigFindI("Pipeline-Depth", 10);
	 Debug = DebugEnabled();
      }

      /* If the server has explicitly said this is the last connection
         then we pre-emptively shut down the pipeline and tear down
         the connection. This will speed up HTTP/1.0 servers a tad
         since we don't have to wait for the close sequence to
         complete */
      if (Server->Persistent == false)
	 Server->Close();

      // Reset the pipeline
      if (Server->IsOpen() == false)
	 QueueBack = Queue;

      // Connect to the host
      if (Server->Open() == false)
      {
	 Fail(true);
	 Server = nullptr;
	 continue;
      }

      // Fill the pipeline.
      Fetch(0);

      RequestState Req(this, Server.get());
      // Fetch the next URL header data from the server.
      switch (Server->RunHeaders(Req, Queue->Uri))
      {
	 case ServerState::RUN_HEADERS_OK:
	    break;

	 // The header data is bad
	 case ServerState::RUN_HEADERS_PARSE_ERROR:
	 {
	    _error->Error(_("Bad header data"));
	    Fail(true);
	    Server->Close();
	    RotateDNS();
	    continue;
	 }

	 // The server closed a connection during the header get..
	 default:
	 case ServerState::RUN_HEADERS_IO_ERROR:
	 {
	    FailCounter++;
	    _error->Discard();
	    Server->Close();
	    Server->Pipeline = false;
	    Server->PipelineAllowed = false;

	    // give up only after the second consecutive failure
	    if (FailCounter >= 2)
	    {
	       Fail(_("Connection failed"),true);
	       FailCounter = 0;
	    }

	    RotateDNS();
	    continue;
	 }
      };

      // Decide what to do.
      FetchResult Res;
      Res.Filename = Queue->DestFile;
      switch (DealWithHeaders(Res, Req))
      {
	 // Ok, the file is Open
	 case FILE_IS_OPEN:
	 {
	    URIStart(Res);

	    // Run the data
	    bool Result = true;

	    // ensure we don't fetch too much
	    // we could do "Server->MaximumSize = Queue->MaximumSize" here
	    // but that would break the clever pipeline messup detection
	    // so instead we use the size of the biggest item in the queue
	    Req.MaximumSize = FindMaximumObjectSizeInQueue();

	    if (Req.HaveContent)
	       Result = Server->RunData(Req);

	    /* If the server is sending back sizeless responses then fill in
	       the size now */
	    if (Res.Size == 0)
	       Res.Size = Req.File.Size();

	    // Close the file, destroy the FD object and timestamp it
	    FailFd = -1;
	    Req.File.Close();

	    // Timestamp
	    struct timeval times[2];
	    times[0].tv_sec = times[1].tv_sec = Req.Date;
	    times[0].tv_usec = times[1].tv_usec = 0;
	    utimes(Queue->DestFile.c_str(), times);

	    // Send status to APT
	    if (Result == true)
	    {
	       Hashes * const resultHashes = Server->GetHashes();
	       HashStringList const hashList = resultHashes->GetHashStringList();
	       if (PipelineDepth != 0 && Queue->ExpectedHashes.usable() == true && Queue->ExpectedHashes != hashList)
	       {
		  // we did not get the expected hash… mhhh:
		  // could it be that server/proxy messed up pipelining?
		  FetchItem * BeforeI = Queue;
		  for (FetchItem *I = Queue->Next; I != 0 && I != QueueBack; I = I->Next)
		  {
		     if (I->ExpectedHashes.usable() == true && I->ExpectedHashes == hashList)
		     {
			// yes, he did! Disable pipelining and rewrite queue
			if (Server->Pipeline == true)
			{
			   Warning(_("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::Pipeline-Depth");
			   Server->Pipeline = false;
			   Server->PipelineAllowed = false;
			   // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
			}
			// attribute the data to the item it really belongs to
			Rename(Res.Filename, I->DestFile);
			Res.Filename = I->DestFile;
			BeforeI->Next = I->Next;
			I->Next = Queue;
			Queue = I;
			break;
		     }
		     BeforeI = I;
		  }
	       }
	       Res.TakeHashes(*resultHashes);
	       URIDone(Res);
	    }
	    else
	    {
	       // the transfer failed: retry on a fresh connection if the
	       // server dropped it, otherwise report the failure to APT
	       if (Server->IsOpen() == false)
	       {
		  FailCounter++;
		  _error->Discard();
		  Server->Close();

		  if (FailCounter >= 2)
		  {
		     Fail(_("Connection failed"),true);
		     FailCounter = 0;
		  }

		  QueueBack = Queue;
	       }
	       else
	       {
		  Server->Close();
		  Fail(true);
	       }
	    }
	    break;
	 }

	 // IMS hit
	 case IMS_HIT:
	 {
	    URIDone(Res);
	    break;
	 }

	 // Hard server error, not found or something
	 case ERROR_UNRECOVERABLE:
	 {
	    Fail();
	    break;
	 }

	 // Hard internal error, kill the connection and fail
	 case ERROR_NOT_FROM_SERVER:
	 {
	    Fail();
	    RotateDNS();
	    Server->Close();
	    break;
	 }

	 // We need to flush the data, the header is like a 404 w/ error text
	 case ERROR_WITH_CONTENT_PAGE:
	 {
	    Server->RunDataToDevNull(Req);
	    Fail();
	    break;
	 }

	 // Try again with a new URL
	 case TRY_AGAIN_OR_REDIRECT:
	 {
	    // Clear rest of response if there is content
	    if (Req.HaveContent)
	       Server->RunDataToDevNull(Req);
	    Redirect(NextURI);
	    break;
	 }

	 default:
	    Fail(_("Internal error"));
	    break;
      }

      FailCounter = 0;
   }

   return 0;
}
									/*}}}*/
786 unsigned long long BaseHttpMethod::FindMaximumObjectSizeInQueue() const /*{{{*/
787 {
788 unsigned long long MaxSizeInQueue = 0;
789 for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
790 MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
791 return MaxSizeInQueue;
792 }
793 /*}}}*/
// BaseHttpMethod::BaseHttpMethod - Constructor
/* Forwards name/version/flags to the generic aptMethod base. The
   pipeline depth and redirect handling defaults set here are replaced
   with configured values in Loop() when a server is first contacted. */
BaseHttpMethod::BaseHttpMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
   aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), PipelineDepth(10),
   AllowRedirect(false), Debug(false)
{
}
									/*}}}*/
800 bool BaseHttpMethod::Configuration(std::string Message) /*{{{*/
801 {
802 if (aptMethod::Configuration(Message) == false)
803 return false;
804
805 _config->CndSet("Acquire::tor::Proxy",
806 "socks5h://apt-transport-tor@localhost:9050");
807 return true;
808 }
809 /*}}}*/
810 bool BaseHttpMethod::AddProxyAuth(URI &Proxy, URI const &Server) const /*{{{*/
811 {
812 if (std::find(methodNames.begin(), methodNames.end(), "tor") != methodNames.end() &&
813 Proxy.User == "apt-transport-tor" && Proxy.Password.empty())
814 {
815 std::string pass = Server.Host;
816 pass.erase(std::remove_if(pass.begin(), pass.end(), [](char const c) { return std::isalnum(c) == 0; }), pass.end());
817 if (pass.length() > 255)
818 Proxy.Password = pass.substr(0, 255);
819 else
820 Proxy.Password = std::move(pass);
821 }
822 // FIXME: should we support auth.conf for proxies?
823 return true;
824 }
825 /*}}}*/