// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################

   HTTP and HTTPS share a lot of common code and these classes are
   exactly the dumping ground for this common code

   ##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>

#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>

#include <ctype.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <algorithm> // std::find, std::max, std::remove, std::remove_if
#include <cctype>    // std::isalnum
#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <vector>

#include "server.h"

#include <apti18n.h>
/*}}}*/
using namespace std;

string ServerMethod::FailFile;
int ServerMethod::FailFd = -1;
time_t ServerMethod::FailTime = 0;
// ServerState::RunHeaders - Get the headers before the data /*{{{*/
// ---------------------------------------------------------------------
/* Returns RUN_HEADERS_OK if things are OK, RUN_HEADERS_IO_ERROR if an IO
   error occurred and RUN_HEADERS_PARSE_ERROR if a header parse error
   occurred */
ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
                                                      const std::string &Uri)
{
   Reset(false);
   Owner->Status(_("Waiting for headers"));

   do
   {
      string Data;
      if (ReadHeaderLines(Data) == false)
         continue;

      if (Owner->Debug == true)
         clog << "Answer for: " << Uri << endl << Data;

      for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
      {
         string::const_iterator J = I;
         for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
         if (HeaderLine(string(I,J)) == false)
            return RUN_HEADERS_PARSE_ERROR;
         I = J;
      }

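      // An interim response is just a status line and an empty header
      // block, e.g. "HTTP/1.1 100 Continue\r\n\r\n"; the final response
      // with the real headers follows on the same connection.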
      // 100 Continue is a Nop...
      if (Result == 100)
         continue;

      // Tidy up the connection persistence state.
      if (Encoding == Closes && HaveContent == true)
         Persistent = false;

      return RUN_HEADERS_OK;
   }
   while (LoadNextResponse(false, File) == true);

   return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
// ServerState::HeaderLine - Process a header line /*{{{*/
// ---------------------------------------------------------------------
/* Parses a single status or header field line and records the result in
   the server state. Header fields not handled below are silently ignored. */
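/* Typical input, one call per line:
     HTTP/1.1 206 Partial Content
     Content-Length: 1024
     Content-Range: bytes 1024-2047/2048
     Connection: keep-alive
   (examples only, showing the fields this parser cares about) */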
bool ServerState::HeaderLine(string Line)
{
   if (Line.empty() == true)
      return true;

   if (Line.size() > 4 && stringcasecmp(Line.data(), Line.data()+4, "HTTP") == 0)
   {
      // Evil servers return no version
      if (Line[4] == '/')
      {
         int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code);
         if (elements == 3)
         {
            Code[0] = '\0';
            if (Owner != NULL && Owner->Debug == true)
               clog << "HTTP server doesn't give Reason-Phrase for " << std::to_string(Result) << std::endl;
         }
         else if (elements != 4)
            return _error->Error(_("The HTTP server sent an invalid reply header"));
      }
      else
      {
         Major = 0;
         Minor = 9;
         if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2)
            return _error->Error(_("The HTTP server sent an invalid reply header"));
      }

      /* Check the HTTP response header to get the default persistence
         state. */
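      // HTTP/0.9 and HTTP/1.0 close the connection after each response
      // unless told otherwise, while HTTP/1.1 and later keep it open by
      // default, which is what allows pipelining further requests.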
      if (Major < 1)
         Persistent = false;
      else
      {
         if (Major == 1 && Minor == 0)
         {
            Persistent = false;
         }
         else
         {
            Persistent = true;
            if (PipelineAllowed)
               Pipeline = true;
         }
      }

      return true;
   }

   // Blah, some servers use "connection:closes", evil.
   // and some even send empty header fields…
   string::size_type Pos = Line.find(':');
   if (Pos == string::npos)
      return _error->Error(_("Bad header line"));
   ++Pos;

   // Parse off any trailing spaces between the : and the next word.
   string::size_type Pos2 = Pos;
   while (Pos2 < Line.length() && isspace_ascii(Line[Pos2]) != 0)
      Pos2++;

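   // Split into the field name (including the colon) and its value, e.g.
   // "Content-Length: 1234" gives Tag "Content-Length:" and Val "1234".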
   string const Tag(Line,0,Pos);
   string const Val(Line,Pos2);

   if (stringcasecmp(Tag,"Content-Length:") == 0)
   {
      if (Encoding == Closes)
         Encoding = Stream;
      HaveContent = true;

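      // For a 416 or a redirect the body is not the file we asked for,
      // so its length is recorded as JunkSize rather than DownloadSize
      // (presumably so it is not counted as payload we want to keep).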
      unsigned long long * DownloadSizePtr = &DownloadSize;
      if (Result == 416 || (Result >= 300 && Result < 400))
         DownloadSizePtr = &JunkSize;

      *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10);
      if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
         return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
      else if (*DownloadSizePtr == 0)
         HaveContent = false;

      // On partial content (206) the Content-Length is less than the real
      // size, so do not set the total size here but leave that to the
      // Content-Range header instead
      if(Result != 206 && TotalFileSize == 0)
         TotalFileSize = DownloadSize;

      return true;
   }

   if (stringcasecmp(Tag,"Content-Type:") == 0)
   {
      HaveContent = true;
      return true;
   }

   if (stringcasecmp(Tag,"Content-Range:") == 0)
   {
      HaveContent = true;

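      // A 206 reply carries e.g. "Content-Range: bytes 1024-2047/2048",
      // from which we take the start offset and the total file size;
      // a 416 reply is expected to carry only the total, "bytes */2048".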
      // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
      if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
         ; // we got the expected filesize which is all we wanted
      else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
         return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
      if ((unsigned long long)StartPos > TotalFileSize)
         return _error->Error(_("This HTTP server has broken range support"));

      // figure out what we will download
      DownloadSize = TotalFileSize - StartPos;
      return true;
   }

200 | if (stringcasecmp(Tag,"Transfer-Encoding:") == 0) | |
201 | { | |
202 | HaveContent = true; | |
203 | if (stringcasecmp(Val,"chunked") == 0) | |
204 | Encoding = Chunked; | |
205 | return true; | |
206 | } | |
207 | ||
208 | if (stringcasecmp(Tag,"Connection:") == 0) | |
209 | { | |
210 | if (stringcasecmp(Val,"close") == 0) | |
211 | { | |
212 | Persistent = false; | |
213 | Pipeline = false; | |
         /* Some servers send error pages (as they are dynamically generated)
            via a connection close instead of e.g. chunked encoding, so only
            treat the server as always-closing if we got an actual file
            together with the close. */
         if (Result >= 200 && Result < 300)
            PipelineAllowed = false;
      }
      else if (stringcasecmp(Val,"keep-alive") == 0)
         Persistent = true;
      return true;
   }

   if (stringcasecmp(Tag,"Last-Modified:") == 0)
   {
      if (RFC1123StrToTime(Val.c_str(), Date) == false)
         return _error->Error(_("Unknown date format"));
      return true;
   }

   if (stringcasecmp(Tag,"Location:") == 0)
   {
      Location = Val;
      return true;
   }

   if (stringcasecmp(Tag, "Accept-Ranges:") == 0)
   {
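      // Normalise the token list so a simple substring test works:
      // "bytes, none" becomes ",bytes,none," and we then look for ",bytes,".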
      std::string ranges = ',' + Val + ',';
      ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
      if (ranges.find(",bytes,") == std::string::npos)
         RangesAllowed = false;
      return true;
   }

   return true;
}
/*}}}*/
// ServerState::ServerState - Constructor /*{{{*/
ServerState::ServerState(URI Srv, ServerMethod *Owner) :
   DownloadSize(0), ServerName(Srv), TimeOut(120), Owner(Owner)
{
   Reset();
}
/*}}}*/
bool ServerState::AddPartialFileToHashes(FileFd &File) /*{{{*/
{
   File.Truncate(StartPos);
   return GetHashes()->AddFD(File, StartPos);
}
/*}}}*/
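// Reset the per-response state; with Everything == true the per-connection
// state (persistence, pipelining and range support) is cleared as well,
// while RunHeaders() calls Reset(false) to keep it across responses on the
// same connection.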
void ServerState::Reset(bool const Everything) /*{{{*/
{
   Major = 0; Minor = 0; Result = 0; Code[0] = '\0';
   TotalFileSize = 0; JunkSize = 0; StartPos = 0;
   Encoding = Closes; time(&Date); HaveContent = false;
   State = Header; MaximumSize = 0;
   if (Everything)
   {
      Persistent = false; Pipeline = false; PipelineAllowed = true;
      RangesAllowed = true;
   }
}
/*}}}*/

// ServerMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
   to do. Returns DealWithHeadersResult (see http.h for details).
 */
ServerMethod::DealWithHeadersResult
ServerMethod::DealWithHeaders(FetchResult &Res)
{
   // Not Modified
   if (Server->Result == 304)
   {
      RemoveFile("server", Queue->DestFile);
      Res.IMSHit = true;
      Res.LastModified = Queue->LastModified;
      Res.Size = 0;
      return IMS_HIT;
   }

   /* Redirect
    *
    * Note that it is only OK for us to treat all redirection the same
    * because we *always* use GET, not other HTTP methods. There are
    * three redirection codes for which it is not appropriate that we
    * redirect. Pass on those codes so the error handling kicks in.
    */
   if (AllowRedirect
       && (Server->Result > 300 && Server->Result < 400)
       && (Server->Result != 300 // Multiple Choices
           && Server->Result != 304 // Not Modified
           && Server->Result != 306)) // (Not part of HTTP/1.1, reserved)
   {
      if (Server->Location.empty() == true)
         ;
      else if (Server->Location[0] == '/' && Queue->Uri.empty() == false)
      {
         URI Uri = Queue->Uri;
         if (Uri.Host.empty() == false)
            NextURI = URI::SiteOnly(Uri);
         else
            NextURI.clear();
         NextURI.append(DeQuoteString(Server->Location));
         if (Queue->Uri == NextURI)
         {
            SetFailReason("RedirectionLoop");
            _error->Error("Redirection loop encountered");
            if (Server->HaveContent == true)
               return ERROR_WITH_CONTENT_PAGE;
            return ERROR_UNRECOVERABLE;
         }
         return TRY_AGAIN_OR_REDIRECT;
      }
      else
      {
         NextURI = DeQuoteString(Server->Location);
         URI tmpURI = NextURI;
         if (tmpURI.Access.find('+') != std::string::npos)
         {
            _error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
            if (Server->HaveContent == true)
               return ERROR_WITH_CONTENT_PAGE;
            return ERROR_UNRECOVERABLE;
         }
         URI Uri = Queue->Uri;
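         /* If we run as a stacked method such as "tor+https", map the
            redirect target into the same stack where possible, e.g. an
            "http" target becomes "tor+http"; if the combined result would
            simply name ourselves again, the access of the original request
            is kept instead. */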
         if (Binary.find('+') != std::string::npos)
         {
            auto base = Binary.substr(0, Binary.find('+'));
            if (base != tmpURI.Access)
            {
               tmpURI.Access = base + '+' + tmpURI.Access;
               if (tmpURI.Access == Binary)
               {
                  std::string tmpAccess = Uri.Access;
                  std::swap(tmpURI.Access, Uri.Access);
                  NextURI = tmpURI;
                  std::swap(tmpURI.Access, Uri.Access);
               }
               else
                  NextURI = tmpURI;
            }
         }
         if (Queue->Uri == NextURI)
         {
            SetFailReason("RedirectionLoop");
            _error->Error("Redirection loop encountered");
            if (Server->HaveContent == true)
               return ERROR_WITH_CONTENT_PAGE;
            return ERROR_UNRECOVERABLE;
         }
         Uri.Access = Binary;
         // same protocol redirects are okay
         if (tmpURI.Access == Uri.Access)
            return TRY_AGAIN_OR_REDIRECT;
         // as well as http to https
         else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https")
            return TRY_AGAIN_OR_REDIRECT;
         else
         {
            auto const tmpplus = tmpURI.Access.find('+');
            if (tmpplus != std::string::npos && tmpURI.Access.substr(tmpplus + 1) == "https")
            {
               auto const uriplus = Uri.Access.find('+');
               if (uriplus == std::string::npos)
               {
                  if (Uri.Access == tmpURI.Access.substr(0, tmpplus)) // foo -> foo+https
                     return TRY_AGAIN_OR_REDIRECT;
               }
               else if (Uri.Access.substr(uriplus + 1) == "http" &&
                        Uri.Access.substr(0, uriplus) == tmpURI.Access.substr(0, tmpplus)) // foo+http -> foo+https
                  return TRY_AGAIN_OR_REDIRECT;
            }
         }
         _error->Error("Redirection from %s to '%s' is forbidden", Uri.Access.c_str(), NextURI.c_str());
      }
      /* else pass through for error message */
   }
   // retry after an invalid range response without partial data
   else if (Server->Result == 416)
   {
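      /* A 416 means our Range request started at or past the end of the
         file on the server. If the partial file we already have matches the
         expected hashes (or at least the advertised size), treat it as a
         completed download; otherwise drop the partial file and retry the
         request from scratch. */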
      struct stat SBuf;
      if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
      {
         bool partialHit = false;
         if (Queue->ExpectedHashes.usable() == true)
         {
            Hashes resultHashes(Queue->ExpectedHashes);
            FileFd file(Queue->DestFile, FileFd::ReadOnly);
            Server->TotalFileSize = file.FileSize();
            Server->Date = file.ModificationTime();
            resultHashes.AddFD(file);
            HashStringList const hashList = resultHashes.GetHashStringList();
            partialHit = (Queue->ExpectedHashes == hashList);
         }
         else if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
            partialHit = true;
         if (partialHit == true)
         {
            // the file is completely downloaded, but was not moved
            if (Server->HaveContent == true)
            {
               // nuke the sent error page
               Server->RunDataToDevNull();
               Server->HaveContent = false;
            }
            Server->StartPos = Server->TotalFileSize;
            Server->Result = 200;
         }
         else if (RemoveFile("server", Queue->DestFile))
         {
            NextURI = Queue->Uri;
            return TRY_AGAIN_OR_REDIRECT;
         }
      }
   }

   /* We have a reply we don't handle. This should indicate a permanent
      server failure */
   if (Server->Result < 200 || Server->Result >= 300)
   {
      if (_error->PendingError() == false)
      {
         std::string err;
         strprintf(err, "HttpError%u", Server->Result);
         SetFailReason(err);
         _error->Error("%u %s", Server->Result, Server->Code);
      }
      if (Server->HaveContent == true)
         return ERROR_WITH_CONTENT_PAGE;
      return ERROR_UNRECOVERABLE;
   }

   // This is some sort of 2xx 'data follows' reply
   Res.LastModified = Server->Date;
   Res.Size = Server->TotalFileSize;
   return FILE_IS_OPEN;
}
/*}}}*/
// ServerMethod::SigTerm - Handle a fatal signal /*{{{*/
// ---------------------------------------------------------------------
/* This closes and timestamps the open file. This is necessary to get
   resume behaviour on user abort */
void ServerMethod::SigTerm(int)
{
   if (FailFd == -1)
      _exit(100);

   struct timeval times[2];
   times[0].tv_sec = FailTime;
   times[1].tv_sec = FailTime;
   times[0].tv_usec = times[1].tv_usec = 0;
   utimes(FailFile.c_str(), times);
   close(FailFd);

   _exit(100);
}
/*}}}*/
// ServerMethod::Fetch - Fetch an item /*{{{*/
// ---------------------------------------------------------------------
/* This adds an item to the pipeline. We keep the pipeline at a fixed
   depth. */
bool ServerMethod::Fetch(FetchItem *)
{
   if (Server == nullptr || QueueBack == nullptr)
      return true;

   // If pipelining is disabled, we only queue 1 request
   auto const AllowedDepth = Server->Pipeline ? PipelineDepth : 0;
   // how deep is our pipeline currently?
   decltype(PipelineDepth) CurrentDepth = 0;
   for (FetchItem const *I = Queue; I != QueueBack; I = I->Next)
      ++CurrentDepth;
   if (CurrentDepth > AllowedDepth)
      return true;

   do {
      // Make sure we stick with the same server
      if (Server->Comp(QueueBack->Uri) == false)
         break;

      bool const UsableHashes = QueueBack->ExpectedHashes.usable();
      // if we have no hashes, do at most one such request
      // as we can't fix up pipelining misbehaviour otherwise
      if (CurrentDepth != 0 && UsableHashes == false)
         break;

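      // If a file with exactly the expected content is already sitting at
      // the destination (e.g. left over from an earlier aborted run), report
      // it as done right away instead of asking the server for it again.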
      if (UsableHashes && FileExists(QueueBack->DestFile))
      {
         FileFd partial(QueueBack->DestFile, FileFd::ReadOnly);
         Hashes wehave(QueueBack->ExpectedHashes);
         if (QueueBack->ExpectedHashes.FileSize() == partial.FileSize())
         {
            if (wehave.AddFD(partial) &&
                wehave.GetHashStringList() == QueueBack->ExpectedHashes)
            {
               FetchResult Res;
               Res.Filename = QueueBack->DestFile;
               Res.ResumePoint = QueueBack->ExpectedHashes.FileSize();
               URIStart(Res);
               // move item to the start of the queue as URIDone will
               // always dequeue the first item in the queue
               if (Queue != QueueBack)
               {
                  FetchItem *Prev = Queue;
                  for (; Prev->Next != QueueBack; Prev = Prev->Next)
                     /* look for the previous queue item */;
                  Prev->Next = QueueBack->Next;
                  QueueBack->Next = Queue;
                  Queue = QueueBack;
                  QueueBack = Prev->Next;
               }
               Res.TakeHashes(wehave);
               URIDone(Res);
               continue;
            }
            else
               RemoveFile("Fetch-Partial", QueueBack->DestFile);
         }
      }
      auto const Tmp = QueueBack;
      QueueBack = QueueBack->Next;
      SendReq(Tmp);
      ++CurrentDepth;
   } while (CurrentDepth <= AllowedDepth && QueueBack != nullptr);

   return true;
}
/*}}}*/
// ServerMethod::Loop - Main loop /*{{{*/
int ServerMethod::Loop()
{
   signal(SIGTERM,SigTerm);
   signal(SIGINT,SigTerm);

   Server = 0;

   int FailCounter = 0;
   while (1)
   {
      // We have no commands, wait for some to arrive
      if (Queue == 0)
      {
         if (WaitFd(STDIN_FILENO) == false)
            return 0;
      }

      /* Run messages, we can accept 0 (no message) if we didn't
         do a WaitFd above.. Otherwise the FD is closed. */
      int Result = Run(true);
      if (Result != -1 && (Result != 0 || Queue == 0))
      {
         if(FailReason.empty() == false ||
            ConfigFindB("DependOnSTDIN", true) == true)
            return 100;
         else
            return 0;
      }

      if (Queue == 0)
         continue;

      // Connect to the server
      if (Server == 0 || Server->Comp(Queue->Uri) == false)
      {
         Server = CreateServerState(Queue->Uri);
         setPostfixForMethodNames(::URI(Queue->Uri).Host.c_str());
         AllowRedirect = ConfigFindB("AllowRedirect", true);
         PipelineDepth = ConfigFindI("Pipeline-Depth", 10);
         Debug = DebugEnabled();
      }

      /* If the server has explicitly said this is the last connection
         then we pre-emptively shut down the pipeline and tear down
         the connection. This will speed up HTTP/1.0 servers a tad
         since we don't have to wait for the close sequence to
         complete */
      if (Server->Persistent == false)
         Server->Close();

      // Reset the pipeline
      if (Server->IsOpen() == false)
         QueueBack = Queue;

      // Connect to the host
      if (Server->Open() == false)
      {
         Fail(true);
         Server = nullptr;
         continue;
      }

      // Fill the pipeline.
      Fetch(0);

      // Fetch the next URL header data from the server.
      switch (Server->RunHeaders(File, Queue->Uri))
      {
         case ServerState::RUN_HEADERS_OK:
            break;

         // The header data is bad
         case ServerState::RUN_HEADERS_PARSE_ERROR:
         {
            _error->Error(_("Bad header data"));
            Fail(true);
            Server->Close();
            RotateDNS();
            continue;
         }

         // The server closed a connection during the header get..
         default:
         case ServerState::RUN_HEADERS_IO_ERROR:
         {
            FailCounter++;
            _error->Discard();
            Server->Close();
            Server->Pipeline = false;
            Server->PipelineAllowed = false;

            if (FailCounter >= 2)
            {
               Fail(_("Connection failed"),true);
               FailCounter = 0;
            }

            RotateDNS();
            continue;
         }
      };

      // Decide what to do.
      FetchResult Res;
      Res.Filename = Queue->DestFile;
      switch (DealWithHeaders(Res))
      {
         // Ok, the file is Open
         case FILE_IS_OPEN:
         {
            URIStart(Res);

            // Run the data
            bool Result = true;

            // ensure we don't fetch too much
            // we could do "Server->MaximumSize = Queue->MaximumSize" here
            // but that would break the clever pipeline messup detection
            // so instead we use the size of the biggest item in the queue
            Server->MaximumSize = FindMaximumObjectSizeInQueue();

            if (Server->HaveContent)
               Result = Server->RunData(File);

            /* If the server is sending back sizeless responses then fill in
               the size now */
            if (Res.Size == 0)
               Res.Size = File->Size();

            // Close the file, destroy the FD object and timestamp it
            FailFd = -1;
            delete File;
            File = 0;

            // Timestamp
            struct timeval times[2];
            times[0].tv_sec = times[1].tv_sec = Server->Date;
            times[0].tv_usec = times[1].tv_usec = 0;
            utimes(Queue->DestFile.c_str(), times);

            // Send status to APT
            if (Result == true)
            {
               Hashes * const resultHashes = Server->GetHashes();
               HashStringList const hashList = resultHashes->GetHashStringList();
               if (PipelineDepth != 0 && Queue->ExpectedHashes.usable() == true && Queue->ExpectedHashes != hashList)
               {
                  // we did not get the expected hash… mhhh:
                  // could it be that server/proxy messed up pipelining?
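                  /* If the body we received hashes to what another item
                     further back in the pipeline expects, the responses
                     arrived out of order: hand the file to that item, move it
                     to the front of the queue and stop pipelining on this
                     connection. */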
                  FetchItem * BeforeI = Queue;
                  for (FetchItem *I = Queue->Next; I != 0 && I != QueueBack; I = I->Next)
                  {
                     if (I->ExpectedHashes.usable() == true && I->ExpectedHashes == hashList)
                     {
                        // yes, he did! Disable pipelining and rewrite queue
                        if (Server->Pipeline == true)
                        {
                           Warning(_("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::Pipeline-Depth");
                           Server->Pipeline = false;
                           Server->PipelineAllowed = false;
                           // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
                        }
                        Rename(Res.Filename, I->DestFile);
                        Res.Filename = I->DestFile;
                        BeforeI->Next = I->Next;
                        I->Next = Queue;
                        Queue = I;
                        break;
                     }
                     BeforeI = I;
                  }
               }
               Res.TakeHashes(*resultHashes);
               URIDone(Res);
            }
            else
            {
               if (Server->IsOpen() == false)
               {
                  FailCounter++;
                  _error->Discard();
                  Server->Close();

                  if (FailCounter >= 2)
                  {
                     Fail(_("Connection failed"),true);
                     FailCounter = 0;
                  }

                  QueueBack = Queue;
               }
               else
               {
                  Server->Close();
                  Fail(true);
               }
            }
            break;
         }

         // IMS hit
         case IMS_HIT:
         {
            URIDone(Res);
            break;
         }

         // Hard server error, not found or something
         case ERROR_UNRECOVERABLE:
         {
            Fail();
            break;
         }

         // Hard internal error, kill the connection and fail
         case ERROR_NOT_FROM_SERVER:
         {
            delete File;
            File = 0;

            Fail();
            RotateDNS();
            Server->Close();
            break;
         }

         // We need to flush the data, the header is like a 404 w/ error text
         case ERROR_WITH_CONTENT_PAGE:
         {
            Fail();
            Server->RunDataToDevNull();
            break;
         }

         // Try again with a new URL
         case TRY_AGAIN_OR_REDIRECT:
         {
            // Clear rest of response if there is content
            if (Server->HaveContent)
               Server->RunDataToDevNull();
            Redirect(NextURI);
            break;
         }

         default:
            Fail(_("Internal error"));
            break;
      }

      FailCounter = 0;
   }

   return 0;
}
/*}}}*/
unsigned long long ServerMethod::FindMaximumObjectSizeInQueue() const /*{{{*/
{
   unsigned long long MaxSizeInQueue = 0;
   for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
      MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
   return MaxSizeInQueue;
}
/*}}}*/
ServerMethod::ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
   aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10),
   AllowRedirect(false), Debug(false)
{
}
/*}}}*/
bool ServerMethod::Configuration(std::string Message) /*{{{*/
{
   if (aptMethod::Configuration(Message) == false)
      return false;

   _config->CndSet("Acquire::tor::Proxy",
                   "socks5h://apt-transport-tor@localhost:9050");
   return true;
}
/*}}}*/
bool ServerMethod::AddProxyAuth(URI &Proxy, URI const &Server) const /*{{{*/
{
   if (std::find(methodNames.begin(), methodNames.end(), "tor") != methodNames.end() &&
       Proxy.User == "apt-transport-tor" && Proxy.Password.empty())
   {
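      /* Pass the target host (reduced to alphanumerics and capped at the
         255-byte SOCKS limit) as the proxy password, presumably so that Tor
         can keep circuits for different destination hosts isolated from
         each other. */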
      std::string pass = Server.Host;
      pass.erase(std::remove_if(pass.begin(), pass.end(), [](char const c) { return std::isalnum(c) == 0; }), pass.end());
      if (pass.length() > 255)
         Proxy.Password = pass.substr(0, 255);
      else
         Proxy.Password = std::move(pass);
   }
   // FIXME: should we support auth.conf for proxies?
   return true;
}
/*}}}*/