// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################

   HTTP and HTTPS share a lot of common code and these classes are
   exactly the dumping ground for this common code

   ##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>

#include <apt-pkg/acquire-method.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>

#include <ctype.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <iostream>
#include <limits>
#include <map>
#include <string>
#include <vector>

#include "server.h"

#include <apti18n.h>
/*}}}*/
using namespace std;

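// Fail-state shared with the SigTerm handler below: the handler must not
// allocate memory, so the name, fd and mtime of the file being fetched are
// kept in these statics.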
string ServerMethod::FailFile;
int ServerMethod::FailFd = -1;
time_t ServerMethod::FailTime = 0;

// ServerState::RunHeaders - Get the headers before the data /*{{{*/
// ---------------------------------------------------------------------
/* Returns RUN_HEADERS_OK if things are OK, RUN_HEADERS_IO_ERROR if an IO
   error occurred and RUN_HEADERS_PARSE_ERROR if a header parse error
   occurred */
ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
                                                      const std::string &Uri)
{
   State = Header;

   Owner->Status(_("Waiting for headers"));

   Major = 0;
   Minor = 0;
   Result = 0;
   Size = 0;
   JunkSize = 0;
   StartPos = 0;
   Encoding = Closes;
   HaveContent = false;
   time(&Date);

   do
   {
      string Data;
      if (ReadHeaderLines(Data) == false)
         continue;

      if (Owner->Debug == true)
         clog << "Answer for: " << Uri << endl << Data;

      for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
      {
         string::const_iterator J = I;
         for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
         if (HeaderLine(string(I,J)) == false)
            return RUN_HEADERS_PARSE_ERROR;
         I = J;
      }

      // 100 Continue is a Nop...
      if (Result == 100)
         continue;

      // Tidy up the connection persistence state.
      if (Encoding == Closes && HaveContent == true)
         Persistent = false;

      return RUN_HEADERS_OK;
   }
   while (LoadNextResponse(false, File) == true);

   return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
// ServerState::HeaderLine - Process a header line /*{{{*/
// ---------------------------------------------------------------------
/* */
bool ServerState::HeaderLine(string Line)
{
   if (Line.empty() == true)
      return true;

   string::size_type Pos = Line.find(' ');
   if (Pos == string::npos || Pos+1 > Line.length())
   {
      // Blah, some servers use "connection:closes", evil.
      Pos = Line.find(':');
      if (Pos == string::npos || Pos + 2 > Line.length())
         return _error->Error(_("Bad header line"));
      Pos++;
   }

   // Skip any spaces between the separator and the start of the value.
   string::size_type Pos2 = Pos;
   while (Pos2 < Line.length() && isspace(Line[Pos2]) != 0)
      Pos2++;

   string Tag = string(Line,0,Pos);
   string Val = string(Line,Pos2);

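   // The Status-Line ("HTTP/Major.Minor Code Reason-Phrase") is handled
   // first; servers omitting the version or the reason phrase are tolerated.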
   if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0)
   {
      // Evil servers return no version
      if (Line[4] == '/')
      {
         int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code);
         if (elements == 3)
         {
            Code[0] = '\0';
            if (Owner != NULL && Owner->Debug == true)
               clog << "HTTP server doesn't give Reason-Phrase for " << Result << std::endl;
         }
         else if (elements != 4)
            return _error->Error(_("The HTTP server sent an invalid reply header"));
      }
      else
      {
         Major = 0;
         Minor = 9;
         if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2)
            return _error->Error(_("The HTTP server sent an invalid reply header"));
      }

      /* Check the HTTP response header to get the default persistence
         state. */
      if (Major < 1)
         Persistent = false;
      else
      {
         if (Major == 1 && Minor == 0)
            Persistent = false;
         else
            Persistent = true;
      }

      return true;
   }

   if (stringcasecmp(Tag,"Content-Length:") == 0)
   {
      if (Encoding == Closes)
         Encoding = Stream;
      HaveContent = true;

      unsigned long long * SizePtr = &Size;
      if (Result == 416)
         SizePtr = &JunkSize;

      *SizePtr = strtoull(Val.c_str(), NULL, 10);
      if (*SizePtr >= std::numeric_limits<unsigned long long>::max())
         return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
      else if (*SizePtr == 0)
         HaveContent = false;
      return true;
   }

   if (stringcasecmp(Tag,"Content-Type:") == 0)
   {
      HaveContent = true;
      return true;
   }

   if (stringcasecmp(Tag,"Content-Range:") == 0)
   {
      HaveContent = true;

      // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
      if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&Size) == 1)
         ; // we got the expected filesize which is all we wanted
      else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2)
         return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
      if ((unsigned long long)StartPos > Size)
         return _error->Error(_("This HTTP server has broken range support"));
      return true;
   }

   if (stringcasecmp(Tag,"Transfer-Encoding:") == 0)
   {
      HaveContent = true;
      if (stringcasecmp(Val,"chunked") == 0)
         Encoding = Chunked;
      return true;
   }

   if (stringcasecmp(Tag,"Connection:") == 0)
   {
      if (stringcasecmp(Val,"close") == 0)
         Persistent = false;
      if (stringcasecmp(Val,"keep-alive") == 0)
         Persistent = true;
      return true;
   }

   if (stringcasecmp(Tag,"Last-Modified:") == 0)
   {
      if (RFC1123StrToTime(Val.c_str(), Date) == false)
         return _error->Error(_("Unknown date format"));
      return true;
   }

   if (stringcasecmp(Tag,"Location:") == 0)
   {
      Location = Val;
      return true;
   }

   return true;
}
/*}}}*/
// ServerState::ServerState - Constructor /*{{{*/
ServerState::ServerState(URI Srv, ServerMethod *Owner) : ServerName(Srv), TimeOut(120), Owner(Owner)
{
   Reset();
}
/*}}}*/
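// Truncate the partially downloaded file back to the resume point and hash
// what we already have, so the final hashes cover the complete file.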
bool ServerState::AddPartialFileToHashes(FileFd &File) /*{{{*/
{
   File.Truncate(StartPos);
   return GetHashes()->AddFD(File, StartPos);
}
/*}}}*/

bool ServerMethod::Configuration(string Message) /*{{{*/
{
   if (pkgAcqMethod::Configuration(Message) == false)
      return false;

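   // Drop root privileges now that the base-class configuration is done.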
   DropPrivsOrDie();

   return true;
}
/*}}}*/

// ServerMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
   to do. Returns DealWithHeadersResult (see http.h for details).
 */
ServerMethod::DealWithHeadersResult
ServerMethod::DealWithHeaders(FetchResult &Res)
{
   // Not Modified
   if (Server->Result == 304)
   {
      unlink(Queue->DestFile.c_str());
      Res.IMSHit = true;
      Res.LastModified = Queue->LastModified;
      return IMS_HIT;
   }

   /* Redirect
    *
    * Note that it is only OK for us to treat all redirection the same
    * because we *always* use GET, not other HTTP methods. There are
    * three redirection codes for which it is not appropriate that we
    * redirect. Pass on those codes so the error handling kicks in.
    */
   if (AllowRedirect
       && (Server->Result > 300 && Server->Result < 400)
       && (Server->Result != 300       // Multiple Choices
           && Server->Result != 304    // Not Modified
           && Server->Result != 306))  // (Not part of HTTP/1.1, reserved)
   {
      if (Server->Location.empty() == true);
      else if (Server->Location[0] == '/' && Queue->Uri.empty() == false)
      {
         URI Uri = Queue->Uri;
         if (Uri.Host.empty() == false)
            NextURI = URI::SiteOnly(Uri);
         else
            NextURI.clear();
         NextURI.append(DeQuoteString(Server->Location));
         return TRY_AGAIN_OR_REDIRECT;
      }
      else
      {
         NextURI = DeQuoteString(Server->Location);
         URI tmpURI = NextURI;
         URI Uri = Queue->Uri;
         // same protocol redirects are okay
         if (tmpURI.Access == Uri.Access)
            return TRY_AGAIN_OR_REDIRECT;
         // as well as http to https
         else if (Uri.Access == "http" && tmpURI.Access == "https")
            return TRY_AGAIN_OR_REDIRECT;
      }
      /* else pass through for error message */
   }
   // retry after an invalid range response without partial data
   else if (Server->Result == 416)
   {
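      /* A 416 means the requested range starts at or beyond the end of the
         server's copy. If the partial file we already have matches the
         expected hashes (or at least the advertised size), treat it as a
         completed download by faking a 200; otherwise delete the partial
         file and retry from scratch. */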
      struct stat SBuf;
      if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
      {
         bool partialHit = false;
         if (Queue->ExpectedHashes.usable() == true)
         {
            Hashes resultHashes(Queue->ExpectedHashes);
            FileFd file(Queue->DestFile, FileFd::ReadOnly);
            Server->Size = file.FileSize();
            Server->Date = file.ModificationTime();
            resultHashes.AddFD(file);
            HashStringList const hashList = resultHashes.GetHashStringList();
            partialHit = (Queue->ExpectedHashes == hashList);
         }
         else if ((unsigned long long)SBuf.st_size == Server->Size)
            partialHit = true;
         if (partialHit == true)
         {
            // the file is completely downloaded, but was not moved
            if (Server->HaveContent == true)
            {
               // Send the error page to /dev/null
               FileFd DevNull("/dev/null",FileFd::WriteExists);
               Server->RunData(&DevNull);
            }
            Server->HaveContent = false;
            Server->StartPos = Server->Size;
            Server->Result = 200;
         }
         else if (unlink(Queue->DestFile.c_str()) == 0)
         {
            NextURI = Queue->Uri;
            return TRY_AGAIN_OR_REDIRECT;
         }
      }
   }

   /* We have a reply we don't handle. This should indicate a permanent
      server failure */
   if (Server->Result < 200 || Server->Result >= 300)
   {
      std::string err;
      strprintf(err, "HttpError%u", Server->Result);
      SetFailReason(err);
      _error->Error("%u %s", Server->Result, Server->Code);
      if (Server->HaveContent == true)
         return ERROR_WITH_CONTENT_PAGE;
      return ERROR_UNRECOVERABLE;
   }

   // This is some sort of 2xx 'data follows' reply
   Res.LastModified = Server->Date;
   Res.Size = Server->Size;

   // Open the file
   delete File;
   File = new FileFd(Queue->DestFile,FileFd::WriteAny);
   if (_error->PendingError() == true)
      return ERROR_NOT_FROM_SERVER;

   FailFile = Queue->DestFile;
   FailFile.c_str();   // Make sure we don't do a malloc in the signal handler
   FailFd = File->Fd();
   FailTime = Server->Date;

   if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
   {
      _error->Errno("read",_("Problem hashing file"));
      return ERROR_NOT_FROM_SERVER;
   }
   if (Server->StartPos > 0)
      Res.ResumePoint = Server->StartPos;

   SetNonBlock(File->Fd(),true);
   return FILE_IS_OPEN;
}
/*}}}*/
// ServerMethod::SigTerm - Handle a fatal signal /*{{{*/
// ---------------------------------------------------------------------
/* This closes and timestamps the open file. This is necessary to get
   resume behaviour on user abort */
void ServerMethod::SigTerm(int)
{
   if (FailFd == -1)
      _exit(100);

   struct timeval times[2];
   times[0].tv_sec = FailTime;
   times[1].tv_sec = FailTime;
   times[0].tv_usec = times[1].tv_usec = 0;
   utimes(FailFile.c_str(), times);
   close(FailFd);

   _exit(100);
}
/*}}}*/
// ServerMethod::Fetch - Fetch an item /*{{{*/
// ---------------------------------------------------------------------
/* This adds an item to the pipeline. We keep the pipeline at a fixed
   depth. */
bool ServerMethod::Fetch(FetchItem *)
{
   if (Server == 0)
      return true;

   // Queue the requests
   int Depth = -1;
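   // Depth == -1 is the item at the head of the queue; the pipelining
   // restrictions below only apply to the items queued behind it.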
   for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
        I = I->Next, Depth++)
   {
      if (Depth >= 0)
      {
         // If pipelining is disabled, we only queue 1 request
         if (Server->Pipeline == false)
            break;
         // if we have no hashes, do at most one such request
         // as we can't fix up pipelining misbehaviours otherwise
         else if (I->ExpectedHashes.usable() == false)
            break;
      }

      // Make sure we stick with the same server
      if (Server->Comp(I->Uri) == false)
         break;
      if (QueueBack == I)
      {
         QueueBack = I->Next;
         SendReq(I);
         continue;
      }
   }

   return true;
}
/*}}}*/
// ServerMethod::Loop - Main loop /*{{{*/
int ServerMethod::Loop()
{
   typedef vector<string> StringVector;
   typedef vector<string>::iterator StringVectorIterator;
   map<string, StringVector> Redirected;

   signal(SIGTERM,SigTerm);
   signal(SIGINT,SigTerm);

   Server = 0;

   int FailCounter = 0;
   while (1)
   {
      // We have no commands, wait for some to arrive
      if (Queue == 0)
      {
         if (WaitFd(STDIN_FILENO) == false)
            return 0;
      }

      /* Run messages, we can accept 0 (no message) if we didn't
         do a WaitFd above.. Otherwise the FD is closed. */
      int Result = Run(true);
      if (Result != -1 && (Result != 0 || Queue == 0))
      {
         if (FailReason.empty() == false ||
             _config->FindB("Acquire::http::DependOnSTDIN", true) == true)
            return 100;
         else
            return 0;
      }

      if (Queue == 0)
         continue;

      // Connect to the server
      if (Server == 0 || Server->Comp(Queue->Uri) == false)
      {
         delete Server;
         Server = CreateServerState(Queue->Uri);
      }
      /* If the server has explicitly said this is the last connection
         then we pre-emptively shut down the pipeline and tear down
         the connection. This will speed up HTTP/1.0 servers a tad
         since we don't have to wait for the close sequence to
         complete */
      if (Server->Persistent == false)
         Server->Close();

      // Reset the pipeline
      if (Server->IsOpen() == false)
         QueueBack = Queue;

      // Connect to the host
      if (Server->Open() == false)
      {
         Fail(true);
         delete Server;
         Server = 0;
         continue;
      }

      // Fill the pipeline.
      Fetch(0);

      // Fetch the next URL header data from the server.
      switch (Server->RunHeaders(File, Queue->Uri))
      {
         case ServerState::RUN_HEADERS_OK:
            break;

         // The header data is bad
         case ServerState::RUN_HEADERS_PARSE_ERROR:
         {
            _error->Error(_("Bad header data"));
            Fail(true);
            RotateDNS();
            continue;
         }

         // The server closed a connection during the header get..
         default:
         case ServerState::RUN_HEADERS_IO_ERROR:
         {
            FailCounter++;
            _error->Discard();
            Server->Close();
            Server->Pipeline = false;

            if (FailCounter >= 2)
            {
               Fail(_("Connection failed"),true);
               FailCounter = 0;
            }

            RotateDNS();
            continue;
         }
      };

      // Decide what to do.
      FetchResult Res;
      Res.Filename = Queue->DestFile;
      switch (DealWithHeaders(Res))
      {
         // Ok, the file is Open
         case FILE_IS_OPEN:
         {
            URIStart(Res);

            // Run the data
            bool Result = true;

            // ensure we don't fetch too much
            // we could do "Server->MaximumSize = Queue->MaximumSize" here
            // but that would break the clever pipeline mess-up detection
            // so instead we use the size of the biggest item in the queue
            Server->MaximumSize = FindMaximumObjectSizeInQueue();

            if (Server->HaveContent)
               Result = Server->RunData(File);

            /* If the server is sending back sizeless responses then fill in
               the size now */
            if (Res.Size == 0)
               Res.Size = File->Size();

            // Close the file, destroy the FD object and timestamp it
            FailFd = -1;
            delete File;
            File = 0;

            // Timestamp
            struct timeval times[2];
            times[0].tv_sec = times[1].tv_sec = Server->Date;
            times[0].tv_usec = times[1].tv_usec = 0;
            utimes(Queue->DestFile.c_str(), times);

            // Send status to APT
            if (Result == true)
            {
               Hashes * const resultHashes = Server->GetHashes();
               HashStringList const hashList = resultHashes->GetHashStringList();
               if (PipelineDepth != 0 && Queue->ExpectedHashes.usable() == true && Queue->ExpectedHashes != hashList)
               {
                  // we did not get the expected hash… mhhh:
                  // could it be that server/proxy messed up pipelining?
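                  // If the received hashes match some other item further down
                  // the pipeline, the responses were shifted: hand the downloaded
                  // file to that item, move it to the front of the queue and
                  // disable pipelining for the rest of this connection.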
                  FetchItem * BeforeI = Queue;
                  for (FetchItem *I = Queue->Next; I != 0 && I != QueueBack; I = I->Next)
                  {
                     if (I->ExpectedHashes.usable() == true && I->ExpectedHashes == hashList)
                     {
                        // yes, it did! Disable pipelining and rewrite the queue
                        if (Server->Pipeline == true)
                        {
                           // FIXME: fake a warning message as we have no proper way of communicating here
                           std::string out;
                           strprintf(out, _("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::PipelineDepth");
                           std::cerr << "W: " << out << std::endl;
                           Server->Pipeline = false;
                           // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
                        }
                        Rename(Res.Filename, I->DestFile);
                        Res.Filename = I->DestFile;
                        BeforeI->Next = I->Next;
                        I->Next = Queue;
                        Queue = I;
                        break;
                     }
                     BeforeI = I;
                  }
               }
               Res.TakeHashes(*resultHashes);
               URIDone(Res);
            }
            else
            {
               if (Server->IsOpen() == false)
               {
                  FailCounter++;
                  _error->Discard();
                  Server->Close();

                  if (FailCounter >= 2)
                  {
                     Fail(_("Connection failed"),true);
                     FailCounter = 0;
                  }

                  QueueBack = Queue;
               }
               else
               {
                  Server->Close();
                  Fail(true);
               }
            }
            break;
         }

         // IMS hit
         case IMS_HIT:
         {
            URIDone(Res);
            break;
         }

         // Hard server error, not found or something
         case ERROR_UNRECOVERABLE:
         {
            Fail();
            break;
         }

         // Hard internal error, kill the connection and fail
         case ERROR_NOT_FROM_SERVER:
         {
            delete File;
            File = 0;

            Fail();
            RotateDNS();
            Server->Close();
            break;
         }

         // We need to flush the data, the header is like a 404 w/ error text
         case ERROR_WITH_CONTENT_PAGE:
         {
            Fail();

            // Send the content to /dev/null
            File = new FileFd("/dev/null",FileFd::WriteExists);
            Server->RunData(File);
            delete File;
            File = 0;
            break;
         }

         // Try again with a new URL
         case TRY_AGAIN_OR_REDIRECT:
         {
            // Clear rest of response if there is content
            if (Server->HaveContent)
            {
               File = new FileFd("/dev/null",FileFd::WriteExists);
               Server->RunData(File);
               delete File;
               File = 0;
            }

            /* Detect redirect loops. No more redirects are allowed
               after the same URI is seen twice in a queue item. */
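            // R[0] doubles as a 'STOP' sentinel once a URI repeats; more than
            // ten recorded redirects for one item also stops the chain.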
            StringVector &R = Redirected[Queue->DestFile];
            bool StopRedirects = false;
            if (R.empty() == true)
               R.push_back(Queue->Uri);
            else if (R[0] == "STOP" || R.size() > 10)
               StopRedirects = true;
            else
            {
               for (StringVectorIterator I = R.begin(); I != R.end(); ++I)
                  if (Queue->Uri == *I)
                  {
                     R[0] = "STOP";
                     break;
                  }

               R.push_back(Queue->Uri);
            }

            if (StopRedirects == false)
               Redirect(NextURI);
            else
               Fail();

            break;
         }

         default:
            Fail(_("Internal error"));
            break;
      }

      FailCounter = 0;
   }

   return 0;
}
/*}}}*/
/*{{{*/
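// Return the largest MaximumSize among the items currently in the pipeline;
// Loop() uses it as Server->MaximumSize so downloads are capped without
// breaking the pipeline mess-up detection above.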
unsigned long long
ServerMethod::FindMaximumObjectSizeInQueue() const
{
   unsigned long long MaxSizeInQueue = 0;
   for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
      MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
   return MaxSizeInQueue;
}
/*}}}*/