]> git.saurik.com Git - apt.git/blob - methods/server.cc
gpgv: cleanup statusfd parsing a bit
[apt.git] / methods / server.cc
1 // -*- mode: cpp; mode: fold -*-
2 // Description /*{{{*/
3 /* ######################################################################
4
5 HTTP and HTTPS share a lot of common code and these classes are
6 exactly the dumping ground for this common code
7
8 ##################################################################### */
9 /*}}}*/
10 // Include Files /*{{{*/
11 #include <config.h>
12
13 #include <apt-pkg/acquire-method.h>
14 #include <apt-pkg/configuration.h>
15 #include <apt-pkg/error.h>
16 #include <apt-pkg/fileutl.h>
17 #include <apt-pkg/strutl.h>
18
19 #include <ctype.h>
20 #include <signal.h>
21 #include <stdio.h>
22 #include <stdlib.h>
23 #include <sys/stat.h>
24 #include <sys/time.h>
25 #include <time.h>
26 #include <unistd.h>
27 #include <iostream>
28 #include <limits>
29 #include <map>
30 #include <string>
31 #include <vector>
32
33 #include "server.h"
34
35 #include <apti18n.h>
36 /*}}}*/
37 using namespace std;
38
// State shared with the SIGTERM/SIGINT handler (ServerMethod::SigTerm):
// the handler is static, so these must be too. They describe the file
// currently being downloaded so an aborted transfer can be timestamped
// and closed for later resume.
string ServerMethod::FailFile;        // destination path of the in-flight file
int ServerMethod::FailFd = -1;        // its open fd, or -1 if no transfer active
time_t ServerMethod::FailTime = 0;    // server-provided modification time
42
// ServerState::RunHeaders - Get the headers before the data		/*{{{*/
// ---------------------------------------------------------------------
/* Reads and parses one HTTP response header block from the server.
   Returns RUN_HEADERS_OK if things are OK, RUN_HEADERS_IO_ERROR if an
   IO error occurred and RUN_HEADERS_PARSE_ERROR if a header parse
   error occurred. 100-Continue responses are skipped transparently. */
ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
                                                      const std::string &Uri)
{
   State = Header;

   Owner->Status(_("Waiting for headers"));

   // Reset all per-response state before parsing a fresh header block.
   Major = 0;
   Minor = 0;
   Result = 0;
   TotalFileSize = 0;
   JunkSize = 0;
   StartPos = 0;
   Encoding = Closes;
   HaveContent = false;
   time(&Date);

   do
   {
      string Data;
      // NOTE: 'continue' in this do/while jumps to the LoadNextResponse()
      // condition below, i.e. it pulls more data from the wire.
      if (ReadHeaderLines(Data) == false)
	 continue;

      if (Owner->Debug == true)
	 clog << "Answer for: " << Uri << endl << Data;

      // Feed the block to HeaderLine() one line at a time, tolerating
      // both \r\n and bare \n terminators.
      for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
      {
	 string::const_iterator J = I;
	 for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
	 if (HeaderLine(string(I,J)) == false)
	    return RUN_HEADERS_PARSE_ERROR;
	 I = J;
      }

      // 100 Continue is a Nop...
      if (Result == 100)
	 continue;

      // Tidy up the connection persistence state.
      if (Encoding == Closes && HaveContent == true)
	 Persistent = false;

      return RUN_HEADERS_OK;
   }
   while (LoadNextResponse(false, File) == true);

   return RUN_HEADERS_IO_ERROR;
}
									/*}}}*/
97 // ServerState::HeaderLine - Process a header line /*{{{*/
98 // ---------------------------------------------------------------------
99 /* */
100 bool ServerState::HeaderLine(string Line)
101 {
102 if (Line.empty() == true)
103 return true;
104
105 string::size_type Pos = Line.find(' ');
106 if (Pos == string::npos || Pos+1 > Line.length())
107 {
108 // Blah, some servers use "connection:closes", evil.
109 Pos = Line.find(':');
110 if (Pos == string::npos || Pos + 2 > Line.length())
111 return _error->Error(_("Bad header line"));
112 Pos++;
113 }
114
115 // Parse off any trailing spaces between the : and the next word.
116 string::size_type Pos2 = Pos;
117 while (Pos2 < Line.length() && isspace_ascii(Line[Pos2]) != 0)
118 Pos2++;
119
120 string Tag = string(Line,0,Pos);
121 string Val = string(Line,Pos2);
122
123 if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0)
124 {
125 // Evil servers return no version
126 if (Line[4] == '/')
127 {
128 int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code);
129 if (elements == 3)
130 {
131 Code[0] = '\0';
132 if (Owner != NULL && Owner->Debug == true)
133 clog << "HTTP server doesn't give Reason-Phrase for " << Result << std::endl;
134 }
135 else if (elements != 4)
136 return _error->Error(_("The HTTP server sent an invalid reply header"));
137 }
138 else
139 {
140 Major = 0;
141 Minor = 9;
142 if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2)
143 return _error->Error(_("The HTTP server sent an invalid reply header"));
144 }
145
146 /* Check the HTTP response header to get the default persistence
147 state. */
148 if (Major < 1)
149 Persistent = false;
150 else
151 {
152 if (Major == 1 && Minor == 0)
153 {
154 Persistent = false;
155 }
156 else
157 {
158 Persistent = true;
159 if (PipelineAllowed)
160 Pipeline = true;
161 }
162 }
163
164 return true;
165 }
166
167 if (stringcasecmp(Tag,"Content-Length:") == 0)
168 {
169 if (Encoding == Closes)
170 Encoding = Stream;
171 HaveContent = true;
172
173 unsigned long long * DownloadSizePtr = &DownloadSize;
174 if (Result == 416)
175 DownloadSizePtr = &JunkSize;
176
177 *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10);
178 if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
179 return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
180 else if (*DownloadSizePtr == 0)
181 HaveContent = false;
182
183 // On partial content (206) the Content-Length less than the real
184 // size, so do not set it here but leave that to the Content-Range
185 // header instead
186 if(Result != 206 && TotalFileSize == 0)
187 TotalFileSize = DownloadSize;
188
189 return true;
190 }
191
192 if (stringcasecmp(Tag,"Content-Type:") == 0)
193 {
194 HaveContent = true;
195 return true;
196 }
197
198 if (stringcasecmp(Tag,"Content-Range:") == 0)
199 {
200 HaveContent = true;
201
202 // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
203 if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
204 ; // we got the expected filesize which is all we wanted
205 else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
206 return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
207 if ((unsigned long long)StartPos > TotalFileSize)
208 return _error->Error(_("This HTTP server has broken range support"));
209
210 // figure out what we will download
211 DownloadSize = TotalFileSize - StartPos;
212 return true;
213 }
214
215 if (stringcasecmp(Tag,"Transfer-Encoding:") == 0)
216 {
217 HaveContent = true;
218 if (stringcasecmp(Val,"chunked") == 0)
219 Encoding = Chunked;
220 return true;
221 }
222
223 if (stringcasecmp(Tag,"Connection:") == 0)
224 {
225 if (stringcasecmp(Val,"close") == 0)
226 Persistent = false;
227 if (stringcasecmp(Val,"keep-alive") == 0)
228 Persistent = true;
229 return true;
230 }
231
232 if (stringcasecmp(Tag,"Last-Modified:") == 0)
233 {
234 if (RFC1123StrToTime(Val.c_str(), Date) == false)
235 return _error->Error(_("Unknown date format"));
236 return true;
237 }
238
239 if (stringcasecmp(Tag,"Location:") == 0)
240 {
241 Location = Val;
242 return true;
243 }
244
245 return true;
246 }
247 /*}}}*/
// ServerState::ServerState - Constructor				/*{{{*/
// Binds this connection state to its server URI and owning method, then
// resets all per-connection/per-response fields to their defaults
// (Reset() is declared elsewhere - presumably it clears the parse state
// initialized again in RunHeaders; confirm in server.h).
ServerState::ServerState(URI Srv, ServerMethod *Owner) :
   DownloadSize(0), ServerName(Srv), TimeOut(120), Owner(Owner)
{
   Reset();
}
									/*}}}*/
255 bool ServerState::AddPartialFileToHashes(FileFd &File) /*{{{*/
256 {
257 File.Truncate(StartPos);
258 return GetHashes()->AddFD(File, StartPos);
259 }
260 /*}}}*/
261
// ServerMethod::DealWithHeaders - Handle the retrieved header data	/*{{{*/
// ---------------------------------------------------------------------
/* We look at the header data we got back from the server and decide what
   to do. Returns DealWithHeadersResult (see http.h for details).
   Handles in order: 304 (IMS hit), 3xx redirects, 416 resume recovery,
   other non-2xx failures, and finally opens the destination file for a
   2xx data-follows reply. */
ServerMethod::DealWithHeadersResult
ServerMethod::DealWithHeaders(FetchResult &Res)
{
   // Not Modified
   if (Server->Result == 304)
   {
      RemoveFile("server", Queue->DestFile);
      Res.IMSHit = true;
      Res.LastModified = Queue->LastModified;
      return IMS_HIT;
   }

   /* Redirect
    *
    * Note that it is only OK for us to treat all redirection the same
    * because we *always* use GET, not other HTTP methods. There are
    * three redirection codes for which it is not appropriate that we
    * redirect. Pass on those codes so the error handling kicks in.
    */
   if (AllowRedirect
       && (Server->Result > 300 && Server->Result < 400)
       && (Server->Result != 300       // Multiple Choices
           && Server->Result != 304    // Not Modified
           && Server->Result != 306))  // (Not part of HTTP/1.1, reserved)
   {
      // NOTE: the trailing ';' is deliberate - an empty Location matches
      // no branch and falls through to the generic error handling below.
      if (Server->Location.empty() == true);
      else if (Server->Location[0] == '/' && Queue->Uri.empty() == false)
      {
	 // Absolute-path redirect: splice it onto the site part of the
	 // original URI.
	 URI Uri = Queue->Uri;
	 if (Uri.Host.empty() == false)
	    NextURI = URI::SiteOnly(Uri);
	 else
	    NextURI.clear();
	 NextURI.append(DeQuoteString(Server->Location));
	 return TRY_AGAIN_OR_REDIRECT;
      }
      else
      {
	 NextURI = DeQuoteString(Server->Location);
	 URI tmpURI = NextURI;
	 URI Uri = Queue->Uri;
	 // same protocol redirects are okay
	 if (tmpURI.Access == Uri.Access)
	    return TRY_AGAIN_OR_REDIRECT;
	 // as well as http to https
	 else if (Uri.Access == "http" && tmpURI.Access == "https")
	    return TRY_AGAIN_OR_REDIRECT;
      }
      /* else pass through for error message */
   }
   // retry after an invalid range response without partial data
   else if (Server->Result == 416)
   {
      struct stat SBuf;
      if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
      {
	 // Decide whether the partial file is actually already complete:
	 // by hash when we have expected hashes, by size otherwise.
	 bool partialHit = false;
	 if (Queue->ExpectedHashes.usable() == true)
	 {
	    Hashes resultHashes(Queue->ExpectedHashes);
	    FileFd file(Queue->DestFile, FileFd::ReadOnly);
	    Server->TotalFileSize = file.FileSize();
	    Server->Date = file.ModificationTime();
	    resultHashes.AddFD(file);
	    HashStringList const hashList = resultHashes.GetHashStringList();
	    partialHit = (Queue->ExpectedHashes == hashList);
	 }
	 else if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
	    partialHit = true;
	 if (partialHit == true)
	 {
	    // the file is completely downloaded, but was not moved
	    if (Server->HaveContent == true)
	    {
	       // Send to error page to dev/null
	       FileFd DevNull("/dev/null",FileFd::WriteExists);
	       Server->RunData(&DevNull);
	    }
	    // Fake a successful full transfer so the caller finalizes it.
	    Server->HaveContent = false;
	    Server->StartPos = Server->TotalFileSize;
	    Server->Result = 200;
	 }
	 else if (RemoveFile("server", Queue->DestFile))
	 {
	    // The partial data was useless: drop it and refetch from zero.
	    NextURI = Queue->Uri;
	    return TRY_AGAIN_OR_REDIRECT;
	 }
      }
   }

   /* We have a reply we don't handle. This should indicate a perm server
      failure */
   if (Server->Result < 200 || Server->Result >= 300)
   {
      std::string err;
      strprintf(err, "HttpError%u", Server->Result);
      SetFailReason(err);
      _error->Error("%u %s", Server->Result, Server->Code);
      if (Server->HaveContent == true)
	 return ERROR_WITH_CONTENT_PAGE;
      return ERROR_UNRECOVERABLE;
   }

   // This is some sort of 2xx 'data follows' reply
   Res.LastModified = Server->Date;
   Res.Size = Server->TotalFileSize;

   // Open the file
   delete File;
   File = new FileFd(Queue->DestFile,FileFd::WriteAny);
   if (_error->PendingError() == true)
      return ERROR_NOT_FROM_SERVER;

   // Record enough state for the async SigTerm handler to salvage the
   // partial file on abort.
   FailFile = Queue->DestFile;
   FailFile.c_str();   // Make sure we don't do a malloc in the signal handler
   FailFd = File->Fd();
   FailTime = Server->Date;

   if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
   {
      _error->Errno("read",_("Problem hashing file"));
      return ERROR_NOT_FROM_SERVER;
   }
   if (Server->StartPos > 0)
      Res.ResumePoint = Server->StartPos;

   SetNonBlock(File->Fd(),true);
   return FILE_IS_OPEN;
}
									/*}}}*/
397 // ServerMethod::SigTerm - Handle a fatal signal /*{{{*/
398 // ---------------------------------------------------------------------
399 /* This closes and timestamps the open file. This is necessary to get
400 resume behavoir on user abort */
401 void ServerMethod::SigTerm(int)
402 {
403 if (FailFd == -1)
404 _exit(100);
405
406 struct timeval times[2];
407 times[0].tv_sec = FailTime;
408 times[1].tv_sec = FailTime;
409 times[0].tv_usec = times[1].tv_usec = 0;
410 utimes(FailFile.c_str(), times);
411 close(FailFd);
412
413 _exit(100);
414 }
415 /*}}}*/
// ServerMethod::Fetch - Fetch an item					/*{{{*/
// ---------------------------------------------------------------------
/* This adds an item to the pipeline. We keep the pipeline at a fixed
   depth: requests are sent for queue items between Queue (head) and
   QueueBack (first not-yet-requested item). Items whose existing partial
   file already matches the expected hashes are completed locally without
   contacting the server. The FetchItem* parameter is unused - the queue
   members are consulted directly. */
bool ServerMethod::Fetch(FetchItem *)
{
   if (Server == nullptr || QueueBack == nullptr)
      return true;

   // If pipelining is disabled, we only queue 1 request
   auto const AllowedDepth = Server->Pipeline ? PipelineDepth : 0;
   // how deep is our pipeline currently?
   decltype(PipelineDepth) CurrentDepth = 0;
   for (FetchItem const *I = Queue; I != QueueBack; I = I->Next)
      ++CurrentDepth;

   do {
      // Make sure we stick with the same server
      if (Server->Comp(QueueBack->Uri) == false)
	 break;

      bool const UsableHashes = QueueBack->ExpectedHashes.usable();
      // if we have no hashes, do at most one such request
      // as we can't fixup pipeling misbehaviors otherwise
      if (CurrentDepth != 0 && UsableHashes == false)
	 break;

      // Shortcut: a leftover file that already hashes correctly can be
      // reported done without issuing a request at all.
      if (UsableHashes && FileExists(QueueBack->DestFile))
      {
	 FileFd partial(QueueBack->DestFile, FileFd::ReadOnly);
	 Hashes wehave(QueueBack->ExpectedHashes);
	 if (QueueBack->ExpectedHashes.FileSize() == partial.FileSize())
	 {
	    if (wehave.AddFD(partial) &&
		wehave.GetHashStringList() == QueueBack->ExpectedHashes)
	    {
	       FetchResult Res;
	       Res.Filename = QueueBack->DestFile;
	       Res.ResumePoint = QueueBack->ExpectedHashes.FileSize();
	       URIStart(Res);
	       // move item to the start of the queue as URIDone will
	       // always dequeued the first item in the queue
	       if (Queue != QueueBack)
	       {
		  FetchItem *Prev = Queue;
		  for (; Prev->Next != QueueBack; Prev = Prev->Next)
		     /* look for the previous queue item */;
		  Prev->Next = QueueBack->Next;
		  QueueBack->Next = Queue;
		  Queue = QueueBack;
		  QueueBack = Prev->Next;
	       }
	       Res.TakeHashes(wehave);
	       URIDone(Res);
	       // 'continue' re-tests the do/while condition below.
	       continue;
	    }
	    else
	       RemoveFile("Fetch-Partial", QueueBack->DestFile);
	 }
      }
      // Issue the request and advance the not-yet-requested marker.
      auto const Tmp = QueueBack;
      QueueBack = QueueBack->Next;
      SendReq(Tmp);
      ++CurrentDepth;
   } while (CurrentDepth <= AllowedDepth && QueueBack != nullptr);

   return true;
}
									/*}}}*/
// ServerMethod::Loop - Main loop					/*{{{*/
/* Drives the whole method: waits for queue items from APT on stdin,
   (re)opens the server connection, fills the request pipeline, reads
   response headers, and dispatches on DealWithHeaders' verdict. Returns
   the process exit status (0 on clean shutdown, 100 on failure). */
int ServerMethod::Loop()
{
   typedef vector<string> StringVector;
   typedef vector<string>::iterator StringVectorIterator;
   // Per-destination-file redirect history, used for loop detection.
   map<string, StringVector> Redirected;

   signal(SIGTERM,SigTerm);
   signal(SIGINT,SigTerm);

   Server = 0;

   // Counts consecutive connection/header failures; give up after two.
   int FailCounter = 0;
   while (1)
   {
      // We have no commands, wait for some to arrive
      if (Queue == 0)
      {
	 if (WaitFd(STDIN_FILENO) == false)
	    return 0;
      }

      /* Run messages, we can accept 0 (no message) if we didn't
         do a WaitFd above.. Otherwise the FD is closed. */
      int Result = Run(true);
      if (Result != -1 && (Result != 0 || Queue == 0))
      {
	 if(FailReason.empty() == false ||
	    _config->FindB("Acquire::http::DependOnSTDIN", true) == true)
	    return 100;
	 else
	    return 0;
      }

      if (Queue == 0)
	 continue;

      // Connect to the server
      if (Server == 0 || Server->Comp(Queue->Uri) == false)
	 Server = CreateServerState(Queue->Uri);

      /* If the server has explicitly said this is the last connection
         then we pre-emptively shut down the pipeline and tear down
         the connection. This will speed up HTTP/1.0 servers a tad
         since we don't have to wait for the close sequence to
         complete */
      if (Server->Persistent == false)
	 Server->Close();

      // Reset the pipeline
      if (Server->IsOpen() == false)
	 QueueBack = Queue;

      // Connnect to the host
      if (Server->Open() == false)
      {
	 Fail(true);
	 Server = nullptr;
	 continue;
      }

      // Fill the pipeline.
      Fetch(0);

      // Fetch the next URL header data from the server.
      switch (Server->RunHeaders(File, Queue->Uri))
      {
	 case ServerState::RUN_HEADERS_OK:
	 break;

	 // The header data is bad
	 case ServerState::RUN_HEADERS_PARSE_ERROR:
	 {
	    _error->Error(_("Bad header data"));
	    Fail(true);
	    RotateDNS();
	    continue;
	 }

	 // The server closed a connection during the header get..
	 default:
	 case ServerState::RUN_HEADERS_IO_ERROR:
	 {
	    FailCounter++;
	    _error->Discard();
	    Server->Close();
	    // Disable pipelining for the retry - the breakage may have
	    // been caused by it.
	    Server->Pipeline = false;
	    Server->PipelineAllowed = false;

	    if (FailCounter >= 2)
	    {
	       Fail(_("Connection failed"),true);
	       FailCounter = 0;
	    }

	    RotateDNS();
	    continue;
	 }
      };

      // Decide what to do.
      FetchResult Res;
      Res.Filename = Queue->DestFile;
      switch (DealWithHeaders(Res))
      {
	 // Ok, the file is Open
	 case FILE_IS_OPEN:
	 {
	    URIStart(Res);

	    // Run the data
	    bool Result = true;

	    // ensure we don't fetch too much
	    // we could do "Server->MaximumSize = Queue->MaximumSize" here
	    // but that would break the clever pipeline messup detection
	    // so instead we use the size of the biggest item in the queue
	    Server->MaximumSize = FindMaximumObjectSizeInQueue();

	    if (Server->HaveContent)
	       Result = Server->RunData(File);

	    /* If the server is sending back sizeless responses then fill in
	       the size now */
	    if (Res.Size == 0)
	       Res.Size = File->Size();

	    // Close the file, destroy the FD object and timestamp it
	    FailFd = -1;
	    delete File;
	    File = 0;

	    // Timestamp
	    struct timeval times[2];
	    times[0].tv_sec = times[1].tv_sec = Server->Date;
	    times[0].tv_usec = times[1].tv_usec = 0;
	    utimes(Queue->DestFile.c_str(), times);

	    // Send status to APT
	    if (Result == true)
	    {
	       Hashes * const resultHashes = Server->GetHashes();
	       HashStringList const hashList = resultHashes->GetHashStringList();
	       if (PipelineDepth != 0 && Queue->ExpectedHashes.usable() == true && Queue->ExpectedHashes != hashList)
	       {
		  // we did not get the expected hash… mhhh:
		  // could it be that server/proxy messed up pipelining?
		  FetchItem * BeforeI = Queue;
		  for (FetchItem *I = Queue->Next; I != 0 && I != QueueBack; I = I->Next)
		  {
		     if (I->ExpectedHashes.usable() == true && I->ExpectedHashes == hashList)
		     {
			// yes, he did! Disable pipelining and rewrite queue
			if (Server->Pipeline == true)
			{
			   // FIXME: fake a warning message as we have no proper way of communicating here
			   std::string out;
			   strprintf(out, _("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::PipelineDepth");
			   std::cerr << "W: " << out << std::endl;
			   Server->Pipeline = false;
			   Server->PipelineAllowed = false;
			   // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
			}
			// The data we got actually belongs to item I:
			// rename the file and move I to the queue head so
			// URIDone dequeues the right item.
			Rename(Res.Filename, I->DestFile);
			Res.Filename = I->DestFile;
			BeforeI->Next = I->Next;
			I->Next = Queue;
			Queue = I;
			break;
		     }
		     BeforeI = I;
		  }
	       }
	       Res.TakeHashes(*resultHashes);
	       URIDone(Res);
	    }
	    else
	    {
	       if (Server->IsOpen() == false)
	       {
		  FailCounter++;
		  _error->Discard();
		  Server->Close();

		  if (FailCounter >= 2)
		  {
		     Fail(_("Connection failed"),true);
		     FailCounter = 0;
		  }

		  QueueBack = Queue;
	       }
	       else
	       {
		  Server->Close();
		  Fail(true);
	       }
	    }
	    break;
	 }

	 // IMS hit
	 case IMS_HIT:
	 {
	    URIDone(Res);
	    break;
	 }

	 // Hard server error, not found or something
	 case ERROR_UNRECOVERABLE:
	 {
	    Fail();
	    break;
	 }

	 // Hard internal error, kill the connection and fail
	 case ERROR_NOT_FROM_SERVER:
	 {
	    delete File;
	    File = 0;

	    Fail();
	    RotateDNS();
	    Server->Close();
	    break;
	 }

	 // We need to flush the data, the header is like a 404 w/ error text
	 case ERROR_WITH_CONTENT_PAGE:
	 {
	    Fail();

	    // Send to content to dev/null
	    File = new FileFd("/dev/null",FileFd::WriteExists);
	    Server->RunData(File);
	    delete File;
	    File = 0;
	    break;
	 }

	 // Try again with a new URL
	 case TRY_AGAIN_OR_REDIRECT:
	 {
	    // Clear rest of response if there is content
	    if (Server->HaveContent)
	    {
	       File = new FileFd("/dev/null",FileFd::WriteExists);
	       Server->RunData(File);
	       delete File;
	       File = 0;
	    }

	    /* Detect redirect loops. No more redirects are allowed
	       after the same URI is seen twice in a queue item.
	       R[0] == "STOP" is the sticky loop marker; 10 hops is the
	       hard ceiling. */
	    StringVector &R = Redirected[Queue->DestFile];
	    bool StopRedirects = false;
	    if (R.empty() == true)
	       R.push_back(Queue->Uri);
	    else if (R[0] == "STOP" || R.size() > 10)
	       StopRedirects = true;
	    else
	    {
	       for (StringVectorIterator I = R.begin(); I != R.end(); ++I)
		  if (Queue->Uri == *I)
		  {
		     R[0] = "STOP";
		     break;
		  }

	       R.push_back(Queue->Uri);
	    }

	    if (StopRedirects == false)
	       Redirect(NextURI);
	    else
	       Fail();

	    break;
	 }

	 default:
	 Fail(_("Internal error"));
	 break;
      }

      FailCounter = 0;
   }

   return 0;
}
									/*}}}*/
776 unsigned long long ServerMethod::FindMaximumObjectSizeInQueue() const /*{{{*/
777 {
778 unsigned long long MaxSizeInQueue = 0;
779 for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
780 MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
781 return MaxSizeInQueue;
782 }
783 /*}}}*/
// Constructor: forwards identity/capability flags to the aptMethod base
// and establishes defaults - no connection yet, no open file, a pipeline
// depth of 10 requests, redirects disallowed and debugging off until
// configuration says otherwise.
ServerMethod::ServerMethod(char const * const Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
   aptMethod(Binary, Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10),
   AllowRedirect(false), Debug(false)
{
}
									/*}}}*/