X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/f2b47ba290f3a178c584da83f007cf0f720baabb..a9e4fd6534a2ae6a5a983df77e5b1efa32f3a756:/methods/server.cc

diff --git a/methods/server.cc b/methods/server.cc
index cef809738..9db45eb8c 100644
--- a/methods/server.cc
+++ b/methods/server.cc
@@ -10,7 +10,6 @@
 // Include Files							/*{{{*/
 #include
 
-#include
 #include
 #include
 #include
@@ -54,7 +53,8 @@ ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
    Major = 0;
    Minor = 0;
    Result = 0;
-   Size = 0;
+   TotalFileSize = 0;
+   JunkSize = 0;
    StartPos = 0;
    Encoding = Closes;
    HaveContent = false;
@@ -113,7 +113,7 @@ bool ServerState::HeaderLine(string Line)
 
    // Parse off any trailing spaces between the : and the next word.
    string::size_type Pos2 = Pos;
-   while (Pos2 < Line.length() && isspace(Line[Pos2]) != 0)
+   while (Pos2 < Line.length() && isspace_ascii(Line[Pos2]) != 0)
       Pos2++;
 
    string Tag = string(Line,0,Pos);
@@ -128,8 +128,8 @@ bool ServerState::HeaderLine(string Line)
       if (elements == 3)
       {
	 Code[0] = '\0';
-	 if (Owner->Debug == true)
-	    clog << "HTTP server doesn't give Reason-Phrase for " << Result << std::endl;
+	 if (Owner != NULL && Owner->Debug == true)
+	    clog << "HTTP server doesn't give Reason-Phrase for " << std::to_string(Result) << std::endl;
      }
      else if (elements != 4)
	 return _error->Error(_("The HTTP server sent an invalid reply header"));
@@ -149,9 +149,15 @@ bool ServerState::HeaderLine(string Line)
      else
      {
	 if (Major == 1 && Minor == 0)
+	 {
	    Persistent = false;
+	 }
	 else
+	 {
	    Persistent = true;
+	    if (PipelineAllowed)
+	       Pipeline = true;
+	 }
      }
 
      return true;
@@ -163,15 +169,22 @@ bool ServerState::HeaderLine(string Line)
	 Encoding = Stream;
      HaveContent = true;
 
-     // The length is already set from the Content-Range header
-     if (StartPos != 0)
-	 return true;
+     unsigned long long * DownloadSizePtr = &DownloadSize;
+     if (Result == 416)
+	 DownloadSizePtr = &JunkSize;
 
-     Size = strtoull(Val.c_str(), NULL, 10);
-     if (Size >= std::numeric_limits<unsigned long long>::max())
+     *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10);
+     if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
	 return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
-     else if (Size == 0)
+     else if (*DownloadSizePtr == 0)
	 HaveContent = false;
+
+     // On partial content (206) the Content-Length less than the real
+     // size, so do not set it here but leave that to the Content-Range
+     // header instead
+     if(Result != 206 && TotalFileSize == 0)
+	 TotalFileSize = DownloadSize;
+
      return true;
   }
 
@@ -186,15 +199,15 @@ bool ServerState::HeaderLine(string Line)
      HaveContent = true;
 
      // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
-     if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&Size) == 1)
-     {
-	 StartPos = 1; // ignore Content-Length, it would override Size
-	 HaveContent = false;
-     }
-     else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2)
+     if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
+	 ; // we got the expected filesize which is all we wanted
+     else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
	 return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
-     if ((unsigned long long)StartPos > Size)
+     if ((unsigned long long)StartPos > TotalFileSize)
	 return _error->Error(_("This HTTP server has broken range support"));
+
+     // figure out what we will download
+     DownloadSize = TotalFileSize - StartPos;
      return true;
   }
 
@@ -232,15 +245,16 @@ bool ServerState::HeaderLine(string Line)
 }
 									/*}}}*/
 // ServerState::ServerState - Constructor				/*{{{*/
-ServerState::ServerState(URI Srv, ServerMethod *Owner) : ServerName(Srv), TimeOut(120), Owner(Owner)
+ServerState::ServerState(URI Srv, ServerMethod *Owner) :
+   DownloadSize(0), ServerName(Srv), TimeOut(120), Owner(Owner)
 {
    Reset();
 }
 									/*}}}*/
-
-bool ServerMethod::Configuration(string Message)			/*{{{*/
+bool ServerState::AddPartialFileToHashes(FileFd &File)			/*{{{*/
 {
-   return pkgAcqMethod::Configuration(Message);
+   File.Truncate(StartPos);
+   return GetHashes()->AddFD(File, StartPos);
 }
 									/*}}}*/
 
@@ -255,12 +269,12 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
   // Not Modified
   if (Server->Result == 304)
   {
-     unlink(Queue->DestFile.c_str());
+     RemoveFile("server", Queue->DestFile);
      Res.IMSHit = true;
      Res.LastModified = Queue->LastModified;
      return IMS_HIT;
   }
-   
+
   /* Redirect
    *
    * Note that it is only OK for us to treat all redirection the same
@@ -305,14 +319,33 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
      struct stat SBuf;
      if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
      {
-	 if ((unsigned long long)SBuf.st_size == Server->Size)
+	 bool partialHit = false;
+	 if (Queue->ExpectedHashes.usable() == true)
+	 {
+	    Hashes resultHashes(Queue->ExpectedHashes);
+	    FileFd file(Queue->DestFile, FileFd::ReadOnly);
+	    Server->TotalFileSize = file.FileSize();
+	    Server->Date = file.ModificationTime();
+	    resultHashes.AddFD(file);
+	    HashStringList const hashList = resultHashes.GetHashStringList();
+	    partialHit = (Queue->ExpectedHashes == hashList);
+	 }
+	 else if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
+	    partialHit = true;
+	 if (partialHit == true)
	 {
	    // the file is completely downloaded, but was not moved
-	    Server->StartPos = Server->Size;
-	    Server->Result = 200;
+	    if (Server->HaveContent == true)
+	    {
+	       // Send to error page to dev/null
+	       FileFd DevNull("/dev/null",FileFd::WriteExists);
+	       Server->RunData(&DevNull);
+	    }
	    Server->HaveContent = false;
+	    Server->StartPos = Server->TotalFileSize;
+	    Server->Result = 200;
	 }
-	 else if (unlink(Queue->DestFile.c_str()) == 0)
+	 else if (RemoveFile("server", Queue->DestFile))
	 {
	    NextURI = Queue->Uri;
	    return TRY_AGAIN_OR_REDIRECT;
@@ -320,7 +353,7 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
      }
   }
 
-  /* We have a reply we dont handle. This should indicate a perm server
+  /* We have a reply we don't handle. This should indicate a perm server
      failure */
   if (Server->Result < 200 || Server->Result >= 300)
   {
@@ -335,7 +368,7 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
 
   // This is some sort of 2xx 'data follows' reply
   Res.LastModified = Server->Date;
-  Res.Size = Server->Size;
+  Res.Size = Server->TotalFileSize;
 
   // Open the file
   delete File;
@@ -344,11 +377,11 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
      return ERROR_NOT_FROM_SERVER;
 
   FailFile = Queue->DestFile;
-  FailFile.c_str();   // Make sure we dont do a malloc in the signal handler
+  FailFile.c_str();   // Make sure we don't do a malloc in the signal handler
   FailFd = File->Fd();
   FailTime = Server->Date;
 
-  if (Server->InitHashes(*File) == false)
+  if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
   {
     _error->Errno("read",_("Problem hashing file"));
     return ERROR_NOT_FROM_SERVER;
@@ -385,36 +418,66 @@ void ServerMethod::SigTerm(int)
   depth. 
 */
 bool ServerMethod::Fetch(FetchItem *)
 {
-   if (Server == 0)
+   if (Server == nullptr || QueueBack == nullptr)
      return true;
 
-   // Queue the requests
-   int Depth = -1;
-   for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
-	I = I->Next, Depth++)
-   {
-      if (Depth >= 0)
-      {
-	 // If pipelining is disabled, we only queue 1 request
-	 if (Server->Pipeline == false)
-	    break;
-	 // if we have no hashes, do at most one such request
-	 // as we can't fixup pipeling misbehaviors otherwise
-	 else if (I->ExpectedHashes.usable() == false)
-	    break;
-      }
-
+   // If pipelining is disabled, we only queue 1 request
+   auto const AllowedDepth = Server->Pipeline ? PipelineDepth : 0;
+   // how deep is our pipeline currently?
+   decltype(PipelineDepth) CurrentDepth = 0;
+   for (FetchItem const *I = Queue; I != QueueBack; I = I->Next)
+      ++CurrentDepth;
+
+   do {
      // Make sure we stick with the same server
-     if (Server->Comp(I->Uri) == false)
+     if (Server->Comp(QueueBack->Uri) == false)
+	 break;
+
+     bool const UsableHashes = QueueBack->ExpectedHashes.usable();
+     // if we have no hashes, do at most one such request
+     // as we can't fixup pipeling misbehaviors otherwise
+     if (CurrentDepth != 0 && UsableHashes == false)
	 break;
-     if (QueueBack == I)
+
+     if (UsableHashes && FileExists(QueueBack->DestFile))
     {
-	 QueueBack = I->Next;
-	 SendReq(I);
-	 continue;
+	 FileFd partial(QueueBack->DestFile, FileFd::ReadOnly);
+	 Hashes wehave(QueueBack->ExpectedHashes);
+	 if (QueueBack->ExpectedHashes.FileSize() == partial.FileSize())
+	 {
+	    if (wehave.AddFD(partial) &&
+		  wehave.GetHashStringList() == QueueBack->ExpectedHashes)
+	    {
+	       FetchResult Res;
+	       Res.Filename = QueueBack->DestFile;
+	       Res.ResumePoint = QueueBack->ExpectedHashes.FileSize();
+	       URIStart(Res);
+	       // move item to the start of the queue as URIDone will
+	       // always dequeued the first item in the queue
+	       if (Queue != QueueBack)
+	       {
+		  FetchItem *Prev = Queue;
+		  for (; Prev->Next != QueueBack; Prev = Prev->Next)
+		     /* look for the previous queue item */;
+		  Prev->Next = QueueBack->Next;
+		  QueueBack->Next = Queue;
+		  Queue = QueueBack;
+		  QueueBack = Prev->Next;
+	       }
+	       Res.TakeHashes(wehave);
+	       URIDone(Res);
+	       continue;
+	    }
+	    else
+	       RemoveFile("Fetch-Partial", QueueBack->DestFile);
+	 }
     }
-   }
-
+     auto const Tmp = QueueBack;
+     QueueBack = QueueBack->Next;
+     SendReq(Tmp);
+     ++CurrentDepth;
+   } while (CurrentDepth <= AllowedDepth && QueueBack != nullptr);
+
   return true;
 }
 									/*}}}*/
@@ -457,10 +520,8 @@ int ServerMethod::Loop()
 
      // Connect to the server
     if (Server == 0 || Server->Comp(Queue->Uri) == false)
-     {
-	 delete Server;
	 Server = CreateServerState(Queue->Uri);
-     }
+
     /* If the server has explicitly said this is the last connection
        then we pre-emptively shut down the pipeline and tear down
	 the connection. This will speed up HTTP/1.0 servers a tad
@@ -477,8 +538,7 @@ int ServerMethod::Loop()
     if (Server->Open() == false)
     {
	 Fail(true);
-	 delete Server;
-	 Server = 0;
+	 Server = nullptr;
	 continue;
     }
 
@@ -508,6 +568,7 @@ int ServerMethod::Loop()
	    _error->Discard();
	    Server->Close();
	    Server->Pipeline = false;
+	    Server->PipelineAllowed = false;
 
	    if (FailCounter >= 2)
	    {
@@ -580,6 +641,7 @@ int ServerMethod::Loop()
	       strprintf(out, _("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::PipelineDepth");
	       std::cerr << "W: " << out << std::endl;
	       Server->Pipeline = false;
+	       Server->PipelineAllowed = false;
	       // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
	    }
	    Rename(Res.Filename, I->DestFile);
@@ -710,13 +772,17 @@ int ServerMethod::Loop()
   return 0;
 }
 									/*}}}*/
-									/*{{{*/
-unsigned long long
-ServerMethod::FindMaximumObjectSizeInQueue() const
+unsigned long long ServerMethod::FindMaximumObjectSizeInQueue() const	/*{{{*/
 {
   unsigned long long MaxSizeInQueue = 0;
-  for (FetchItem *I = Queue->Next; I != 0 && I != QueueBack; I = I->Next)
+  for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
     MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
   return MaxSizeInQueue;
 }
 									/*}}}*/
+ServerMethod::ServerMethod(char const * const Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
+   aptMethod(Binary, Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10),
+   AllowRedirect(false), Debug(false)
+{
+}
+									/*}}}*/