X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/343325f4cc460b709fc929f85bc2a3d4691d63fe..5832913a49d4f7c75527264a935cc0ce00627f1d:/methods/server.cc

diff --git a/methods/server.cc b/methods/server.cc
index c91d3b218..0408dddfd 100644
--- a/methods/server.cc
+++ b/methods/server.cc
@@ -10,7 +10,6 @@
 // Include Files                                                       /*{{{*/
 #include
 
-#include
 #include
 #include
 #include
@@ -44,21 +43,12 @@ time_t ServerMethod::FailTime = 0;
 // ---------------------------------------------------------------------
 /* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
    parse error occurred */
-ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File)
+ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
+                                                      const std::string &Uri)
 {
-   State = Header;
-
+   Reset(false);
    Owner->Status(_("Waiting for headers"));
 
-   Major = 0;
-   Minor = 0;
-   Result = 0;
-   Size = 0;
-   StartPos = 0;
-   Encoding = Closes;
-   HaveContent = false;
-   time(&Date);
-
    do
    {
       string Data;
@@ -66,7 +56,7 @@ ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File)
         continue;
 
      if (Owner->Debug == true)
-        clog << Data;
+        clog << "Answer for: " << Uri << endl << Data;
 
      for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
      {
@@ -100,25 +90,7 @@ bool ServerState::HeaderLine(string Line)
    if (Line.empty() == true)
       return true;
 
-   string::size_type Pos = Line.find(' ');
-   if (Pos == string::npos || Pos+1 > Line.length())
-   {
-      // Blah, some servers use "connection:closes", evil.
-      Pos = Line.find(':');
-      if (Pos == string::npos || Pos + 2 > Line.length())
-         return _error->Error(_("Bad header line"));
-      Pos++;
-   }
-
-   // Parse off any trailing spaces between the : and the next word.
-   string::size_type Pos2 = Pos;
-   while (Pos2 < Line.length() && isspace(Line[Pos2]) != 0)
-      Pos2++;
-
-   string Tag = string(Line,0,Pos);
-   string Val = string(Line,Pos2);
-
-   if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0)
+   if (Line.size() > 4 && stringcasecmp(Line.data(), Line.data()+4, "HTTP") == 0)
    {
       // Evil servers return no version
      if (Line[4] == '/')
@@ -127,8 +99,8 @@ bool ServerState::HeaderLine(string Line)
      if (elements == 3)
      {
         Code[0] = '\0';
-        if (Owner->Debug == true)
-           clog << "HTTP server doesn't give Reason-Phrase for " << Result << std::endl;
+        if (Owner != NULL && Owner->Debug == true)
+           clog << "HTTP server doesn't give Reason-Phrase for " << std::to_string(Result) << std::endl;
      }
      else if (elements != 4)
         return _error->Error(_("The HTTP server sent an invalid reply header"));
@@ -148,29 +120,57 @@ bool ServerState::HeaderLine(string Line)
      else
      {
         if (Major == 1 && Minor == 0)
+        {
            Persistent = false;
+        }
         else
+        {
            Persistent = true;
+           if (PipelineAllowed)
+              Pipeline = true;
+        }
      }
      return true;
   }
 
+   // Blah, some servers use "connection:closes", evil.
+   // and some even send empty header fields…
+   string::size_type Pos = Line.find(':');
+   if (Pos == string::npos)
+      return _error->Error(_("Bad header line"));
+   ++Pos;
+
+   // Parse off any trailing spaces between the : and the next word.
+   string::size_type Pos2 = Pos;
+   while (Pos2 < Line.length() && isspace_ascii(Line[Pos2]) != 0)
+      Pos2++;
+
+   string const Tag(Line,0,Pos);
+   string const Val(Line,Pos2);
+
   if (stringcasecmp(Tag,"Content-Length:") == 0)
   {
      if (Encoding == Closes)
        Encoding = Stream;
      HaveContent = true;
 
-      // The length is already set from the Content-Range header
-      if (StartPos != 0)
-         return true;
+      unsigned long long * DownloadSizePtr = &DownloadSize;
+      if (Result == 416 || (Result >= 300 && Result < 400))
+         DownloadSizePtr = &JunkSize;
 
-      Size = strtoull(Val.c_str(), NULL, 10);
-      if (Size >= std::numeric_limits<unsigned long long>::max())
+      *DownloadSizePtr = strtoull(Val.c_str(), NULL, 10);
+      if (*DownloadSizePtr >= std::numeric_limits<unsigned long long>::max())
         return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
-      else if (Size == 0)
+      else if (*DownloadSizePtr == 0)
         HaveContent = false;
+
+      // On partial content (206) the Content-Length less than the real
+      // size, so do not set it here but leave that to the Content-Range
+      // header instead
+      if(Result != 206 && TotalFileSize == 0)
+         TotalFileSize = DownloadSize;
+
      return true;
   }
@@ -185,15 +185,15 @@ bool ServerState::HeaderLine(string Line)
      HaveContent = true;
 
      // §14.16 says 'byte-range-resp-spec' should be a '*' in case of 416
-      if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&Size) == 1)
-      {
-         StartPos = 1; // ignore Content-Length, it would override Size
-         HaveContent = false;
-      }
-      else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2)
+      if (Result == 416 && sscanf(Val.c_str(), "bytes */%llu",&TotalFileSize) == 1)
+         ; // we got the expected filesize which is all we wanted
+      else if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&TotalFileSize) != 2)
        return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
-      if ((unsigned long long)StartPos > Size)
+      if ((unsigned long long)StartPos > TotalFileSize)
        return _error->Error(_("This HTTP server has broken range support"));
+
+      // figure out what we will download
+      DownloadSize = TotalFileSize - StartPos;
      return true;
   }
@@ -208,8 +208,16 @@ bool ServerState::HeaderLine(string Line)
   if (stringcasecmp(Tag,"Connection:") == 0)
   {
      if (stringcasecmp(Val,"close") == 0)
+      {
        Persistent = false;
-      if (stringcasecmp(Val,"keep-alive") == 0)
+         Pipeline = false;
+         /* Some servers send error pages (as they are dynamically generated)
+            for simplicity via a connection close instead of e.g. chunked,
+            so assuming an always closing server only if we get a file + close */
+         if (Result >= 200 && Result < 300)
+            PipelineAllowed = false;
+      }
+      else if (stringcasecmp(Val,"keep-alive") == 0)
        Persistent = true;
      return true;
   }
@@ -227,19 +235,42 @@ bool ServerState::HeaderLine(string Line)
      return true;
   }
 
+   if (stringcasecmp(Tag, "Accept-Ranges:") == 0)
+   {
+      std::string ranges = ',' + Val + ',';
+      ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
+      if (ranges.find(",bytes,") == std::string::npos)
+         RangesAllowed = false;
+      return true;
+   }
+
   return true;
 }
                                                                        /*}}}*/
 // ServerState::ServerState - Constructor                             /*{{{*/
-ServerState::ServerState(URI Srv, ServerMethod *Owner) : ServerName(Srv), TimeOut(120), Owner(Owner)
+ServerState::ServerState(URI Srv, ServerMethod *Owner) :
+   DownloadSize(0), ServerName(Srv), TimeOut(120), Owner(Owner)
 {
    Reset();
 }
                                                                        /*}}}*/
-
-bool ServerMethod::Configuration(string Message)                      /*{{{*/
+bool ServerState::AddPartialFileToHashes(FileFd &File)                        /*{{{*/
+{
+   File.Truncate(StartPos);
+   return GetHashes()->AddFD(File, StartPos);
+}
+                                                                       /*}}}*/
+void ServerState::Reset(bool const Everything)                         /*{{{*/
 {
-   return pkgAcqMethod::Configuration(Message);
+   Major = 0; Minor = 0; Result = 0; Code[0] = '\0';
+   TotalFileSize = 0; JunkSize = 0; StartPos = 0;
+   Encoding = Closes; time(&Date); HaveContent = false;
+   State = Header; MaximumSize = 0;
+   if (Everything)
+   {
+      Persistent = false; Pipeline = false; PipelineAllowed = true;
+      RangesAllowed = true;
+   }
 }
                                                                        /*}}}*/
@@ -254,12 +285,13 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
    // Not Modified
    if (Server->Result == 304)
    {
-      unlink(Queue->DestFile.c_str());
+      RemoveFile("server", Queue->DestFile);
       Res.IMSHit = true;
       Res.LastModified = Queue->LastModified;
+      Res.Size = 0;
       return IMS_HIT;
    }
-   
+
    /* Redirect
     *
     * Note that it is only OK for us to treat all redirection the same
@@ -273,7 +305,8 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
        && Server->Result != 304    // Not Modified
        && Server->Result != 306))  // (Not part of HTTP/1.1, reserved)
    {
-      if (Server->Location.empty() == true);
+      if (Server->Location.empty() == true)
+         ;
      else if (Server->Location[0] == '/' && Queue->Uri.empty() == false)
      {
         URI Uri = Queue->Uri;
@@ -282,19 +315,77 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
        else
           NextURI.clear();
        NextURI.append(DeQuoteString(Server->Location));
+        if (Queue->Uri == NextURI)
+        {
+           SetFailReason("RedirectionLoop");
+           _error->Error("Redirection loop encountered");
+           if (Server->HaveContent == true)
+              return ERROR_WITH_CONTENT_PAGE;
+           return ERROR_UNRECOVERABLE;
+        }
        return TRY_AGAIN_OR_REDIRECT;
      }
      else
      {
         NextURI = DeQuoteString(Server->Location);
         URI tmpURI = NextURI;
+        if (tmpURI.Access.find('+') != std::string::npos)
+        {
+           _error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
+           if (Server->HaveContent == true)
+              return ERROR_WITH_CONTENT_PAGE;
+           return ERROR_UNRECOVERABLE;
+        }
         URI Uri = Queue->Uri;
+        if (Binary.find('+') != std::string::npos)
+        {
+           auto base = Binary.substr(0, Binary.find('+'));
+           if (base != tmpURI.Access)
+           {
+              tmpURI.Access = base + '+' + tmpURI.Access;
+              if (tmpURI.Access == Binary)
+              {
+                 std::string tmpAccess = Uri.Access;
+                 std::swap(tmpURI.Access, Uri.Access);
+                 NextURI = tmpURI;
+                 std::swap(tmpURI.Access, Uri.Access);
+              }
+              else
+                 NextURI = tmpURI;
+           }
+        }
+        if (Queue->Uri == NextURI)
+        {
+           SetFailReason("RedirectionLoop");
+           _error->Error("Redirection loop encountered");
+           if (Server->HaveContent == true)
+              return ERROR_WITH_CONTENT_PAGE;
+           return ERROR_UNRECOVERABLE;
+        }
+        Uri.Access = Binary;
        // same protocol redirects are okay
        if (tmpURI.Access == Uri.Access)
           return TRY_AGAIN_OR_REDIRECT;
        // as well as http to https
-        else if (Uri.Access == "http" && tmpURI.Access == "https")
+        else if ((Uri.Access == "http" || Uri.Access == "https+http") && tmpURI.Access == "https")
           return TRY_AGAIN_OR_REDIRECT;
+        else
+        {
+           auto const tmpplus = tmpURI.Access.find('+');
+           if (tmpplus != std::string::npos && tmpURI.Access.substr(tmpplus + 1) == "https")
+           {
+              auto const uriplus = Uri.Access.find('+');
+              if (uriplus == std::string::npos)
+              {
+                 if (Uri.Access == tmpURI.Access.substr(0, tmpplus)) // foo -> foo+https
+                    return TRY_AGAIN_OR_REDIRECT;
+              }
+              else if (Uri.Access.substr(uriplus + 1) == "http" &&
+                    Uri.Access.substr(0, uriplus) == tmpURI.Access.substr(0, tmpplus)) // foo+http -> foo+https
+                 return TRY_AGAIN_OR_REDIRECT;
+           }
+        }
+        _error->Error("Redirection from %s to '%s' is forbidden", Uri.Access.c_str(), NextURI.c_str());
      }
      /* else pass through for error message */
   }
@@ -304,14 +395,32 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
      struct stat SBuf;
      if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
      {
-         if ((unsigned long long)SBuf.st_size == Server->Size)
+         bool partialHit = false;
+         if (Queue->ExpectedHashes.usable() == true)
+         {
+            Hashes resultHashes(Queue->ExpectedHashes);
+            FileFd file(Queue->DestFile, FileFd::ReadOnly);
+            Server->TotalFileSize = file.FileSize();
+            Server->Date = file.ModificationTime();
+            resultHashes.AddFD(file);
+            HashStringList const hashList = resultHashes.GetHashStringList();
+            partialHit = (Queue->ExpectedHashes == hashList);
+         }
+         else if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
+            partialHit = true;
+         if (partialHit == true)
        {
           // the file is completely downloaded, but was not moved
-            Server->StartPos = Server->Size;
+            if (Server->HaveContent == true)
+            {
+               // nuke the sent error page
+               Server->RunDataToDevNull();
+               Server->HaveContent = false;
+            }
+            Server->StartPos = Server->TotalFileSize;
           Server->Result = 200;
-            Server->HaveContent = false;
        }
-         else if (unlink(Queue->DestFile.c_str()) == 0)
+         else if (RemoveFile("server", Queue->DestFile))
        {
           NextURI = Queue->Uri;
           return TRY_AGAIN_OR_REDIRECT;
@@ -319,14 +428,17 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
        }
      }
 
-   /* We have a reply we dont handle. This should indicate a perm server
+   /* We have a reply we don't handle. This should indicate a perm server
      failure */
   if (Server->Result < 200 || Server->Result >= 300)
   {
-      char err[255];
-      snprintf(err,sizeof(err)-1,"HttpError%i",Server->Result);
-      SetFailReason(err);
-      _error->Error("%u %s",Server->Result,Server->Code);
+      if (_error->PendingError() == false)
+      {
+         std::string err;
+         strprintf(err, "HttpError%u", Server->Result);
+         SetFailReason(err);
+         _error->Error("%u %s", Server->Result, Server->Code);
+      }
      if (Server->HaveContent == true)
        return ERROR_WITH_CONTENT_PAGE;
      return ERROR_UNRECOVERABLE;
@@ -334,28 +446,7 @@ ServerMethod::DealWithHeaders(FetchResult &Res)
 
   // This is some sort of 2xx 'data follows' reply
   Res.LastModified = Server->Date;
-   Res.Size = Server->Size;
-
-   // Open the file
-   delete File;
-   File = new FileFd(Queue->DestFile,FileFd::WriteAny);
-   if (_error->PendingError() == true)
-      return ERROR_NOT_FROM_SERVER;
-
-   FailFile = Queue->DestFile;
-   FailFile.c_str();   // Make sure we dont do a malloc in the signal handler
-   FailFd = File->Fd();
-   FailTime = Server->Date;
-
-   if (Server->InitHashes(*File) == false)
-   {
-      _error->Errno("read",_("Problem hashing file"));
-      return ERROR_NOT_FROM_SERVER;
-   }
-   if (Server->StartPos > 0)
-      Res.ResumePoint = Server->StartPos;
-
-   SetNonBlock(File->Fd(),true);
+   Res.Size = Server->TotalFileSize;
   return FILE_IS_OPEN;
 }
                                                                        /*}}}*/
@@ -384,46 +475,74 @@ void ServerMethod::SigTerm(int)
   depth. */
 bool ServerMethod::Fetch(FetchItem *)
 {
-   if (Server == 0)
+   if (Server == nullptr || QueueBack == nullptr)
      return true;
 
-   // Queue the requests
-   int Depth = -1;
-   for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
-        I = I->Next, Depth++)
-   {
-      if (Depth >= 0)
-      {
-         // If pipelining is disabled, we only queue 1 request
-         if (Server->Pipeline == false)
-            break;
-         // if we have no hashes, do at most one such request
-         // as we can't fixup pipeling misbehaviors otherwise
-         else if (I->ExpectedHashes.usable() == false)
-            break;
-      }
-
+   // If pipelining is disabled, we only queue 1 request
+   auto const AllowedDepth = Server->Pipeline ? PipelineDepth : 0;
+   // how deep is our pipeline currently?
+   decltype(PipelineDepth) CurrentDepth = 0;
+   for (FetchItem const *I = Queue; I != QueueBack; I = I->Next)
+      ++CurrentDepth;
+   if (CurrentDepth > AllowedDepth)
+      return true;
+
+   do {
      // Make sure we stick with the same server
-      if (Server->Comp(I->Uri) == false)
+      if (Server->Comp(QueueBack->Uri) == false)
        break;
-      if (QueueBack == I)
+
+      bool const UsableHashes = QueueBack->ExpectedHashes.usable();
+      // if we have no hashes, do at most one such request
+      // as we can't fixup pipeling misbehaviors otherwise
+      if (CurrentDepth != 0 && UsableHashes == false)
+         break;
+
+      if (UsableHashes && FileExists(QueueBack->DestFile))
      {
-         QueueBack = I->Next;
-         SendReq(I);
-         continue;
+         FileFd partial(QueueBack->DestFile, FileFd::ReadOnly);
+         Hashes wehave(QueueBack->ExpectedHashes);
+         if (QueueBack->ExpectedHashes.FileSize() == partial.FileSize())
+         {
+            if (wehave.AddFD(partial) &&
+                  wehave.GetHashStringList() == QueueBack->ExpectedHashes)
+            {
+               FetchResult Res;
+               Res.Filename = QueueBack->DestFile;
+               Res.ResumePoint = QueueBack->ExpectedHashes.FileSize();
+               URIStart(Res);
+               // move item to the start of the queue as URIDone will
+               // always dequeued the first item in the queue
+               if (Queue != QueueBack)
+               {
+                  FetchItem *Prev = Queue;
+                  for (; Prev->Next != QueueBack; Prev = Prev->Next)
+                     /* look for the previous queue item */;
+                  Prev->Next = QueueBack->Next;
+                  QueueBack->Next = Queue;
+                  Queue = QueueBack;
+                  QueueBack = Prev->Next;
+               }
+               Res.TakeHashes(wehave);
+               URIDone(Res);
+               continue;
+            }
+            else
+               RemoveFile("Fetch-Partial", QueueBack->DestFile);
+         }
      }
-   }
-
+      auto const Tmp = QueueBack;
+      QueueBack = QueueBack->Next;
+      SendReq(Tmp);
+      ++CurrentDepth;
+   } while (CurrentDepth <= AllowedDepth && QueueBack != nullptr);
+
   return true;
 }
                                                                        /*}}}*/
 // ServerMethod::Loop - Main loop                                      /*{{{*/
 int ServerMethod::Loop()
 {
-   typedef vector<string> StringVector;
-   typedef vector<string>::iterator StringVectorIterator;
-   map<string, StringVector> Redirected;
-
   signal(SIGTERM,SigTerm);
   signal(SIGINT,SigTerm);
@@ -445,7 +564,7 @@ int ServerMethod::Loop()
      if (Result != -1 && (Result != 0 || Queue == 0))
      {
        if(FailReason.empty() == false ||
-            _config->FindB("Acquire::http::DependOnSTDIN", true) == true)
+            ConfigFindB("DependOnSTDIN", true) == true)
           return 100;
        else
           return 0;
@@ -457,9 +576,13 @@ int ServerMethod::Loop()
      // Connect to the server
      if (Server == 0 || Server->Comp(Queue->Uri) == false)
      {
-         delete Server;
        Server = CreateServerState(Queue->Uri);
+         setPostfixForMethodNames(::URI(Queue->Uri).Host.c_str());
+         AllowRedirect = ConfigFindB("AllowRedirect", true);
+         PipelineDepth = ConfigFindI("Pipeline-Depth", 10);
+         Debug = DebugEnabled();
      }
+
      /* If the server has explicitly said this is the last connection
         then we pre-emptively shut down the pipeline and tear down
        the connection. This will speed up HTTP/1.0 servers a tad
@@ -476,8 +599,7 @@ int ServerMethod::Loop()
      if (Server->Open() == false)
      {
        Fail(true);
-         delete Server;
-         Server = 0;
+         Server = nullptr;
        continue;
      }
@@ -485,7 +607,7 @@ int ServerMethod::Loop()
      Fetch(0);
      
      // Fetch the next URL header data from the server.
-      switch (Server->RunHeaders(File))
+      switch (Server->RunHeaders(File, Queue->Uri))
      {
        case ServerState::RUN_HEADERS_OK:
        break;
@@ -495,6 +617,7 @@ int ServerMethod::Loop()
        {
           _error->Error(_("Bad header data"));
           Fail(true);
+            Server->Close();
           RotateDNS();
           continue;
        }
@@ -507,6 +630,7 @@ int ServerMethod::Loop()
           _error->Discard();
           Server->Close();
           Server->Pipeline = false;
+            Server->PipelineAllowed = false;
           
           if (FailCounter >= 2)
           {
@@ -531,6 +655,13 @@ int ServerMethod::Loop()
 
      // Run the data
      bool Result = true;
+
+      // ensure we don't fetch too much
+      // we could do "Server->MaximumSize = Queue->MaximumSize" here
+      // but that would break the clever pipeline messup detection
+      // so instead we use the size of the biggest item in the queue
+      Server->MaximumSize = FindMaximumObjectSizeInQueue();
+
      if (Server->HaveContent)
        Result = Server->RunData(File);
@@ -567,11 +698,9 @@ int ServerMethod::Loop()
             // yes, he did! Disable pipelining and rewrite queue
             if (Server->Pipeline == true)
             {
-                  // FIXME: fake a warning message as we have no proper way of communicating here
-                  std::string out;
-                  strprintf(out, _("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::PipelineDepth");
-                  std::cerr << "W: " << out << std::endl;
+                  Warning(_("Automatically disabled %s due to incorrect response from server/proxy. (man 5 apt.conf)"), "Acquire::http::Pipeline-Depth");
                Server->Pipeline = false;
+                  Server->PipelineAllowed = false;
                // we keep the PipelineDepth value so that the rest of the queue can be fixed up as well
             }
             Rename(Res.Filename, I->DestFile);
@@ -604,7 +733,10 @@ int ServerMethod::Loop()
                QueueBack = Queue;
             }
             else
+               {
+                  Server->Close();
                Fail(true);
+               }
          }
          break;
        }
@@ -638,55 +770,20 @@ int ServerMethod::Loop()
        // We need to flush the data, the header is like a 404 w/ error text
        case ERROR_WITH_CONTENT_PAGE:
        {
+            Server->RunDataToDevNull();
          Fail();
-
-            // Send to content to dev/null
-            File = new FileFd("/dev/null",FileFd::WriteExists);
-            Server->RunData(File);
-            delete File;
-            File = 0;
          break;
        }
-
-         // Try again with a new URL
-         case TRY_AGAIN_OR_REDIRECT:
-         {
-            // Clear rest of response if there is content
-            if (Server->HaveContent)
-            {
-               File = new FileFd("/dev/null",FileFd::WriteExists);
-               Server->RunData(File);
-               delete File;
-               File = 0;
-            }
-
-            /* Detect redirect loops.  No more redirects are allowed
-               after the same URI is seen twice in a queue item. */
-            StringVector &R = Redirected[Queue->DestFile];
-            bool StopRedirects = false;
-            if (R.empty() == true)
-               R.push_back(Queue->Uri);
-            else if (R[0] == "STOP" || R.size() > 10)
-               StopRedirects = true;
-            else
-            {
-               for (StringVectorIterator I = R.begin(); I != R.end(); ++I)
-                  if (Queue->Uri == *I)
-                  {
-                     R[0] = "STOP";
-                     break;
-                  }
-
-               R.push_back(Queue->Uri);
-            }
-
-            if (StopRedirects == false)
-               Redirect(NextURI);
-            else
-               Fail();
-
-            break;
-         }
+
+         // Try again with a new URL
+         case TRY_AGAIN_OR_REDIRECT:
+         {
+            // Clear rest of response if there is content
+            if (Server->HaveContent)
+               Server->RunDataToDevNull();
+            Redirect(NextURI);
+            break;
+         }
 
        default:
        Fail(_("Internal error"));
@@ -699,3 +796,43 @@ int ServerMethod::Loop()
   return 0;
 }
                                                                        /*}}}*/
+unsigned long long ServerMethod::FindMaximumObjectSizeInQueue() const  /*{{{*/
+{
+   unsigned long long MaxSizeInQueue = 0;
+   for (FetchItem *I = Queue; I != 0 && I != QueueBack; I = I->Next)
+      MaxSizeInQueue = std::max(MaxSizeInQueue, I->MaximumSize);
+   return MaxSizeInQueue;
+}
+                                                                       /*}}}*/
+ServerMethod::ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
+   aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10),
+   AllowRedirect(false), Debug(false)
+{
+}
+                                                                       /*}}}*/
+bool ServerMethod::Configuration(std::string Message)                  /*{{{*/
+{
+   if (aptMethod::Configuration(Message) == false)
+      return false;
+
+   _config->CndSet("Acquire::tor::Proxy",
+         "socks5h://apt-transport-tor@localhost:9050");
+   return true;
+}
+                                                                       /*}}}*/
+bool ServerMethod::AddProxyAuth(URI &Proxy, URI const &Server) const   /*{{{*/
+{
+   if (std::find(methodNames.begin(), methodNames.end(), "tor") != methodNames.end() &&
+         Proxy.User == "apt-transport-tor" && Proxy.Password.empty())
+   {
+      std::string pass = Server.Host;
+      pass.erase(std::remove_if(pass.begin(), pass.end(), [](char const c) { return std::isalnum(c) == 0; }), pass.end());
+      if (pass.length() > 255)
+         Proxy.Password = pass.substr(0, 255);
+      else
+         Proxy.Password = std::move(pass);
+   }
+   // FIXME: should we support auth.conf for proxies?
+   return true;
+}
+                                                                       /*}}}*/