X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/0f3150e7040f45565b459a1390606bc2f714f6a8..30060442025824c491f58887ca7369f3c572fa57:/methods/http.cc?ds=inline

diff --git a/methods/http.cc b/methods/http.cc
index ad90c9891..0378d7c60 100644
--- a/methods/http.cc
+++ b/methods/http.cc
@@ -28,7 +28,6 @@
 #include
 #include
-#include
 #include
 #include
 #include
@@ -64,12 +63,13 @@ const unsigned int CircleBuf::BW_HZ=10;
 // CircleBuf::CircleBuf - Circular input buffer	/*{{{*/
 // ---------------------------------------------------------------------
 /* */
-CircleBuf::CircleBuf(unsigned long long Size) : Size(Size), Hash(0)
+CircleBuf::CircleBuf(HttpMethod const * const Owner, unsigned long long Size)
+   : Size(Size), Hash(NULL), TotalWriten(0)
 {
    Buf = new unsigned char[Size];
    Reset();
 
-   CircleBuf::BwReadLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024;
+   CircleBuf::BwReadLimit = Owner->ConfigFindI("Dl-Limit", 0) * 1024;
 }
 	/*}}}*/
 // CircleBuf::Reset - Reset to the default state	/*{{{*/
@@ -80,12 +80,13 @@ void CircleBuf::Reset()
 {
    InP = 0;
    OutP = 0;
    StrPos = 0;
+   TotalWriten = 0;
    MaxGet = (unsigned long long)-1;
    OutQueue = string();
-   if (Hash != 0)
+   if (Hash != NULL)
    {
      delete Hash;
-     Hash = new Hashes;
+     Hash = NULL;
    }
 }
 	/*}}}*/
@@ -217,8 +218,10 @@ bool CircleBuf::Write(int Fd)
 
         return false;
      }
+
+     TotalWriten += Res;
 
-     if (Hash != 0)
+     if (Hash != NULL)
        Hash->Add(Buf + (OutP%Size),Res);
 
      OutP += Res;
@@ -284,9 +287,9 @@ CircleBuf::~CircleBuf()
 }
 
 // HttpServerState::HttpServerState - Constructor	/*{{{*/
-HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(64*1024), Out(4*1024)
+HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(Owner, 64*1024), Out(Owner, 4*1024)
 {
-   TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut);
+   TimeOut = Owner->ConfigFindI("Timeout", TimeOut);
    Reset();
 }
 	/*}}}*/
@@ -306,7 +309,7 @@ bool HttpServerState::Open()
 
    // Determine the proxy setting
   AutoDetectProxy(ServerName);
-   string SpecificProxy = _config->Find("Acquire::http::Proxy::" + ServerName.Host);
+   string SpecificProxy = Owner->ConfigFind("Proxy::" + ServerName.Host, "");
   if (!SpecificProxy.empty())
   {
      if (SpecificProxy == "DIRECT")
@@ -316,7 +319,7 @@ bool HttpServerState::Open()
   }
   else
   {
-     string DefProxy = _config->Find("Acquire::http::Proxy");
+     string DefProxy = Owner->ConfigFind("Proxy", "");
      if (!DefProxy.empty())
      {
        Proxy = DefProxy;
@@ -344,6 +347,8 @@ bool HttpServerState::Open()
      Port = ServerName.Port;
      Host = ServerName.Host;
    }
+   else if (Proxy.Access != "http")
+      return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
    else
    {
      if (Proxy.Port != 0)
@@ -438,12 +443,12 @@ bool HttpServerState::RunData(FileFd * const File)
    {
      /* Closes encoding is used when the server did not specify a size, the
        loss of the connection means we are done */
-     if (Encoding == Closes)
-       In.Limit(-1);
-     else if (JunkSize != 0)
+     if (JunkSize != 0)
       In.Limit(JunkSize);
-     else
+     else if (DownloadSize != 0)
       In.Limit(DownloadSize);
+     else if (Persistent == false)
+       In.Limit(-1);
 
      // Just transfer the whole block.
      do
@@ -460,6 +465,12 @@ bool HttpServerState::RunData(FileFd * const File)
    return Owner->Flush() && !_error->PendingError();
 }
 	/*}}}*/
+bool HttpServerState::RunDataToDevNull()	/*{{{*/
+{
+   FileFd DevNull("/dev/null", FileFd::WriteOnly);
+   return RunData(&DevNull);
+}
+	/*}}}*/
 bool HttpServerState::ReadHeaderLines(std::string &Data)	/*{{{*/
 {
    return In.WriteTillEl(Data);
@@ -480,36 +491,36 @@ APT_PURE bool HttpServerState::IsOpen()	/*{{{*/
    return (ServerFd != -1);
 }
 	/*}}}*/
-bool HttpServerState::InitHashes(FileFd &File)	/*{{{*/
+bool HttpServerState::InitHashes(HashStringList const &ExpectedHashes)	/*{{{*/
 {
    delete In.Hash;
-   In.Hash = new Hashes;
-
-   // Set the expected size and read file for the hashes
-   File.Truncate(StartPos);
-   return In.Hash->AddFD(File, StartPos);
+   In.Hash = new Hashes(ExpectedHashes);
+   return true;
 }
 	/*}}}*/
+
 APT_PURE Hashes * HttpServerState::GetHashes()	/*{{{*/
 {
    return In.Hash;
 }
 	/*}}}*/
 // HttpServerState::Die - The server has closed the connection.	/*{{{*/
-bool HttpServerState::Die(FileFd &File)
+bool HttpServerState::Die(FileFd * const File)
 {
    unsigned int LErrno = errno;
 
    // Dump the buffer to the file
   if (State == ServerState::Data)
   {
+      if (File == nullptr)
+        return true;
      // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
      // can't be set
-      if (File.Name() != "/dev/null")
-        SetNonBlock(File.Fd(),false);
+      if (File->Name() != "/dev/null")
+        SetNonBlock(File->Fd(),false);
      while (In.WriteSpace() == true)
      {
-        if (In.Write(File.Fd()) == false)
+        if (In.Write(File->Fd()) == false)
           return _error->Errno("write",_("Error writing to the file"));
 
        // Done
@@ -520,7 +531,7 @@ bool HttpServerState::Die(FileFd &File)
 
   // See if this is because the server finished the data stream
   if (In.IsLimit() == false && State != HttpServerState::Header &&
-       Encoding != HttpServerState::Closes)
+       Persistent == true)
   {
      Close();
      if (LErrno == 0)
@@ -567,7 +578,7 @@ bool HttpServerState::Flush(FileFd * const File)
           return true;
      }
 
-     if (In.IsLimit() == true || Encoding == ServerState::Closes)
+     if (In.IsLimit() == true || Persistent == false)
       return true;
    }
    return false;
@@ -605,7 +616,7 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
      FD_SET(FileFD,&wfds);
 
   // Add stdin
-   if (_config->FindB("Acquire::http::DependOnSTDIN", true) == true)
+   if (Owner->ConfigFindB("DependOnSTDIN", true) == true)
      FD_SET(STDIN_FILENO,&rfds);
 
   // Figure out the max fd
@@ -628,7 +639,7 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
   if (Res == 0)
   {
      _error->Error(_("Connection timed out"));
-      return Die(*File);
+      return Die(File);
   }
 
   // Handle server IO
@@ -636,14 +647,14 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
   {
      errno = 0;
      if (In.Read(ServerFd) == false)
-       return Die(*File);
+       return Die(File);
   }
 
   if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds))
   {
      errno = 0;
      if (Out.Write(ServerFd) == false)
-       return Die(*File);
+       return Die(File);
   }
 
   // Send data to the file
@@ -653,6 +664,13 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
        return _error->Errno("write",_("Error writing to output file"));
   }
 
+   if (MaximumSize > 0 && File && File->Tell() > MaximumSize)
+   {
+      Owner->SetFailReason("MaximumSizeExceeded");
+      return _error->Error("Writing more data than expected (%llu > %llu)",
+                           File->Tell(), MaximumSize);
+   }
+
   // Handle commands from APT
   if (FD_ISSET(STDIN_FILENO,&rfds))
   {
@@ -670,6 +688,11 @@ bool HttpServerState::Go(bool ToFile, FileFd * const File)
 void HttpMethod::SendReq(FetchItem *Itm)
 {
    URI Uri = Itm->Uri;
+   {
+      auto const plus = Binary.find('+');
+      if (plus != std::string::npos)
+        Uri.Access = Binary.substr(plus + 1);
+   }
 
    // The HTTP server expects a hostname with a trailing :port
    std::stringstream Req;
@@ -687,7 +710,7 @@ void HttpMethod::SendReq(FetchItem *Itm)
    if (Server->Proxy.empty() == true || Server->Proxy.Host.empty())
      requesturi = Uri.Path;
    else
-      requesturi = Itm->Uri;
+      requesturi = Uri;
 
    // The "+" is encoded as a workaround for a amazon S3 bug
    // see LP bugs #1003633 and #1086997.
@@ -699,17 +722,17 @@ void HttpMethod::SendReq(FetchItem *Itm)
      C.f. https://tools.ietf.org/wg/httpbis/trac/ticket/158 */
   Req << "GET " << requesturi << " HTTP/1.1\r\n";
   if (Uri.Port != 0)
-      Req << "Host: " << ProperHost << ":" << Uri.Port << "\r\n";
+      Req << "Host: " << ProperHost << ":" << std::to_string(Uri.Port) << "\r\n";
   else
      Req << "Host: " << ProperHost << "\r\n";
 
   // generate a cache control header (if needed)
-   if (_config->FindB("Acquire::http::No-Cache",false) == true)
+   if (ConfigFindB("No-Cache",false) == true)
      Req << "Cache-Control: no-cache\r\n"
        << "Pragma: no-cache\r\n";
   else if (Itm->IndexFile == true)
-      Req << "Cache-Control: max-age=" << _config->FindI("Acquire::http::Max-Age",0) << "\r\n";
-   else if (_config->FindB("Acquire::http::No-Store",false) == true)
+      Req << "Cache-Control: max-age=" << std::to_string(ConfigFindI("Max-Age", 0)) << "\r\n";
+   else if (ConfigFindB("No-Store", false) == true)
      Req << "Cache-Control: no-store\r\n";
 
   // If we ask for uncompressed files servers might respond with content-
@@ -717,7 +740,7 @@ void HttpMethod::SendReq(FetchItem *Itm)
   // see 657029, 657560 and co, so if we have no extension on the request
   // ask for text only. As a sidenote: If there is nothing to negotate servers
   // seem to be nice and ignore it.
-   if (_config->FindB("Acquire::http::SendAccept", true) == true)
+   if (ConfigFindB("SendAccept", true) == true)
   {
      size_t const filepos = Itm->Uri.find_last_of('/');
      string const file = Itm->Uri.substr(filepos + 1);
@@ -728,10 +751,10 @@ void HttpMethod::SendReq(FetchItem *Itm)
   // Check for a partial file and send if-queries accordingly
   struct stat SBuf;
   if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
-      Req << "Range: bytes=" << SBuf.st_size << "-\r\n"
-        << "If-Range: " << TimeRFC1123(SBuf.st_mtime) << "\r\n";
+      Req << "Range: bytes=" << std::to_string(SBuf.st_size) << "-\r\n"
+        << "If-Range: " << TimeRFC1123(SBuf.st_mtime, false) << "\r\n";
   else if (Itm->LastModified != 0)
-      Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified).c_str() << "\r\n";
+      Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified, false).c_str() << "\r\n";
 
   if (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false)
      Req << "Proxy-Authorization: Basic "
@@ -742,7 +765,7 @@ void HttpMethod::SendReq(FetchItem *Itm)
      Req << "Authorization: Basic "
       << Base64Encode(Uri.User + ":" + Uri.Password) << "\r\n";
 
-   Req << "User-Agent: " << _config->Find("Acquire::http::User-Agent",
+   Req << "User-Agent: " << ConfigFind("User-Agent",
        "Debian APT-HTTP/1.3 (" PACKAGE_VERSION ")") << "\r\n";
 
   Req << "\r\n";
@@ -753,29 +776,54 @@ void HttpMethod::SendReq(FetchItem *Itm)
 
   Server->WriteResponse(Req.str());
 }
 	/*}}}*/
-// HttpMethod::Configuration - Handle a configuration message	/*{{{*/
-// ---------------------------------------------------------------------
-/* We stash the desired pipeline depth */
-bool HttpMethod::Configuration(string Message)
+std::unique_ptr<ServerState> HttpMethod::CreateServerState(URI const &uri)/*{{{*/
 {
-   if (ServerMethod::Configuration(Message) == false)
-      return false;
-
-   AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true);
-   PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth",
-                                  PipelineDepth);
-   Debug = _config->FindB("Debug::Acquire::http",false);
-
-   return true;
+   return std::unique_ptr<ServerState>(new HttpServerState(uri, this));
+}
+	/*}}}*/
+void HttpMethod::RotateDNS()	/*{{{*/
+{
+   ::RotateDNS();
 }
 	/*}}}*/
-ServerState * HttpMethod::CreateServerState(URI uri)	/*{{{*/
+ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res)/*{{{*/
 {
-   return new HttpServerState(uri, this);
+   auto ret = ServerMethod::DealWithHeaders(Res);
+   if (ret != ServerMethod::FILE_IS_OPEN)
+      return ret;
+
+   // Open the file
+   delete File;
+   File = new FileFd(Queue->DestFile,FileFd::WriteAny);
+   if (_error->PendingError() == true)
+      return ERROR_NOT_FROM_SERVER;
+
+   FailFile = Queue->DestFile;
+   FailFile.c_str();   // Make sure we don't do a malloc in the signal handler
+   FailFd = File->Fd();
+   FailTime = Server->Date;
+
+   if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
+   {
+      _error->Errno("read",_("Problem hashing file"));
+      return ERROR_NOT_FROM_SERVER;
+   }
+   if (Server->StartPos > 0)
+      Res.ResumePoint = Server->StartPos;
+
+   SetNonBlock(File->Fd(),true);
+   return FILE_IS_OPEN;
 }
 	/*}}}*/
-void HttpMethod::RotateDNS()	/*{{{*/
+HttpMethod::HttpMethod(std::string &&pProg) : ServerMethod(pProg.c_str(), "1.2", Pipeline | SendConfig)/*{{{*/
 {
-   ::RotateDNS();
+   auto addName = std::inserter(methodNames, methodNames.begin());
+   if (Binary != "http")
+      addName = "http";
+   auto const plus = Binary.find('+');
+   if (plus != std::string::npos)
+      addName = Binary.substr(0, plus);
+   File = 0;
+   Server = 0;
 }
 	/*}}}*/
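
Editorial note (not part of the diff): the hunks touching HttpServerState::Open() preserve the long-standing lookup order for proxies, with a host-specific "Proxy::<host>" setting taking precedence over the generic "Proxy" value and "DIRECT" acting as an explicit request to bypass any proxy, while the new Owner->ConfigFind() calls presumably let renamed or "http+..." method binaries resolve these options under their own configuration scope. The sketch below is a minimal, self-contained illustration of that precedence only; the std::map, ConfigFind and SelectProxy helpers are hypothetical stand-ins and do not model apt's real configuration classes, the binary-name scoping, or proxy auto-detection.

// Build with: g++ -std=c++11 proxy_lookup_sketch.cc
#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for a configuration lookup with a default value;
// apt resolves these through _config / ServerMethod::ConfigFind instead.
static std::string ConfigFind(std::map<std::string, std::string> const &Conf,
                              std::string const &Key, std::string const &Default)
{
   auto const It = Conf.find(Key);
   return It != Conf.end() ? It->second : Default;
}

// Host-specific "Proxy::<host>" wins over the generic "Proxy" entry;
// "DIRECT" means: do not use a proxy for this host at all.
static std::string SelectProxy(std::map<std::string, std::string> const &Conf,
                               std::string const &Host)
{
   std::string Proxy = ConfigFind(Conf, "Proxy::" + Host, "");
   if (Proxy.empty())
      Proxy = ConfigFind(Conf, "Proxy", "");
   if (Proxy == "DIRECT")
      return "";
   return Proxy;
}

int main()
{
   std::map<std::string, std::string> const Conf{
      {"Proxy", "http://proxy.example:3128"},
      {"Proxy::internal.example", "DIRECT"},
   };
   // Generic proxy applies to unknown hosts, DIRECT disables it per host.
   std::cout << SelectProxy(Conf, "deb.debian.org") << "\n";   // http://proxy.example:3128
   std::cout << SelectProxy(Conf, "internal.example") << "\n"; // (empty: no proxy)
   return 0;
}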