X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/f8ebb9374aafc790eb899ebef23f3c8e1e620184..7cafe70555740bd0acbf0b8d2193b95423e7436b:/methods/http.cc diff --git a/methods/http.cc b/methods/http.cc index 904030e0a..8d3c569c1 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -3,7 +3,7 @@ // $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $ /* ###################################################################### - HTTP Acquire Method - This is the HTTP aquire method for APT. + HTTP Acquire Method - This is the HTTP acquire method for APT. It uses HTTP/1.1 and many of the fancy options there-in, such as pipelining, range, if-range and so on. @@ -25,58 +25,52 @@ ##################################################################### */ /*}}}*/ // Include Files /*{{{*/ +#include + #include -#include +#include #include #include #include +#include +#include +#include +#include +#include +#include #include #include -#include #include -#include #include #include -#include +#include #include -#include -#include - - -// Internet stuff -#include +#include #include "config.h" #include "connect.h" -#include "rfc2553emu.h" #include "http.h" + +#include /*}}}*/ using namespace std; -string HttpMethod::FailFile; -int HttpMethod::FailFd = -1; -time_t HttpMethod::FailTime = 0; -unsigned long PipelineDepth = 10; -unsigned long TimeOut = 120; -bool AllowRedirect = false; -bool Debug = false; -URI Proxy; - -unsigned long CircleBuf::BwReadLimit=0; -unsigned long CircleBuf::BwTickReadData=0; +unsigned long long CircleBuf::BwReadLimit=0; +unsigned long long CircleBuf::BwTickReadData=0; struct timeval CircleBuf::BwReadTick={0,0}; const unsigned int CircleBuf::BW_HZ=10; - + // CircleBuf::CircleBuf - Circular input buffer /*{{{*/ // --------------------------------------------------------------------- /* */ -CircleBuf::CircleBuf(unsigned long Size) : Size(Size), Hash(0) +CircleBuf::CircleBuf(HttpMethod const * const Owner, unsigned long long Size) + : Size(Size), Hash(NULL), TotalWriten(0) { Buf = new unsigned char[Size]; Reset(); - CircleBuf::BwReadLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024; + CircleBuf::BwReadLimit = Owner->ConfigFindI("Dl-Limit", 0) * 1024; } /*}}}*/ // CircleBuf::Reset - Reset to the default state /*{{{*/ @@ -87,14 +81,15 @@ void CircleBuf::Reset() InP = 0; OutP = 0; StrPos = 0; - MaxGet = (unsigned int)-1; + TotalWriten = 0; + MaxGet = (unsigned long long)-1; OutQueue = string(); - if (Hash != 0) + if (Hash != NULL) { delete Hash; - Hash = new Hashes; - } -}; + Hash = NULL; + } +} /*}}}*/ // CircleBuf::Read - Read from a FD into the circular buffer /*{{{*/ // --------------------------------------------------------------------- @@ -102,8 +97,6 @@ void CircleBuf::Reset() is non-blocking.. 
*/ bool CircleBuf::Read(int Fd) { - unsigned long BwReadMax; - while (1) { // Woops, buffer is full @@ -111,13 +104,13 @@ bool CircleBuf::Read(int Fd) return true; // what's left to read in this tick - BwReadMax = CircleBuf::BwReadLimit/BW_HZ; + unsigned long long const BwReadMax = CircleBuf::BwReadLimit/BW_HZ; if(CircleBuf::BwReadLimit) { struct timeval now; gettimeofday(&now,0); - unsigned long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 + + unsigned long long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 + now.tv_usec-CircleBuf::BwReadTick.tv_usec; if(d > 1000000/BW_HZ) { CircleBuf::BwReadTick = now; @@ -131,7 +124,7 @@ bool CircleBuf::Read(int Fd) } // Write the buffer segment - int Res; + ssize_t Res; if(CircleBuf::BwReadLimit) { Res = read(Fd,Buf + (InP%Size), BwReadMax > LeftRead() ? LeftRead() : BwReadMax); @@ -159,9 +152,9 @@ bool CircleBuf::Read(int Fd) // CircleBuf::Read - Put the string into the buffer /*{{{*/ // --------------------------------------------------------------------- /* This will hold the string in and fill the buffer with it as it empties */ -bool CircleBuf::Read(string Data) +bool CircleBuf::Read(string const &Data) { - OutQueue += Data; + OutQueue.append(Data); FillOut(); return true; } @@ -180,7 +173,7 @@ void CircleBuf::FillOut() return; // Write the buffer segment - unsigned long Sz = LeftRead(); + unsigned long long Sz = LeftRead(); if (OutQueue.length() - StrPos < Sz) Sz = OutQueue.length() - StrPos; memcpy(Buf + (InP%Size),OutQueue.c_str() + StrPos,Sz); @@ -214,7 +207,7 @@ bool CircleBuf::Write(int Fd) return true; // Write the buffer segment - int Res; + ssize_t Res; Res = write(Fd,Buf + (OutP%Size),LeftWrite()); if (Res == 0) @@ -226,8 +219,10 @@ bool CircleBuf::Write(int Fd) return false; } + + TotalWriten += Res; - if (Hash != 0) + if (Hash != NULL) Hash->Add(Buf + (OutP%Size),Res); OutP += Res; @@ -240,7 +235,7 @@ bool CircleBuf::Write(int Fd) bool CircleBuf::WriteTillEl(string &Data,bool Single) { // We cheat and assume it is unneeded to have more than one buffer load - for (unsigned long I = OutP; I < InP; I++) + for (unsigned long long I = OutP; I < InP; I++) { if (Buf[I%Size] != '\n') continue; @@ -258,7 +253,7 @@ bool CircleBuf::WriteTillEl(string &Data,bool Single) Data = ""; while (OutP < I) { - unsigned long Sz = LeftWrite(); + unsigned long long Sz = LeftWrite(); if (Sz == 0) return false; if (I - OutP < Sz) @@ -286,21 +281,41 @@ void CircleBuf::Stats() clog << "Got " << InP << " in " << Diff << " at " << InP/Diff << endl;*/ } /*}}}*/ +CircleBuf::~CircleBuf() +{ + delete [] Buf; + delete Hash; +} -// ServerState::ServerState - Constructor /*{{{*/ -// --------------------------------------------------------------------- -/* */ -ServerState::ServerState(URI Srv,HttpMethod *Owner) : Owner(Owner), - In(64*1024), Out(4*1024), - ServerName(Srv) +// HttpServerState::HttpServerState - Constructor /*{{{*/ +HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(Owner, 64*1024), Out(Owner, 4*1024) { + TimeOut = Owner->ConfigFindI("Timeout", TimeOut); Reset(); } /*}}}*/ -// ServerState::Open - Open a connection to the server /*{{{*/ +// HttpServerState::Open - Open a connection to the server /*{{{*/ // --------------------------------------------------------------------- /* This opens a connection to the server. 
*/ -bool ServerState::Open() +static bool TalkToSocksProxy(int const ServerFd, std::string const &Proxy, + char const * const type, bool const ReadWrite, uint8_t * const ToFrom, + unsigned int const Size, unsigned int const Timeout) +{ + if (WaitFd(ServerFd, ReadWrite, Timeout) == false) + return _error->Error("Waiting for the SOCKS proxy %s to %s timed out", URI::SiteOnly(Proxy).c_str(), type); + if (ReadWrite == false) + { + if (FileFd::Read(ServerFd, ToFrom, Size) == false) + return _error->Error("Reading the %s from SOCKS proxy %s failed", type, URI::SiteOnly(Proxy).c_str()); + } + else + { + if (FileFd::Write(ServerFd, ToFrom, Size) == false) + return _error->Error("Writing the %s to SOCKS proxy %s failed", type, URI::SiteOnly(Proxy).c_str()); + } + return true; +} +bool HttpServerState::Open() { // Use the already open connection if possible. if (ServerFd != -1) @@ -312,7 +327,8 @@ bool ServerState::Open() Persistent = true; // Determine the proxy setting - string SpecificProxy = _config->Find("Acquire::http::Proxy::" + ServerName.Host); + AutoDetectProxy(ServerName); + string SpecificProxy = Owner->ConfigFind("Proxy::" + ServerName.Host, ""); if (!SpecificProxy.empty()) { if (SpecificProxy == "DIRECT") @@ -322,7 +338,7 @@ bool ServerState::Open() } else { - string DefProxy = _config->Find("Acquire::http::Proxy"); + string DefProxy = Owner->ConfigFind("Proxy", ""); if (!DefProxy.empty()) { Proxy = DefProxy; @@ -340,96 +356,206 @@ bool ServerState::Open() if (CheckDomainList(ServerName.Host,getenv("no_proxy")) == true) Proxy = ""; } - - // Determine what host and port to use based on the proxy settings - int Port = 0; - string Host; - if (Proxy.empty() == true || Proxy.Host.empty() == true) + + if (Proxy.empty() == false) + Owner->AddProxyAuth(Proxy, ServerName); + + if (Proxy.Access == "socks5h") { - if (ServerName.Port != 0) - Port = ServerName.Port; - Host = ServerName.Host; + if (Connect(Proxy.Host, Proxy.Port, "socks", 1080, ServerFd, TimeOut, Owner) == false) + return false; + + /* We implement a very basic SOCKS5 client here complying mostly to RFC1928 expect + * for not offering GSSAPI auth which is a must (we only do no or user/pass auth). 
+ * We also expect the SOCKS5 server to do hostname lookup (aka socks5h) */ + std::string const ProxyInfo = URI::SiteOnly(Proxy); + Owner->Status(_("Connecting to %s (%s)"),"SOCKS5h proxy",ProxyInfo.c_str()); + auto const Timeout = Owner->ConfigFindI("TimeOut", 120); + #define APT_WriteOrFail(TYPE, DATA, LENGTH) if (TalkToSocksProxy(ServerFd, ProxyInfo, TYPE, true, DATA, LENGTH, Timeout) == false) return false + #define APT_ReadOrFail(TYPE, DATA, LENGTH) if (TalkToSocksProxy(ServerFd, ProxyInfo, TYPE, false, DATA, LENGTH, Timeout) == false) return false + if (ServerName.Host.length() > 255) + return _error->Error("Can't use SOCKS5h as hostname %s is too long!", ServerName.Host.c_str()); + if (Proxy.User.length() > 255 || Proxy.Password.length() > 255) + return _error->Error("Can't use user&pass auth as they are too long (%lu and %lu) for the SOCKS5!", Proxy.User.length(), Proxy.Password.length()); + if (Proxy.User.empty()) + { + uint8_t greeting[] = { 0x05, 0x01, 0x00 }; + APT_WriteOrFail("greet-1", greeting, sizeof(greeting)); + } + else + { + uint8_t greeting[] = { 0x05, 0x02, 0x00, 0x02 }; + APT_WriteOrFail("greet-2", greeting, sizeof(greeting)); + } + uint8_t greeting[2]; + APT_ReadOrFail("greet back", greeting, sizeof(greeting)); + if (greeting[0] != 0x05) + return _error->Error("SOCKS proxy %s greets back with wrong version: %d", ProxyInfo.c_str(), greeting[0]); + if (greeting[1] == 0x00) + ; // no auth has no method-dependent sub-negotiations + else if (greeting[1] == 0x02) + { + if (Proxy.User.empty()) + return _error->Error("SOCKS proxy %s negotiated user&pass auth, but we had not offered it!", ProxyInfo.c_str()); + // user&pass auth sub-negotiations are defined by RFC1929 + std::vector auth = {{ 0x01, static_cast(Proxy.User.length()) }}; + std::copy(Proxy.User.begin(), Proxy.User.end(), std::back_inserter(auth)); + auth.push_back(static_cast(Proxy.Password.length())); + std::copy(Proxy.Password.begin(), Proxy.Password.end(), std::back_inserter(auth)); + APT_WriteOrFail("user&pass auth", auth.data(), auth.size()); + uint8_t authstatus[2]; + APT_ReadOrFail("auth report", authstatus, sizeof(authstatus)); + if (authstatus[0] != 0x01) + return _error->Error("SOCKS proxy %s auth status response with wrong version: %d", ProxyInfo.c_str(), authstatus[0]); + if (authstatus[1] != 0x00) + return _error->Error("SOCKS proxy %s reported authorization failure: username or password incorrect? (%d)", ProxyInfo.c_str(), authstatus[1]); + } + else + return _error->Error("SOCKS proxy %s greets back having not found a common authorization method: %d", ProxyInfo.c_str(), greeting[1]); + union { uint16_t * i; uint8_t * b; } portu; + uint16_t port = htons(static_cast(ServerName.Port == 0 ? 
80 : ServerName.Port)); + portu.i = &port; + std::vector request = {{ 0x05, 0x01, 0x00, 0x03, static_cast(ServerName.Host.length()) }}; + std::copy(ServerName.Host.begin(), ServerName.Host.end(), std::back_inserter(request)); + request.push_back(portu.b[0]); + request.push_back(portu.b[1]); + APT_WriteOrFail("request", request.data(), request.size()); + uint8_t response[4]; + APT_ReadOrFail("first part of response", response, sizeof(response)); + if (response[0] != 0x05) + return _error->Error("SOCKS proxy %s response with wrong version: %d", ProxyInfo.c_str(), response[0]); + if (response[2] != 0x00) + return _error->Error("SOCKS proxy %s has unexpected non-zero reserved field value: %d", ProxyInfo.c_str(), response[2]); + std::string bindaddr; + if (response[3] == 0x01) // IPv4 address + { + uint8_t ip4port[6]; + APT_ReadOrFail("IPv4+Port of response", ip4port, sizeof(ip4port)); + portu.b[0] = ip4port[4]; + portu.b[1] = ip4port[5]; + port = ntohs(*portu.i); + strprintf(bindaddr, "%d.%d.%d.%d:%d", ip4port[0], ip4port[1], ip4port[2], ip4port[3], port); + } + else if (response[3] == 0x03) // hostname + { + uint8_t namelength; + APT_ReadOrFail("hostname length of response", &namelength, 1); + uint8_t hostname[namelength + 2]; + APT_ReadOrFail("hostname of response", hostname, sizeof(hostname)); + portu.b[0] = hostname[namelength]; + portu.b[1] = hostname[namelength + 1]; + port = ntohs(*portu.i); + hostname[namelength] = '\0'; + strprintf(bindaddr, "%s:%d", hostname, port); + } + else if (response[3] == 0x04) // IPv6 address + { + uint8_t ip6port[18]; + APT_ReadOrFail("IPv6+port of response", ip6port, sizeof(ip6port)); + portu.b[0] = ip6port[16]; + portu.b[1] = ip6port[17]; + port = ntohs(*portu.i); + strprintf(bindaddr, "[%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X:%02X%02X]:%d", + ip6port[0], ip6port[1], ip6port[2], ip6port[3], ip6port[4], ip6port[5], ip6port[6], ip6port[7], + ip6port[8], ip6port[9], ip6port[10], ip6port[11], ip6port[12], ip6port[13], ip6port[14], ip6port[15], + port); + } + else + return _error->Error("SOCKS proxy %s destination address is of unknown type: %d", + ProxyInfo.c_str(), response[3]); + if (response[1] != 0x00) + { + char const * errstr = nullptr; + auto errcode = response[1]; + // Tor error reporting can be a bit arcane, lets try to detect & fix it up + if (bindaddr == "0.0.0.0:0") + { + auto const lastdot = ServerName.Host.rfind('.'); + if (lastdot == std::string::npos || ServerName.Host.substr(lastdot) != ".onion") + ; + else if (errcode == 0x01) + { + auto const prevdot = ServerName.Host.rfind('.', lastdot - 1); + if (lastdot == 16 && prevdot == std::string::npos) + ; // valid .onion address + else if (prevdot != std::string::npos && (lastdot - prevdot) == 17) + ; // valid .onion address with subdomain(s) + else + { + errstr = "Invalid hostname: onion service name must be 16 characters long"; + Owner->SetFailReason("SOCKS"); + } + } + // in all likelihood the service is either down or the address has + // a typo and so "Host unreachable" is the better understood error + // compared to the technically correct "TLL expired". 
+ else if (errcode == 0x06) + errcode = 0x04; + } + if (errstr == nullptr) + { + switch (errcode) + { + case 0x01: errstr = "general SOCKS server failure"; Owner->SetFailReason("SOCKS"); break; + case 0x02: errstr = "connection not allowed by ruleset"; Owner->SetFailReason("SOCKS"); break; + case 0x03: errstr = "Network unreachable"; Owner->SetFailReason("ConnectionTimedOut"); break; + case 0x04: errstr = "Host unreachable"; Owner->SetFailReason("ConnectionTimedOut"); break; + case 0x05: errstr = "Connection refused"; Owner->SetFailReason("ConnectionRefused"); break; + case 0x06: errstr = "TTL expired"; Owner->SetFailReason("Timeout"); break; + case 0x07: errstr = "Command not supported"; Owner->SetFailReason("SOCKS"); break; + case 0x08: errstr = "Address type not supported"; Owner->SetFailReason("SOCKS"); break; + default: errstr = "Unknown error"; Owner->SetFailReason("SOCKS"); break; + } + } + return _error->Error("SOCKS proxy %s could not connect to %s (%s) due to: %s (%d)", + ProxyInfo.c_str(), ServerName.Host.c_str(), bindaddr.c_str(), errstr, response[1]); + } + else if (Owner->DebugEnabled()) + ioprintf(std::clog, "http: SOCKS proxy %s connection established to %s (%s)\n", + ProxyInfo.c_str(), ServerName.Host.c_str(), bindaddr.c_str()); + + if (WaitFd(ServerFd, true, Timeout) == false) + return _error->Error("SOCKS proxy %s reported connection to %s (%s), but timed out", + ProxyInfo.c_str(), ServerName.Host.c_str(), bindaddr.c_str()); + #undef APT_ReadOrFail + #undef APT_WriteOrFail } else { - if (Proxy.Port != 0) - Port = Proxy.Port; - Host = Proxy.Host; + // Determine what host and port to use based on the proxy settings + int Port = 0; + string Host; + if (Proxy.empty() == true || Proxy.Host.empty() == true) + { + if (ServerName.Port != 0) + Port = ServerName.Port; + Host = ServerName.Host; + } + else if (Proxy.Access != "http") + return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str()); + else + { + if (Proxy.Port != 0) + Port = Proxy.Port; + Host = Proxy.Host; + } + return Connect(Host,Port,"http",80,ServerFd,TimeOut,Owner); } - - // Connect to the remote server - if (Connect(Host,Port,"http",80,ServerFd,TimeOut,Owner) == false) - return false; - return true; } /*}}}*/ -// ServerState::Close - Close a connection to the server /*{{{*/ +// HttpServerState::Close - Close a connection to the server /*{{{*/ // --------------------------------------------------------------------- /* */ -bool ServerState::Close() +bool HttpServerState::Close() { close(ServerFd); ServerFd = -1; return true; } /*}}}*/ -// ServerState::RunHeaders - Get the headers before the data /*{{{*/ -// --------------------------------------------------------------------- -/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header - parse error occurred */ -int ServerState::RunHeaders() -{ - State = Header; - - Owner->Status(_("Waiting for headers")); - - Major = 0; - Minor = 0; - Result = 0; - Size = 0; - StartPos = 0; - Encoding = Closes; - HaveContent = false; - time(&Date); - - do - { - string Data; - if (In.WriteTillEl(Data) == false) - continue; - - if (Debug == true) - clog << Data; - - for (string::const_iterator I = Data.begin(); I < Data.end(); I++) - { - string::const_iterator J = I; - for (; J != Data.end() && *J != '\n' && *J != '\r';J++); - if (HeaderLine(string(I,J)) == false) - return 2; - I = J; - } - - // 100 Continue is a Nop... - if (Result == 100) - continue; - - // Tidy up the connection persistance state. 
- if (Encoding == Closes && HaveContent == true) - Persistent = false; - - return 0; - } - while (Owner->Go(false,this) == true); - - return 1; -} - /*}}}*/ -// ServerState::RunData - Transfer the data from the socket /*{{{*/ -// --------------------------------------------------------------------- -/* */ -bool ServerState::RunData() +// HttpServerState::RunData - Transfer the data from the socket /*{{{*/ +bool HttpServerState::RunData(FileFd * const File) { State = Data; @@ -447,13 +573,13 @@ bool ServerState::RunData() if (In.WriteTillEl(Data,true) == true) break; } - while ((Last = Owner->Go(false,this)) == true); + while ((Last = Go(false, File)) == true); if (Last == false) return false; // See if we are done - unsigned long Len = strtol(Data.c_str(),0,16); + unsigned long long Len = strtoull(Data.c_str(),0,16); if (Len == 0) { In.Limit(-1); @@ -465,7 +591,7 @@ bool ServerState::RunData() if (In.WriteTillEl(Data,true) == true && Data.length() <= 2) break; } - while ((Last = Owner->Go(false,this)) == true); + while ((Last = Go(false, File)) == true); if (Last == false) return false; return !_error->PendingError(); @@ -473,7 +599,7 @@ bool ServerState::RunData() // Transfer the block In.Limit(Len); - while (Owner->Go(true,this) == true) + while (Go(true, File) == true) if (In.IsLimit() == true) break; @@ -489,7 +615,7 @@ bool ServerState::RunData() if (In.WriteTillEl(Data,true) == true) break; } - while ((Last = Owner->Go(false,this)) == true); + while ((Last = Go(false, File)) == true); if (Last == false) return false; } @@ -498,10 +624,12 @@ bool ServerState::RunData() { /* Closes encoding is used when the server did not specify a size, the loss of the connection means we are done */ - if (Encoding == Closes) + if (JunkSize != 0) + In.Limit(JunkSize); + else if (DownloadSize != 0) + In.Limit(DownloadSize); + else if (Persistent == false) In.Limit(-1); - else - In.Limit(Size - StartPos); // Just transfer the whole block. do @@ -512,244 +640,149 @@ bool ServerState::RunData() In.Limit(-1); return !_error->PendingError(); } - while (Owner->Go(true,this) == true); + while (Go(true, File) == true); } - return Owner->Flush(this) && !_error->PendingError(); + return Owner->Flush() && !_error->PendingError(); } /*}}}*/ -// ServerState::HeaderLine - Process a header line /*{{{*/ -// --------------------------------------------------------------------- -/* */ -bool ServerState::HeaderLine(string Line) +bool HttpServerState::RunDataToDevNull() /*{{{*/ { - if (Line.empty() == true) + // no need to clean up if we discard the connection anyhow + if (Persistent == false) return true; + FileFd DevNull("/dev/null", FileFd::WriteOnly); + return RunData(&DevNull); +} + /*}}}*/ +bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/ +{ + return In.WriteTillEl(Data); +} + /*}}}*/ +bool HttpServerState::LoadNextResponse(bool const ToFile, FileFd * const File)/*{{{*/ +{ + return Go(ToFile, File); +} + /*}}}*/ +bool HttpServerState::WriteResponse(const std::string &Data) /*{{{*/ +{ + return Out.Read(Data); +} + /*}}}*/ +APT_PURE bool HttpServerState::IsOpen() /*{{{*/ +{ + return (ServerFd != -1); +} + /*}}}*/ +bool HttpServerState::InitHashes(HashStringList const &ExpectedHashes) /*{{{*/ +{ + delete In.Hash; + In.Hash = new Hashes(ExpectedHashes); + return true; +} + /*}}}*/ +void HttpServerState::Reset(bool const Everything) /*{{{*/ +{ + ServerState::Reset(Everything); + if (Everything) + ServerFd = -1; +} + /*}}}*/ - // The http server might be trying to do something evil. 
- if (Line.length() >= MAXLEN) - return _error->Error(_("Got a single header line over %u chars"),MAXLEN); - - string::size_type Pos = Line.find(' '); - if (Pos == string::npos || Pos+1 > Line.length()) - { - // Blah, some servers use "connection:closes", evil. - Pos = Line.find(':'); - if (Pos == string::npos || Pos + 2 > Line.length()) - return _error->Error(_("Bad header line")); - Pos++; - } +APT_PURE Hashes * HttpServerState::GetHashes() /*{{{*/ +{ + return In.Hash; +} + /*}}}*/ +// HttpServerState::Die - The server has closed the connection. /*{{{*/ +bool HttpServerState::Die(FileFd * const File) +{ + unsigned int LErrno = errno; - // Parse off any trailing spaces between the : and the next word. - string::size_type Pos2 = Pos; - while (Pos2 < Line.length() && isspace(Line[Pos2]) != 0) - Pos2++; - - string Tag = string(Line,0,Pos); - string Val = string(Line,Pos2); - - if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0) + // Dump the buffer to the file + if (State == ServerState::Data) { - // Evil servers return no version - if (Line[4] == '/') - { - if (sscanf(Line.c_str(),"HTTP/%u.%u %u%[^\n]",&Major,&Minor, - &Result,Code) != 4) - return _error->Error(_("The HTTP server sent an invalid reply header")); - } - else + if (File == nullptr) + return true; + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File->Name() != "/dev/null") + SetNonBlock(File->Fd(),false); + while (In.WriteSpace() == true) { - Major = 0; - Minor = 9; - if (sscanf(Line.c_str(),"HTTP %u%[^\n]",&Result,Code) != 2) - return _error->Error(_("The HTTP server sent an invalid reply header")); - } + if (In.Write(File->Fd()) == false) + return _error->Errno("write",_("Error writing to the file")); - /* Check the HTTP response header to get the default persistance - state. */ - if (Major < 1) - Persistent = false; - else - { - if (Major == 1 && Minor <= 0) - Persistent = false; - else - Persistent = true; + // Done + if (In.IsLimit() == true) + return true; } - - return true; - } - - if (stringcasecmp(Tag,"Content-Length:") == 0) - { - if (Encoding == Closes) - Encoding = Stream; - HaveContent = true; - - // The length is already set from the Content-Range header - if (StartPos != 0) - return true; - - if (sscanf(Val.c_str(),"%lu",&Size) != 1) - return _error->Error(_("The HTTP server sent an invalid Content-Length header")); - return true; } - if (stringcasecmp(Tag,"Content-Type:") == 0) - { - HaveContent = true; - return true; - } - - if (stringcasecmp(Tag,"Content-Range:") == 0) + // See if this is because the server finished the data stream + if (In.IsLimit() == false && State != HttpServerState::Header && + Persistent == true) { - HaveContent = true; - - if (sscanf(Val.c_str(),"bytes %lu-%*u/%lu",&StartPos,&Size) != 2) - return _error->Error(_("The HTTP server sent an invalid Content-Range header")); - if ((unsigned)StartPos > Size) - return _error->Error(_("This HTTP server has broken range support")); - return true; + Close(); + if (LErrno == 0) + return _error->Error(_("Error reading from server. 
Remote end closed connection")); + errno = LErrno; + return _error->Errno("read",_("Error reading from server")); } - - if (stringcasecmp(Tag,"Transfer-Encoding:") == 0) + else { - HaveContent = true; - if (stringcasecmp(Val,"chunked") == 0) - Encoding = Chunked; - return true; - } + In.Limit(-1); - if (stringcasecmp(Tag,"Connection:") == 0) - { - if (stringcasecmp(Val,"close") == 0) - Persistent = false; - if (stringcasecmp(Val,"keep-alive") == 0) - Persistent = true; - return true; - } - - if (stringcasecmp(Tag,"Last-Modified:") == 0) - { - if (StrToTime(Val,Date) == false) - return _error->Error(_("Unknown date format")); - return true; - } + // Nothing left in the buffer + if (In.WriteSpace() == false) + return false; - if (stringcasecmp(Tag,"Location:") == 0) - { - Location = Val; + // We may have got multiple responses back in one packet.. + Close(); return true; } - return true; + return false; } /*}}}*/ - -// HttpMethod::SendReq - Send the HTTP request /*{{{*/ +// HttpServerState::Flush - Dump the buffer into the file /*{{{*/ // --------------------------------------------------------------------- -/* This places the http request in the outbound buffer */ -void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) +/* This takes the current input buffer from the Server FD and writes it + into the file */ +bool HttpServerState::Flush(FileFd * const File) { - URI Uri = Itm->Uri; - - // The HTTP server expects a hostname with a trailing :port - char Buf[1000]; - string ProperHost = Uri.Host; - if (Uri.Port != 0) + if (File != NULL) { - sprintf(Buf,":%u",Uri.Port); - ProperHost += Buf; - } + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File->Name() != "/dev/null") + SetNonBlock(File->Fd(),false); + if (In.WriteSpace() == false) + return true; - // Just in case. - if (Itm->Uri.length() >= sizeof(Buf)) - abort(); - - /* Build the request. We include a keep-alive header only for non-proxy - requests. This is to tweak old http/1.0 servers that do support keep-alive - but not HTTP/1.1 automatic keep-alive. Doing this with a proxy server - will glitch HTTP/1.0 proxies because they do not filter it out and - pass it on, HTTP/1.1 says the connection should default to keep alive - and we expect the proxy to do this */ - if (Proxy.empty() == true || Proxy.Host.empty()) - sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n", - QuoteString(Uri.Path,"~").c_str(),ProperHost.c_str()); - else - { - /* Generate a cache control header if necessary. We place a max - cache age on index files, optionally set a no-cache directive - and a no-store directive for archives. 
*/ - sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\n", - Itm->Uri.c_str(),ProperHost.c_str()); - } - // generate a cache control header (if needed) - if (_config->FindB("Acquire::http::No-Cache",false) == true) - { - strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n"); - } - else - { - if (Itm->IndexFile == true) + while (In.WriteSpace() == true) { - sprintf(Buf+strlen(Buf),"Cache-Control: max-age=%u\r\n", - _config->FindI("Acquire::http::Max-Age",0)); - } - else - { - if (_config->FindB("Acquire::http::No-Store",false) == true) - strcat(Buf,"Cache-Control: no-store\r\n"); - } - } - - - string Req = Buf; - - // Check for a partial file - struct stat SBuf; - if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) - { - // In this case we send an if-range query with a range header - sprintf(Buf,"Range: bytes=%li-\r\nIf-Range: %s\r\n",(long)SBuf.st_size - 1, - TimeRFC1123(SBuf.st_mtime).c_str()); - Req += Buf; - } - else - { - if (Itm->LastModified != 0) - { - sprintf(Buf,"If-Modified-Since: %s\r\n",TimeRFC1123(Itm->LastModified).c_str()); - Req += Buf; + if (In.Write(File->Fd()) == false) + return _error->Errno("write",_("Error writing to file")); + if (In.IsLimit() == true) + return true; } - } - - if (Proxy.User.empty() == false || Proxy.Password.empty() == false) - Req += string("Proxy-Authorization: Basic ") + - Base64Encode(Proxy.User + ":" + Proxy.Password) + "\r\n"; - maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc")); - if (Uri.User.empty() == false || Uri.Password.empty() == false) - { - Req += string("Authorization: Basic ") + - Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n"; + if (In.IsLimit() == true || Persistent == false) + return true; } - Req += "User-Agent: " + _config->Find("Acquire::http::User-Agent", - "Ubuntu APT-HTTP/1.3 ("VERSION")") + "\r\n\r\n"; - - if (Debug == true) - cerr << Req << endl; - - Out.Read(Req); + return false; } /*}}}*/ -// HttpMethod::Go - Run a single loop /*{{{*/ +// HttpServerState::Go - Run a single loop /*{{{*/ // --------------------------------------------------------------------- /* This runs the select loop over the server FDs, Output file FDs and stdin. */ -bool HttpMethod::Go(bool ToFile,ServerState *Srv) +bool HttpServerState::Go(bool ToFile, FileFd * const File) { // Server has closed the connection - if (Srv->ServerFd == -1 && (Srv->In.WriteSpace() == false || + if (ServerFd == -1 && (In.WriteSpace() == false || ToFile == false)) return false; @@ -759,27 +792,28 @@ bool HttpMethod::Go(bool ToFile,ServerState *Srv) /* Add the server. 
We only send more requests if the connection will be persisting */ - if (Srv->Out.WriteSpace() == true && Srv->ServerFd != -1 - && Srv->Persistent == true) - FD_SET(Srv->ServerFd,&wfds); - if (Srv->In.ReadSpace() == true && Srv->ServerFd != -1) - FD_SET(Srv->ServerFd,&rfds); + if (Out.WriteSpace() == true && ServerFd != -1 + && Persistent == true) + FD_SET(ServerFd,&wfds); + if (In.ReadSpace() == true && ServerFd != -1) + FD_SET(ServerFd,&rfds); // Add the file int FileFD = -1; - if (File != 0) + if (File != NULL) FileFD = File->Fd(); - if (Srv->In.WriteSpace() == true && ToFile == true && FileFD != -1) + if (In.WriteSpace() == true && ToFile == true && FileFD != -1) FD_SET(FileFD,&wfds); - + // Add stdin - FD_SET(STDIN_FILENO,&rfds); + if (Owner->ConfigFindB("DependOnSTDIN", true) == true) + FD_SET(STDIN_FILENO,&rfds); // Figure out the max fd int MaxFd = FileFD; - if (MaxFd < Srv->ServerFd) - MaxFd = Srv->ServerFd; + if (MaxFd < ServerFd) + MaxFd = ServerFd; // Select struct timeval tv; @@ -796,582 +830,192 @@ bool HttpMethod::Go(bool ToFile,ServerState *Srv) if (Res == 0) { _error->Error(_("Connection timed out")); - return ServerDie(Srv); + return Die(File); } // Handle server IO - if (Srv->ServerFd != -1 && FD_ISSET(Srv->ServerFd,&rfds)) + if (ServerFd != -1 && FD_ISSET(ServerFd,&rfds)) { errno = 0; - if (Srv->In.Read(Srv->ServerFd) == false) - return ServerDie(Srv); + if (In.Read(ServerFd) == false) + return Die(File); } - if (Srv->ServerFd != -1 && FD_ISSET(Srv->ServerFd,&wfds)) + if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds)) { errno = 0; - if (Srv->Out.Write(Srv->ServerFd) == false) - return ServerDie(Srv); + if (Out.Write(ServerFd) == false) + return Die(File); } // Send data to the file if (FileFD != -1 && FD_ISSET(FileFD,&wfds)) { - if (Srv->In.Write(FileFD) == false) + if (In.Write(FileFD) == false) return _error->Errno("write",_("Error writing to output file")); } + if (MaximumSize > 0 && File && File->Tell() > MaximumSize) + { + Owner->SetFailReason("MaximumSizeExceeded"); + return _error->Error("Writing more data than expected (%llu > %llu)", + File->Tell(), MaximumSize); + } + // Handle commands from APT if (FD_ISSET(STDIN_FILENO,&rfds)) { - if (Run(true) != -1) + if (Owner->Run(true) != -1) exit(100); } return true; } /*}}}*/ -// HttpMethod::Flush - Dump the buffer into the file /*{{{*/ -// --------------------------------------------------------------------- -/* This takes the current input buffer from the Server FD and writes it - into the file */ -bool HttpMethod::Flush(ServerState *Srv) -{ - if (File != 0) - { - // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking - // can't be set - if (File->Name() != "/dev/null") - SetNonBlock(File->Fd(),false); - if (Srv->In.WriteSpace() == false) - return true; - - while (Srv->In.WriteSpace() == true) - { - if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write",_("Error writing to file")); - if (Srv->In.IsLimit() == true) - return true; - } - if (Srv->In.IsLimit() == true || Srv->Encoding == ServerState::Closes) - return true; - } - return false; -} - /*}}}*/ -// HttpMethod::ServerDie - The server has closed the connection. 
/*{{{*/ +// HttpMethod::SendReq - Send the HTTP request /*{{{*/ // --------------------------------------------------------------------- -/* */ -bool HttpMethod::ServerDie(ServerState *Srv) +/* This places the http request in the outbound buffer */ +void HttpMethod::SendReq(FetchItem *Itm) { - unsigned int LErrno = errno; - - // Dump the buffer to the file - if (Srv->State == ServerState::Data) - { - // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking - // can't be set - if (File->Name() != "/dev/null") - SetNonBlock(File->Fd(),false); - while (Srv->In.WriteSpace() == true) - { - if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write",_("Error writing to the file")); - - // Done - if (Srv->In.IsLimit() == true) - return true; - } - } - - // See if this is because the server finished the data stream - if (Srv->In.IsLimit() == false && Srv->State != ServerState::Header && - Srv->Encoding != ServerState::Closes) + URI Uri = Itm->Uri; { - Srv->Close(); - if (LErrno == 0) - return _error->Error(_("Error reading from server. Remote end closed connection")); - errno = LErrno; - return _error->Errno("read",_("Error reading from server")); + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + Uri.Access = Binary.substr(plus + 1); } + + // The HTTP server expects a hostname with a trailing :port + std::stringstream Req; + string ProperHost; + + if (Uri.Host.find(':') != string::npos) + ProperHost = '[' + Uri.Host + ']'; else - { - Srv->In.Limit(-1); + ProperHost = Uri.Host; + + /* RFC 2616 §5.1.2 requires absolute URIs for requests to proxies, + but while its a must for all servers to accept absolute URIs, + it is assumed clients will sent an absolute path for non-proxies */ + std::string requesturi; + if (Server->Proxy.Access != "http" || Server->Proxy.empty() == true || Server->Proxy.Host.empty()) + requesturi = Uri.Path; + else + requesturi = Uri; - // Nothing left in the buffer - if (Srv->In.WriteSpace() == false) - return false; - - // We may have got multiple responses back in one packet.. - Srv->Close(); - return true; - } - - return false; -} - /*}}}*/ -// HttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/ -// --------------------------------------------------------------------- -/* We look at the header data we got back from the server and decide what - to do. Returns - 0 - File is open, - 1 - IMS hit - 3 - Unrecoverable error - 4 - Error with error content page - 5 - Unrecoverable non-server error (close the connection) - 6 - Try again with a new or changed URI - */ -int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) -{ - // Not Modified - if (Srv->Result == 304) - { - unlink(Queue->DestFile.c_str()); - Res.IMSHit = true; - Res.LastModified = Queue->LastModified; - return 1; - } - - /* Redirect - * - * Note that it is only OK for us to treat all redirection the same - * because we *always* use GET, not other HTTP methods. There are - * three redirection codes for which it is not appropriate that we - * redirect. Pass on those codes so the error handling kicks in. - */ - if (AllowRedirect - && (Srv->Result > 300 && Srv->Result < 400) - && (Srv->Result != 300 // Multiple Choices - && Srv->Result != 304 // Not Modified - && Srv->Result != 306)) // (Not part of HTTP/1.1, reserved) - { - if (!Srv->Location.empty()) - { - NextURI = Srv->Location; - return 6; - } - /* else pass through for error message */ - } - - /* We have a reply we dont handle. 
This should indicate a perm server - failure */ - if (Srv->Result < 200 || Srv->Result >= 300) - { - char err[255]; - snprintf(err,sizeof(err)-1,"HttpError%i",Srv->Result); - SetFailReason(err); - _error->Error("%u %s",Srv->Result,Srv->Code); - if (Srv->HaveContent == true) - return 4; - return 3; - } + // The "+" is encoded as a workaround for a amazon S3 bug + // see LP bugs #1003633 and #1086997. + requesturi = QuoteString(requesturi, "+~ "); - // This is some sort of 2xx 'data follows' reply - Res.LastModified = Srv->Date; - Res.Size = Srv->Size; - - // Open the file - delete File; - File = new FileFd(Queue->DestFile,FileFd::WriteAny); - if (_error->PendingError() == true) - return 5; + /* Build the request. No keep-alive is included as it is the default + in 1.1, can cause problems with proxies, and we are an HTTP/1.1 + client anyway. + C.f. https://tools.ietf.org/wg/httpbis/trac/ticket/158 */ + Req << "GET " << requesturi << " HTTP/1.1\r\n"; + if (Uri.Port != 0) + Req << "Host: " << ProperHost << ":" << std::to_string(Uri.Port) << "\r\n"; + else + Req << "Host: " << ProperHost << "\r\n"; - FailFile = Queue->DestFile; - FailFile.c_str(); // Make sure we dont do a malloc in the signal handler - FailFd = File->Fd(); - FailTime = Srv->Date; - - // Set the expected size - if (Srv->StartPos >= 0) - { - Res.ResumePoint = Srv->StartPos; - if (ftruncate(File->Fd(),Srv->StartPos) < 0) - _error->Errno("ftruncate", _("Failed to truncate file")); - } - - // Set the start point - lseek(File->Fd(),0,SEEK_END); + // generate a cache control header (if needed) + if (ConfigFindB("No-Cache",false) == true) + Req << "Cache-Control: no-cache\r\n" + << "Pragma: no-cache\r\n"; + else if (Itm->IndexFile == true) + Req << "Cache-Control: max-age=" << std::to_string(ConfigFindI("Max-Age", 0)) << "\r\n"; + else if (ConfigFindB("No-Store", false) == true) + Req << "Cache-Control: no-store\r\n"; + + // If we ask for uncompressed files servers might respond with content- + // negotiation which lets us end up with compressed files we do not support, + // see 657029, 657560 and co, so if we have no extension on the request + // ask for text only. As a sidenote: If there is nothing to negotate servers + // seem to be nice and ignore it. 
+ if (ConfigFindB("SendAccept", true) == true) + { + size_t const filepos = Itm->Uri.find_last_of('/'); + string const file = Itm->Uri.substr(filepos + 1); + if (flExtension(file) == file) + Req << "Accept: text/*\r\n"; + } + + // Check for a partial file and send if-queries accordingly + struct stat SBuf; + if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) + Req << "Range: bytes=" << std::to_string(SBuf.st_size) << "-\r\n" + << "If-Range: " << TimeRFC1123(SBuf.st_mtime, false) << "\r\n"; + else if (Itm->LastModified != 0) + Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified, false).c_str() << "\r\n"; - delete Srv->In.Hash; - Srv->In.Hash = new Hashes; - - // Fill the Hash if the file is non-empty (resume) - if (Srv->StartPos > 0) - { - lseek(File->Fd(),0,SEEK_SET); - if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false) - { - _error->Errno("read",_("Problem hashing file")); - return 5; - } - lseek(File->Fd(),0,SEEK_END); - } - - SetNonBlock(File->Fd(),true); - return 0; + if (Server->Proxy.Access == "http" && + (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false)) + Req << "Proxy-Authorization: Basic " + << Base64Encode(Server->Proxy.User + ":" + Server->Proxy.Password) << "\r\n"; + + maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc")); + if (Uri.User.empty() == false || Uri.Password.empty() == false) + Req << "Authorization: Basic " + << Base64Encode(Uri.User + ":" + Uri.Password) << "\r\n"; + + Req << "User-Agent: " << ConfigFind("User-Agent", + "Debian APT-HTTP/1.3 (" PACKAGE_VERSION ")") << "\r\n"; + + Req << "\r\n"; + + if (Debug == true) + cerr << Req.str() << endl; + + Server->WriteResponse(Req.str()); } /*}}}*/ -// HttpMethod::SigTerm - Handle a fatal signal /*{{{*/ -// --------------------------------------------------------------------- -/* This closes and timestamps the open file. This is neccessary to get - resume behavoir on user abort */ -void HttpMethod::SigTerm(int) +std::unique_ptr HttpMethod::CreateServerState(URI const &uri)/*{{{*/ { - if (FailFd == -1) - _exit(100); - close(FailFd); - - // Timestamp - struct utimbuf UBuf; - UBuf.actime = FailTime; - UBuf.modtime = FailTime; - utime(FailFile.c_str(),&UBuf); - - _exit(100); + return std::unique_ptr(new HttpServerState(uri, this)); } /*}}}*/ -// HttpMethod::Fetch - Fetch an item /*{{{*/ -// --------------------------------------------------------------------- -/* This adds an item to the pipeline. We keep the pipeline at a fixed - depth. 
*/ -bool HttpMethod::Fetch(FetchItem *) +void HttpMethod::RotateDNS() /*{{{*/ { - if (Server == 0) - return true; - - // Queue the requests - int Depth = -1; - for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth; - I = I->Next, Depth++) - { - // If pipelining is disabled, we only queue 1 request - if (Server->Pipeline == false && Depth >= 0) - break; - - // Make sure we stick with the same server - if (Server->Comp(I->Uri) == false) - break; - if (QueueBack == I) - { - QueueBack = I->Next; - SendReq(I,Server->Out); - continue; - } - } - - return true; -}; - /*}}}*/ -// HttpMethod::Configuration - Handle a configuration message /*{{{*/ -// --------------------------------------------------------------------- -/* We stash the desired pipeline depth */ -bool HttpMethod::Configuration(string Message) -{ - if (pkgAcqMethod::Configuration(Message) == false) - return false; - - AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true); - TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut); - PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth", - PipelineDepth); - Debug = _config->FindB("Debug::Acquire::http",false); - AutoDetectProxyCmd = _config->Find("Acquire::http::ProxyAutoDetect"); - - // Get the proxy to use - AutoDetectProxy(); - - return true; + ::RotateDNS(); } /*}}}*/ -// HttpMethod::Loop - Main loop /*{{{*/ -// --------------------------------------------------------------------- -/* */ -int HttpMethod::Loop() +ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res)/*{{{*/ { - typedef vector StringVector; - typedef vector::iterator StringVectorIterator; - map Redirected; - - signal(SIGTERM,SigTerm); - signal(SIGINT,SigTerm); - - Server = 0; - - int FailCounter = 0; - while (1) - { - // We have no commands, wait for some to arrive - if (Queue == 0) - { - if (WaitFd(STDIN_FILENO) == false) - return 0; - } - - /* Run messages, we can accept 0 (no message) if we didn't - do a WaitFd above.. Otherwise the FD is closed. */ - int Result = Run(true); - if (Result != -1 && (Result != 0 || Queue == 0)) - return 100; - - if (Queue == 0) - continue; - - // Connect to the server - if (Server == 0 || Server->Comp(Queue->Uri) == false) - { - delete Server; - Server = new ServerState(Queue->Uri,this); - } - /* If the server has explicitly said this is the last connection - then we pre-emptively shut down the pipeline and tear down - the connection. This will speed up HTTP/1.0 servers a tad - since we don't have to wait for the close sequence to - complete */ - if (Server->Persistent == false) - Server->Close(); - - // Reset the pipeline - if (Server->ServerFd == -1) - QueueBack = Queue; - - // Connnect to the host - if (Server->Open() == false) - { - Fail(true); - delete Server; - Server = 0; - continue; - } - - // Fill the pipeline. - Fetch(0); - - // Fetch the next URL header data from the server. - switch (Server->RunHeaders()) - { - case 0: - break; - - // The header data is bad - case 2: - { - _error->Error(_("Bad header data")); - Fail(true); - RotateDNS(); - continue; - } - - // The server closed a connection during the header get.. - default: - case 1: - { - FailCounter++; - _error->Discard(); - Server->Close(); - Server->Pipeline = false; - - if (FailCounter >= 2) - { - Fail(_("Connection failed"),true); - FailCounter = 0; - } - - RotateDNS(); - continue; - } - }; - - // Decide what to do. 
- FetchResult Res; - Res.Filename = Queue->DestFile; - switch (DealWithHeaders(Res,Server)) - { - // Ok, the file is Open - case 0: - { - URIStart(Res); + auto ret = ServerMethod::DealWithHeaders(Res); + if (ret != ServerMethod::FILE_IS_OPEN) + return ret; - // Run the data - bool Result = Server->RunData(); - - /* If the server is sending back sizeless responses then fill in - the size now */ - if (Res.Size == 0) - Res.Size = File->Size(); - - // Close the file, destroy the FD object and timestamp it - FailFd = -1; - delete File; - File = 0; - - // Timestamp - struct utimbuf UBuf; - time(&UBuf.actime); - UBuf.actime = Server->Date; - UBuf.modtime = Server->Date; - utime(Queue->DestFile.c_str(),&UBuf); - - // Send status to APT - if (Result == true) - { - Res.TakeHashes(*Server->In.Hash); - URIDone(Res); - } - else - { - if (Server->ServerFd == -1) - { - FailCounter++; - _error->Discard(); - Server->Close(); - - if (FailCounter >= 2) - { - Fail(_("Connection failed"),true); - FailCounter = 0; - } - - QueueBack = Queue; - } - else - Fail(true); - } - break; - } - - // IMS hit - case 1: - { - URIDone(Res); - break; - } - - // Hard server error, not found or something - case 3: - { - Fail(); - break; - } - - // Hard internal error, kill the connection and fail - case 5: - { - delete File; - File = 0; - - Fail(); - RotateDNS(); - Server->Close(); - break; - } - - // We need to flush the data, the header is like a 404 w/ error text - case 4: - { - Fail(); - - // Send to content to dev/null - File = new FileFd("/dev/null",FileFd::WriteExists); - Server->RunData(); - delete File; - File = 0; - break; - } - - // Try again with a new URL - case 6: - { - // Clear rest of response if there is content - if (Server->HaveContent) - { - File = new FileFd("/dev/null",FileFd::WriteExists); - Server->RunData(); - delete File; - File = 0; - } + // Open the file + delete File; + File = new FileFd(Queue->DestFile,FileFd::WriteAny); + if (_error->PendingError() == true) + return ERROR_NOT_FROM_SERVER; - /* Detect redirect loops. No more redirects are allowed - after the same URI is seen twice in a queue item. 
*/ - StringVector &R = Redirected[Queue->DestFile]; - bool StopRedirects = false; - if (R.size() == 0) - R.push_back(Queue->Uri); - else if (R[0] == "STOP" || R.size() > 10) - StopRedirects = true; - else - { - for (StringVectorIterator I = R.begin(); I != R.end(); I++) - if (Queue->Uri == *I) - { - R[0] = "STOP"; - break; - } - - R.push_back(Queue->Uri); - } - - if (StopRedirects == false) - Redirect(NextURI); - else - Fail(); - - break; - } + FailFile = Queue->DestFile; + FailFile.c_str(); // Make sure we don't do a malloc in the signal handler + FailFd = File->Fd(); + FailTime = Server->Date; - default: - Fail(_("Internal error")); - break; - } - - FailCounter = 0; + if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false) + { + _error->Errno("read",_("Problem hashing file")); + return ERROR_NOT_FROM_SERVER; } - - return 0; + if (Server->StartPos > 0) + Res.ResumePoint = Server->StartPos; + + SetNonBlock(File->Fd(),true); + return FILE_IS_OPEN; } /*}}}*/ -// HttpMethod::AutoDetectProxy - auto detect proxy /*{{{*/ -// --------------------------------------------------------------------- -/* */ -bool HttpMethod::AutoDetectProxy() +HttpMethod::HttpMethod(std::string &&pProg) : ServerMethod(pProg.c_str(), "1.2", Pipeline | SendConfig)/*{{{*/ { - if (AutoDetectProxyCmd.empty()) - return true; - - if (Debug) - clog << "Using auto proxy detect command: " << AutoDetectProxyCmd << endl; - - int Pipes[2] = {-1,-1}; - if (pipe(Pipes) != 0) - return _error->Errno("pipe", "Failed to create Pipe"); - - pid_t Process = ExecFork(); - if (Process == 0) - { - dup2(Pipes[1],STDOUT_FILENO); - SetCloseExec(STDOUT_FILENO,false); - - const char *Args[2]; - Args[0] = AutoDetectProxyCmd.c_str(); - Args[1] = 0; - execv(Args[0],(char **)Args); - cerr << "Failed to exec method " << Args[0] << endl; - _exit(100); - } - char buf[512]; - int InFd = Pipes[0]; - if (read(InFd, buf, sizeof(buf)) < 0) - return _error->Errno("read", "Failed to read"); - ExecWait(Process, "ProxyAutoDetect"); - - if (Debug) - clog << "auto detect command returned: '" << buf << "'" << endl; - - if (strstr(buf, "http://") == buf) - _config->Set("Acquire::http::proxy", _strstrip(buf)); - - return true; + auto addName = std::inserter(methodNames, methodNames.begin()); + if (Binary != "http") + addName = "http"; + auto const plus = Binary.find('+'); + if (plus != std::string::npos) + addName = Binary.substr(0, plus); + File = 0; + Server = 0; } /*}}}*/ - -
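
For readers following the new SOCKS5 code added by this patch, below is a minimal, self-contained C++ sketch of the byte-level SOCKS5 "CONNECT by hostname" handshake (RFC 1928) that HttpServerState::Open performs before handing the socket to the plain HTTP request writer. This is illustrative only: the helper names (sendAll, recvAll, socks5Connect) are hypothetical and not part of APT; the sketch offers only the "no authentication" method, so it omits the RFC 1929 username/password sub-negotiation and the detailed Tor/.onion error mapping that the patch above implements.

// Illustrative sketch, not APT code: drive a SOCKS5 proxy over an
// already-connected socket fd and ask it to open host:port, letting the
// proxy resolve the hostname itself ("socks5h" behaviour).
#include <unistd.h>
#include <cstdint>
#include <string>
#include <vector>

static bool sendAll(int fd, const uint8_t *p, size_t n)
{
   while (n > 0) {
      ssize_t w = write(fd, p, n);
      if (w <= 0) return false;
      p += w; n -= static_cast<size_t>(w);
   }
   return true;
}
static bool recvAll(int fd, uint8_t *p, size_t n)
{
   while (n > 0) {
      ssize_t r = read(fd, p, n);
      if (r <= 0) return false;
      p += r; n -= static_cast<size_t>(r);
   }
   return true;
}

static bool socks5Connect(int fd, const std::string &host, uint16_t port)
{
   if (host.size() > 255) return false;            // ATYP 0x03 carries a one-byte length
   // 1. Greeting: version 5, one method offered: 0x00 = "no authentication".
   const uint8_t greeting[] = { 0x05, 0x01, 0x00 };
   if (!sendAll(fd, greeting, sizeof(greeting))) return false;
   uint8_t choice[2];                               // [version, chosen method]
   if (!recvAll(fd, choice, sizeof(choice))) return false;
   if (choice[0] != 0x05 || choice[1] != 0x00) return false;
   // 2. CONNECT request: VER, CMD=0x01, RSV, ATYP=0x03 (domain), len, name, port (big endian).
   std::vector<uint8_t> req = { 0x05, 0x01, 0x00, 0x03,
                                static_cast<uint8_t>(host.size()) };
   req.insert(req.end(), host.begin(), host.end());
   req.push_back(static_cast<uint8_t>(port >> 8));
   req.push_back(static_cast<uint8_t>(port & 0xff));
   if (!sendAll(fd, req.data(), req.size())) return false;
   // 3. Reply: VER, REP, RSV, ATYP, BND.ADDR, BND.PORT; REP 0x00 = succeeded.
   uint8_t rep[4];
   if (!recvAll(fd, rep, sizeof(rep))) return false;
   if (rep[0] != 0x05 || rep[1] != 0x00) return false;
   size_t addrlen = 0;
   if (rep[3] == 0x01) addrlen = 4;                 // IPv4 bound address
   else if (rep[3] == 0x04) addrlen = 16;           // IPv6 bound address
   else if (rep[3] == 0x03) {                       // hostname: one length byte first
      uint8_t len;
      if (!recvAll(fd, &len, 1)) return false;
      addrlen = len;
   } else return false;
   std::vector<uint8_t> bound(addrlen + 2);         // bound address + port, unused here
   if (!recvAll(fd, bound.data(), bound.size())) return false;
   return true;                                     // fd now tunnels to host:port
}

Once such a handshake succeeds, the same file descriptor simply carries the normal HTTP exchange, which is why the patch can leave HttpMethod::SendReq untouched by the proxy type: after Open returns, the socket looks like a direct connection to the origin server.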