X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/f8081133c528e8c91b533b4fab6f56ae42d51ecb..dda7233c5d3879f2580543ead0ad7cd76196a160:/methods/http.cc diff --git a/methods/http.cc b/methods/http.cc index 9f4494dc7..df62034e3 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -1,9 +1,9 @@ // -*- mode: cpp; mode: fold -*- // Description /*{{{*/ -// $Id: http.cc,v 1.48 2001/02/23 05:45:27 jgg Exp $ +// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $ /* ###################################################################### - HTTP Aquire Method - This is the HTTP aquire method for APT. + HTTP Acquire Method - This is the HTTP aquire method for APT. It uses HTTP/1.1 and many of the fancy options there-in, such as pipelining, range, if-range and so on. @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include @@ -37,30 +37,45 @@ #include #include #include +#include +#include +#include +#include // Internet stuff #include +#include "config.h" #include "connect.h" #include "rfc2553emu.h" #include "http.h" /*}}}*/ +using namespace std; string HttpMethod::FailFile; int HttpMethod::FailFd = -1; time_t HttpMethod::FailTime = 0; unsigned long PipelineDepth = 10; unsigned long TimeOut = 120; +bool AllowRedirect = false; bool Debug = false; +URI Proxy; +unsigned long CircleBuf::BwReadLimit=0; +unsigned long CircleBuf::BwTickReadData=0; +struct timeval CircleBuf::BwReadTick={0,0}; +const unsigned int CircleBuf::BW_HZ=10; + // CircleBuf::CircleBuf - Circular input buffer /*{{{*/ // --------------------------------------------------------------------- /* */ -CircleBuf::CircleBuf(unsigned long Size) : Size(Size), MD5(0) +CircleBuf::CircleBuf(unsigned long Size) : Size(Size), Hash(0) { Buf = new unsigned char[Size]; Reset(); + + CircleBuf::BwReadLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024; } /*}}}*/ // CircleBuf::Reset - Reset to the default state /*{{{*/ @@ -73,10 +88,10 @@ void CircleBuf::Reset() StrPos = 0; MaxGet = (unsigned int)-1; OutQueue = string(); - if (MD5 != 0) + if (Hash != 0) { - delete MD5; - MD5 = new MD5Summation; + delete Hash; + Hash = new Hashes; } }; /*}}}*/ @@ -86,16 +101,45 @@ void CircleBuf::Reset() is non-blocking.. */ bool CircleBuf::Read(int Fd) { + unsigned long BwReadMax; + while (1) { // Woops, buffer is full if (InP - OutP == Size) return true; - + + // what's left to read in this tick + BwReadMax = CircleBuf::BwReadLimit/BW_HZ; + + if(CircleBuf::BwReadLimit) { + struct timeval now; + gettimeofday(&now,0); + + unsigned long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 + + now.tv_usec-CircleBuf::BwReadTick.tv_usec; + if(d > 1000000/BW_HZ) { + CircleBuf::BwReadTick = now; + CircleBuf::BwTickReadData = 0; + } + + if(CircleBuf::BwTickReadData >= BwReadMax) { + usleep(1000000/BW_HZ); + return true; + } + } + // Write the buffer segment int Res; - Res = read(Fd,Buf + (InP%Size),LeftRead()); + if(CircleBuf::BwReadLimit) { + Res = read(Fd,Buf + (InP%Size), + BwReadMax > LeftRead() ? 
LeftRead() : BwReadMax); + } else + Res = read(Fd,Buf + (InP%Size),LeftRead()); + if(Res > 0 && BwReadLimit > 0) + CircleBuf::BwTickReadData += Res; + if (Res == 0) return false; if (Res < 0) @@ -138,7 +182,7 @@ void CircleBuf::FillOut() unsigned long Sz = LeftRead(); if (OutQueue.length() - StrPos < Sz) Sz = OutQueue.length() - StrPos; - memcpy(Buf + (InP%Size),OutQueue.begin() + StrPos,Sz); + memcpy(Buf + (InP%Size),OutQueue.c_str() + StrPos,Sz); // Advance StrPos += Sz; @@ -182,8 +226,8 @@ bool CircleBuf::Write(int Fd) return false; } - if (MD5 != 0) - MD5->Add(Buf + (OutP%Size),Res); + if (Hash != 0) + Hash->Add(Buf + (OutP%Size),Res); OutP += Res; } @@ -199,25 +243,24 @@ bool CircleBuf::WriteTillEl(string &Data,bool Single) { if (Buf[I%Size] != '\n') continue; - for (I++; I < InP && Buf[I%Size] == '\r'; I++); + ++I; if (Single == false) { - if (Buf[I%Size] != '\n') - continue; - for (I++; I < InP && Buf[I%Size] == '\r'; I++); + if (I < InP && Buf[I%Size] == '\r') + ++I; + if (I >= InP || Buf[I%Size] != '\n') + continue; + ++I; } - if (I > InP) - I = InP; - Data = ""; while (OutP < I) { unsigned long Sz = LeftWrite(); if (Sz == 0) return false; - if (I - OutP < LeftWrite()) + if (I - OutP < Sz) Sz = I - OutP; Data += string((char *)(Buf + (OutP%Size)),Sz); OutP += Sz; @@ -327,13 +370,13 @@ bool ServerState::Close() /*}}}*/ // ServerState::RunHeaders - Get the headers before the data /*{{{*/ // --------------------------------------------------------------------- -/* Returns 0 if things are OK, 1 if an IO error occursed and 2 if a header - parse error occured */ +/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header + parse error occurred */ int ServerState::RunHeaders() { State = Header; - Owner->Status("Waiting for file"); + Owner->Status(_("Waiting for headers")); Major = 0; Minor = 0; @@ -357,7 +400,7 @@ int ServerState::RunHeaders() { string::const_iterator J = I; for (; J != Data.end() && *J != '\n' && *J != '\r';J++); - if (HeaderLine(string(I,J-I)) == false) + if (HeaderLine(string(I,J)) == false) return 2; I = J; } @@ -479,7 +522,7 @@ bool ServerState::HeaderLine(string Line) // The http server might be trying to do something evil. if (Line.length() >= MAXLEN) - return _error->Error("Got a single header line over %u chars",MAXLEN); + return _error->Error(_("Got a single header line over %u chars"),MAXLEN); string::size_type Pos = Line.find(' '); if (Pos == string::npos || Pos+1 > Line.length()) @@ -487,7 +530,7 @@ bool ServerState::HeaderLine(string Line) // Blah, some servers use "connection:closes", evil. 
Pos = Line.find(':'); if (Pos == string::npos || Pos + 2 > Line.length()) - return _error->Error("Bad header line"); + return _error->Error(_("Bad header line")); Pos++; } @@ -499,21 +542,21 @@ bool ServerState::HeaderLine(string Line) string Tag = string(Line,0,Pos); string Val = string(Line,Pos2); - if (stringcasecmp(Tag.begin(),Tag.begin()+4,"HTTP") == 0) + if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0) { // Evil servers return no version if (Line[4] == '/') { - if (sscanf(Line.c_str(),"HTTP/%u.%u %u %[^\n]",&Major,&Minor, + if (sscanf(Line.c_str(),"HTTP/%u.%u %u%[^\n]",&Major,&Minor, &Result,Code) != 4) - return _error->Error("The http server sent an invalid reply header"); + return _error->Error(_("The HTTP server sent an invalid reply header")); } else { Major = 0; Minor = 9; - if (sscanf(Line.c_str(),"HTTP %u %[^\n]",&Result,Code) != 2) - return _error->Error("The http server sent an invalid reply header"); + if (sscanf(Line.c_str(),"HTTP %u%[^\n]",&Result,Code) != 2) + return _error->Error(_("The HTTP server sent an invalid reply header")); } /* Check the HTTP response header to get the default persistance @@ -542,7 +585,7 @@ bool ServerState::HeaderLine(string Line) return true; if (sscanf(Val.c_str(),"%lu",&Size) != 1) - return _error->Error("The http server sent an invalid Content-Length header"); + return _error->Error(_("The HTTP server sent an invalid Content-Length header")); return true; } @@ -557,9 +600,9 @@ bool ServerState::HeaderLine(string Line) HaveContent = true; if (sscanf(Val.c_str(),"bytes %lu-%*u/%lu",&StartPos,&Size) != 2) - return _error->Error("The http server sent an invalid Content-Range header"); + return _error->Error(_("The HTTP server sent an invalid Content-Range header")); if ((unsigned)StartPos > Size) - return _error->Error("This http server has broken range support"); + return _error->Error(_("This HTTP server has broken range support")); return true; } @@ -583,7 +626,13 @@ bool ServerState::HeaderLine(string Line) if (stringcasecmp(Tag,"Last-Modified:") == 0) { if (StrToTime(Val,Date) == false) - return _error->Error("Unknown date format"); + return _error->Error(_("Unknown date format")); + return true; + } + + if (stringcasecmp(Tag,"Location:") == 0) + { + Location = Val; return true; } @@ -617,7 +666,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) will glitch HTTP/1.0 proxies because they do not filter it out and pass it on, HTTP/1.1 says the connection should default to keep alive and we expect the proxy to do this */ - if (Proxy.empty() == true) + if (Proxy.empty() == true || Proxy.Host.empty()) sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n", QuoteString(Uri.Path,"~").c_str(),ProperHost.c_str()); else @@ -627,13 +676,13 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) and a no-store directive for archives. 
*/ sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\n", Itm->Uri.c_str(),ProperHost.c_str()); - if (_config->FindB("Acquire::http::No-Cache",false) == true) - strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n"); - else + // only generate a cache control header if we actually want to + // use a cache + if (_config->FindB("Acquire::http::No-Cache",false) == false) { if (Itm->IndexFile == true) sprintf(Buf+strlen(Buf),"Cache-Control: max-age=%u\r\n", - _config->FindI("Acquire::http::Max-Age",60*60*24)); + _config->FindI("Acquire::http::Max-Age",0)); else { if (_config->FindB("Acquire::http::No-Store",false) == true) @@ -641,6 +690,10 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) } } } + // generate a no-cache header if needed + if (_config->FindB("Acquire::http::No-Cache",false) == true) + strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n"); + string Req = Buf; @@ -670,7 +723,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) Req += string("Authorization: Basic ") + Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n"; - Req += "User-Agent: Debian APT-HTTP/1.2\r\n\r\n"; + Req += "User-Agent: Debian APT-HTTP/1.3 ("VERSION")\r\n\r\n"; if (Debug == true) cerr << Req << endl; @@ -723,11 +776,15 @@ bool HttpMethod::Go(bool ToFile,ServerState *Srv) tv.tv_usec = 0; int Res = 0; if ((Res = select(MaxFd+1,&rfds,&wfds,0,&tv)) < 0) - return _error->Errno("select","Select failed"); + { + if (errno == EINTR) + return true; + return _error->Errno("select",_("Select failed")); + } if (Res == 0) { - _error->Error("Connection timed out"); + _error->Error(_("Connection timed out")); return ServerDie(Srv); } @@ -750,7 +807,7 @@ bool HttpMethod::Go(bool ToFile,ServerState *Srv) if (FileFD != -1 && FD_ISSET(FileFD,&wfds)) { if (Srv->In.Write(FileFD) == false) - return _error->Errno("write","Error writing to output file"); + return _error->Errno("write",_("Error writing to output file")); } // Handle commands from APT @@ -771,14 +828,17 @@ bool HttpMethod::Flush(ServerState *Srv) { if (File != 0) { - SetNonBlock(File->Fd(),false); + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File->Name() != "/dev/null") + SetNonBlock(File->Fd(),false); if (Srv->In.WriteSpace() == false) return true; while (Srv->In.WriteSpace() == true) { if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write","Error writing to file"); + return _error->Errno("write",_("Error writing to file")); if (Srv->In.IsLimit() == true) return true; } @@ -799,11 +859,14 @@ bool HttpMethod::ServerDie(ServerState *Srv) // Dump the buffer to the file if (Srv->State == ServerState::Data) { - SetNonBlock(File->Fd(),false); + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File->Name() != "/dev/null") + SetNonBlock(File->Fd(),false); while (Srv->In.WriteSpace() == true) { if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write","Error writing to the file"); + return _error->Errno("write",_("Error writing to the file")); // Done if (Srv->In.IsLimit() == true) @@ -817,9 +880,9 @@ bool HttpMethod::ServerDie(ServerState *Srv) { Srv->Close(); if (LErrno == 0) - return _error->Error("Error reading from server Remote end closed connection"); + return _error->Error(_("Error reading from server. 
Remote end closed connection")); errno = LErrno; - return _error->Errno("read","Error reading from server"); + return _error->Errno("read",_("Error reading from server")); } else { @@ -845,7 +908,9 @@ bool HttpMethod::ServerDie(ServerState *Srv) 1 - IMS hit 3 - Unrecoverable error 4 - Error with error content page - 5 - Unrecoverable non-server error (close the connection) */ + 5 - Unrecoverable non-server error (close the connection) + 6 - Try again with a new or changed URI + */ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) { // Not Modified @@ -857,6 +922,27 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) return 1; } + /* Redirect + * + * Note that it is only OK for us to treat all redirection the same + * because we *always* use GET, not other HTTP methods. There are + * three redirection codes for which it is not appropriate that we + * redirect. Pass on those codes so the error handling kicks in. + */ + if (AllowRedirect + && (Srv->Result > 300 && Srv->Result < 400) + && (Srv->Result != 300 // Multiple Choices + && Srv->Result != 304 // Not Modified + && Srv->Result != 306)) // (Not part of HTTP/1.1, reserved) + { + if (!Srv->Location.empty()) + { + NextURI = Srv->Location; + return 6; + } + /* else pass through for error message */ + } + /* We have a reply we dont handle. This should indicate a perm server failure */ if (Srv->Result < 200 || Srv->Result >= 300) @@ -886,22 +972,23 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) if (Srv->StartPos >= 0) { Res.ResumePoint = Srv->StartPos; - ftruncate(File->Fd(),Srv->StartPos); + if (ftruncate(File->Fd(),Srv->StartPos) < 0) + _error->Errno("ftruncate", _("Failed to truncate file")); } // Set the start point lseek(File->Fd(),0,SEEK_END); - delete Srv->In.MD5; - Srv->In.MD5 = new MD5Summation; + delete Srv->In.Hash; + Srv->In.Hash = new Hashes; - // Fill the MD5 Hash if the file is non-empty (resume) + // Fill the Hash if the file is non-empty (resume) if (Srv->StartPos > 0) { lseek(File->Fd(),0,SEEK_SET); - if (Srv->In.MD5->AddFD(File->Fd(),Srv->StartPos) == false) + if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false) { - _error->Errno("read","Problem hashing file"); + _error->Errno("read",_("Problem hashing file")); return 5; } lseek(File->Fd(),0,SEEK_END); @@ -941,7 +1028,6 @@ bool HttpMethod::Fetch(FetchItem *) // Queue the requests int Depth = -1; - bool Tail = false; for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth; I = I->Next, Depth++) { @@ -953,8 +1039,6 @@ bool HttpMethod::Fetch(FetchItem *) if (Server->Comp(I->Uri) == false) break; if (QueueBack == I) - Tail = true; - if (Tail == true) { QueueBack = I->Next; SendReq(I,Server->Out); @@ -973,6 +1057,7 @@ bool HttpMethod::Configuration(string Message) if (pkgAcqMethod::Configuration(Message) == false) return false; + AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true); TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut); PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth", PipelineDepth); @@ -986,6 +1071,10 @@ bool HttpMethod::Configuration(string Message) /* */ int HttpMethod::Loop() { + typedef vector StringVector; + typedef vector::iterator StringVectorIterator; + map Redirected; + signal(SIGTERM,SigTerm); signal(SIGINT,SigTerm); @@ -1016,7 +1105,6 @@ int HttpMethod::Loop() delete Server; Server = new ServerState(Queue->Uri,this); } - /* If the server has explicitly said this is the last connection then we pre-emptively shut down the pipeline and tear down 
the connection. This will speed up HTTP/1.0 servers a tad @@ -1050,7 +1138,7 @@ int HttpMethod::Loop() // The header data is bad case 2: { - _error->Error("Bad header Data"); + _error->Error(_("Bad header data")); Fail(true); RotateDNS(); continue; @@ -1067,7 +1155,7 @@ int HttpMethod::Loop() if (FailCounter >= 2) { - Fail("Connection failed",true); + Fail(_("Connection failed"),true); FailCounter = 0; } @@ -1109,12 +1197,28 @@ int HttpMethod::Loop() // Send status to APT if (Result == true) { - Res.MD5Sum = Server->In.MD5->Result(); + Res.TakeHashes(*Server->In.Hash); URIDone(Res); } else - Fail(true); - + { + if (Server->ServerFd == -1) + { + FailCounter++; + _error->Discard(); + Server->Close(); + + if (FailCounter >= 2) + { + Fail(_("Connection failed"),true); + FailCounter = 0; + } + + QueueBack = Queue; + } + else + Fail(true); + } break; } @@ -1135,6 +1239,9 @@ int HttpMethod::Loop() // Hard internal error, kill the connection and fail case 5: { + delete File; + File = 0; + Fail(); RotateDNS(); Server->Close(); @@ -1154,8 +1261,48 @@ int HttpMethod::Loop() break; } + // Try again with a new URL + case 6: + { + // Clear rest of response if there is content + if (Server->HaveContent) + { + File = new FileFd("/dev/null",FileFd::WriteExists); + Server->RunData(); + delete File; + File = 0; + } + + /* Detect redirect loops. No more redirects are allowed + after the same URI is seen twice in a queue item. */ + StringVector &R = Redirected[Queue->DestFile]; + bool StopRedirects = false; + if (R.size() == 0) + R.push_back(Queue->Uri); + else if (R[0] == "STOP" || R.size() > 10) + StopRedirects = true; + else + { + for (StringVectorIterator I = R.begin(); I != R.end(); I++) + if (Queue->Uri == *I) + { + R[0] = "STOP"; + break; + } + + R.push_back(Queue->Uri); + } + + if (StopRedirects == false) + Redirect(NextURI); + else + Fail(); + + break; + } + default: - Fail("Internal error"); + Fail(_("Internal error")); break; } @@ -1168,7 +1315,13 @@ int HttpMethod::Loop() int main() { + setlocale(LC_ALL, ""); + // ignore SIGPIPE, this can happen on write() if the socket + // closes the connection (this is dealt with via ServerDie()) + signal(SIGPIPE, SIG_IGN); + HttpMethod Mth; - return Mth.Loop(); } + +
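
The Acquire::http::Dl-Limit throttle added to CircleBuf::Read above splits each
second into BW_HZ ticks and allows at most BwReadLimit/BW_HZ bytes to be read
per tick, sleeping out a tick once its budget is spent.  A slightly simplified
standalone sketch of that shape follows; BwReadBudget and ThrottledRead are
hypothetical wrappers standing in for the static CircleBuf members used in the
patch.

#include <sys/time.h>
#include <unistd.h>

static unsigned long BwReadLimit = 0;      // bytes per second, 0 = unlimited
static unsigned long BwTickReadData = 0;   // bytes already read in this tick
static struct timeval BwReadTick = {0, 0}; // start of the current tick
static const unsigned int BW_HZ = 10;      // ticks per second

// How many bytes may be read right now without exceeding BwReadLimit
// bytes per second; sleeps out the current tick if its budget is spent.
static size_t BwReadBudget(size_t Wanted)
{
   if (BwReadLimit == 0)
      return Wanted;

   unsigned long BwReadMax = BwReadLimit / BW_HZ;

   struct timeval now;
   gettimeofday(&now, 0);

   // Start a fresh tick once 1/BW_HZ seconds have elapsed.
   unsigned long d = (now.tv_sec - BwReadTick.tv_sec) * 1000000
                     + now.tv_usec - BwReadTick.tv_usec;
   if (d > 1000000 / BW_HZ)
   {
      BwReadTick = now;
      BwTickReadData = 0;
   }

   // This tick's budget is spent: sleep it out and start the next one.
   if (BwTickReadData >= BwReadMax)
   {
      usleep(1000000 / BW_HZ);
      gettimeofday(&BwReadTick, 0);
      BwTickReadData = 0;
   }

   return Wanted < BwReadMax ? Wanted : (size_t)BwReadMax;
}

// Wrap read(2) and charge the bytes against the current tick, the same
// way CircleBuf::Read accounts for them after a successful read.
static ssize_t ThrottledRead(int Fd, unsigned char *Buf, size_t Wanted)
{
   ssize_t Res = read(Fd, Buf, BwReadBudget(Wanted));
   if (Res > 0 && BwReadLimit != 0)
      BwTickReadData += Res;
   return Res;
}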
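
The select() handling in HttpMethod::Go now treats an EINTR result as "try
again" rather than a fatal error, and only a zero return is reported as a
timeout.  A minimal sketch of that wait, assuming a hypothetical WaitReadable
helper that watches a single descriptor (the real loop also polls the output
file and the APT command pipe):

#include <errno.h>
#include <sys/select.h>

// Wait up to TimeOutSec seconds for Fd to become readable.
// Returns 1 to carry on (readable, or interrupted by a signal),
// 0 on timeout and -1 on a real select() failure.
static int WaitReadable(int Fd, long TimeOutSec)
{
   fd_set rfds;
   FD_ZERO(&rfds);
   FD_SET(Fd, &rfds);

   struct timeval tv;
   tv.tv_sec = TimeOutSec;
   tv.tv_usec = 0;

   int Res = select(Fd + 1, &rfds, 0, 0, &tv);
   if (Res < 0 && errno == EINTR)
      return 1;                       // interrupted: caller simply retries
   if (Res < 0)
      return -1;                      // genuine error
   if (Res == 0)
      return 0;                       // timed out
   return 1;                          // ready to read
}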
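
Case 6 of HttpMethod::Loop follows a Location redirect only while the per-item
URI history stays loop-free and holds at most ten entries, storing "STOP" in
slot zero as a poison marker once a URI repeats.  A sketch of that guard,
assuming a hypothetical ShouldFollowRedirect wrapper around the same map:

#include <map>
#include <string>
#include <vector>

static std::map<std::string, std::vector<std::string> > Redirected;

// Decide whether the redirect of Uri (fetched into DestFile) may be
// followed; mirrors the bookkeeping done in case 6 above.
static bool ShouldFollowRedirect(const std::string &DestFile,
                                 const std::string &Uri)
{
   std::vector<std::string> &R = Redirected[DestFile];

   if (R.empty())
   {
      R.push_back(Uri);
      return true;                    // first redirect for this item
   }

   if (R[0] == "STOP" || R.size() > 10)
      return false;                   // already flagged, or too many hops

   // Seeing the same URI twice marks the chain as looping; as in the
   // patch, the flag only takes effect on the next redirect.
   for (std::vector<std::string>::iterator I = R.begin(); I != R.end(); ++I)
      if (Uri == *I)
      {
         R[0] = "STOP";
         break;
      }

   R.push_back(Uri);
   return true;
}

In the patch the caller then either calls Redirect(NextURI) or Fail(), after
draining any pending response body into /dev/null so the connection can be
reused.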