X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/d955fe80937173f6e4c609ae58a916b61137583d..dda7233c5d3879f2580543ead0ad7cd76196a160:/methods/http.cc?ds=inline diff --git a/methods/http.cc b/methods/http.cc index 536a23b67..df62034e3 100644 --- a/methods/http.cc +++ b/methods/http.cc @@ -1,20 +1,18 @@ // -*- mode: cpp; mode: fold -*- // Description /*{{{*/ -// $Id: http.cc,v 1.42 1999/12/10 08:53:43 jgg Exp $ +// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $ /* ###################################################################### - HTTP Aquire Method - This is the HTTP aquire method for APT. + HTTP Acquire Method - This is the HTTP aquire method for APT. It uses HTTP/1.1 and many of the fancy options there-in, such as - pipelining, range, if-range and so on. It accepts on the command line - a list of url destination pairs and writes to stdout the status of the - operation as defined in the APT method spec. - - It is based on a doubly buffered select loop. All the requests are + pipelining, range, if-range and so on. + + It is based on a doubly buffered select loop. A groupe of requests are fed into a single output buffer that is constantly fed out the socket. This provides ideal pipelining as in many cases all of the requests will fit into a single packet. The input socket is buffered - the same way and fed into the fd for the file. + the same way and fed into the fd for the file (may be a pipe in future). This double buffering provides fairly substantial transfer rates, compared to wget the http method is about 4% faster. Most importantly, @@ -30,7 +28,7 @@ #include #include #include -#include +#include #include #include @@ -39,30 +37,45 @@ #include #include #include +#include +#include +#include +#include // Internet stuff #include +#include "config.h" #include "connect.h" #include "rfc2553emu.h" #include "http.h" /*}}}*/ +using namespace std; string HttpMethod::FailFile; int HttpMethod::FailFd = -1; time_t HttpMethod::FailTime = 0; unsigned long PipelineDepth = 10; unsigned long TimeOut = 120; +bool AllowRedirect = false; bool Debug = false; +URI Proxy; +unsigned long CircleBuf::BwReadLimit=0; +unsigned long CircleBuf::BwTickReadData=0; +struct timeval CircleBuf::BwReadTick={0,0}; +const unsigned int CircleBuf::BW_HZ=10; + // CircleBuf::CircleBuf - Circular input buffer /*{{{*/ // --------------------------------------------------------------------- /* */ -CircleBuf::CircleBuf(unsigned long Size) : Size(Size), MD5(0) +CircleBuf::CircleBuf(unsigned long Size) : Size(Size), Hash(0) { Buf = new unsigned char[Size]; Reset(); + + CircleBuf::BwReadLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024; } /*}}}*/ // CircleBuf::Reset - Reset to the default state /*{{{*/ @@ -75,10 +88,10 @@ void CircleBuf::Reset() StrPos = 0; MaxGet = (unsigned int)-1; OutQueue = string(); - if (MD5 != 0) + if (Hash != 0) { - delete MD5; - MD5 = new MD5Summation; + delete Hash; + Hash = new Hashes; } }; /*}}}*/ @@ -88,16 +101,45 @@ void CircleBuf::Reset() is non-blocking.. 
*/ bool CircleBuf::Read(int Fd) { + unsigned long BwReadMax; + while (1) { // Woops, buffer is full if (InP - OutP == Size) return true; - + + // what's left to read in this tick + BwReadMax = CircleBuf::BwReadLimit/BW_HZ; + + if(CircleBuf::BwReadLimit) { + struct timeval now; + gettimeofday(&now,0); + + unsigned long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 + + now.tv_usec-CircleBuf::BwReadTick.tv_usec; + if(d > 1000000/BW_HZ) { + CircleBuf::BwReadTick = now; + CircleBuf::BwTickReadData = 0; + } + + if(CircleBuf::BwTickReadData >= BwReadMax) { + usleep(1000000/BW_HZ); + return true; + } + } + // Write the buffer segment int Res; - Res = read(Fd,Buf + (InP%Size),LeftRead()); + if(CircleBuf::BwReadLimit) { + Res = read(Fd,Buf + (InP%Size), + BwReadMax > LeftRead() ? LeftRead() : BwReadMax); + } else + Res = read(Fd,Buf + (InP%Size),LeftRead()); + if(Res > 0 && BwReadLimit > 0) + CircleBuf::BwTickReadData += Res; + if (Res == 0) return false; if (Res < 0) @@ -140,7 +182,7 @@ void CircleBuf::FillOut() unsigned long Sz = LeftRead(); if (OutQueue.length() - StrPos < Sz) Sz = OutQueue.length() - StrPos; - memcpy(Buf + (InP%Size),OutQueue.begin() + StrPos,Sz); + memcpy(Buf + (InP%Size),OutQueue.c_str() + StrPos,Sz); // Advance StrPos += Sz; @@ -184,8 +226,8 @@ bool CircleBuf::Write(int Fd) return false; } - if (MD5 != 0) - MD5->Add(Buf + (OutP%Size),Res); + if (Hash != 0) + Hash->Add(Buf + (OutP%Size),Res); OutP += Res; } @@ -201,25 +243,24 @@ bool CircleBuf::WriteTillEl(string &Data,bool Single) { if (Buf[I%Size] != '\n') continue; - for (I++; I < InP && Buf[I%Size] == '\r'; I++); + ++I; if (Single == false) { - if (Buf[I%Size] != '\n') - continue; - for (I++; I < InP && Buf[I%Size] == '\r'; I++); + if (I < InP && Buf[I%Size] == '\r') + ++I; + if (I >= InP || Buf[I%Size] != '\n') + continue; + ++I; } - if (I > InP) - I = InP; - Data = ""; while (OutP < I) { unsigned long Sz = LeftWrite(); if (Sz == 0) return false; - if (I - OutP < LeftWrite()) + if (I - OutP < Sz) Sz = I - OutP; Data += string((char *)(Buf + (OutP%Size)),Sz); OutP += Sz; @@ -258,9 +299,6 @@ ServerState::ServerState(URI Srv,HttpMethod *Owner) : Owner(Owner), // ServerState::Open - Open a connection to the server /*{{{*/ // --------------------------------------------------------------------- /* This opens a connection to the server. */ -string LastHost; -int LastPort = 0; -struct addrinfo *LastHostAddr = 0; bool ServerState::Open() { // Use the already open connection if possible. 
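// ---------------------------------------------------------------------
// A free-standing sketch of the Acquire::http::Dl-Limit throttle that the
// patch adds to CircleBuf::Read above; it is not taken from APT, and
// Limit, HZ, TickData and ThrottledRead are illustrative names.  The idea:
// allow Limit/HZ bytes per 1/HZ-second tick and sleep out the remainder of
// a tick once that budget is spent.  (The real code keeps the counters as
// CircleBuf statics and caps each read() at the full per-tick budget; this
// variant caps at whatever budget is still left in the tick.)
#include <sys/time.h>
#include <unistd.h>

static const unsigned long Limit = 100*1024;   // bytes per second (assumed)
static const unsigned int HZ = 10;             // ticks per second
static unsigned long TickData = 0;             // bytes read in the current tick
static struct timeval Tick = {0,0};            // start of the current tick

ssize_t ThrottledRead(int Fd,unsigned char *Buf,unsigned long Want)
{
   if (Limit != 0)
   {
      struct timeval Now;
      gettimeofday(&Now,0);

      unsigned long d = (Now.tv_sec - Tick.tv_sec)*1000000 +
                         Now.tv_usec - Tick.tv_usec;
      if (d > 1000000/HZ)
      {
         // A new tick has begun, reset the budget
         Tick = Now;
         TickData = 0;
      }

      if (TickData >= Limit/HZ)
      {
         // Budget spent, wait out the tick and let the caller retry
         usleep(1000000/HZ);
         return 0;
      }

      if (Want > Limit/HZ - TickData)
         Want = Limit/HZ - TickData;
   }

   ssize_t Res = read(Fd,Buf,Want);
   if (Res > 0)
      TickData += Res;
   return Res;
}
// The 10 Hz granularity keeps the sleeps short, so a throttled transfer
// never stalls the select loop for more than a tenth of a second.
// ---------------------------------------------------------------------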
@@ -270,7 +308,8 @@ bool ServerState::Open() Close(); In.Reset(); Out.Reset(); - + Persistent = true; + // Determine the proxy setting if (getenv("http_proxy") == 0) { @@ -289,31 +328,17 @@ bool ServerState::Open() else Proxy = getenv("http_proxy"); - // Parse no_proxy, a , seperated list of hosts + // Parse no_proxy, a , separated list of domains if (getenv("no_proxy") != 0) { - const char *Start = getenv("no_proxy"); - for (const char *Cur = Start; true ; Cur++) - { - if (*Cur != ',' && *Cur != 0) - continue; - if (stringcasecmp(ServerName.Host.begin(),ServerName.Host.end(), - Start,Cur) == 0) - { - Proxy = ""; - break; - } - - Start = Cur + 1; - if (*Cur == 0) - break; - } - } + if (CheckDomainList(ServerName.Host,getenv("no_proxy")) == true) + Proxy = ""; + } // Determine what host and port to use based on the proxy settings int Port = 0; string Host; - if (Proxy.empty() == true) + if (Proxy.empty() == true || Proxy.Host.empty() == true) { if (ServerName.Port != 0) Port = ServerName.Port; @@ -345,13 +370,13 @@ bool ServerState::Close() /*}}}*/ // ServerState::RunHeaders - Get the headers before the data /*{{{*/ // --------------------------------------------------------------------- -/* Returns 0 if things are OK, 1 if an IO error occursed and 2 if a header - parse error occured */ +/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header + parse error occurred */ int ServerState::RunHeaders() { State = Header; - Owner->Status("Waiting for file"); + Owner->Status(_("Waiting for headers")); Major = 0; Minor = 0; @@ -367,19 +392,31 @@ int ServerState::RunHeaders() string Data; if (In.WriteTillEl(Data) == false) continue; + + if (Debug == true) + clog << Data; for (string::const_iterator I = Data.begin(); I < Data.end(); I++) { string::const_iterator J = I; for (; J != Data.end() && *J != '\n' && *J != '\r';J++); - if (HeaderLine(string(I,J-I)) == false) + if (HeaderLine(string(I,J)) == false) return 2; I = J; } + + // 100 Continue is a Nop... + if (Result == 100) + continue; + + // Tidy up the connection persistance state. + if (Encoding == Closes && HaveContent == true) + Persistent = false; + return 0; } while (Owner->Go(false,this) == true); - + return 1; } /*}}}*/ @@ -485,7 +522,7 @@ bool ServerState::HeaderLine(string Line) // The http server might be trying to do something evil. if (Line.length() >= MAXLEN) - return _error->Error("Got a single header line over %u chars",MAXLEN); + return _error->Error(_("Got a single header line over %u chars"),MAXLEN); string::size_type Pos = Line.find(' '); if (Pos == string::npos || Pos+1 > Line.length()) @@ -493,7 +530,7 @@ bool ServerState::HeaderLine(string Line) // Blah, some servers use "connection:closes", evil. 
Pos = Line.find(':'); if (Pos == string::npos || Pos + 2 > Line.length()) - return _error->Error("Bad header line"); + return _error->Error(_("Bad header line")); Pos++; } @@ -505,23 +542,35 @@ bool ServerState::HeaderLine(string Line) string Tag = string(Line,0,Pos); string Val = string(Line,Pos2); - if (stringcasecmp(Tag.begin(),Tag.begin()+4,"HTTP") == 0) + if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0) { // Evil servers return no version if (Line[4] == '/') { - if (sscanf(Line.c_str(),"HTTP/%u.%u %u %[^\n]",&Major,&Minor, + if (sscanf(Line.c_str(),"HTTP/%u.%u %u%[^\n]",&Major,&Minor, &Result,Code) != 4) - return _error->Error("The http server sent an invalid reply header"); + return _error->Error(_("The HTTP server sent an invalid reply header")); } else { Major = 0; Minor = 9; - if (sscanf(Line.c_str(),"HTTP %u %[^\n]",&Result,Code) != 2) - return _error->Error("The http server sent an invalid reply header"); + if (sscanf(Line.c_str(),"HTTP %u%[^\n]",&Result,Code) != 2) + return _error->Error(_("The HTTP server sent an invalid reply header")); } - + + /* Check the HTTP response header to get the default persistance + state. */ + if (Major < 1) + Persistent = false; + else + { + if (Major == 1 && Minor <= 0) + Persistent = false; + else + Persistent = true; + } + return true; } @@ -536,7 +585,7 @@ bool ServerState::HeaderLine(string Line) return true; if (sscanf(Val.c_str(),"%lu",&Size) != 1) - return _error->Error("The http server sent an invalid Content-Length header"); + return _error->Error(_("The HTTP server sent an invalid Content-Length header")); return true; } @@ -551,9 +600,9 @@ bool ServerState::HeaderLine(string Line) HaveContent = true; if (sscanf(Val.c_str(),"bytes %lu-%*u/%lu",&StartPos,&Size) != 2) - return _error->Error("The http server sent an invalid Content-Range header"); + return _error->Error(_("The HTTP server sent an invalid Content-Range header")); if ((unsigned)StartPos > Size) - return _error->Error("This http server has broken range support"); + return _error->Error(_("This HTTP server has broken range support")); return true; } @@ -561,15 +610,29 @@ bool ServerState::HeaderLine(string Line) { HaveContent = true; if (stringcasecmp(Val,"chunked") == 0) - Encoding = Chunked; - + Encoding = Chunked; return true; } + if (stringcasecmp(Tag,"Connection:") == 0) + { + if (stringcasecmp(Val,"close") == 0) + Persistent = false; + if (stringcasecmp(Val,"keep-alive") == 0) + Persistent = true; + return true; + } + if (stringcasecmp(Tag,"Last-Modified:") == 0) { if (StrToTime(Val,Date) == false) - return _error->Error("Unknown date format"); + return _error->Error(_("Unknown date format")); + return true; + } + + if (stringcasecmp(Tag,"Location:") == 0) + { + Location = Val; return true; } @@ -603,7 +666,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) will glitch HTTP/1.0 proxies because they do not filter it out and pass it on, HTTP/1.1 says the connection should default to keep alive and we expect the proxy to do this */ - if (Proxy.empty() == true) + if (Proxy.empty() == true || Proxy.Host.empty()) sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n", QuoteString(Uri.Path,"~").c_str(),ProperHost.c_str()); else @@ -613,13 +676,13 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) and a no-store directive for archives. 
*/ sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\n", Itm->Uri.c_str(),ProperHost.c_str()); - if (_config->FindB("Acquire::http::No-Cache",false) == true) - strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n"); - else + // only generate a cache control header if we actually want to + // use a cache + if (_config->FindB("Acquire::http::No-Cache",false) == false) { if (Itm->IndexFile == true) sprintf(Buf+strlen(Buf),"Cache-Control: max-age=%u\r\n", - _config->FindI("Acquire::http::Max-Age",60*60*24)); + _config->FindI("Acquire::http::Max-Age",0)); else { if (_config->FindB("Acquire::http::No-Store",false) == true) @@ -627,6 +690,10 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) } } } + // generate a no-cache header if needed + if (_config->FindB("Acquire::http::No-Cache",false) == true) + strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n"); + string Req = Buf; @@ -635,7 +702,7 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0) { // In this case we send an if-range query with a range header - sprintf(Buf,"Range: bytes=%li-\r\nIf-Range: %s\r\n",SBuf.st_size - 1, + sprintf(Buf,"Range: bytes=%li-\r\nIf-Range: %s\r\n",(long)SBuf.st_size - 1, TimeRFC1123(SBuf.st_mtime).c_str()); Req += Buf; } @@ -652,7 +719,11 @@ void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out) Req += string("Proxy-Authorization: Basic ") + Base64Encode(Proxy.User + ":" + Proxy.Password) + "\r\n"; - Req += "User-Agent: Debian APT-HTTP/1.2\r\n\r\n"; + if (Uri.User.empty() == false || Uri.Password.empty() == false) + Req += string("Authorization: Basic ") + + Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n"; + + Req += "User-Agent: Debian APT-HTTP/1.3 ("VERSION")\r\n\r\n"; if (Debug == true) cerr << Req << endl; @@ -675,10 +746,12 @@ bool HttpMethod::Go(bool ToFile,ServerState *Srv) FD_ZERO(&rfds); FD_ZERO(&wfds); - // Add the server - if (Srv->Out.WriteSpace() == true && Srv->ServerFd != -1) + /* Add the server. 
We only send more requests if the connection will + be persisting */ + if (Srv->Out.WriteSpace() == true && Srv->ServerFd != -1 + && Srv->Persistent == true) FD_SET(Srv->ServerFd,&wfds); - if (Srv->In.ReadSpace() == true && Srv->ServerFd != -1) + if (Srv->In.ReadSpace() == true && Srv->ServerFd != -1) FD_SET(Srv->ServerFd,&rfds); // Add the file @@ -703,11 +776,15 @@ bool HttpMethod::Go(bool ToFile,ServerState *Srv) tv.tv_usec = 0; int Res = 0; if ((Res = select(MaxFd+1,&rfds,&wfds,0,&tv)) < 0) - return _error->Errno("select","Select failed"); + { + if (errno == EINTR) + return true; + return _error->Errno("select",_("Select failed")); + } if (Res == 0) { - _error->Error("Connection timed out"); + _error->Error(_("Connection timed out")); return ServerDie(Srv); } @@ -730,7 +807,7 @@ bool HttpMethod::Go(bool ToFile,ServerState *Srv) if (FileFD != -1 && FD_ISSET(FileFD,&wfds)) { if (Srv->In.Write(FileFD) == false) - return _error->Errno("write","Error writing to output file"); + return _error->Errno("write",_("Error writing to output file")); } // Handle commands from APT @@ -751,14 +828,17 @@ bool HttpMethod::Flush(ServerState *Srv) { if (File != 0) { - SetNonBlock(File->Fd(),false); + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File->Name() != "/dev/null") + SetNonBlock(File->Fd(),false); if (Srv->In.WriteSpace() == false) return true; while (Srv->In.WriteSpace() == true) { if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write","Error writing to file"); + return _error->Errno("write",_("Error writing to file")); if (Srv->In.IsLimit() == true) return true; } @@ -779,11 +859,14 @@ bool HttpMethod::ServerDie(ServerState *Srv) // Dump the buffer to the file if (Srv->State == ServerState::Data) { - SetNonBlock(File->Fd(),false); + // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking + // can't be set + if (File->Name() != "/dev/null") + SetNonBlock(File->Fd(),false); while (Srv->In.WriteSpace() == true) { if (Srv->In.Write(File->Fd()) == false) - return _error->Errno("write","Error writing to the file"); + return _error->Errno("write",_("Error writing to the file")); // Done if (Srv->In.IsLimit() == true) @@ -797,9 +880,9 @@ bool HttpMethod::ServerDie(ServerState *Srv) { Srv->Close(); if (LErrno == 0) - return _error->Error("Error reading from server Remote end closed connection"); + return _error->Error(_("Error reading from server. Remote end closed connection")); errno = LErrno; - return _error->Errno("read","Error reading from server"); + return _error->Errno("read",_("Error reading from server")); } else { @@ -825,7 +908,9 @@ bool HttpMethod::ServerDie(ServerState *Srv) 1 - IMS hit 3 - Unrecoverable error 4 - Error with error content page - 5 - Unrecoverable non-server error (close the connection) */ + 5 - Unrecoverable non-server error (close the connection) + 6 - Try again with a new or changed URI + */ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) { // Not Modified @@ -837,6 +922,27 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) return 1; } + /* Redirect + * + * Note that it is only OK for us to treat all redirection the same + * because we *always* use GET, not other HTTP methods. There are + * three redirection codes for which it is not appropriate that we + * redirect. Pass on those codes so the error handling kicks in. 
+ */ + if (AllowRedirect + && (Srv->Result > 300 && Srv->Result < 400) + && (Srv->Result != 300 // Multiple Choices + && Srv->Result != 304 // Not Modified + && Srv->Result != 306)) // (Not part of HTTP/1.1, reserved) + { + if (!Srv->Location.empty()) + { + NextURI = Srv->Location; + return 6; + } + /* else pass through for error message */ + } + /* We have a reply we dont handle. This should indicate a perm server failure */ if (Srv->Result < 200 || Srv->Result >= 300) @@ -866,22 +972,23 @@ int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv) if (Srv->StartPos >= 0) { Res.ResumePoint = Srv->StartPos; - ftruncate(File->Fd(),Srv->StartPos); + if (ftruncate(File->Fd(),Srv->StartPos) < 0) + _error->Errno("ftruncate", _("Failed to truncate file")); } // Set the start point lseek(File->Fd(),0,SEEK_END); - delete Srv->In.MD5; - Srv->In.MD5 = new MD5Summation; + delete Srv->In.Hash; + Srv->In.Hash = new Hashes; - // Fill the MD5 Hash if the file is non-empty (resume) + // Fill the Hash if the file is non-empty (resume) if (Srv->StartPos > 0) { lseek(File->Fd(),0,SEEK_SET); - if (Srv->In.MD5->AddFD(File->Fd(),Srv->StartPos) == false) + if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false) { - _error->Errno("read","Problem hashing file"); + _error->Errno("read",_("Problem hashing file")); return 5; } lseek(File->Fd(),0,SEEK_END); @@ -921,7 +1028,6 @@ bool HttpMethod::Fetch(FetchItem *) // Queue the requests int Depth = -1; - bool Tail = false; for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth; I = I->Next, Depth++) { @@ -933,8 +1039,6 @@ bool HttpMethod::Fetch(FetchItem *) if (Server->Comp(I->Uri) == false) break; if (QueueBack == I) - Tail = true; - if (Tail == true) { QueueBack = I->Next; SendReq(I,Server->Out); @@ -953,6 +1057,7 @@ bool HttpMethod::Configuration(string Message) if (pkgAcqMethod::Configuration(Message) == false) return false; + AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true); TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut); PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth", PipelineDepth); @@ -966,6 +1071,10 @@ bool HttpMethod::Configuration(string Message) /* */ int HttpMethod::Loop() { + typedef vector StringVector; + typedef vector::iterator StringVectorIterator; + map Redirected; + signal(SIGTERM,SigTerm); signal(SIGINT,SigTerm); @@ -996,7 +1105,14 @@ int HttpMethod::Loop() delete Server; Server = new ServerState(Queue->Uri,this); } - + /* If the server has explicitly said this is the last connection + then we pre-emptively shut down the pipeline and tear down + the connection. 
This will speed up HTTP/1.0 servers a tad + since we don't have to wait for the close sequence to + complete */ + if (Server->Persistent == false) + Server->Close(); + // Reset the pipeline if (Server->ServerFd == -1) QueueBack = Queue; @@ -1022,8 +1138,9 @@ int HttpMethod::Loop() // The header data is bad case 2: { - _error->Error("Bad header Data"); + _error->Error(_("Bad header data")); Fail(true); + RotateDNS(); continue; } @@ -1038,10 +1155,11 @@ int HttpMethod::Loop() if (FailCounter >= 2) { - Fail("Connection failed",true); + Fail(_("Connection failed"),true); FailCounter = 0; } + RotateDNS(); continue; } }; @@ -1059,6 +1177,11 @@ int HttpMethod::Loop() // Run the data bool Result = Server->RunData(); + /* If the server is sending back sizeless responses then fill in + the size now */ + if (Res.Size == 0) + Res.Size = File->Size(); + // Close the file, destroy the FD object and timestamp it FailFd = -1; delete File; @@ -1074,12 +1197,28 @@ int HttpMethod::Loop() // Send status to APT if (Result == true) { - Res.MD5Sum = Server->In.MD5->Result(); + Res.TakeHashes(*Server->In.Hash); URIDone(Res); } else - Fail(true); - + { + if (Server->ServerFd == -1) + { + FailCounter++; + _error->Discard(); + Server->Close(); + + if (FailCounter >= 2) + { + Fail(_("Connection failed"),true); + FailCounter = 0; + } + + QueueBack = Queue; + } + else + Fail(true); + } break; } @@ -1100,7 +1239,11 @@ int HttpMethod::Loop() // Hard internal error, kill the connection and fail case 5: { + delete File; + File = 0; + Fail(); + RotateDNS(); Server->Close(); break; } @@ -1118,8 +1261,48 @@ int HttpMethod::Loop() break; } + // Try again with a new URL + case 6: + { + // Clear rest of response if there is content + if (Server->HaveContent) + { + File = new FileFd("/dev/null",FileFd::WriteExists); + Server->RunData(); + delete File; + File = 0; + } + + /* Detect redirect loops. No more redirects are allowed + after the same URI is seen twice in a queue item. */ + StringVector &R = Redirected[Queue->DestFile]; + bool StopRedirects = false; + if (R.size() == 0) + R.push_back(Queue->Uri); + else if (R[0] == "STOP" || R.size() > 10) + StopRedirects = true; + else + { + for (StringVectorIterator I = R.begin(); I != R.end(); I++) + if (Queue->Uri == *I) + { + R[0] = "STOP"; + break; + } + + R.push_back(Queue->Uri); + } + + if (StopRedirects == false) + Redirect(NextURI); + else + Fail(); + + break; + } + default: - Fail("Internal error"); + Fail(_("Internal error")); break; } @@ -1132,7 +1315,13 @@ int HttpMethod::Loop() int main() { + setlocale(LC_ALL, ""); + // ignore SIGPIPE, this can happen on write() if the socket + // closes the connection (this is dealt with via ServerDie()) + signal(SIGPIPE, SIG_IGN); + HttpMethod Mth; - return Mth.Loop(); } + +
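A few free-standing sketches of the behaviour this patch introduces or reworks follow. None of them are taken from APT; every helper name, constant and parameter in them is illustrative.

SendReq() now emits a caching policy only when Acquire::http::No-Cache is unset, sends the no-cache/Pragma pair otherwise, and drops the default index-file max-age from one day to 0. The decision tree, restated with plain parameters in place of the _config lookups:

#include <cstdio>
#include <string>

std::string CacheHeaders(bool NoCache,bool IndexFile,unsigned int MaxAge,bool NoStore)
{
   // An explicit Acquire::http::No-Cache wins outright
   if (NoCache == true)
      return "Cache-Control: no-cache\r\nPragma: no-cache\r\n";

   // Index files carry a max-age (Acquire::http::Max-Age, now 0 by default),
   // archives may ask caches not to store them (Acquire::http::No-Store)
   if (IndexFile == true)
   {
      char Buf[64];
      snprintf(Buf,sizeof(Buf),"Cache-Control: max-age=%u\r\n",MaxAge);
      return Buf;
   }
   if (NoStore == true)
      return "Cache-Control: no-store\r\n";
   return "";
}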
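The Range/If-Range resume request is unchanged in substance; the patch only casts st_size to long for the printf. Roughly, with strftime standing in for APT's TimeRFC1123 (which lives in strutl.cc, outside this diff):

#include <sys/stat.h>
#include <cstdio>
#include <ctime>
#include <string>

// If a partial file already exists, request everything from its last byte
// onwards and guard it with If-Range, so a file that changed on the server
// is sent again in full rather than appended to.
std::string ResumeHeaders(const char *DestFile)
{
   struct stat SBuf;
   if (stat(DestFile,&SBuf) != 0 || SBuf.st_size <= 0)
      return "";

   time_t MTime = SBuf.st_mtime;
   struct tm T;
   gmtime_r(&MTime,&T);
   char Time[64];
   strftime(Time,sizeof(Time),"%a, %d %b %Y %H:%M:%S GMT",&T); // assumes the C locale

   char Buf[300];
   snprintf(Buf,sizeof(Buf),"Range: bytes=%li-\r\nIf-Range: %s\r\n",
            (long)SBuf.st_size - 1,Time);
   return Buf;
}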
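Besides the existing Proxy-Authorization header, the request now also carries an Authorization header whenever the URI itself holds credentials; both are built with APT's Base64Encode() from strutl.cc, which is not shown in this diff. For reference, a standard Base64 encoder of the kind it needs:

#include <string>

std::string EncodeBase64(const std::string &In)
{
   static const char Tab[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
   std::string Out;
   std::string::size_type i = 0;

   // Whole 3-byte groups become four output characters
   while (i + 2 < In.size())
   {
      unsigned long V = ((unsigned char)In[i] << 16) |
                        ((unsigned char)In[i+1] << 8) |
                         (unsigned char)In[i+2];
      Out += Tab[(V >> 18) & 63]; Out += Tab[(V >> 12) & 63];
      Out += Tab[(V >> 6) & 63];  Out += Tab[V & 63];
      i += 3;
   }

   // One trailing byte -> two characters plus "=="
   if (i + 1 == In.size())
   {
      unsigned long V = (unsigned char)In[i] << 16;
      Out += Tab[(V >> 18) & 63]; Out += Tab[(V >> 12) & 63];
      Out += "==";
   }
   // Two trailing bytes -> three characters plus "="
   else if (i + 2 == In.size())
   {
      unsigned long V = ((unsigned char)In[i] << 16) |
                        ((unsigned char)In[i+1] << 8);
      Out += Tab[(V >> 18) & 63]; Out += Tab[(V >> 12) & 63];
      Out += Tab[(V >> 6) & 63];  Out += '=';
   }
   return Out;
}

With such an encoder, "Authorization: Basic " + EncodeBase64(User + ":" + Password) + "\r\n" reproduces the header the patch sends.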
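HeaderLine() now derives the connection's default persistence from the protocol version in the status line and lets a later Connection: header override it; RunHeaders() additionally forces Persistent off when the reply is terminated by connection close. The version rule and the override, pulled out into small helpers (this sketch handles only versioned replies; the real code also accepts the version-less "HTTP nnn" form):

#include <cstdio>
#include <string>
#include <strings.h>

// HTTP/1.1 and later default to persistent connections, HTTP/1.0 and
// HTTP/0.9 do not.
bool DefaultPersistent(unsigned int Major,unsigned int Minor)
{
   if (Major < 1)
      return false;
   return !(Major == 1 && Minor <= 0);
}

// Parse a status line such as "HTTP/1.1 206 Partial Content".
bool ParseStatusLine(const std::string &Line,unsigned int &Major,
                     unsigned int &Minor,unsigned int &Result,bool &Persistent)
{
   if (sscanf(Line.c_str(),"HTTP/%u.%u %u",&Major,&Minor,&Result) != 3)
      return false;
   Persistent = DefaultPersistent(Major,Minor);
   return true;
}

// A later "Connection:" header overrides the version-based default.
void ApplyConnectionHeader(const std::string &Val,bool &Persistent)
{
   if (strcasecmp(Val.c_str(),"close") == 0)
      Persistent = false;
   if (strcasecmp(Val.c_str(),"keep-alive") == 0)
      Persistent = true;
}

Persistence is what makes the pipelining worthwhile: Go() only queues further requests on a socket that will stay open, and Loop() pre-emptively closes non-persistent connections instead of waiting for the close sequence.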
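The hand-rolled no_proxy scanner in ServerState::Open() is replaced by a call to CheckDomainList() from connect.cc, which is not part of this diff. Assuming it behaves as a case-insensitive suffix match over a comma-separated list, an equivalent check would look roughly like this:

#include <string>
#include <strings.h>

// Does Host match any entry of a comma-separated domain list such as
// "localhost,.example.org"?  Entries match case-insensitively as suffixes,
// so ".example.org" also covers "ftp.example.org".
bool MatchesDomainList(const std::string &Host,const char *List)
{
   if (List == 0)
      return false;

   std::string S = List;
   std::string::size_type Start = 0;
   while (Start < S.length())
   {
      std::string::size_type End = S.find(',',Start);
      if (End == std::string::npos)
         End = S.length();
      std::string Entry = S.substr(Start,End - Start);

      if (Entry.empty() == false && Entry.length() <= Host.length() &&
          strcasecmp(Host.c_str() + Host.length() - Entry.length(),
                     Entry.c_str()) == 0)
         return true;

      Start = End + 1;
   }
   return false;
}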
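Go() now treats EINTR from select() as a non-event rather than a fatal error, and keeps the zero return as the timeout case that kills the server connection. The wait itself, isolated:

#include <sys/select.h>
#include <cerrno>

enum WaitResult { Ready, Interrupted, TimedOut, Failed };

// Wait until Fd becomes readable, for at most TimeOutSec seconds.
WaitResult WaitReadable(int Fd,unsigned long TimeOutSec)
{
   fd_set rfds;
   FD_ZERO(&rfds);
   FD_SET(Fd,&rfds);

   struct timeval tv;
   tv.tv_sec = TimeOutSec;
   tv.tv_usec = 0;

   int Res = select(Fd + 1,&rfds,0,0,&tv);
   if (Res < 0)
      return errno == EINTR ? Interrupted : Failed;
   if (Res == 0)
      return TimedOut;
   return Ready;
}

Interrupted simply means "go around the loop again"; only TimedOut and Failed are treated as errors.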
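Finally, case 6 of Loop() remembers, per destination file, every URI an item has been redirected through: a repeat marks the chain with a "STOP" sentinel, and more than ten hops ends it as well. A condensed variant of that guard (it refuses on the first repeat instead of on the following pass, which is a slight simplification of the code above):

#include <map>
#include <string>
#include <vector>

// Returns true if following one more redirect for DestFile is acceptable,
// recording Uri in the chain as a side effect.
bool AllowAnotherRedirect(std::map<std::string,std::vector<std::string> > &Seen,
                          const std::string &DestFile,const std::string &Uri)
{
   std::vector<std::string> &R = Seen[DestFile];

   // Chain already poisoned, or simply too long
   if (R.empty() == false && (R[0] == "STOP" || R.size() > 10))
      return false;

   // A URI seen before means we are looping; poison the chain
   for (std::vector<std::string>::iterator I = R.begin(); I != R.end(); ++I)
      if (*I == Uri)
      {
         R[0] = "STOP";
         return false;
      }

   R.push_back(Uri);
   return true;
}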