// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
-// $Id: http.cc,v 1.48 2001/02/23 05:45:27 jgg Exp $
+// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
/* ######################################################################
- HTTP Aquire Method - This is the HTTP aquire method for APT.
+ HTTP Acquire Method - This is the HTTP acquire method for APT.
It uses HTTP/1.1 and many of the fancy options therein, such as
pipelining, range, if-range and so on.
#include <apt-pkg/fileutl.h>
#include <apt-pkg/acquire-method.h>
#include <apt-pkg/error.h>
-#include <apt-pkg/md5.h>
+#include <apt-pkg/hashes.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <signal.h>
#include <stdio.h>
#include <errno.h>
+#include <string.h>
+#include <iostream>
+#include <map>
+#include <apti18n.h>
// Internet stuff
#include <netdb.h>
+#include "config.h"
#include "connect.h"
#include "rfc2553emu.h"
#include "http.h"
/*}}}*/
+using namespace std;
string HttpMethod::FailFile;
int HttpMethod::FailFd = -1;
time_t HttpMethod::FailTime = 0;
unsigned long PipelineDepth = 10;
unsigned long TimeOut = 120;
+bool AllowRedirect = false;
bool Debug = false;
+URI Proxy;
+unsigned long CircleBuf::BwReadLimit=0;
+unsigned long CircleBuf::BwTickReadData=0;
+struct timeval CircleBuf::BwReadTick={0,0};
+const unsigned int CircleBuf::BW_HZ=10;
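+// bandwidth limiting state: BwReadLimit (bytes/sec) is split into BW_HZ
+// ticks per second; BwTickReadData counts the bytes read in the current
+// tick and BwReadTick marks when that tick started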
+
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
-CircleBuf::CircleBuf(unsigned long Size) : Size(Size), MD5(0)
+CircleBuf::CircleBuf(unsigned long Size) : Size(Size), Hash(0)
{
Buf = new unsigned char[Size];
Reset();
+
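+ // Acquire::http::Dl-Limit is given in kilobytes per second; 0 disables
+ // the limit entirely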
+ CircleBuf::BwReadLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024;
}
/*}}}*/
// CircleBuf::Reset - Reset to the default state /*{{{*/
StrPos = 0;
MaxGet = (unsigned int)-1;
OutQueue = string();
- if (MD5 != 0)
+ if (Hash != 0)
{
- delete MD5;
- MD5 = new MD5Summation;
+ delete Hash;
+ Hash = new Hashes;
}
};
/*}}}*/
is non-blocking.. */
bool CircleBuf::Read(int Fd)
{
+ unsigned long BwReadMax;
+
while (1)
{
// Woops, buffer is full
if (InP - OutP == Size)
return true;
-
+
+ // what's left to read in this tick
+ BwReadMax = CircleBuf::BwReadLimit/BW_HZ;
+
+ if(CircleBuf::BwReadLimit) {
+ struct timeval now;
+ gettimeofday(&now,0);
+
+ unsigned long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 +
+ now.tv_usec-CircleBuf::BwReadTick.tv_usec;
+ if(d > 1000000/BW_HZ) {
+ CircleBuf::BwReadTick = now;
+ CircleBuf::BwTickReadData = 0;
+ }
+
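+ // this tick's share has already been read; sleep out the tick and
+ // let the caller come back to us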
+ if(CircleBuf::BwTickReadData >= BwReadMax) {
+ usleep(1000000/BW_HZ);
+ return true;
+ }
+ }
+
// Write the buffer segment
int Res;
- Res = read(Fd,Buf + (InP%Size),LeftRead());
+ if(CircleBuf::BwReadLimit) {
+ Res = read(Fd,Buf + (InP%Size),
+ BwReadMax > LeftRead() ? LeftRead() : BwReadMax);
+ } else
+ Res = read(Fd,Buf + (InP%Size),LeftRead());
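+ // charge whatever was actually read against this tick's budget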
+ if(Res > 0 && BwReadLimit > 0)
+ CircleBuf::BwTickReadData += Res;
+
if (Res == 0)
return false;
if (Res < 0)
unsigned long Sz = LeftRead();
if (OutQueue.length() - StrPos < Sz)
Sz = OutQueue.length() - StrPos;
- memcpy(Buf + (InP%Size),OutQueue.begin() + StrPos,Sz);
+ memcpy(Buf + (InP%Size),OutQueue.c_str() + StrPos,Sz);
// Advance
StrPos += Sz;
return false;
}
- if (MD5 != 0)
- MD5->Add(Buf + (OutP%Size),Res);
+ if (Hash != 0)
+ Hash->Add(Buf + (OutP%Size),Res);
OutP += Res;
}
{
if (Buf[I%Size] != '\n')
continue;
- for (I++; I < InP && Buf[I%Size] == '\r'; I++);
+ ++I;
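+ // for a header block (Single == false) the newline must be followed
+ // directly by an optional CR and a second newline, i.e. an empty line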
if (Single == false)
{
- if (Buf[I%Size] != '\n')
- continue;
- for (I++; I < InP && Buf[I%Size] == '\r'; I++);
+ if (I < InP && Buf[I%Size] == '\r')
+ ++I;
+ if (I >= InP || Buf[I%Size] != '\n')
+ continue;
+ ++I;
}
- if (I > InP)
- I = InP;
-
Data = "";
while (OutP < I)
{
unsigned long Sz = LeftWrite();
if (Sz == 0)
return false;
- if (I - OutP < LeftWrite())
+ if (I - OutP < Sz)
Sz = I - OutP;
Data += string((char *)(Buf + (OutP%Size)),Sz);
OutP += Sz;
/*}}}*/
// ServerState::RunHeaders - Get the headers before the data /*{{{*/
// ---------------------------------------------------------------------
-/* Returns 0 if things are OK, 1 if an IO error occursed and 2 if a header
- parse error occured */
+/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
+ parse error occurred */
int ServerState::RunHeaders()
{
State = Header;
- Owner->Status("Waiting for file");
+ Owner->Status(_("Waiting for headers"));
Major = 0;
Minor = 0;
{
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r';J++);
- if (HeaderLine(string(I,J-I)) == false)
+ if (HeaderLine(string(I,J)) == false)
return 2;
I = J;
}
// The http server might be trying to do something evil.
if (Line.length() >= MAXLEN)
- return _error->Error("Got a single header line over %u chars",MAXLEN);
+ return _error->Error(_("Got a single header line over %u chars"),MAXLEN);
string::size_type Pos = Line.find(' ');
if (Pos == string::npos || Pos+1 > Line.length())
// Blah, some servers use "connection:closes", evil.
Pos = Line.find(':');
if (Pos == string::npos || Pos + 2 > Line.length())
- return _error->Error("Bad header line");
+ return _error->Error(_("Bad header line"));
Pos++;
}
string Tag = string(Line,0,Pos);
string Val = string(Line,Pos2);
- if (stringcasecmp(Tag.begin(),Tag.begin()+4,"HTTP") == 0)
+ if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0)
{
// Evil servers return no version
if (Line[4] == '/')
{
- if (sscanf(Line.c_str(),"HTTP/%u.%u %u %[^\n]",&Major,&Minor,
+ if (sscanf(Line.c_str(),"HTTP/%u.%u %u%[^\n]",&Major,&Minor,
&Result,Code) != 4)
- return _error->Error("The http server sent an invalid reply header");
+ return _error->Error(_("The HTTP server sent an invalid reply header"));
}
else
{
Major = 0;
Minor = 9;
- if (sscanf(Line.c_str(),"HTTP %u %[^\n]",&Result,Code) != 2)
- return _error->Error("The http server sent an invalid reply header");
+ if (sscanf(Line.c_str(),"HTTP %u%[^\n]",&Result,Code) != 2)
+ return _error->Error(_("The HTTP server sent an invalid reply header"));
}
/* Check the HTTP response header to get the default persistence
return true;
if (sscanf(Val.c_str(),"%lu",&Size) != 1)
- return _error->Error("The http server sent an invalid Content-Length header");
+ return _error->Error(_("The HTTP server sent an invalid Content-Length header"));
return true;
}
HaveContent = true;
if (sscanf(Val.c_str(),"bytes %lu-%*u/%lu",&StartPos,&Size) != 2)
- return _error->Error("The http server sent an invalid Content-Range header");
+ return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
if ((unsigned)StartPos > Size)
- return _error->Error("This http server has broken range support");
+ return _error->Error(_("This HTTP server has broken range support"));
return true;
}
if (stringcasecmp(Tag,"Last-Modified:") == 0)
{
if (StrToTime(Val,Date) == false)
- return _error->Error("Unknown date format");
+ return _error->Error(_("Unknown date format"));
+ return true;
+ }
+
+ if (stringcasecmp(Tag,"Location:") == 0)
+ {
+ Location = Val;
return true;
}
will glitch HTTP/1.0 proxies because they do not filter it out and
pass it on, HTTP/1.1 says the connection should default to keep alive
and we expect the proxy to do this */
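+ // an empty proxy, or one without a host, means we talk to the origin
+ // server directly and request only the path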
- if (Proxy.empty() == true)
+ if (Proxy.empty() == true || Proxy.Host.empty())
sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n",
QuoteString(Uri.Path,"~").c_str(),ProperHost.c_str());
else
and a no-store directive for archives. */
sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\n",
Itm->Uri.c_str(),ProperHost.c_str());
- if (_config->FindB("Acquire::http::No-Cache",false) == true)
- strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n");
- else
+ // only generate a cache control header if we actually want to
+ // use a cache
+ if (_config->FindB("Acquire::http::No-Cache",false) == false)
{
if (Itm->IndexFile == true)
sprintf(Buf+strlen(Buf),"Cache-Control: max-age=%u\r\n",
- _config->FindI("Acquire::http::Max-Age",60*60*24));
+ _config->FindI("Acquire::http::Max-Age",0));
else
{
if (_config->FindB("Acquire::http::No-Store",false) == true)
}
}
}
+ // generate a no-cache header if needed
+ if (_config->FindB("Acquire::http::No-Cache",false) == true)
+ strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n");
+
string Req = Buf;
Req += string("Authorization: Basic ") +
Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n";
- Req += "User-Agent: Debian APT-HTTP/1.2\r\n\r\n";
+ Req += "User-Agent: Debian APT-HTTP/1.3 ("VERSION")\r\n\r\n";
if (Debug == true)
cerr << Req << endl;
tv.tv_usec = 0;
int Res = 0;
if ((Res = select(MaxFd+1,&rfds,&wfds,0,&tv)) < 0)
- return _error->Errno("select","Select failed");
+ {
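+ // an interrupted select is harmless, just go around the loop again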
+ if (errno == EINTR)
+ return true;
+ return _error->Errno("select",_("Select failed"));
+ }
if (Res == 0)
{
- _error->Error("Connection timed out");
+ _error->Error(_("Connection timed out"));
return ServerDie(Srv);
}
if (FileFD != -1 && FD_ISSET(FileFD,&wfds))
{
if (Srv->In.Write(FileFD) == false)
- return _error->Errno("write","Error writing to output file");
+ return _error->Errno("write",_("Error writing to output file"));
}
// Handle commands from APT
{
if (File != 0)
{
- SetNonBlock(File->Fd(),false);
+ // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
+ // can't be set
+ if (File->Name() != "/dev/null")
+ SetNonBlock(File->Fd(),false);
if (Srv->In.WriteSpace() == false)
return true;
while (Srv->In.WriteSpace() == true)
{
if (Srv->In.Write(File->Fd()) == false)
- return _error->Errno("write","Error writing to file");
+ return _error->Errno("write",_("Error writing to file"));
if (Srv->In.IsLimit() == true)
return true;
}
// Dump the buffer to the file
if (Srv->State == ServerState::Data)
{
- SetNonBlock(File->Fd(),false);
+ // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
+ // can't be set
+ if (File->Name() != "/dev/null")
+ SetNonBlock(File->Fd(),false);
while (Srv->In.WriteSpace() == true)
{
if (Srv->In.Write(File->Fd()) == false)
- return _error->Errno("write","Error writing to the file");
+ return _error->Errno("write",_("Error writing to the file"));
// Done
if (Srv->In.IsLimit() == true)
{
Srv->Close();
if (LErrno == 0)
- return _error->Error("Error reading from server Remote end closed connection");
+ return _error->Error(_("Error reading from server. Remote end closed connection"));
errno = LErrno;
- return _error->Errno("read","Error reading from server");
+ return _error->Errno("read",_("Error reading from server"));
}
else
{
1 - IMS hit
3 - Unrecoverable error
4 - Error with error content page
- 5 - Unrecoverable non-server error (close the connection) */
+ 5 - Unrecoverable non-server error (close the connection)
+ 6 - Try again with a new or changed URI
+ */
int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
{
// Not Modified
return 1;
}
+ /* Redirect
+ *
+ * Note that it is only OK for us to treat all redirection the same
+ * because we *always* use GET, not other HTTP methods. There are
+ * three redirection codes for which it is not appropriate that we
+ * redirect. Pass on those codes so the error handling kicks in.
+ */
+ if (AllowRedirect
+ && (Srv->Result > 300 && Srv->Result < 400)
+ && (Srv->Result != 300 // Multiple Choices
+ && Srv->Result != 304 // Not Modified
+ && Srv->Result != 306)) // (Not part of HTTP/1.1, reserved)
+ {
+ if (!Srv->Location.empty())
+ {
+ NextURI = Srv->Location;
+ return 6;
+ }
+ /* else pass through for error message */
+ }
+
/* We have a reply we don't handle. This should indicate a permanent server
failure */
if (Srv->Result < 200 || Srv->Result >= 300)
if (Srv->StartPos >= 0)
{
Res.ResumePoint = Srv->StartPos;
- ftruncate(File->Fd(),Srv->StartPos);
+ if (ftruncate(File->Fd(),Srv->StartPos) < 0)
+ _error->Errno("ftruncate", _("Failed to truncate file"));
}
// Set the start point
lseek(File->Fd(),0,SEEK_END);
- delete Srv->In.MD5;
- Srv->In.MD5 = new MD5Summation;
+ delete Srv->In.Hash;
+ Srv->In.Hash = new Hashes;
- // Fill the MD5 Hash if the file is non-empty (resume)
+ // Fill the Hash if the file is non-empty (resume)
if (Srv->StartPos > 0)
{
lseek(File->Fd(),0,SEEK_SET);
- if (Srv->In.MD5->AddFD(File->Fd(),Srv->StartPos) == false)
+ if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false)
{
- _error->Errno("read","Problem hashing file");
+ _error->Errno("read",_("Problem hashing file"));
return 5;
}
lseek(File->Fd(),0,SEEK_END);
// Queue the requests
int Depth = -1;
- bool Tail = false;
for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
I = I->Next, Depth++)
{
if (Server->Comp(I->Uri) == false)
break;
if (QueueBack == I)
- Tail = true;
- if (Tail == true)
{
QueueBack = I->Next;
SendReq(I,Server->Out);
if (pkgAcqMethod::Configuration(Message) == false)
return false;
+ AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true);
TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut);
PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth",
PipelineDepth);
/* */
int HttpMethod::Loop()
{
+ typedef vector<string> StringVector;
+ typedef vector<string>::iterator StringVectorIterator;
+ map<string, StringVector> Redirected;
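+ // remember, per destination file, which URIs have already been tried
+ // so that redirect loops can be detected later on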
+
signal(SIGTERM,SigTerm);
signal(SIGINT,SigTerm);
delete Server;
Server = new ServerState(Queue->Uri,this);
}
-
/* If the server has explicitly said this is the last connection
then we pre-emptively shut down the pipeline and tear down
the connection. This will speed up HTTP/1.0 servers a tad
// The header data is bad
case 2:
{
- _error->Error("Bad header Data");
+ _error->Error(_("Bad header data"));
Fail(true);
RotateDNS();
continue;
if (FailCounter >= 2)
{
- Fail("Connection failed",true);
+ Fail(_("Connection failed"),true);
FailCounter = 0;
}
// Send status to APT
if (Result == true)
{
- Res.MD5Sum = Server->In.MD5->Result();
+ Res.TakeHashes(*Server->In.Hash);
URIDone(Res);
}
else
- Fail(true);
-
+ {
+ if (Server->ServerFd == -1)
+ {
+ FailCounter++;
+ _error->Discard();
+ Server->Close();
+
+ if (FailCounter >= 2)
+ {
+ Fail(_("Connection failed"),true);
+ FailCounter = 0;
+ }
+
+ QueueBack = Queue;
+ }
+ else
+ Fail(true);
+ }
break;
}
// Hard internal error, kill the connection and fail
case 5:
{
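+ // close and forget the destination file before reporting the failure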
+ delete File;
+ File = 0;
+
Fail();
RotateDNS();
Server->Close();
break;
}
+ // Try again with a new URL
+ case 6:
+ {
+ // Clear rest of response if there is content
+ if (Server->HaveContent)
+ {
+ File = new FileFd("/dev/null",FileFd::WriteExists);
+ Server->RunData();
+ delete File;
+ File = 0;
+ }
+
+ /* Detect redirect loops. No more redirects are allowed
+ after the same URI is seen twice in a queue item. */
+ StringVector &R = Redirected[Queue->DestFile];
+ bool StopRedirects = false;
+ if (R.size() == 0)
+ R.push_back(Queue->Uri);
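+ // the first entry is replaced by "STOP" once a loop has been seen;
+ // also give up once the chain grows past ten redirects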
+ else if (R[0] == "STOP" || R.size() > 10)
+ StopRedirects = true;
+ else
+ {
+ for (StringVectorIterator I = R.begin(); I != R.end(); I++)
+ if (Queue->Uri == *I)
+ {
+ R[0] = "STOP";
+ break;
+ }
+
+ R.push_back(Queue->Uri);
+ }
+
+ if (StopRedirects == false)
+ Redirect(NextURI);
+ else
+ Fail();
+
+ break;
+ }
+
default:
- Fail("Internal error");
+ Fail(_("Internal error"));
break;
}
int main()
{
+ setlocale(LC_ALL, "");
+ // ignore SIGPIPE, this can happen on write() if the socket
+ // closes the connection (this is dealt with via ServerDie())
+ signal(SIGPIPE, SIG_IGN);
+
HttpMethod Mth;
-
return Mth.Loop();
}
+
+