// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
-// $Id: http.cc,v 1.1 1998/11/01 05:30:47 jgg Exp $
+// $Id: http.cc,v 1.9 1998/12/05 04:19:05 jgg Exp $
/* ######################################################################
HTTP Acquire Method - This is the HTTP acquire method for APT.
#include <sys/time.h>
#include <utime.h>
#include <unistd.h>
+#include <signal.h>
#include <stdio.h>
// Internet stuff
#include "http.h"
/*}}}*/
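+// These statics record the destination file currently being written so the
+// SIGTERM/SIGINT handler below can close and timestamp it if the method is
+// killed part way through a transfer.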
+string HttpMethod::FailFile;
+int HttpMethod::FailFd = -1;
+time_t HttpMethod::FailTime = 0;
+
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
in_addr LastHostA;
bool ServerState::Open()
{
- Close();
+ // Use the already open connection if possible.
+ if (ServerFd != -1)
+ return true;
- int Port;
- string Host;
+ Close();
+ In.Reset();
+ Out.Reset();
+
+ // Determine the proxy setting
+ if (getenv("http_proxy") == 0)
+ {
+ string DefProxy = _config->Find("Acquire::http::Proxy");
+ string SpecificProxy = _config->Find("Acquire::http::Proxy::" + ServerName.Host);
+ if (SpecificProxy.empty() == false)
+ {
+ if (SpecificProxy == "DIRECT")
+ Proxy = "";
+ else
+ Proxy = SpecificProxy;
+ }
+ else
+ Proxy = DefProxy;
+ }
+ else
+ Proxy = getenv("http_proxy");
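+   /* For illustration only (host and port values are hypothetical), the
+      settings consulted above take roughly these forms:
+        http_proxy=http://proxy.example.org:8080/            (environment)
+        Acquire::http::Proxy "http://proxy.example.org:8080/";
+        Acquire::http::Proxy::ftp.example.org "DIRECT";      (no proxy for this host)
+   */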
- if (Proxy.empty() == false)
+ // Determine what host and port to use based on the proxy settings
+ int Port = 80;
+ string Host;
+ if (Proxy.empty() == true)
{
- Port = ServerName.Port;
+ if (ServerName.Port != 0)
+ Port = ServerName.Port;
Host = ServerName.Host;
}
else
{
- Port = Proxy.Port;
+ if (Proxy.Port != 0)
+ Port = Proxy.Port;
Host = Proxy.Host;
}
+   /* Reuse the cached address record when the host has not changed. Yes,
+      this is against the spec, but the way we have set up our rotating DNS
+      makes this the more sensible behaviour */
if (LastHost != Host)
{
Owner->Status("Connecting to %s",Host.c_str());
// Lookup the host
hostent *Addr = gethostbyname(Host.c_str());
if (Addr == 0)
- return _error->Errno("gethostbyname","Could not lookup host %s",Host.c_str());
+ return _error->Error("Could not resolve '%s'",Host.c_str());
LastHost = Host;
LastHostA = *(in_addr *)(Addr->h_addr_list[0]);
}
{
close(ServerFd);
ServerFd = -1;
- In.Reset();
- Out.Reset();
return true;
}
/*}}}*/
// ServerState::RunHeaders - Get the headers before the data /*{{{*/
// ---------------------------------------------------------------------
-/* */
-bool ServerState::RunHeaders()
+/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
+   parse error occurred */
+int ServerState::RunHeaders()
{
State = Header;
Result = 0;
Size = 0;
StartPos = 0;
- Encoding = Closes;
+ Encoding = Closes;
+ HaveContent = false;
time(&Date);
do
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r';J++);
if (HeaderLine(string(I,J-I)) == false)
- return false;
+ return 2;
I = J;
}
- return true;
+ return 0;
}
while (Owner->Go(false,this) == true);
-
- return false;
+
+ return 1;
}
/*}}}*/
// ServerState::RunData - Transfer the data from the socket /*{{{*/
while ((Last = Owner->Go(false,this)) == true);
if (Last == false)
return false;
- }
+ }
}
else
{
string Tag = string(Line,0,Pos);
string Val = string(Line,Pos+1);
- if (stringcasecmp(Tag,"HTTP") == 0)
+ if (stringcasecmp(Tag.begin(),Tag.begin()+4,"HTTP") == 0)
{
// Evil servers return no version
if (Line[4] == '/')
return true;
}
- if (stringcasecmp(Tag,"Content-Length:"))
+ if (stringcasecmp(Tag,"Content-Length:") == 0)
{
if (Encoding == Closes)
Encoding = Stream;
+ HaveContent = true;
// The length is already set from the Content-Range header
if (StartPos != 0)
return true;
}
- if (stringcasecmp(Tag,"Content-Range:"))
+ if (stringcasecmp(Tag,"Content-Type:") == 0)
+ {
+ HaveContent = true;
+ return true;
+ }
+
+ if (stringcasecmp(Tag,"Content-Range:") == 0)
{
+ HaveContent = true;
+
if (sscanf(Val.c_str(),"bytes %lu-%*u/%lu",&StartPos,&Size) != 2)
return _error->Error("The http server sent an invalid Content-Range header");
if ((unsigned)StartPos > Size)
return true;
}
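+   /* For example (numbers illustrative), a server resuming a partial fetch
+      might send "Content-Range: bytes 1000-4737/4738", which yields a
+      StartPos of 1000 and a total Size of 4738. */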
- if (stringcasecmp(Tag,"Transfer-Encoding:"))
+ if (stringcasecmp(Tag,"Transfer-Encoding:") == 0)
{
- if (stringcasecmp(Val,"chunked"))
+ HaveContent = true;
+ if (stringcasecmp(Val,"chunked") == 0)
Encoding = Chunked;
+
return true;
}
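+   /* A "Transfer-Encoding: chunked" reply delivers the body as a series of
+      hex length prefixed chunks ending with a zero length chunk, instead of
+      giving a Content-Length up front. */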
- if (stringcasecmp(Tag,"Last-Modified:"))
+ if (stringcasecmp(Tag,"Last-Modified:") == 0)
{
if (StrToTime(Val,Date) == false)
return _error->Error("Unknown date format");
ProperHost += Buf;
}
- // Build the request
+   /* Build the request. We include a keep-alive header only for non-proxy
+      requests; it coaxes old HTTP/1.0 servers that support keep-alive but
+      not the HTTP/1.1 automatic form. Sending it through a proxy would
+      confuse HTTP/1.0 proxies, which do not filter it out but pass it on;
+      HTTP/1.1 says the connection should default to keep-alive, so we
+      expect the proxy to handle that itself */
if (Proxy.empty() == true)
sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n",
Uri.Path.c_str(),ProperHost.c_str());
sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\n",
Itm->Uri.c_str(),ProperHost.c_str());
string Req = Buf;
-
+
// Check for a partial file
struct stat SBuf;
if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
Req += string("Proxy-Authorization: Basic ") + Base64Encode(ProxyAuth) + "\r\n";*/
Req += "User-Agent: Debian APT-HTTP/1.2\r\n\r\n";
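+   /* For illustration (path and host hypothetical), a direct request built
+      here looks roughly like:
+        GET /debian/dists/stable/Packages.gz HTTP/1.1
+        Host: ftp.example.org
+        Connection: keep-alive
+        User-Agent: Debian APT-HTTP/1.2
+      followed by a blank line; a proxied request uses the full URI in the
+      request line and omits the keep-alive header. */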
+// cerr << Req << endl;
+
Out.Read(Req);
}
/*}}}*/
{
if (Srv->In.Write(File->Fd()) == false)
return _error->Errno("write","Error writing to file");
+ if (Srv->In.IsLimit() == true)
+ return true;
}
if (Srv->In.IsLimit() == true || Srv->Encoding == ServerState::Closes)
{
if (Srv->In.Write(File->Fd()) == false)
return _error->Errno("write","Error writing to the file");
+
+ // Done
+ if (Srv->In.IsLimit() == true)
+ return true;
}
}
to do. Returns
0 - File is open,
1 - IMS hit
- 3 - Unrecoverable error */
+ 3 - Unrecoverable error
+ 4 - Error with error content page
+ 5 - Unrecoverable non-server error (close the connection) */
int HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
{
// Not Modified
if (Srv->Result < 200 || Srv->Result >= 300)
{
_error->Error("%u %s",Srv->Result,Srv->Code);
+ if (Srv->HaveContent == true)
+ return 4;
return 3;
}
delete File;
File = new FileFd(Queue->DestFile,FileFd::WriteAny);
if (_error->PendingError() == true)
- return 3;
-
+ return 5;
+
+ FailFile = Queue->DestFile;
+ FailFd = File->Fd();
+ FailTime = Srv->Date;
+
// Set the expected size
if (Srv->StartPos >= 0)
{
if (Srv->In.MD5->AddFD(File->Fd(),Srv->StartPos) == false)
{
_error->Errno("read","Problem hashing file");
- return 3;
+ return 5;
}
lseek(File->Fd(),0,SEEK_END);
}
return 0;
}
/*}}}*/
-// HttpMethod::Loop /*{{{*/
+// HttpMethod::SigTerm - Handle a fatal signal /*{{{*/
+// ---------------------------------------------------------------------
+/* This closes and timestamps the open file. This is necessary to get
+   resume behaviour on user abort */
+void HttpMethod::SigTerm(int)
+{
+ if (FailFd == -1)
+ exit(100);
+ close(FailFd);
+
+ // Timestamp
+ struct utimbuf UBuf;
+ time(&UBuf.actime);
+ UBuf.actime = FailTime;
+ UBuf.modtime = FailTime;
+ utime(FailFile.c_str(),&UBuf);
+
+ exit(100);
+}
+ /*}}}*/
+// HttpMethod::Fetch - Fetch an item /*{{{*/
+// ---------------------------------------------------------------------
+/* This adds an item to the pipeline. We keep the pipeline at a fixed
+ depth. */
+bool HttpMethod::Fetch(FetchItem *)
+{
+ if (Server == 0)
+ return true;
+
+ // Queue the requests
+ int Depth = -1;
+ bool Tail = false;
+ for (FetchItem *I = Queue; I != 0 && Depth < 5; I = I->Next, Depth++)
+ {
+ // Make sure we stick with the same server
+ if (Server->Comp(I->Uri) == false)
+ break;
+
+ if (QueueBack == I)
+ Tail = true;
+ if (Tail == true)
+ {
+ Depth++;
+ QueueBack = I->Next;
+ SendReq(I,Server->Out);
+ continue;
+ }
+ }
+
+ return true;
+}
+ /*}}}*/
+// HttpMethod::Loop - Main loop /*{{{*/
// ---------------------------------------------------------------------
/* */
int HttpMethod::Loop()
{
- ServerState *Server = 0;
+ signal(SIGTERM,SigTerm);
+ signal(SIGINT,SigTerm);
+
+ Server = 0;
+ int FailCounter = 0;
while (1)
{
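+      // Two consecutive failures reading a reply from this server; report
+      // the failure to APT and start the count again.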
+ if (FailCounter >= 2)
+ {
+ Fail("Massive Server Brain Damage");
+ FailCounter = 0;
+ }
+
// We have no commands, wait for some to arrive
if (Queue == 0)
{
Fail();
continue;
}
-
- // Queue the request
- SendReq(Queue,Server->In);
- // Handle the header data
- if (Server->RunHeaders() == false)
- {
- Fail();
- continue;
- }
+ // Fill the pipeline.
+ Fetch(0);
+ // Fetch the next URL header data from the server.
+ switch (Server->RunHeaders())
+ {
+ case 0:
+ break;
+
+ // The header data is bad
+ case 2:
+ {
+ _error->Error("Bad header Data");
+ Fail();
+ continue;
+ }
+
+      // The server closed the connection while we were reading the headers
+ default:
+ case 1:
+ {
+ FailCounter++;
+ _error->DumpErrors();
+ Server->Close();
+ continue;
+ }
+ };
+
// Decide what to do.
FetchResult Res;
+ Res.Filename = Queue->DestFile;
switch (DealWithHeaders(Res,Server))
{
// Ok, the file is Open
URIStart(Res);
// Run the data
- if (Server->RunData() == false)
- Fail();
-
- Res.MD5Sum = Srv->In.MD5->Result();
+ bool Result = Server->RunData();
+
+ // Close the file, destroy the FD object and timestamp it
+ FailFd = -1;
delete File;
File = 0;
+
+ // Timestamp
+ struct utimbuf UBuf;
+ time(&UBuf.actime);
+ UBuf.actime = Server->Date;
+ UBuf.modtime = Server->Date;
+ utime(Queue->DestFile.c_str(),&UBuf);
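+      // Stamping the file with the date the server reported lets later runs
+      // make If-Modified-Since style checks against the right time.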
+
+ // Send status to APT
+ if (Result == true)
+ {
+ Res.MD5Sum = Server->In.MD5->Result();
+ URIDone(Res);
+ }
+ else
+ Fail();
+
break;
}
Fail();
break;
}
+
+ // Hard internal error, kill the connection and fail
+ case 5:
+ {
+ Fail();
+ Server->Close();
+ break;
+ }
+
+      // We need to flush the data; the reply is like a 404 with error text
+ case 4:
+ {
+ Fail();
+
+         // Send the content to /dev/null
+ File = new FileFd("/dev/null",FileFd::WriteExists);
+ Server->RunData();
+ delete File;
+ File = 0;
+ break;
+ }
default:
Fail("Internal error");
break;
- }
+ }
+
+ FailCounter = 0;
}
return 0;