// $Id: http.cc,v 1.59 2004/05/08 19:42:35 mdz Exp $
/* ######################################################################
- HTTP Acquire Method - This is the HTTP aquire method for APT.
+ HTTP Acquire Method - This is the HTTP acquire method for APT.
It uses HTTP/1.1 and many of the fancy options there-in, such as
pipelining, range, if-range and so on.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
+#include <config.h>
+
#include <apt-pkg/fileutl.h>
-#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/netrc.h>
+#include <apt-pkg/strutl.h>
+#include <apt-pkg/proxy.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <sys/select.h>
+#include <cstring>
#include <sys/stat.h>
#include <sys/time.h>
-#include <utime.h>
#include <unistd.h>
-#include <signal.h>
#include <stdio.h>
#include <errno.h>
-#include <string.h>
#include <iostream>
-#include <map>
-#include <apti18n.h>
-
-
-// Internet stuff
-#include <netdb.h>
+#include <sstream>
#include "config.h"
#include "connect.h"
-#include "rfc2553emu.h"
#include "http.h"
+
+#include <apti18n.h>
/*}}}*/
using namespace std;
-string HttpMethod::FailFile;
-int HttpMethod::FailFd = -1;
-time_t HttpMethod::FailTime = 0;
-unsigned long PipelineDepth = 10;
-unsigned long TimeOut = 120;
-bool AllowRedirect = false;
-bool Debug = false;
-URI Proxy;
-
-unsigned long CircleBuf::BwReadLimit=0;
-unsigned long CircleBuf::BwTickReadData=0;
+unsigned long long CircleBuf::BwReadLimit=0;
+unsigned long long CircleBuf::BwTickReadData=0;
struct timeval CircleBuf::BwReadTick={0,0};
const unsigned int CircleBuf::BW_HZ=10;
-
+
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
-CircleBuf::CircleBuf(unsigned long Size) : Size(Size), Hash(0)
+CircleBuf::CircleBuf(unsigned long long Size)
+ : Size(Size), Hash(NULL), TotalWriten(0)
{
Buf = new unsigned char[Size];
Reset();
InP = 0;
OutP = 0;
StrPos = 0;
- MaxGet = (unsigned int)-1;
+ TotalWriten = 0;
+ MaxGet = (unsigned long long)-1;
OutQueue = string();
- if (Hash != 0)
+ if (Hash != NULL)
{
delete Hash;
- Hash = new Hashes;
- }
-};
+ Hash = NULL;
+ }
+}
/*}}}*/
// CircleBuf::Read - Read from a FD into the circular buffer /*{{{*/
// ---------------------------------------------------------------------
is non-blocking.. */
bool CircleBuf::Read(int Fd)
{
- unsigned long BwReadMax;
-
while (1)
{
// Woops, buffer is full
return true;
// what's left to read in this tick
- BwReadMax = CircleBuf::BwReadLimit/BW_HZ;
+ unsigned long long const BwReadMax = CircleBuf::BwReadLimit/BW_HZ;
if(CircleBuf::BwReadLimit) {
struct timeval now;
gettimeofday(&now,0);
- unsigned long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 +
+ unsigned long long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 +
now.tv_usec-CircleBuf::BwReadTick.tv_usec;
if(d > 1000000/BW_HZ) {
CircleBuf::BwReadTick = now;
}
// Write the buffer segment
- int Res;
+ ssize_t Res;
if(CircleBuf::BwReadLimit) {
Res = read(Fd,Buf + (InP%Size),
BwReadMax > LeftRead() ? LeftRead() : BwReadMax);
return;
// Write the buffer segment
- unsigned long Sz = LeftRead();
+ unsigned long long Sz = LeftRead();
if (OutQueue.length() - StrPos < Sz)
Sz = OutQueue.length() - StrPos;
memcpy(Buf + (InP%Size),OutQueue.c_str() + StrPos,Sz);
return true;
// Write the buffer segment
- int Res;
+ ssize_t Res;
Res = write(Fd,Buf + (OutP%Size),LeftWrite());
if (Res == 0)
return false;
}
+
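+   // Keep a running total of the data written out of the buffer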
+ TotalWriten += Res;
- if (Hash != 0)
+ if (Hash != NULL)
Hash->Add(Buf + (OutP%Size),Res);
OutP += Res;
bool CircleBuf::WriteTillEl(string &Data,bool Single)
{
// We cheat and assume it is unneeded to have more than one buffer load
- for (unsigned long I = OutP; I < InP; I++)
+ for (unsigned long long I = OutP; I < InP; I++)
{
if (Buf[I%Size] != '\n')
continue;
Data = "";
while (OutP < I)
{
- unsigned long Sz = LeftWrite();
+ unsigned long long Sz = LeftWrite();
if (Sz == 0)
return false;
if (I - OutP < Sz)
clog << "Got " << InP << " in " << Diff << " at " << InP/Diff << endl;*/
}
/*}}}*/
+CircleBuf::~CircleBuf()
+{
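+   // Free the ring buffer and whatever hash calculator we own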
+ delete [] Buf;
+ delete Hash;
+}
-// ServerState::ServerState - Constructor /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-ServerState::ServerState(URI Srv,HttpMethod *Owner) : Owner(Owner),
- In(64*1024), Out(4*1024),
- ServerName(Srv)
+// HttpServerState::HttpServerState - Constructor /*{{{*/
+HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(64*1024), Out(4*1024)
{
+ TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut);
Reset();
}
/*}}}*/
-// ServerState::Open - Open a connection to the server /*{{{*/
+// HttpServerState::Open - Open a connection to the server /*{{{*/
// ---------------------------------------------------------------------
/* This opens a connection to the server. */
-bool ServerState::Open()
+bool HttpServerState::Open()
{
// Use the already open connection if possible.
if (ServerFd != -1)
Persistent = true;
// Determine the proxy setting
+ AutoDetectProxy(ServerName);
string SpecificProxy = _config->Find("Acquire::http::Proxy::" + ServerName.Host);
if (!SpecificProxy.empty())
{
Port = ServerName.Port;
Host = ServerName.Host;
}
+ else if (Proxy.Access != "http")
+ return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
else
{
if (Proxy.Port != 0)
return true;
}
/*}}}*/
-// ServerState::Close - Close a connection to the server /*{{{*/
+// HttpServerState::Close - Close a connection to the server /*{{{*/
// ---------------------------------------------------------------------
/* */
-bool ServerState::Close()
+bool HttpServerState::Close()
{
close(ServerFd);
ServerFd = -1;
return true;
}
/*}}}*/
-// ServerState::RunHeaders - Get the headers before the data /*{{{*/
-// ---------------------------------------------------------------------
-/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
- parse error occurred */
-ServerState::RunHeadersResult ServerState::RunHeaders()
-{
- State = Header;
-
- Owner->Status(_("Waiting for headers"));
-
- Major = 0;
- Minor = 0;
- Result = 0;
- Size = 0;
- StartPos = 0;
- Encoding = Closes;
- HaveContent = false;
- time(&Date);
-
- do
- {
- string Data;
- if (In.WriteTillEl(Data) == false)
- continue;
-
- if (Debug == true)
- clog << Data;
-
- for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
- {
- string::const_iterator J = I;
- for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
- if (HeaderLine(string(I,J)) == false)
- return RUN_HEADERS_PARSE_ERROR;
- I = J;
- }
-
- // 100 Continue is a Nop...
- if (Result == 100)
- continue;
-
- // Tidy up the connection persistance state.
- if (Encoding == Closes && HaveContent == true)
- Persistent = false;
-
- return RUN_HEADERS_OK;
- }
- while (Owner->Go(false,this) == true);
-
- return RUN_HEADERS_IO_ERROR;
-}
- /*}}}*/
-// ServerState::RunData - Transfer the data from the socket /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-bool ServerState::RunData()
+// HttpServerState::RunData - Transfer the data from the socket /*{{{*/
+bool HttpServerState::RunData(FileFd * const File)
{
State = Data;
if (In.WriteTillEl(Data,true) == true)
break;
}
- while ((Last = Owner->Go(false,this)) == true);
+ while ((Last = Go(false, File)) == true);
if (Last == false)
return false;
// See if we are done
- unsigned long Len = strtol(Data.c_str(),0,16);
+ unsigned long long Len = strtoull(Data.c_str(),0,16);
if (Len == 0)
{
In.Limit(-1);
if (In.WriteTillEl(Data,true) == true && Data.length() <= 2)
break;
}
- while ((Last = Owner->Go(false,this)) == true);
+ while ((Last = Go(false, File)) == true);
if (Last == false)
return false;
return !_error->PendingError();
// Transfer the block
In.Limit(Len);
- while (Owner->Go(true,this) == true)
+ while (Go(true, File) == true)
if (In.IsLimit() == true)
break;
if (In.WriteTillEl(Data,true) == true)
break;
}
- while ((Last = Owner->Go(false,this)) == true);
+ while ((Last = Go(false, File)) == true);
if (Last == false)
return false;
}
{
/* Closes encoding is used when the server did not specify a size, the
loss of the connection means we are done */
- if (Encoding == Closes)
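+      // Prefer an explicit limit: junk data to be skipped or the expected
+      // download size; only read until close on non-persistent connections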
+ if (JunkSize != 0)
+ In.Limit(JunkSize);
+ else if (DownloadSize != 0)
+ In.Limit(DownloadSize);
+ else if (Persistent == false)
In.Limit(-1);
- else
- In.Limit(Size - StartPos);
// Just transfer the whole block.
do
In.Limit(-1);
return !_error->PendingError();
}
- while (Owner->Go(true,this) == true);
+ while (Go(true, File) == true);
}
- return Owner->Flush(this) && !_error->PendingError();
+ return Owner->Flush() && !_error->PendingError();
}
/*}}}*/
-// ServerState::HeaderLine - Process a header line /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-bool ServerState::HeaderLine(string Line)
+bool HttpServerState::RunDataToDevNull() /*{{{*/
{
- if (Line.empty() == true)
- return true;
-
- // The http server might be trying to do something evil.
- if (Line.length() >= MAXLEN)
- return _error->Error(_("Got a single header line over %u chars"),MAXLEN);
+ FileFd DevNull("/dev/null", FileFd::WriteOnly);
+ return RunData(&DevNull);
+}
+ /*}}}*/
+bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/
+{
+ return In.WriteTillEl(Data);
+}
+ /*}}}*/
+bool HttpServerState::LoadNextResponse(bool const ToFile, FileFd * const File)/*{{{*/
+{
+ return Go(ToFile, File);
+}
+ /*}}}*/
+bool HttpServerState::WriteResponse(const std::string &Data) /*{{{*/
+{
+ return Out.Read(Data);
+}
+ /*}}}*/
+APT_PURE bool HttpServerState::IsOpen() /*{{{*/
+{
+ return (ServerFd != -1);
+}
+ /*}}}*/
+bool HttpServerState::InitHashes(HashStringList const &ExpectedHashes) /*{{{*/
+{
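+   // Replace any existing hasher with one primed for the hash types expected for this item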
+ delete In.Hash;
+ In.Hash = new Hashes(ExpectedHashes);
+ return true;
+}
+ /*}}}*/
- string::size_type Pos = Line.find(' ');
- if (Pos == string::npos || Pos+1 > Line.length())
- {
- // Blah, some servers use "connection:closes", evil.
- Pos = Line.find(':');
- if (Pos == string::npos || Pos + 2 > Line.length())
- return _error->Error(_("Bad header line"));
- Pos++;
- }
+APT_PURE Hashes * HttpServerState::GetHashes() /*{{{*/
+{
+ return In.Hash;
+}
+ /*}}}*/
+// HttpServerState::Die - The server has closed the connection. /*{{{*/
+bool HttpServerState::Die(FileFd * const File)
+{
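+   // Preserve errno so the original read/write error can be reported below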
+ unsigned int LErrno = errno;
- // Parse off any trailing spaces between the : and the next word.
- string::size_type Pos2 = Pos;
- while (Pos2 < Line.length() && isspace(Line[Pos2]) != 0)
- Pos2++;
-
- string Tag = string(Line,0,Pos);
- string Val = string(Line,Pos2);
-
- if (stringcasecmp(Tag.c_str(),Tag.c_str()+4,"HTTP") == 0)
+ // Dump the buffer to the file
+ if (State == ServerState::Data)
{
- // Evil servers return no version
- if (Line[4] == '/')
- {
- int const elements = sscanf(Line.c_str(),"HTTP/%u.%u %u%[^\n]",&Major,&Minor,&Result,Code);
- if (elements == 3)
- {
- Code[0] = '\0';
- if (Debug == true)
- clog << "HTTP server doesn't give Reason-Phrase for " << Result << std::endl;
- }
- else if (elements != 4)
- return _error->Error(_("The HTTP server sent an invalid reply header"));
- }
- else
+ if (File == nullptr)
+ return true;
+ // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
+ // can't be set
+ if (File->Name() != "/dev/null")
+ SetNonBlock(File->Fd(),false);
+ while (In.WriteSpace() == true)
{
- Major = 0;
- Minor = 9;
- if (sscanf(Line.c_str(),"HTTP %u%[^\n]",&Result,Code) != 2)
- return _error->Error(_("The HTTP server sent an invalid reply header"));
- }
+ if (In.Write(File->Fd()) == false)
+ return _error->Errno("write",_("Error writing to the file"));
- /* Check the HTTP response header to get the default persistance
- state. */
- if (Major < 1)
- Persistent = false;
- else
- {
- if (Major == 1 && Minor <= 0)
- Persistent = false;
- else
- Persistent = true;
+ // Done
+ if (In.IsLimit() == true)
+ return true;
}
-
- return true;
- }
-
- if (stringcasecmp(Tag,"Content-Length:") == 0)
- {
- if (Encoding == Closes)
- Encoding = Stream;
- HaveContent = true;
-
- // The length is already set from the Content-Range header
- if (StartPos != 0)
- return true;
-
- if (sscanf(Val.c_str(),"%lu",&Size) != 1)
- return _error->Error(_("The HTTP server sent an invalid Content-Length header"));
- return true;
}
- if (stringcasecmp(Tag,"Content-Type:") == 0)
- {
- HaveContent = true;
- return true;
- }
-
- if (stringcasecmp(Tag,"Content-Range:") == 0)
+ // See if this is because the server finished the data stream
+ if (In.IsLimit() == false && State != HttpServerState::Header &&
+ Persistent == true)
{
- HaveContent = true;
-
- if (sscanf(Val.c_str(),"bytes %lu-%*u/%lu",&StartPos,&Size) != 2)
- return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
- if ((unsigned)StartPos > Size)
- return _error->Error(_("This HTTP server has broken range support"));
- return true;
+ Close();
+ if (LErrno == 0)
+ return _error->Error(_("Error reading from server. Remote end closed connection"));
+ errno = LErrno;
+ return _error->Errno("read",_("Error reading from server"));
}
-
- if (stringcasecmp(Tag,"Transfer-Encoding:") == 0)
+ else
{
- HaveContent = true;
- if (stringcasecmp(Val,"chunked") == 0)
- Encoding = Chunked;
- return true;
- }
+ In.Limit(-1);
- if (stringcasecmp(Tag,"Connection:") == 0)
- {
- if (stringcasecmp(Val,"close") == 0)
- Persistent = false;
- if (stringcasecmp(Val,"keep-alive") == 0)
- Persistent = true;
- return true;
- }
-
- if (stringcasecmp(Tag,"Last-Modified:") == 0)
- {
- if (RFC1123StrToTime(Val.c_str(), Date) == false)
- return _error->Error(_("Unknown date format"));
- return true;
- }
+ // Nothing left in the buffer
+ if (In.WriteSpace() == false)
+ return false;
- if (stringcasecmp(Tag,"Location:") == 0)
- {
- Location = Val;
+ // We may have got multiple responses back in one packet..
+ Close();
return true;
}
- return true;
+ return false;
}
/*}}}*/
-
-// HttpMethod::SendReq - Send the HTTP request /*{{{*/
+// HttpServerState::Flush - Dump the buffer into the file /*{{{*/
// ---------------------------------------------------------------------
-/* This places the http request in the outbound buffer */
-void HttpMethod::SendReq(FetchItem *Itm,CircleBuf &Out)
+/* This takes the current input buffer from the Server FD and writes it
+ into the file */
+bool HttpServerState::Flush(FileFd * const File)
{
- URI Uri = Itm->Uri;
-
- // The HTTP server expects a hostname with a trailing :port
- char Buf[1000];
- string ProperHost = Uri.Host;
- if (Uri.Port != 0)
+ if (File != NULL)
{
- sprintf(Buf,":%u",Uri.Port);
- ProperHost += Buf;
- }
+ // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
+ // can't be set
+ if (File->Name() != "/dev/null")
+ SetNonBlock(File->Fd(),false);
+ if (In.WriteSpace() == false)
+ return true;
- // Just in case.
- if (Itm->Uri.length() >= sizeof(Buf))
- abort();
-
- /* Build the request. We include a keep-alive header only for non-proxy
- requests. This is to tweak old http/1.0 servers that do support keep-alive
- but not HTTP/1.1 automatic keep-alive. Doing this with a proxy server
- will glitch HTTP/1.0 proxies because they do not filter it out and
- pass it on, HTTP/1.1 says the connection should default to keep alive
- and we expect the proxy to do this */
- if (Proxy.empty() == true || Proxy.Host.empty())
- sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n",
- QuoteString(Uri.Path,"~").c_str(),ProperHost.c_str());
- else
- {
- /* Generate a cache control header if necessary. We place a max
- cache age on index files, optionally set a no-cache directive
- and a no-store directive for archives. */
- sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\n",
- Itm->Uri.c_str(),ProperHost.c_str());
- }
- // generate a cache control header (if needed)
- if (_config->FindB("Acquire::http::No-Cache",false) == true)
- {
- strcat(Buf,"Cache-Control: no-cache\r\nPragma: no-cache\r\n");
- }
- else
- {
- if (Itm->IndexFile == true)
- {
- sprintf(Buf+strlen(Buf),"Cache-Control: max-age=%u\r\n",
- _config->FindI("Acquire::http::Max-Age",0));
- }
- else
- {
- if (_config->FindB("Acquire::http::No-Store",false) == true)
- strcat(Buf,"Cache-Control: no-store\r\n");
- }
- }
-
-
- string Req = Buf;
-
- // Check for a partial file
- struct stat SBuf;
- if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
- {
- // In this case we send an if-range query with a range header
- sprintf(Buf,"Range: bytes=%li-\r\nIf-Range: %s\r\n",(long)SBuf.st_size - 1,
- TimeRFC1123(SBuf.st_mtime).c_str());
- Req += Buf;
- }
- else
- {
- if (Itm->LastModified != 0)
+ while (In.WriteSpace() == true)
{
- sprintf(Buf,"If-Modified-Since: %s\r\n",TimeRFC1123(Itm->LastModified).c_str());
- Req += Buf;
+ if (In.Write(File->Fd()) == false)
+ return _error->Errno("write",_("Error writing to file"));
+ if (In.IsLimit() == true)
+ return true;
}
- }
-
- if (Proxy.User.empty() == false || Proxy.Password.empty() == false)
- Req += string("Proxy-Authorization: Basic ") +
- Base64Encode(Proxy.User + ":" + Proxy.Password) + "\r\n";
- maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
- if (Uri.User.empty() == false || Uri.Password.empty() == false)
- {
- Req += string("Authorization: Basic ") +
- Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n";
+ if (In.IsLimit() == true || Persistent == false)
+ return true;
}
- Req += "User-Agent: " + _config->Find("Acquire::http::User-Agent",
- "Debian APT-HTTP/1.3 ("VERSION")") + "\r\n\r\n";
-
- if (Debug == true)
- cerr << Req << endl;
-
- Out.Read(Req);
+ return false;
}
/*}}}*/
-// HttpMethod::Go - Run a single loop /*{{{*/
+// HttpServerState::Go - Run a single loop /*{{{*/
// ---------------------------------------------------------------------
/* This runs the select loop over the server FDs, Output file FDs and
stdin. */
-bool HttpMethod::Go(bool ToFile,ServerState *Srv)
+bool HttpServerState::Go(bool ToFile, FileFd * const File)
{
// Server has closed the connection
- if (Srv->ServerFd == -1 && (Srv->In.WriteSpace() == false ||
+ if (ServerFd == -1 && (In.WriteSpace() == false ||
ToFile == false))
return false;
/* Add the server. We only send more requests if the connection will
be persisting */
- if (Srv->Out.WriteSpace() == true && Srv->ServerFd != -1
- && Srv->Persistent == true)
- FD_SET(Srv->ServerFd,&wfds);
- if (Srv->In.ReadSpace() == true && Srv->ServerFd != -1)
- FD_SET(Srv->ServerFd,&rfds);
+ if (Out.WriteSpace() == true && ServerFd != -1
+ && Persistent == true)
+ FD_SET(ServerFd,&wfds);
+ if (In.ReadSpace() == true && ServerFd != -1)
+ FD_SET(ServerFd,&rfds);
// Add the file
int FileFD = -1;
- if (File != 0)
+ if (File != NULL)
FileFD = File->Fd();
- if (Srv->In.WriteSpace() == true && ToFile == true && FileFD != -1)
+ if (In.WriteSpace() == true && ToFile == true && FileFD != -1)
FD_SET(FileFD,&wfds);
// Add stdin
// Figure out the max fd
int MaxFd = FileFD;
- if (MaxFd < Srv->ServerFd)
- MaxFd = Srv->ServerFd;
+ if (MaxFd < ServerFd)
+ MaxFd = ServerFd;
// Select
struct timeval tv;
if (Res == 0)
{
_error->Error(_("Connection timed out"));
- return ServerDie(Srv);
+ return Die(File);
}
// Handle server IO
- if (Srv->ServerFd != -1 && FD_ISSET(Srv->ServerFd,&rfds))
+ if (ServerFd != -1 && FD_ISSET(ServerFd,&rfds))
{
errno = 0;
- if (Srv->In.Read(Srv->ServerFd) == false)
- return ServerDie(Srv);
+ if (In.Read(ServerFd) == false)
+ return Die(File);
}
- if (Srv->ServerFd != -1 && FD_ISSET(Srv->ServerFd,&wfds))
+ if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds))
{
errno = 0;
- if (Srv->Out.Write(Srv->ServerFd) == false)
- return ServerDie(Srv);
+ if (Out.Write(ServerFd) == false)
+ return Die(File);
}
// Send data to the file
if (FileFD != -1 && FD_ISSET(FileFD,&wfds))
{
- if (Srv->In.Write(FileFD) == false)
+ if (In.Write(FileFD) == false)
return _error->Errno("write",_("Error writing to output file"));
}
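+   // Refuse to write more data than we were told to expect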
+ if (MaximumSize > 0 && File && File->Tell() > MaximumSize)
+ {
+ Owner->SetFailReason("MaximumSizeExceeded");
+ return _error->Error("Writing more data than expected (%llu > %llu)",
+ File->Tell(), MaximumSize);
+ }
+
// Handle commands from APT
if (FD_ISSET(STDIN_FILENO,&rfds))
{
- if (Run(true) != -1)
+ if (Owner->Run(true) != -1)
exit(100);
}
return true;
}
/*}}}*/
-// HttpMethod::Flush - Dump the buffer into the file /*{{{*/
-// ---------------------------------------------------------------------
-/* This takes the current input buffer from the Server FD and writes it
- into the file */
-bool HttpMethod::Flush(ServerState *Srv)
-{
- if (File != 0)
- {
- // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
- // can't be set
- if (File->Name() != "/dev/null")
- SetNonBlock(File->Fd(),false);
- if (Srv->In.WriteSpace() == false)
- return true;
-
- while (Srv->In.WriteSpace() == true)
- {
- if (Srv->In.Write(File->Fd()) == false)
- return _error->Errno("write",_("Error writing to file"));
- if (Srv->In.IsLimit() == true)
- return true;
- }
- if (Srv->In.IsLimit() == true || Srv->Encoding == ServerState::Closes)
- return true;
- }
- return false;
-}
- /*}}}*/
-// HttpMethod::ServerDie - The server has closed the connection. /*{{{*/
+// HttpMethod::SendReq - Send the HTTP request /*{{{*/
// ---------------------------------------------------------------------
-/* */
-bool HttpMethod::ServerDie(ServerState *Srv)
+/* This places the http request in the outbound buffer */
+void HttpMethod::SendReq(FetchItem *Itm)
{
- unsigned int LErrno = errno;
-
- // Dump the buffer to the file
- if (Srv->State == ServerState::Data)
- {
- // on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
- // can't be set
- if (File->Name() != "/dev/null")
- SetNonBlock(File->Fd(),false);
- while (Srv->In.WriteSpace() == true)
- {
- if (Srv->In.Write(File->Fd()) == false)
- return _error->Errno("write",_("Error writing to the file"));
+ URI Uri = Itm->Uri;
- // Done
- if (Srv->In.IsLimit() == true)
- return true;
- }
- }
-
- // See if this is because the server finished the data stream
- if (Srv->In.IsLimit() == false && Srv->State != ServerState::Header &&
- Srv->Encoding != ServerState::Closes)
- {
- Srv->Close();
- if (LErrno == 0)
- return _error->Error(_("Error reading from server. Remote end closed connection"));
- errno = LErrno;
- return _error->Errno("read",_("Error reading from server"));
- }
+ // The HTTP server expects a hostname with a trailing :port
+ std::stringstream Req;
+ string ProperHost;
+
+ if (Uri.Host.find(':') != string::npos)
+ ProperHost = '[' + Uri.Host + ']';
else
- {
- Srv->In.Limit(-1);
+ ProperHost = Uri.Host;
+
+   /* RFC 2616 §5.1.2 requires absolute URIs for requests to proxies,
+      but while it is a must for all servers to accept absolute URIs,
+      it is assumed clients will send an absolute path for non-proxies */
+ std::string requesturi;
+ if (Server->Proxy.empty() == true || Server->Proxy.Host.empty())
+ requesturi = Uri.Path;
+ else
+ requesturi = Itm->Uri;
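+   // e.g. a direct request asks for "GET /dists/stable/InRelease", while the
+   // same request sent via a proxy asks for the absolute URI instead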
- // Nothing left in the buffer
- if (Srv->In.WriteSpace() == false)
- return false;
-
- // We may have got multiple responses back in one packet..
- Srv->Close();
- return true;
- }
-
- return false;
-}
- /*}}}*/
-// HttpMethod::DealWithHeaders - Handle the retrieved header data /*{{{*/
-// ---------------------------------------------------------------------
-/* We look at the header data we got back from the server and decide what
- to do. Returns DealWithHeadersResult (see http.h for details).
- */
-HttpMethod::DealWithHeadersResult
-HttpMethod::DealWithHeaders(FetchResult &Res,ServerState *Srv)
-{
- // Not Modified
- if (Srv->Result == 304)
- {
- unlink(Queue->DestFile.c_str());
- Res.IMSHit = true;
- Res.LastModified = Queue->LastModified;
- return IMS_HIT;
- }
-
- /* Redirect
- *
- * Note that it is only OK for us to treat all redirection the same
- * because we *always* use GET, not other HTTP methods. There are
- * three redirection codes for which it is not appropriate that we
- * redirect. Pass on those codes so the error handling kicks in.
- */
- if (AllowRedirect
- && (Srv->Result > 300 && Srv->Result < 400)
- && (Srv->Result != 300 // Multiple Choices
- && Srv->Result != 304 // Not Modified
- && Srv->Result != 306)) // (Not part of HTTP/1.1, reserved)
- {
- if (Srv->Location.empty() == true);
- else if (Srv->Location[0] == '/' && Queue->Uri.empty() == false)
- {
- URI Uri = Queue->Uri;
- if (Uri.Host.empty() == false)
- {
- if (Uri.Port != 0)
- strprintf(NextURI, "http://%s:%u", Uri.Host.c_str(), Uri.Port);
- else
- NextURI = "http://" + Uri.Host;
- }
- else
- NextURI.clear();
- NextURI.append(DeQuoteString(Srv->Location));
- return TRY_AGAIN_OR_REDIRECT;
- }
- else
- {
- NextURI = DeQuoteString(Srv->Location);
- return TRY_AGAIN_OR_REDIRECT;
- }
- /* else pass through for error message */
- }
-
- /* We have a reply we dont handle. This should indicate a perm server
- failure */
- if (Srv->Result < 200 || Srv->Result >= 300)
- {
- char err[255];
- snprintf(err,sizeof(err)-1,"HttpError%i",Srv->Result);
- SetFailReason(err);
- _error->Error("%u %s",Srv->Result,Srv->Code);
- if (Srv->HaveContent == true)
- return ERROR_WITH_CONTENT_PAGE;
- return ERROR_UNRECOVERABLE;
- }
+   // The "+" is encoded as a workaround for an Amazon S3 bug;
+   // see LP bugs #1003633 and #1086997.
+ requesturi = QuoteString(requesturi, "+~ ");
- // This is some sort of 2xx 'data follows' reply
- Res.LastModified = Srv->Date;
- Res.Size = Srv->Size;
-
- // Open the file
- delete File;
- File = new FileFd(Queue->DestFile,FileFd::WriteAny);
- if (_error->PendingError() == true)
- return ERROR_NOT_FROM_SERVER;
+   /* Build the request. No keep-alive header is included as keep-alive
+      is the default in HTTP/1.1, it can cause problems with proxies,
+      and we are an HTTP/1.1 client anyway.
+      C.f. https://tools.ietf.org/wg/httpbis/trac/ticket/158 */
+ Req << "GET " << requesturi << " HTTP/1.1\r\n";
+ if (Uri.Port != 0)
+ Req << "Host: " << ProperHost << ":" << std::to_string(Uri.Port) << "\r\n";
+ else
+ Req << "Host: " << ProperHost << "\r\n";
- FailFile = Queue->DestFile;
- FailFile.c_str(); // Make sure we dont do a malloc in the signal handler
- FailFd = File->Fd();
- FailTime = Srv->Date;
-
- // Set the expected size
- if (Srv->StartPos >= 0)
- {
- Res.ResumePoint = Srv->StartPos;
- if (ftruncate(File->Fd(),Srv->StartPos) < 0)
- _error->Errno("ftruncate", _("Failed to truncate file"));
- }
-
- // Set the start point
- lseek(File->Fd(),0,SEEK_END);
+ // generate a cache control header (if needed)
+ if (_config->FindB("Acquire::http::No-Cache",false) == true)
+ Req << "Cache-Control: no-cache\r\n"
+ << "Pragma: no-cache\r\n";
+ else if (Itm->IndexFile == true)
+ Req << "Cache-Control: max-age=" << std::to_string(_config->FindI("Acquire::http::Max-Age",0)) << "\r\n";
+ else if (_config->FindB("Acquire::http::No-Store",false) == true)
+ Req << "Cache-Control: no-store\r\n";
+
+   // If we ask for uncompressed files, servers might respond with content
+   // negotiation which lets us end up with compressed files we do not support
+   // (see 657029, 657560 and co), so if we have no extension on the request
+   // we ask for text only. As a side note: if there is nothing to negotiate,
+   // servers seem to be nice and ignore it.
+ if (_config->FindB("Acquire::http::SendAccept", true) == true)
+ {
+ size_t const filepos = Itm->Uri.find_last_of('/');
+ string const file = Itm->Uri.substr(filepos + 1);
+ if (flExtension(file) == file)
+ Req << "Accept: text/*\r\n";
+ }
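+   // e.g. "InRelease" has no extension, so "Accept: text/*" is sent, while
+   // "Packages.xz" has one and therefore gets no Accept header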
+
+ // Check for a partial file and send if-queries accordingly
+ struct stat SBuf;
+ if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
+ Req << "Range: bytes=" << std::to_string(SBuf.st_size) << "-\r\n"
+ << "If-Range: " << TimeRFC1123(SBuf.st_mtime, false) << "\r\n";
+ else if (Itm->LastModified != 0)
+ Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified, false).c_str() << "\r\n";
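+   // e.g. a 1000 byte partial file yields "Range: bytes=1000-" plus an If-Range
+   // validator built from its mtime; a fresh download sends If-Modified-Since instead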
- delete Srv->In.Hash;
- Srv->In.Hash = new Hashes;
-
- // Fill the Hash if the file is non-empty (resume)
- if (Srv->StartPos > 0)
- {
- lseek(File->Fd(),0,SEEK_SET);
- if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false)
- {
- _error->Errno("read",_("Problem hashing file"));
- return ERROR_NOT_FROM_SERVER;
- }
- lseek(File->Fd(),0,SEEK_END);
- }
-
- SetNonBlock(File->Fd(),true);
- return FILE_IS_OPEN;
-}
- /*}}}*/
-// HttpMethod::SigTerm - Handle a fatal signal /*{{{*/
-// ---------------------------------------------------------------------
-/* This closes and timestamps the open file. This is neccessary to get
- resume behavoir on user abort */
-void HttpMethod::SigTerm(int)
-{
- if (FailFd == -1)
- _exit(100);
- close(FailFd);
-
- // Timestamp
- struct utimbuf UBuf;
- UBuf.actime = FailTime;
- UBuf.modtime = FailTime;
- utime(FailFile.c_str(),&UBuf);
-
- _exit(100);
-}
- /*}}}*/
-// HttpMethod::Fetch - Fetch an item /*{{{*/
-// ---------------------------------------------------------------------
-/* This adds an item to the pipeline. We keep the pipeline at a fixed
- depth. */
-bool HttpMethod::Fetch(FetchItem *)
-{
- if (Server == 0)
- return true;
+ if (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false)
+ Req << "Proxy-Authorization: Basic "
+ << Base64Encode(Server->Proxy.User + ":" + Server->Proxy.Password) << "\r\n";
- // Queue the requests
- int Depth = -1;
- for (FetchItem *I = Queue; I != 0 && Depth < (signed)PipelineDepth;
- I = I->Next, Depth++)
- {
- // If pipelining is disabled, we only queue 1 request
- if (Server->Pipeline == false && Depth >= 0)
- break;
-
- // Make sure we stick with the same server
- if (Server->Comp(I->Uri) == false)
- break;
- if (QueueBack == I)
- {
- QueueBack = I->Next;
- SendReq(I,Server->Out);
- continue;
- }
- }
-
- return true;
-};
+ maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
+ if (Uri.User.empty() == false || Uri.Password.empty() == false)
+ Req << "Authorization: Basic "
+ << Base64Encode(Uri.User + ":" + Uri.Password) << "\r\n";
+
+ Req << "User-Agent: " << _config->Find("Acquire::http::User-Agent",
+ "Debian APT-HTTP/1.3 (" PACKAGE_VERSION ")") << "\r\n";
+
+ Req << "\r\n";
+
+ if (Debug == true)
+ cerr << Req.str() << endl;
+
+ Server->WriteResponse(Req.str());
+}
/*}}}*/
// HttpMethod::Configuration - Handle a configuration message /*{{{*/
// ---------------------------------------------------------------------
/* We stash the desired pipeline depth */
bool HttpMethod::Configuration(string Message)
{
- if (pkgAcqMethod::Configuration(Message) == false)
+ if (ServerMethod::Configuration(Message) == false)
return false;
-
+
AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true);
- TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut);
PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth",
PipelineDepth);
Debug = _config->FindB("Debug::Acquire::http",false);
- AutoDetectProxyCmd = _config->Find("Acquire::http::ProxyAutoDetect");
-
- // Get the proxy to use
- AutoDetectProxy();
return true;
}
/*}}}*/
-// HttpMethod::Loop - Main loop /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-int HttpMethod::Loop()
+std::unique_ptr<ServerState> HttpMethod::CreateServerState(URI const &uri)/*{{{*/
{
- typedef vector<string> StringVector;
- typedef vector<string>::iterator StringVectorIterator;
- map<string, StringVector> Redirected;
-
- signal(SIGTERM,SigTerm);
- signal(SIGINT,SigTerm);
-
- Server = 0;
-
- int FailCounter = 0;
- while (1)
- {
- // We have no commands, wait for some to arrive
- if (Queue == 0)
- {
- if (WaitFd(STDIN_FILENO) == false)
- return 0;
- }
-
- /* Run messages, we can accept 0 (no message) if we didn't
- do a WaitFd above.. Otherwise the FD is closed. */
- int Result = Run(true);
- if (Result != -1 && (Result != 0 || Queue == 0))
- {
- if(FailReason.empty() == false ||
- _config->FindB("Acquire::http::DependOnSTDIN", true) == true)
- return 100;
- else
- return 0;
- }
-
- if (Queue == 0)
- continue;
-
- // Connect to the server
- if (Server == 0 || Server->Comp(Queue->Uri) == false)
- {
- delete Server;
- Server = new ServerState(Queue->Uri,this);
- }
- /* If the server has explicitly said this is the last connection
- then we pre-emptively shut down the pipeline and tear down
- the connection. This will speed up HTTP/1.0 servers a tad
- since we don't have to wait for the close sequence to
- complete */
- if (Server->Persistent == false)
- Server->Close();
-
- // Reset the pipeline
- if (Server->ServerFd == -1)
- QueueBack = Queue;
-
- // Connnect to the host
- if (Server->Open() == false)
- {
- Fail(true);
- delete Server;
- Server = 0;
- continue;
- }
-
- // Fill the pipeline.
- Fetch(0);
-
- // Fetch the next URL header data from the server.
- switch (Server->RunHeaders())
- {
- case ServerState::RUN_HEADERS_OK:
- break;
-
- // The header data is bad
- case ServerState::RUN_HEADERS_PARSE_ERROR:
- {
- _error->Error(_("Bad header data"));
- Fail(true);
- RotateDNS();
- continue;
- }
-
- // The server closed a connection during the header get..
- default:
- case ServerState::RUN_HEADERS_IO_ERROR:
- {
- FailCounter++;
- _error->Discard();
- Server->Close();
- Server->Pipeline = false;
-
- if (FailCounter >= 2)
- {
- Fail(_("Connection failed"),true);
- FailCounter = 0;
- }
-
- RotateDNS();
- continue;
- }
- };
-
- // Decide what to do.
- FetchResult Res;
- Res.Filename = Queue->DestFile;
- switch (DealWithHeaders(Res,Server))
- {
- // Ok, the file is Open
- case FILE_IS_OPEN:
- {
- URIStart(Res);
-
- // Run the data
- bool Result = Server->RunData();
-
- /* If the server is sending back sizeless responses then fill in
- the size now */
- if (Res.Size == 0)
- Res.Size = File->Size();
-
- // Close the file, destroy the FD object and timestamp it
- FailFd = -1;
- delete File;
- File = 0;
-
- // Timestamp
- struct utimbuf UBuf;
- time(&UBuf.actime);
- UBuf.actime = Server->Date;
- UBuf.modtime = Server->Date;
- utime(Queue->DestFile.c_str(),&UBuf);
-
- // Send status to APT
- if (Result == true)
- {
- Res.TakeHashes(*Server->In.Hash);
- URIDone(Res);
- }
- else
- {
- if (Server->ServerFd == -1)
- {
- FailCounter++;
- _error->Discard();
- Server->Close();
-
- if (FailCounter >= 2)
- {
- Fail(_("Connection failed"),true);
- FailCounter = 0;
- }
-
- QueueBack = Queue;
- }
- else
- Fail(true);
- }
- break;
- }
-
- // IMS hit
- case IMS_HIT:
- {
- URIDone(Res);
- break;
- }
-
- // Hard server error, not found or something
- case ERROR_UNRECOVERABLE:
- {
- Fail();
- break;
- }
-
- // Hard internal error, kill the connection and fail
- case ERROR_NOT_FROM_SERVER:
- {
- delete File;
- File = 0;
-
- Fail();
- RotateDNS();
- Server->Close();
- break;
- }
-
- // We need to flush the data, the header is like a 404 w/ error text
- case ERROR_WITH_CONTENT_PAGE:
- {
- Fail();
-
- // Send to content to dev/null
- File = new FileFd("/dev/null",FileFd::WriteExists);
- Server->RunData();
- delete File;
- File = 0;
- break;
- }
-
- // Try again with a new URL
- case TRY_AGAIN_OR_REDIRECT:
- {
- // Clear rest of response if there is content
- if (Server->HaveContent)
- {
- File = new FileFd("/dev/null",FileFd::WriteExists);
- Server->RunData();
- delete File;
- File = 0;
- }
-
- /* Detect redirect loops. No more redirects are allowed
- after the same URI is seen twice in a queue item. */
- StringVector &R = Redirected[Queue->DestFile];
- bool StopRedirects = false;
- if (R.size() == 0)
- R.push_back(Queue->Uri);
- else if (R[0] == "STOP" || R.size() > 10)
- StopRedirects = true;
- else
- {
- for (StringVectorIterator I = R.begin(); I != R.end(); ++I)
- if (Queue->Uri == *I)
- {
- R[0] = "STOP";
- break;
- }
-
- R.push_back(Queue->Uri);
- }
-
- if (StopRedirects == false)
- Redirect(NextURI);
- else
- Fail();
-
- break;
- }
-
- default:
- Fail(_("Internal error"));
- break;
- }
-
- FailCounter = 0;
- }
-
- return 0;
+ return std::unique_ptr<ServerState>(new HttpServerState(uri, this));
}
/*}}}*/
-// HttpMethod::AutoDetectProxy - auto detect proxy /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-bool HttpMethod::AutoDetectProxy()
+void HttpMethod::RotateDNS() /*{{{*/
{
- if (AutoDetectProxyCmd.empty())
- return true;
+ ::RotateDNS();
+}
+ /*}}}*/
+ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res)/*{{{*/
+{
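+   // Let the generic header handling run first; we only need to act if a file is to be opened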
+ auto ret = ServerMethod::DealWithHeaders(Res);
+ if (ret != ServerMethod::FILE_IS_OPEN)
+ return ret;
- if (Debug)
- clog << "Using auto proxy detect command: " << AutoDetectProxyCmd << endl;
+ // Open the file
+ delete File;
+ File = new FileFd(Queue->DestFile,FileFd::WriteAny);
+ if (_error->PendingError() == true)
+ return ERROR_NOT_FROM_SERVER;
- int Pipes[2] = {-1,-1};
- if (pipe(Pipes) != 0)
- return _error->Errno("pipe", "Failed to create Pipe");
+ FailFile = Queue->DestFile;
+ FailFile.c_str(); // Make sure we don't do a malloc in the signal handler
+ FailFd = File->Fd();
+ FailTime = Server->Date;
- pid_t Process = ExecFork();
- if (Process == 0)
+ if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
{
- close(Pipes[0]);
- dup2(Pipes[1],STDOUT_FILENO);
- SetCloseExec(STDOUT_FILENO,false);
-
- const char *Args[2];
- Args[0] = AutoDetectProxyCmd.c_str();
- Args[1] = 0;
- execv(Args[0],(char **)Args);
- cerr << "Failed to exec method " << Args[0] << endl;
- _exit(100);
+ _error->Errno("read",_("Problem hashing file"));
+ return ERROR_NOT_FROM_SERVER;
}
- char buf[512];
- int InFd = Pipes[0];
- close(Pipes[1]);
- int res = read(InFd, buf, sizeof(buf));
- ExecWait(Process, "ProxyAutoDetect", true);
-
- if (res < 0)
- return _error->Errno("read", "Failed to read");
- if (res == 0)
- return _error->Warning("ProxyAutoDetect returned no data");
-
- // add trailing \0
- buf[res] = 0;
-
- if (Debug)
- clog << "auto detect command returned: '" << buf << "'" << endl;
+ if (Server->StartPos > 0)
+ Res.ResumePoint = Server->StartPos;
- if (strstr(buf, "http://") == buf)
- _config->Set("Acquire::http::proxy", _strstrip(buf));
-
- return true;
+ SetNonBlock(File->Fd(),true);
+ return FILE_IS_OPEN;
}
/*}}}*/
-
-