##################################################################### */
/*}}}*/
// Include Files /*{{{*/
+#include <config.h>
+
#include <apt-pkg/fileutl.h>
#include <apt-pkg/acquire-method.h>
+#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/netrc.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
+#include <climits>
+#include <limits>
#include <iostream>
#include <map>
-#include <apti18n.h>
-
// Internet stuff
#include <netdb.h>
#include "connect.h"
#include "rfc2553emu.h"
#include "http.h"
+
+#include <apti18n.h>
/*}}}*/
using namespace std;
string HttpMethod::FailFile;
int HttpMethod::FailFd = -1;
time_t HttpMethod::FailTime = 0;
-unsigned long PipelineDepth = 10;
+unsigned long PipelineDepth = 0;
unsigned long TimeOut = 120;
bool AllowRedirect = false;
bool Debug = false;
URI Proxy;
-unsigned long CircleBuf::BwReadLimit=0;
-unsigned long CircleBuf::BwTickReadData=0;
+unsigned long long CircleBuf::BwReadLimit=0;
+unsigned long long CircleBuf::BwTickReadData=0;
struct timeval CircleBuf::BwReadTick={0,0};
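+// bandwidth limiting is accounted in BW_HZ ticks per second (see CircleBuf::Read)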
const unsigned int CircleBuf::BW_HZ=10;
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
-CircleBuf::CircleBuf(unsigned long Size) : Size(Size), Hash(0)
+CircleBuf::CircleBuf(unsigned long long Size) : Size(Size), Hash(0)
{
Buf = new unsigned char[Size];
Reset();
InP = 0;
OutP = 0;
StrPos = 0;
- MaxGet = (unsigned int)-1;
+ MaxGet = (unsigned long long)-1;
OutQueue = string();
if (Hash != 0)
{
is non-blocking.. */
bool CircleBuf::Read(int Fd)
{
- unsigned long BwReadMax;
+ unsigned long long BwReadMax;
while (1)
{
struct timeval now;
gettimeofday(&now,0);
- unsigned long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 +
+ unsigned long long d = (now.tv_sec-CircleBuf::BwReadTick.tv_sec)*1000000 +
now.tv_usec-CircleBuf::BwReadTick.tv_usec;
if(d > 1000000/BW_HZ) {
CircleBuf::BwReadTick = now;
}
// Write the buffer segment
- int Res;
+ ssize_t Res;
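+      // with a bandwidth limit in place, cap a single read at whichever is
+      // smaller: the free buffer space or the budget for this tick (BwReadMax)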
if(CircleBuf::BwReadLimit) {
Res = read(Fd,Buf + (InP%Size),
BwReadMax > LeftRead() ? LeftRead() : BwReadMax);
return;
// Write the buffer segment
- unsigned long Sz = LeftRead();
+ unsigned long long Sz = LeftRead();
if (OutQueue.length() - StrPos < Sz)
Sz = OutQueue.length() - StrPos;
memcpy(Buf + (InP%Size),OutQueue.c_str() + StrPos,Sz);
return true;
// Write the buffer segment
- int Res;
+ ssize_t Res;
Res = write(Fd,Buf + (OutP%Size),LeftWrite());
if (Res == 0)
bool CircleBuf::WriteTillEl(string &Data,bool Single)
{
// We cheat and assume it is unneeded to have more than one buffer load
- for (unsigned long I = OutP; I < InP; I++)
+ for (unsigned long long I = OutP; I < InP; I++)
{
if (Buf[I%Size] != '\n')
continue;
Data = "";
while (OutP < I)
{
- unsigned long Sz = LeftWrite();
+ unsigned long long Sz = LeftWrite();
if (Sz == 0)
return false;
if (I - OutP < Sz)
clog << "Got " << InP << " in " << Diff << " at " << InP/Diff << endl;*/
}
/*}}}*/
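+// Free the circular buffer and the attached hash context (if any)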
+CircleBuf::~CircleBuf()
+{
+ delete [] Buf;
+ delete Hash;
+}
// ServerState::ServerState - Constructor /*{{{*/
// ---------------------------------------------------------------------
if (Debug == true)
clog << Data;
- for (string::const_iterator I = Data.begin(); I < Data.end(); I++)
+ for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
{
string::const_iterator J = I;
- for (; J != Data.end() && *J != '\n' && *J != '\r';J++);
+ for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
if (HeaderLine(string(I,J)) == false)
return RUN_HEADERS_PARSE_ERROR;
I = J;
return false;
// See if we are done
- unsigned long Len = strtol(Data.c_str(),0,16);
+ unsigned long long Len = strtoull(Data.c_str(),0,16);
if (Len == 0)
{
In.Limit(-1);
if (Line.empty() == true)
return true;
- // The http server might be trying to do something evil.
- if (Line.length() >= MAXLEN)
- return _error->Error(_("Got a single header line over %u chars"),MAXLEN);
-
string::size_type Pos = Line.find(' ');
if (Pos == string::npos || Pos+1 > Line.length())
{
// Evil servers return no version
if (Line[4] == '/')
{
- int const elements = sscanf(Line.c_str(),"HTTP/%u.%u %u%[^\n]",&Major,&Minor,&Result,Code);
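+   // field widths bound what sscanf may write, so an over-long status
+   // line cannot overflow the fixed-size Code buffer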
+ int const elements = sscanf(Line.c_str(),"HTTP/%3u.%3u %3u%359[^\n]",&Major,&Minor,&Result,Code);
if (elements == 3)
{
Code[0] = '\0';
{
Major = 0;
Minor = 9;
- if (sscanf(Line.c_str(),"HTTP %u%[^\n]",&Result,Code) != 2)
+ if (sscanf(Line.c_str(),"HTTP %3u%359[^\n]",&Result,Code) != 2)
return _error->Error(_("The HTTP server sent an invalid reply header"));
}
Persistent = false;
else
{
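+      // HTTP/1.0 closes the connection by default, 1.1 and later keep it alive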
- if (Major == 1 && Minor <= 0)
+ if (Major == 1 && Minor == 0)
Persistent = false;
else
Persistent = true;
// The length is already set from the Content-Range header
if (StartPos != 0)
return true;
-
- if (sscanf(Val.c_str(),"%lu",&Size) != 1)
- return _error->Error(_("The HTTP server sent an invalid Content-Length header"));
+
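+      // on overflow strtoull returns ULLONG_MAX and sets errno, so a
+      // maxed-out value is reported as an invalid header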
+ Size = strtoull(Val.c_str(), NULL, 10);
+ if (Size >= std::numeric_limits<unsigned long long>::max())
+ return _error->Errno("HeaderLine", _("The HTTP server sent an invalid Content-Length header"));
return true;
}
{
HaveContent = true;
- if (sscanf(Val.c_str(),"bytes %lu-%*u/%lu",&StartPos,&Size) != 2)
+ if (sscanf(Val.c_str(),"bytes %llu-%*u/%llu",&StartPos,&Size) != 2)
return _error->Error(_("The HTTP server sent an invalid Content-Range header"));
- if ((unsigned)StartPos > Size)
+ if ((unsigned long long)StartPos > Size)
return _error->Error(_("This HTTP server has broken range support"));
return true;
}
// The HTTP server expects a hostname with a trailing :port
char Buf[1000];
- string ProperHost = Uri.Host;
+ string ProperHost;
+
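+   // an IPv6 literal contains ':' itself, so wrap it in brackets to keep
+   // it distinguishable from the optional :port suffix appended below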
+ if (Uri.Host.find(':') != string::npos)
+ ProperHost = '[' + Uri.Host + ']';
+ else
+ ProperHost = Uri.Host;
if (Uri.Port != 0)
{
sprintf(Buf,":%u",Uri.Port);
pass it on, HTTP/1.1 says the connection should default to keep alive
and we expect the proxy to do this */
if (Proxy.empty() == true || Proxy.Host.empty())
+ {
+ // see LP bugs #1003633 and #1086997. The "+" is encoded as a workaround
+ // for an Amazon S3 bug
sprintf(Buf,"GET %s HTTP/1.1\r\nHost: %s\r\nConnection: keep-alive\r\n",
- QuoteString(Uri.Path,"~").c_str(),ProperHost.c_str());
+ QuoteString(Uri.Path,"+~ ").c_str(),ProperHost.c_str());
+ }
else
{
/* Generate a cache control header if necessary. We place a max
}
}
-
+ // If we ask for uncompressed files, servers might respond with content
+ // negotiation and we could end up with compressed files we do not support,
+ // see 657029, 657560 and others. So if the request has no file extension,
+ // ask for text only. As a side note: if there is nothing to negotiate,
+ // servers seem to be nice and ignore it.
+ if (_config->FindB("Acquire::http::SendAccept", true) == true)
+ {
+ size_t const filepos = Itm->Uri.find_last_of('/');
+ string const file = Itm->Uri.substr(filepos + 1);
+ if (flExtension(file) == file)
+ strcat(Buf,"Accept: text/*\r\n");
+ }
+
string Req = Buf;
// Check for a partial file
if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
{
// In this case we send an if-range query with a range header
- sprintf(Buf,"Range: bytes=%li-\r\nIf-Range: %s\r\n",(long)SBuf.st_size - 1,
+ sprintf(Buf,"Range: bytes=%lli-\r\nIf-Range: %s\r\n",(long long)SBuf.st_size - 1,
TimeRFC1123(SBuf.st_mtime).c_str());
Req += Buf;
}
Base64Encode(Uri.User + ":" + Uri.Password) + "\r\n";
}
Req += "User-Agent: " + _config->Find("Acquire::http::User-Agent",
- "Debian APT-HTTP/1.3 ("VERSION")") + "\r\n\r\n";
+ "Debian APT-HTTP/1.3 (" PACKAGE_VERSION ")") + "\r\n\r\n";
if (Debug == true)
cerr << Req << endl;
&& Srv->Result != 304 // Not Modified
&& Srv->Result != 306)) // (Not part of HTTP/1.1, reserved)
{
- if (!Srv->Location.empty())
+ if (Srv->Location.empty() == true)
+    ; // no Location header, fall through to the error handling below
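+ // a Location beginning with '/' is a redirect relative to the current site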
+ else if (Srv->Location[0] == '/' && Queue->Uri.empty() == false)
+ {
+ URI Uri = Queue->Uri;
+ if (Uri.Host.empty() == false)
+ NextURI = URI::SiteOnly(Uri);
+ else
+ NextURI.clear();
+ NextURI.append(DeQuoteString(Srv->Location));
+ return TRY_AGAIN_OR_REDIRECT;
+ }
+ else
{
- NextURI = Srv->Location;
- return TRY_AGAIN_OR_REDIRECT;
+ NextURI = DeQuoteString(Srv->Location);
+ URI tmpURI = NextURI;
+ // Do not allow a redirection to switch protocol
+ if (tmpURI.Access == "http")
+ return TRY_AGAIN_OR_REDIRECT;
}
/* else pass through for error message */
}
FailFile.c_str(); // Make sure we dont do a malloc in the signal handler
FailFd = File->Fd();
FailTime = Srv->Date;
-
- // Set the expected size
- if (Srv->StartPos >= 0)
- {
- Res.ResumePoint = Srv->StartPos;
- if (ftruncate(File->Fd(),Srv->StartPos) < 0)
- _error->Errno("ftruncate", _("Failed to truncate file"));
- }
-
- // Set the start point
- lseek(File->Fd(),0,SEEK_END);
delete Srv->In.Hash;
Srv->In.Hash = new Hashes;
-
- // Fill the Hash if the file is non-empty (resume)
- if (Srv->StartPos > 0)
+
+ // Set the expected size and read file for the hashes
+ if (Srv->StartPos >= 0)
{
- lseek(File->Fd(),0,SEEK_SET);
- if (Srv->In.Hash->AddFD(File->Fd(),Srv->StartPos) == false)
+ Res.ResumePoint = Srv->StartPos;
+ File->Truncate(Srv->StartPos);
+
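+      // feed the already-downloaded part into the hash so the final digest
+      // covers the complete file after the resume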
+ if (Srv->In.Hash->AddFD(*File,Srv->StartPos) == false)
{
_error->Errno("read",_("Problem hashing file"));
return ERROR_NOT_FROM_SERVER;
}
- lseek(File->Fd(),0,SEEK_END);
}
SetNonBlock(File->Fd(),true);
after the same URI is seen twice in a queue item. */
StringVector &R = Redirected[Queue->DestFile];
bool StopRedirects = false;
- if (R.size() == 0)
+ if (R.empty() == true)
R.push_back(Queue->Uri);
else if (R[0] == "STOP" || R.size() > 10)
StopRedirects = true;
else
{
- for (StringVectorIterator I = R.begin(); I != R.end(); I++)
+ for (StringVectorIterator I = R.begin(); I != R.end(); ++I)
if (Queue->Uri == *I)
{
R[0] = "STOP";
char buf[512];
int InFd = Pipes[0];
close(Pipes[1]);
- int res = read(InFd, buf, sizeof(buf));
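+  // read at most sizeof(buf)-1 bytes so the result can be NUL-terminated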
+ int res = read(InFd, buf, sizeof(buf)-1);
ExecWait(Process, "ProxyAutoDetect", true);
if (res < 0)