#include <config.h>
#include <apt-pkg/fileutl.h>
-#include <apt-pkg/acquire-method.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/netrc.h>
#include <apt-pkg/strutl.h>
+#include <apt-pkg/proxy.h>
#include <stddef.h>
#include <stdlib.h>
// CircleBuf::CircleBuf - Circular input buffer /*{{{*/
// ---------------------------------------------------------------------
/* */
-CircleBuf::CircleBuf(unsigned long long Size) : Size(Size), Hash(0)
+// The owning method gives access to per-method configuration (Dl-Limit).
+// Hash stays NULL until InitHashes() creates one; TotalWriten accumulates
+// the byte counts flushed out of the buffer (see Write).
+CircleBuf::CircleBuf(HttpMethod const * const Owner, unsigned long long Size)
+ : Size(Size), Hash(NULL), TotalWriten(0)
{
Buf = new unsigned char[Size];
Reset();
- CircleBuf::BwReadLimit = _config->FindI("Acquire::http::Dl-Limit",0)*1024;
+ // The configured value is scaled by 1024 (KiB -> bytes) for the limiter.
+ CircleBuf::BwReadLimit = Owner->ConfigFindI("Dl-Limit", 0) * 1024;
}
/*}}}*/
// CircleBuf::Reset - Reset to the default state /*{{{*/
InP = 0;
OutP = 0;
StrPos = 0;
+ TotalWriten = 0;
MaxGet = (unsigned long long)-1;
OutQueue = string();
- if (Hash != 0)
+ if (Hash != NULL)
{
delete Hash;
- Hash = new Hashes;
+ Hash = NULL;
}
}
/*}}}*/
return false;
}
+
+ TotalWriten += Res;
- if (Hash != 0)
+ if (Hash != NULL)
Hash->Add(Buf + (OutP%Size),Res);
OutP += Res;
}
// HttpServerState::HttpServerState - Constructor /*{{{*/
-HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(64*1024), Out(4*1024)
+// The circular buffers now take the owning method so they can consult its
+// configuration (e.g. the Dl-Limit bandwidth cap) instead of the global
+// Acquire::http tree.
+HttpServerState::HttpServerState(URI Srv,HttpMethod *Owner) : ServerState(Srv, Owner), In(Owner, 64*1024), Out(Owner, 4*1024)
{
- TimeOut = _config->FindI("Acquire::http::Timeout",TimeOut);
+ // NOTE(review): ConfigFindI presumably resolves "Timeout" within this
+ // method's own option namespace rather than hardcoded Acquire::http -- confirm.
+ TimeOut = Owner->ConfigFindI("Timeout", TimeOut);
Reset();
}
/*}}}*/
Persistent = true;
// Determine the proxy setting
- string SpecificProxy = _config->Find("Acquire::http::Proxy::" + ServerName.Host);
+ AutoDetectProxy(ServerName);
+ string SpecificProxy = Owner->ConfigFind("Proxy::" + ServerName.Host, "");
if (!SpecificProxy.empty())
{
if (SpecificProxy == "DIRECT")
}
else
{
- string DefProxy = _config->Find("Acquire::http::Proxy");
+ string DefProxy = Owner->ConfigFind("Proxy", "");
if (!DefProxy.empty())
{
Proxy = DefProxy;
Port = ServerName.Port;
Host = ServerName.Host;
}
+ else if (Proxy.Access != "http")
+ return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
else
{
if (Proxy.Port != 0)
{
/* Closes encoding is used when the server did not specify a size, the
loss of the connection means we are done */
- if (Encoding == Closes)
+ if (JunkSize != 0)
+ In.Limit(JunkSize);
+ else if (DownloadSize != 0)
+ In.Limit(DownloadSize);
+ else if (Persistent == false)
In.Limit(-1);
- else
- In.Limit(Size - StartPos);
// Just transfer the whole block.
do
return Owner->Flush() && !_error->PendingError();
}
/*}}}*/
+// Read the pending response body and discard it by writing to /dev/null,
+// reusing the normal RunData machinery.
+bool HttpServerState::RunDataToDevNull()				/*{{{*/
+{
+ // NOTE(review): DevNull's open result is not checked here; an open
+ // failure is presumably caught by the writes inside RunData -- confirm.
+ FileFd DevNull("/dev/null", FileFd::WriteOnly);
+ return RunData(&DevNull);
+}
+ /*}}}*/
bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/
{
return In.WriteTillEl(Data);
return (ServerFd != -1);
}
/*}}}*/
-bool HttpServerState::InitHashes(FileFd &File)			/*{{{*/
+// (Re-)create the input hasher restricted to the hash types the caller
+// expects; hashing any already-downloaded partial file is now a separate
+// step (AddPartialFileToHashes, see its use in DealWithHeaders).
+bool HttpServerState::InitHashes(HashStringList const &ExpectedHashes)	/*{{{*/
{
delete In.Hash;
- In.Hash = new Hashes;
-
- // Set the expected size and read file for the hashes
- File.Truncate(StartPos);
- return In.Hash->AddFD(File, StartPos);
+ In.Hash = new Hashes(ExpectedHashes);
+ return true;
}
/*}}}*/
+
+// Accessor for the hasher fed by the input buffer; NULL until InitHashes
+// has been called (Reset() also clears it back to NULL).
APT_PURE Hashes * HttpServerState::GetHashes()			/*{{{*/
{
return In.Hash;
}
/*}}}*/
// HttpServerState::Die - The server has closed the connection. /*{{{*/
-bool HttpServerState::Die(FileFd &File)
+bool HttpServerState::Die(FileFd * const File)
{
unsigned int LErrno = errno;
// Dump the buffer to the file
if (State == ServerState::Data)
{
+ if (File == nullptr)
+ return true;
// on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
// can't be set
- if (File.Name() != "/dev/null")
- SetNonBlock(File.Fd(),false);
+ if (File->Name() != "/dev/null")
+ SetNonBlock(File->Fd(),false);
while (In.WriteSpace() == true)
{
- if (In.Write(File.Fd()) == false)
+ if (In.Write(File->Fd()) == false)
return _error->Errno("write",_("Error writing to the file"));
// Done
// See if this is because the server finished the data stream
if (In.IsLimit() == false && State != HttpServerState::Header &&
- Encoding != HttpServerState::Closes)
+ Persistent == true)
{
Close();
if (LErrno == 0)
return true;
}
- if (In.IsLimit() == true || Encoding == ServerState::Closes)
+ if (In.IsLimit() == true || Persistent == false)
return true;
}
return false;
FD_SET(FileFD,&wfds);
// Add stdin
- if (_config->FindB("Acquire::http::DependOnSTDIN", true) == true)
+ if (Owner->ConfigFindB("DependOnSTDIN", true) == true)
FD_SET(STDIN_FILENO,&rfds);
// Figure out the max fd
if (Res == 0)
{
_error->Error(_("Connection timed out"));
- return Die(*File);
+ return Die(File);
}
// Handle server IO
{
errno = 0;
if (In.Read(ServerFd) == false)
- return Die(*File);
+ return Die(File);
}
if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds))
{
errno = 0;
if (Out.Write(ServerFd) == false)
- return Die(*File);
+ return Die(File);
}
// Send data to the file
return _error->Errno("write",_("Error writing to output file"));
}
+ if (MaximumSize > 0 && File && File->Tell() > MaximumSize)
+ {
+ Owner->SetFailReason("MaximumSizeExceeded");
+ return _error->Error("Writing more data than expected (%llu > %llu)",
+ File->Tell(), MaximumSize);
+ }
+
// Handle commands from APT
if (FD_ISSET(STDIN_FILENO,&rfds))
{
void HttpMethod::SendReq(FetchItem *Itm)
{
URI Uri = Itm->Uri;
+ {
+ auto const plus = Binary.find('+');
+ if (plus != std::string::npos)
+ Uri.Access = Binary.substr(plus + 1);
+ }
// The HTTP server expects a hostname with a trailing :port
std::stringstream Req;
if (Server->Proxy.empty() == true || Server->Proxy.Host.empty())
requesturi = Uri.Path;
else
- requesturi = Itm->Uri;
+ requesturi = Uri;
// The "+" is encoded as a workaround for a amazon S3 bug
// see LP bugs #1003633 and #1086997.
C.f. https://tools.ietf.org/wg/httpbis/trac/ticket/158 */
Req << "GET " << requesturi << " HTTP/1.1\r\n";
if (Uri.Port != 0)
- Req << "Host: " << ProperHost << ":" << Uri.Port << "\r\n";
+ Req << "Host: " << ProperHost << ":" << std::to_string(Uri.Port) << "\r\n";
else
Req << "Host: " << ProperHost << "\r\n";
// generate a cache control header (if needed)
- if (_config->FindB("Acquire::http::No-Cache",false) == true)
+ if (ConfigFindB("No-Cache",false) == true)
Req << "Cache-Control: no-cache\r\n"
<< "Pragma: no-cache\r\n";
else if (Itm->IndexFile == true)
- Req << "Cache-Control: max-age=" << _config->FindI("Acquire::http::Max-Age",0) << "\r\n";
- else if (_config->FindB("Acquire::http::No-Store",false) == true)
+ Req << "Cache-Control: max-age=" << std::to_string(ConfigFindI("Max-Age", 0)) << "\r\n";
+ else if (ConfigFindB("No-Store", false) == true)
Req << "Cache-Control: no-store\r\n";
// If we ask for uncompressed files servers might respond with content-
// see 657029, 657560 and co, so if we have no extension on the request
// ask for text only. As a sidenote: If there is nothing to negotate servers
// seem to be nice and ignore it.
- if (_config->FindB("Acquire::http::SendAccept", true) == true)
+ if (ConfigFindB("SendAccept", true) == true)
{
size_t const filepos = Itm->Uri.find_last_of('/');
string const file = Itm->Uri.substr(filepos + 1);
// Check for a partial file and send if-queries accordingly
struct stat SBuf;
if (stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
- Req << "Range: bytes=" << SBuf.st_size << "-\r\n"
- << "If-Range: " << TimeRFC1123(SBuf.st_mtime) << "\r\n";
+ Req << "Range: bytes=" << std::to_string(SBuf.st_size) << "-\r\n"
+ << "If-Range: " << TimeRFC1123(SBuf.st_mtime, false) << "\r\n";
else if (Itm->LastModified != 0)
- Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified).c_str() << "\r\n";
+ Req << "If-Modified-Since: " << TimeRFC1123(Itm->LastModified, false).c_str() << "\r\n";
if (Server->Proxy.User.empty() == false || Server->Proxy.Password.empty() == false)
Req << "Proxy-Authorization: Basic "
Req << "Authorization: Basic "
<< Base64Encode(Uri.User + ":" + Uri.Password) << "\r\n";
- Req << "User-Agent: " << _config->Find("Acquire::http::User-Agent",
+ Req << "User-Agent: " << ConfigFind("User-Agent",
"Debian APT-HTTP/1.3 (" PACKAGE_VERSION ")") << "\r\n";
Req << "\r\n";
if (Debug == true)
- cerr << Req << endl;
+ cerr << Req.str() << endl;
Server->WriteResponse(Req.str());
}
/*}}}*/
-// HttpMethod::Configuration - Handle a configuration message /*{{{*/
-// ---------------------------------------------------------------------
-/* We stash the desired pipeline depth */
-bool HttpMethod::Configuration(string Message)
+// Create a fresh per-server connection state; ownership is transferred to
+// the caller via unique_ptr.
+std::unique_ptr<ServerState> HttpMethod::CreateServerState(URI const &uri)/*{{{*/
{
- if (ServerMethod::Configuration(Message) == false)
- return false;
-
- AllowRedirect = _config->FindB("Acquire::http::AllowRedirect",true);
- PipelineDepth = _config->FindI("Acquire::http::Pipeline-Depth",
- PipelineDepth);
- Debug = _config->FindB("Debug::Acquire::http",false);
-
- // Get the proxy to use
- AutoDetectProxy();
-
- return true;
+ return std::unique_ptr<ServerState>(new HttpServerState(uri, this));
}
/*}}}*/
-// HttpMethod::AutoDetectProxy - auto detect proxy /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-bool HttpMethod::AutoDetectProxy()
+// Proxy auto-detection moved to apt-pkg (apt-pkg/proxy.h) and is now run
+// per server (see the AutoDetectProxy(ServerName) call above); this method
+// simply forwards to the global ::RotateDNS() helper defined elsewhere.
+void HttpMethod::RotateDNS() /*{{{*/
{
- // option is "Acquire::http::Proxy-Auto-Detect" but we allow the old
- // name without the dash ("-")
- AutoDetectProxyCmd = _config->Find("Acquire::http::Proxy-Auto-Detect",
- _config->Find("Acquire::http::ProxyAutoDetect"));
-
- if (AutoDetectProxyCmd.empty())
- return true;
-
- if (Debug)
- clog << "Using auto proxy detect command: " << AutoDetectProxyCmd << endl;
-
- int Pipes[2] = {-1,-1};
- if (pipe(Pipes) != 0)
- return _error->Errno("pipe", "Failed to create Pipe");
-
- pid_t Process = ExecFork();
- if (Process == 0)
- {
- close(Pipes[0]);
- dup2(Pipes[1],STDOUT_FILENO);
- SetCloseExec(STDOUT_FILENO,false);
-
- const char *Args[2];
- Args[0] = AutoDetectProxyCmd.c_str();
- Args[1] = 0;
- execv(Args[0],(char **)Args);
- cerr << "Failed to exec method " << Args[0] << endl;
- _exit(100);
- }
- char buf[512];
- int InFd = Pipes[0];
- close(Pipes[1]);
- int res = read(InFd, buf, sizeof(buf)-1);
- ExecWait(Process, "ProxyAutoDetect", true);
-
- if (res < 0)
- return _error->Errno("read", "Failed to read");
- if (res == 0)
- return _error->Warning("ProxyAutoDetect returned no data");
-
- // add trailing \0
- buf[res] = 0;
-
- if (Debug)
- clog << "auto detect command returned: '" << buf << "'" << endl;
-
- if (strstr(buf, "http://") == buf)
- _config->Set("Acquire::http::proxy", _strstrip(buf));
-
- return true;
+ ::RotateDNS();
}
/*}}}*/
-ServerState * HttpMethod::CreateServerState(URI uri) /*{{{*/
+// Act on the parsed response headers: let the shared ServerMethod logic run
+// first (errors, redirects, ...); only if it decides a payload file must be
+// opened do we open/resume the destination and prime the hashers.
+ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res)/*{{{*/
{
- return new HttpServerState(uri, this);
+ auto ret = ServerMethod::DealWithHeaders(Res);
+ if (ret != ServerMethod::FILE_IS_OPEN)
+ return ret;
+
+ // Open the file
+ delete File;
+ File = new FileFd(Queue->DestFile,FileFd::WriteAny);
+ if (_error->PendingError() == true)
+ return ERROR_NOT_FROM_SERVER;
+
+ // Stash failure details for the signal handler, which must not allocate.
+ FailFile = Queue->DestFile;
+ FailFile.c_str(); // Make sure we don't do a malloc in the signal handler
+ FailFd = File->Fd();
+ FailTime = Server->Date;
+
+ // Build hashers for the expected hash types and feed them the part of the
+ // file that is already on disk, so resumed downloads verify correctly.
+ if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
+ {
+ _error->Errno("read",_("Problem hashing file"));
+ return ERROR_NOT_FROM_SERVER;
+ }
+ if (Server->StartPos > 0)
+ Res.ResumePoint = Server->StartPos;
+
+ SetNonBlock(File->Fd(),true);
+ return FILE_IS_OPEN;
}
/*}}}*/
-void HttpMethod::RotateDNS() /*{{{*/
+// Register the names this binary answers to: always "http" (as fallback
+// when the binary has another name) and, for a combined binary such as
+// "foo+http", the "foo" prefix as well. NOTE(review): methodNames is
+// presumably consulted by the ConfigFind* helpers for option lookup -- confirm.
+HttpMethod::HttpMethod(std::string &&pProg) : ServerMethod(pProg.c_str(), "1.2", Pipeline | SendConfig)/*{{{*/
{
- ::RotateDNS();
+ auto addName = std::inserter(methodNames, methodNames.begin());
+ if (Binary != "http")
+ addName = "http";
+ auto const plus = Binary.find('+');
+ if (plus != std::string::npos)
+ addName = Binary.substr(0, plus);
+ // Raw owning pointers managed manually by the method; start unset.
+ File = 0;
+ Server = 0;
}
/*}}}*/