Having a Reset(bool) method to partially reset certain variables like
the download size was always strange, so this commit splits the
ServerState into an additional RequestState living on the stack for as
long as we deal with this request, causing an automatic "reset".
There is still much to do to make this code look better, but this is a
good first step: it compiles cleanly and passes all tests, so keeping
it in the history might be beneficial. By avoiding explicit memory
allocations it also ends up fixing a small memory leak in https.
Closes: #440057
}
/*}}}*/
// HttpServerState::RunData - Transfer the data from the socket /*{{{*/
}
/*}}}*/
// HttpServerState::RunData - Transfer the data from the socket /*{{{*/
-bool HttpServerState::RunData(FileFd * const File)
+bool HttpServerState::RunData(RequestState &Req)
+ Req.State = RequestState::Data;
// Chunked transfer encoding is fun..
// Chunked transfer encoding is fun..
- if (Encoding == Chunked)
+ if (Req.Encoding == RequestState::Chunked)
if (In.WriteTillEl(Data,true) == true)
break;
}
if (In.WriteTillEl(Data,true) == true)
break;
}
- while ((Last = Go(false, File)) == true);
+ while ((Last = Go(false, Req)) == true);
if (Last == false)
return false;
if (Last == false)
return false;
if (In.WriteTillEl(Data,true) == true && Data.length() <= 2)
break;
}
if (In.WriteTillEl(Data,true) == true && Data.length() <= 2)
break;
}
- while ((Last = Go(false, File)) == true);
+ while ((Last = Go(false, Req)) == true);
if (Last == false)
return false;
return !_error->PendingError();
if (Last == false)
return false;
return !_error->PendingError();
// Transfer the block
In.Limit(Len);
// Transfer the block
In.Limit(Len);
- while (Go(true, File) == true)
+ while (Go(true, Req) == true)
if (In.IsLimit() == true)
break;
if (In.IsLimit() == true)
break;
if (In.WriteTillEl(Data,true) == true)
break;
}
if (In.WriteTillEl(Data,true) == true)
break;
}
- while ((Last = Go(false, File)) == true);
+ while ((Last = Go(false, Req)) == true);
if (Last == false)
return false;
}
if (Last == false)
return false;
}
{
/* Closes encoding is used when the server did not specify a size, the
loss of the connection means we are done */
{
/* Closes encoding is used when the server did not specify a size, the
loss of the connection means we are done */
- if (JunkSize != 0)
- In.Limit(JunkSize);
- else if (DownloadSize != 0)
- In.Limit(DownloadSize);
+ if (Req.JunkSize != 0)
+ In.Limit(Req.JunkSize);
+ else if (Req.DownloadSize != 0)
+ In.Limit(Req.DownloadSize);
else if (Persistent == false)
In.Limit(-1);
else if (Persistent == false)
In.Limit(-1);
In.Limit(-1);
return !_error->PendingError();
}
In.Limit(-1);
return !_error->PendingError();
}
- while (Go(true, File) == true);
+ while (Go(true, Req) == true);
- return Owner->Flush() && !_error->PendingError();
+ return Flush(&Req.File) && !_error->PendingError();
-bool HttpServerState::RunDataToDevNull() /*{{{*/
+bool HttpServerState::RunDataToDevNull(RequestState &Req) /*{{{*/
{
// no need to clean up if we discard the connection anyhow
if (Persistent == false)
return true;
{
// no need to clean up if we discard the connection anyhow
if (Persistent == false)
return true;
- FileFd DevNull("/dev/null", FileFd::WriteOnly);
- return RunData(&DevNull);
+ Req.File.Open("/dev/null", FileFd::WriteOnly);
+ return RunData(Req);
}
/*}}}*/
bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/
}
/*}}}*/
bool HttpServerState::ReadHeaderLines(std::string &Data) /*{{{*/
return In.WriteTillEl(Data);
}
/*}}}*/
return In.WriteTillEl(Data);
}
/*}}}*/
-bool HttpServerState::LoadNextResponse(bool const ToFile, FileFd * const File)/*{{{*/
+bool HttpServerState::LoadNextResponse(bool const ToFile, RequestState &Req)/*{{{*/
- return Go(ToFile, File);
+ return Go(ToFile, Req);
}
/*}}}*/
bool HttpServerState::WriteResponse(const std::string &Data) /*{{{*/
}
/*}}}*/
bool HttpServerState::WriteResponse(const std::string &Data) /*{{{*/
-void HttpServerState::Reset(bool const Everything) /*{{{*/
+void HttpServerState::Reset() /*{{{*/
- ServerState::Reset(Everything);
- if (Everything)
- ServerFd = -1;
+ ServerState::Reset();
+ ServerFd = -1;
}
/*}}}*/
// HttpServerState::Die - The server has closed the connection. /*{{{*/
}
/*}}}*/
// HttpServerState::Die - The server has closed the connection. /*{{{*/
-bool HttpServerState::Die(FileFd * const File)
+bool HttpServerState::Die(RequestState &Req)
{
unsigned int LErrno = errno;
// Dump the buffer to the file
{
unsigned int LErrno = errno;
// Dump the buffer to the file
- if (State == ServerState::Data)
+ if (Req.State == RequestState::Data)
+ if (Req.File.IsOpen() == false)
return true;
// on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
// can't be set
return true;
// on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
// can't be set
- if (File->Name() != "/dev/null")
- SetNonBlock(File->Fd(),false);
+ if (Req.File.Name() != "/dev/null")
+ SetNonBlock(Req.File.Fd(),false);
while (In.WriteSpace() == true)
{
while (In.WriteSpace() == true)
{
- if (In.Write(File->Fd()) == false)
+ if (In.Write(Req.File.Fd()) == false)
return _error->Errno("write",_("Error writing to the file"));
// Done
return _error->Errno("write",_("Error writing to the file"));
// Done
}
// See if this is because the server finished the data stream
}
// See if this is because the server finished the data stream
- if (In.IsLimit() == false && State != HttpServerState::Header &&
+ if (In.IsLimit() == false && Req.State != RequestState::Header &&
Persistent == true)
{
Close();
Persistent == true)
{
Close();
into the file */
bool HttpServerState::Flush(FileFd * const File)
{
into the file */
bool HttpServerState::Flush(FileFd * const File)
{
{
// on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
// can't be set
{
// on GNU/kFreeBSD, apt dies on /dev/null because non-blocking
// can't be set
// ---------------------------------------------------------------------
/* This runs the select loop over the server FDs, Output file FDs and
stdin. */
// ---------------------------------------------------------------------
/* This runs the select loop over the server FDs, Output file FDs and
stdin. */
-bool HttpServerState::Go(bool ToFile, FileFd * const File)
+bool HttpServerState::Go(bool ToFile, RequestState &Req)
{
// Server has closed the connection
if (ServerFd == -1 && (In.WriteSpace() == false ||
{
// Server has closed the connection
if (ServerFd == -1 && (In.WriteSpace() == false ||
// Add the file
int FileFD = -1;
// Add the file
int FileFD = -1;
- if (File != NULL)
- FileFD = File->Fd();
+ if (Req.File.IsOpen())
+ FileFD = Req.File.Fd();
if (In.WriteSpace() == true && ToFile == true && FileFD != -1)
FD_SET(FileFD,&wfds);
if (In.WriteSpace() == true && ToFile == true && FileFD != -1)
FD_SET(FileFD,&wfds);
if (Res == 0)
{
_error->Error(_("Connection timed out"));
if (Res == 0)
{
_error->Error(_("Connection timed out"));
{
errno = 0;
if (In.Read(ServerFd) == false)
{
errno = 0;
if (In.Read(ServerFd) == false)
}
if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds))
{
errno = 0;
if (Out.Write(ServerFd) == false)
}
if (ServerFd != -1 && FD_ISSET(ServerFd,&wfds))
{
errno = 0;
if (Out.Write(ServerFd) == false)
}
// Send data to the file
}
// Send data to the file
return _error->Errno("write",_("Error writing to output file"));
}
return _error->Errno("write",_("Error writing to output file"));
}
- if (MaximumSize > 0 && File && File->Tell() > MaximumSize)
+ if (Req.MaximumSize > 0 && Req.File.IsOpen() && Req.File.Failed() == false && Req.File.Tell() > Req.MaximumSize)
{
Owner->SetFailReason("MaximumSizeExceeded");
return _error->Error("Writing more data than expected (%llu > %llu)",
{
Owner->SetFailReason("MaximumSizeExceeded");
return _error->Error("Writing more data than expected (%llu > %llu)",
- File->Tell(), MaximumSize);
+ Req.File.Tell(), Req.MaximumSize);
}
// Handle commands from APT
}
// Handle commands from APT
-ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res)/*{{{*/
+ServerMethod::DealWithHeadersResult HttpMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)/*{{{*/
- auto ret = ServerMethod::DealWithHeaders(Res);
+ auto ret = ServerMethod::DealWithHeaders(Res, Req);
if (ret != ServerMethod::FILE_IS_OPEN)
return ret;
if (ret != ServerMethod::FILE_IS_OPEN)
return ret;
-
- // Open the file
- delete File;
- File = new FileFd(Queue->DestFile,FileFd::WriteAny);
- if (_error->PendingError() == true)
+ if (Req.File.Open(Queue->DestFile, FileFd::WriteAny) == false)
return ERROR_NOT_FROM_SERVER;
FailFile = Queue->DestFile;
FailFile.c_str(); // Make sure we don't do a malloc in the signal handler
return ERROR_NOT_FROM_SERVER;
FailFile = Queue->DestFile;
FailFile.c_str(); // Make sure we don't do a malloc in the signal handler
- FailFd = File->Fd();
- FailTime = Server->Date;
+ FailFd = Req.File.Fd();
+ FailTime = Req.Date;
- if (Server->InitHashes(Queue->ExpectedHashes) == false || Server->AddPartialFileToHashes(*File) == false)
+ if (Server->InitHashes(Queue->ExpectedHashes) == false || Req.AddPartialFileToHashes(Req.File) == false)
{
_error->Errno("read",_("Problem hashing file"));
return ERROR_NOT_FROM_SERVER;
}
{
_error->Errno("read",_("Problem hashing file"));
return ERROR_NOT_FROM_SERVER;
}
- if (Server->StartPos > 0)
- Res.ResumePoint = Server->StartPos;
+ if (Req.StartPos > 0)
+ Res.ResumePoint = Req.StartPos;
- SetNonBlock(File->Fd(),true);
+ SetNonBlock(Req.File.Fd(),true);
return FILE_IS_OPEN;
}
/*}}}*/
return FILE_IS_OPEN;
}
/*}}}*/
auto const plus = Binary.find('+');
if (plus != std::string::npos)
addName = Binary.substr(0, plus);
auto const plus = Binary.find('+');
if (plus != std::string::npos)
addName = Binary.substr(0, plus);
- File = 0;
- Server = 0;
protected:
virtual bool ReadHeaderLines(std::string &Data) APT_OVERRIDE;
protected:
virtual bool ReadHeaderLines(std::string &Data) APT_OVERRIDE;
- virtual bool LoadNextResponse(bool const ToFile, FileFd * const File) APT_OVERRIDE;
+ virtual bool LoadNextResponse(bool const ToFile, RequestState &Req) APT_OVERRIDE;
virtual bool WriteResponse(std::string const &Data) APT_OVERRIDE;
public:
virtual bool WriteResponse(std::string const &Data) APT_OVERRIDE;
public:
- virtual void Reset(bool const Everything = true) APT_OVERRIDE;
+ virtual void Reset() APT_OVERRIDE;
- virtual bool RunData(FileFd * const File) APT_OVERRIDE;
- virtual bool RunDataToDevNull() APT_OVERRIDE;
+ virtual bool RunData(RequestState &Req) APT_OVERRIDE;
+ virtual bool RunDataToDevNull(RequestState &Req) APT_OVERRIDE;
virtual bool Open() APT_OVERRIDE;
virtual bool IsOpen() APT_OVERRIDE;
virtual bool Close() APT_OVERRIDE;
virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
virtual Hashes * GetHashes() APT_OVERRIDE;
virtual bool Open() APT_OVERRIDE;
virtual bool IsOpen() APT_OVERRIDE;
virtual bool Close() APT_OVERRIDE;
virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
virtual Hashes * GetHashes() APT_OVERRIDE;
- virtual bool Die(FileFd * const File) APT_OVERRIDE;
+ virtual bool Die(RequestState &Req) APT_OVERRIDE;
virtual bool Flush(FileFd * const File) APT_OVERRIDE;
virtual bool Flush(FileFd * const File) APT_OVERRIDE;
- virtual bool Go(bool ToFile, FileFd * const File) APT_OVERRIDE;
+ virtual bool Go(bool ToFile, RequestState &Req) APT_OVERRIDE;
HttpServerState(URI Srv, HttpMethod *Owner);
virtual ~HttpServerState() {Close();};
HttpServerState(URI Srv, HttpMethod *Owner);
virtual ~HttpServerState() {Close();};
virtual std::unique_ptr<ServerState> CreateServerState(URI const &uri) APT_OVERRIDE;
virtual void RotateDNS() APT_OVERRIDE;
virtual std::unique_ptr<ServerState> CreateServerState(URI const &uri) APT_OVERRIDE;
virtual void RotateDNS() APT_OVERRIDE;
- virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res) APT_OVERRIDE;
+ virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res, RequestState &Req) APT_OVERRIDE;
protected:
std::string AutoDetectProxyCmd;
protected:
std::string AutoDetectProxyCmd;
HttpsMethod * const https;
HttpsMethod::FetchResult * const Res;
HttpsMethod::FetchItem const * const Itm;
HttpsMethod * const https;
HttpsMethod::FetchResult * const Res;
HttpsMethod::FetchItem const * const Itm;
+ RequestState * const Req;
CURLUserPointer(HttpsMethod * const https, HttpsMethod::FetchResult * const Res,
CURLUserPointer(HttpsMethod * const https, HttpsMethod::FetchResult * const Res,
- HttpsMethod::FetchItem const * const Itm) : https(https), Res(Res), Itm(Itm) {}
+ HttpsMethod::FetchItem const * const Itm, RequestState * const Req) : https(https), Res(Res), Itm(Itm), Req(Req) {}
if (line.empty() == true)
{
if (line.empty() == true)
{
- me->https->Server->JunkSize = 0;
- if (me->https->Server->Result != 416 && me->https->Server->StartPos != 0)
+ if (me->Req->File.Open(me->Itm->DestFile, FileFd::WriteAny) == false)
+ return ERROR_NOT_FROM_SERVER;
+
+ me->Req->JunkSize = 0;
+ if (me->Req->Result != 416 && me->Req->StartPos != 0)
- else if (me->https->Server->Result == 416)
+ else if (me->Req->Result == 416)
{
bool partialHit = false;
if (me->Itm->ExpectedHashes.usable() == true)
{
Hashes resultHashes(me->Itm->ExpectedHashes);
FileFd file(me->Itm->DestFile, FileFd::ReadOnly);
{
bool partialHit = false;
if (me->Itm->ExpectedHashes.usable() == true)
{
Hashes resultHashes(me->Itm->ExpectedHashes);
FileFd file(me->Itm->DestFile, FileFd::ReadOnly);
- me->https->Server->TotalFileSize = file.FileSize();
- me->https->Server->Date = file.ModificationTime();
+ me->Req->TotalFileSize = file.FileSize();
+ me->Req->Date = file.ModificationTime();
resultHashes.AddFD(file);
HashStringList const hashList = resultHashes.GetHashStringList();
partialHit = (me->Itm->ExpectedHashes == hashList);
}
resultHashes.AddFD(file);
HashStringList const hashList = resultHashes.GetHashStringList();
partialHit = (me->Itm->ExpectedHashes == hashList);
}
- else if (me->https->Server->Result == 416 && me->https->Server->TotalFileSize == me->https->File->FileSize())
+ else if (me->Req->Result == 416 && me->Req->TotalFileSize == me->Req->File.FileSize())
partialHit = true;
if (partialHit == true)
{
partialHit = true;
if (partialHit == true)
{
- me->https->Server->Result = 200;
- me->https->Server->StartPos = me->https->Server->TotalFileSize;
+ me->Req->Result = 200;
+ me->Req->StartPos = me->Req->TotalFileSize;
// the actual size is not important for https as curl will deal with it
// by itself and e.g. doesn't bother us with transport-encoding…
// the actual size is not important for https as curl will deal with it
// by itself and e.g. doesn't bother us with transport-encoding…
- me->https->Server->JunkSize = std::numeric_limits<unsigned long long>::max();
+ me->Req->JunkSize = std::numeric_limits<unsigned long long>::max();
- me->https->Server->StartPos = 0;
- me->https->Server->StartPos = 0;
- me->Res->LastModified = me->https->Server->Date;
- me->Res->Size = me->https->Server->TotalFileSize;
- me->Res->ResumePoint = me->https->Server->StartPos;
+ me->Res->LastModified = me->Req->Date;
+ me->Res->Size = me->Req->TotalFileSize;
+ me->Res->ResumePoint = me->Req->StartPos;
// we expect valid data, so tell our caller we get the file now
// we expect valid data, so tell our caller we get the file now
- if (me->https->Server->Result >= 200 && me->https->Server->Result < 300)
+ if (me->Req->Result >= 200 && me->Req->Result < 300)
{
if (me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint)
me->https->URIStart(*me->Res);
{
if (me->Res->Size != 0 && me->Res->Size > me->Res->ResumePoint)
me->https->URIStart(*me->Res);
- if (me->https->Server->AddPartialFileToHashes(*(me->https->File)) == false)
+ if (me->Req->AddPartialFileToHashes(me->Req->File) == false)
- me->https->Server->JunkSize = std::numeric_limits<decltype(me->https->Server->JunkSize)>::max();
+ me->Req->JunkSize = std::numeric_limits<decltype(me->Req->JunkSize)>::max();
- else if (me->https->Server->HeaderLine(line) == false)
+ else if (me->Req->HeaderLine(line) == false)
return 0;
return size*nmemb;
return 0;
return size*nmemb;
size_t
HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp)
{
size_t
HttpsMethod::write_data(void *buffer, size_t size, size_t nmemb, void *userp)
{
- HttpsMethod *me = static_cast<HttpsMethod *>(userp);
+ CURLUserPointer *me = static_cast<CURLUserPointer *>(userp);
size_t buffer_size = size * nmemb;
// we don't need to count the junk here, just drop anything we get as
// we don't always know how long it would be, e.g. in chunked encoding.
size_t buffer_size = size * nmemb;
// we don't need to count the junk here, just drop anything we get as
// we don't always know how long it would be, e.g. in chunked encoding.
- if (me->Server->JunkSize != 0)
+ if (me->Req->JunkSize != 0)
- if(me->File->Write(buffer, buffer_size) != true)
+ if(me->Req->File.Write(buffer, buffer_size) != true)
- if(me->Queue->MaximumSize > 0)
+ if(me->https->Queue->MaximumSize > 0)
- unsigned long long const TotalWritten = me->File->Tell();
- if (TotalWritten > me->Queue->MaximumSize)
+ unsigned long long const TotalWritten = me->Req->File.Tell();
+ if (TotalWritten > me->https->Queue->MaximumSize)
- me->SetFailReason("MaximumSizeExceeded");
+ me->https->SetFailReason("MaximumSizeExceeded");
_error->Error("Writing more data than expected (%llu > %llu)",
_error->Error("Writing more data than expected (%llu > %llu)",
- TotalWritten, me->Queue->MaximumSize);
+ TotalWritten, me->https->Queue->MaximumSize);
- if (me->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false)
+ if (me->https->Server->GetHashes()->Add((unsigned char const * const)buffer, buffer_size) == false)
return 0;
return buffer_size;
return 0;
return buffer_size;
return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
return _error->Error("Unsupported proxy configured: %s", URI::SiteOnly(Proxy).c_str());
maybe_add_auth (Uri, _config->FindFile("Dir::Etc::netrc"));
+ if (Server == nullptr || Server->Comp(Itm->Uri) == false)
+ Server = CreateServerState(Itm->Uri);
- CURLUserPointer userp(this, &Res, Itm);
+ RequestState Req(this, Server.get());
+ CURLUserPointer userp(this, &Res, Itm, &Req);
// callbacks
curl_easy_setopt(curl, CURLOPT_URL, static_cast<string>(Uri).c_str());
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header);
curl_easy_setopt(curl, CURLOPT_WRITEHEADER, &userp);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
// callbacks
curl_easy_setopt(curl, CURLOPT_URL, static_cast<string>(Uri).c_str());
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, parse_header);
curl_easy_setopt(curl, CURLOPT_WRITEHEADER, &userp);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
- curl_easy_setopt(curl, CURLOPT_WRITEDATA, this);
+ curl_easy_setopt(curl, CURLOPT_WRITEDATA, &userp);
// options
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true);
curl_easy_setopt(curl, CURLOPT_FILETIME, true);
// options
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, true);
curl_easy_setopt(curl, CURLOPT_FILETIME, true);
headers = curl_slist_append(headers, "Accept: text/*");
}
headers = curl_slist_append(headers, "Accept: text/*");
}
- // go for it - if the file exists, append on it
- File = new FileFd(Itm->DestFile, FileFd::WriteAny);
- if (Server == nullptr || Server->Comp(Itm->Uri) == false)
- Server = CreateServerState(Itm->Uri);
- else
- Server->Reset(false);
-
// if we have the file send an if-range query with a range header
if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
{
// if we have the file send an if-range query with a range header
if (Server->RangesAllowed && stat(Itm->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
{
long curl_condition_unmet = 0;
curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet);
if (curl_condition_unmet == 1)
long curl_condition_unmet = 0;
curl_easy_getinfo(curl, CURLINFO_CONDITION_UNMET, &curl_condition_unmet);
if (curl_condition_unmet == 1)
curl_slist_free_all(headers);
// cleanup
curl_slist_free_all(headers);
// cleanup
- switch (DealWithHeaders(Res))
+ switch (DealWithHeaders(Res, Req))
{
case ServerMethod::IMS_HIT:
URIDone(Res);
{
case ServerMethod::IMS_HIT:
URIDone(Res);
case ServerMethod::ERROR_WITH_CONTENT_PAGE:
// unlink, no need keep 401/404 page content in partial/
case ServerMethod::ERROR_WITH_CONTENT_PAGE:
// unlink, no need keep 401/404 page content in partial/
- RemoveFile(Binary.c_str(), File->Name());
+ RemoveFile(Binary.c_str(), Req.File.Name());
case ServerMethod::ERROR_UNRECOVERABLE:
case ServerMethod::ERROR_NOT_FROM_SERVER:
return false;
case ServerMethod::ERROR_UNRECOVERABLE:
case ServerMethod::ERROR_NOT_FROM_SERVER:
return false;
case ServerMethod::FILE_IS_OPEN:
struct stat resultStat;
case ServerMethod::FILE_IS_OPEN:
struct stat resultStat;
- if (unlikely(stat(File->Name().c_str(), &resultStat) != 0))
+ if (unlikely(stat(Req.File.Name().c_str(), &resultStat) != 0))
- _error->Errno("stat", "Unable to access file %s", File->Name().c_str());
+ _error->Errno("stat", "Unable to access file %s", Req.File.Name().c_str());
return false;
}
Res.Size = resultStat.st_size;
return false;
}
Res.Size = resultStat.st_size;
times[0].tv_sec = Res.LastModified;
times[1].tv_sec = Res.LastModified;
times[0].tv_usec = times[1].tv_usec = 0;
times[0].tv_sec = Res.LastModified;
times[1].tv_sec = Res.LastModified;
times[0].tv_usec = times[1].tv_usec = 0;
- utimes(File->Name().c_str(), times);
+ utimes(Req.File.Name().c_str(), times);
}
else
Res.LastModified = resultStat.st_mtime;
}
else
Res.LastModified = resultStat.st_mtime;
protected:
virtual bool ReadHeaderLines(std::string &/*Data*/) APT_OVERRIDE { return false; }
protected:
virtual bool ReadHeaderLines(std::string &/*Data*/) APT_OVERRIDE { return false; }
- virtual bool LoadNextResponse(bool const /*ToFile*/, FileFd * const /*File*/) APT_OVERRIDE { return false; }
+ virtual bool LoadNextResponse(bool const /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; }
public:
virtual bool WriteResponse(std::string const &/*Data*/) APT_OVERRIDE { return false; }
/** \brief Transfer the data from the socket */
public:
virtual bool WriteResponse(std::string const &/*Data*/) APT_OVERRIDE { return false; }
/** \brief Transfer the data from the socket */
- virtual bool RunData(FileFd * const /*File*/) APT_OVERRIDE { return false; }
- virtual bool RunDataToDevNull() APT_OVERRIDE { return false; }
+ virtual bool RunData(RequestState &) APT_OVERRIDE { return false; }
+ virtual bool RunDataToDevNull(RequestState &) APT_OVERRIDE { return false; }
virtual bool Open() APT_OVERRIDE { return false; }
virtual bool IsOpen() APT_OVERRIDE { return false; }
virtual bool Close() APT_OVERRIDE { return false; }
virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
virtual Hashes * GetHashes() APT_OVERRIDE;
virtual bool Open() APT_OVERRIDE { return false; }
virtual bool IsOpen() APT_OVERRIDE { return false; }
virtual bool Close() APT_OVERRIDE { return false; }
virtual bool InitHashes(HashStringList const &ExpectedHashes) APT_OVERRIDE;
virtual Hashes * GetHashes() APT_OVERRIDE;
- virtual bool Die(FileFd * const /*File*/) APT_OVERRIDE { return false; }
+ virtual bool Die(RequestState &/*Req*/) APT_OVERRIDE { return false; }
virtual bool Flush(FileFd * const /*File*/) APT_OVERRIDE { return false; }
virtual bool Flush(FileFd * const /*File*/) APT_OVERRIDE { return false; }
- virtual bool Go(bool /*ToFile*/, FileFd * const /*File*/) APT_OVERRIDE { return false; }
+ virtual bool Go(bool /*ToFile*/, RequestState &/*Req*/) APT_OVERRIDE { return false; }
HttpsServerState(URI Srv, HttpsMethod *Owner);
virtual ~HttpsServerState() {Close();};
HttpsServerState(URI Srv, HttpsMethod *Owner);
virtual ~HttpsServerState() {Close();};
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
parse error occurred */
// ---------------------------------------------------------------------
/* Returns 0 if things are OK, 1 if an IO error occurred and 2 if a header
parse error occurred */
-ServerState::RunHeadersResult ServerState::RunHeaders(FileFd * const File,
+ServerState::RunHeadersResult ServerState::RunHeaders(RequestState &Req,
const std::string &Uri)
{
const std::string &Uri)
{
Owner->Status(_("Waiting for headers"));
Owner->Status(_("Waiting for headers"));
if (Owner->Debug == true)
clog << "Answer for: " << Uri << endl << Data;
if (Owner->Debug == true)
clog << "Answer for: " << Uri << endl << Data;
for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
{
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
for (string::const_iterator I = Data.begin(); I < Data.end(); ++I)
{
string::const_iterator J = I;
for (; J != Data.end() && *J != '\n' && *J != '\r'; ++J);
- if (HeaderLine(string(I,J)) == false)
+ if (Req.HeaderLine(string(I,J)) == false)
return RUN_HEADERS_PARSE_ERROR;
I = J;
}
// 100 Continue is a Nop...
return RUN_HEADERS_PARSE_ERROR;
I = J;
}
// 100 Continue is a Nop...
continue;
// Tidy up the connection persistence state.
continue;
// Tidy up the connection persistence state.
- if (Encoding == Closes && HaveContent == true)
+ if (Req.Encoding == RequestState::Closes && Req.HaveContent == true)
Persistent = false;
return RUN_HEADERS_OK;
}
Persistent = false;
return RUN_HEADERS_OK;
}
- while (LoadNextResponse(false, File) == true);
+ while (LoadNextResponse(false, Req) == true);
return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
return RUN_HEADERS_IO_ERROR;
}
/*}}}*/
-// ServerState::HeaderLine - Process a header line /*{{{*/
-// ---------------------------------------------------------------------
-/* */
-bool ServerState::HeaderLine(string Line)
+bool RequestState::HeaderLine(string const &Line) /*{{{*/
{
if (Line.empty() == true)
return true;
{
if (Line.empty() == true)
return true;
/* Check the HTTP response header to get the default persistence
state. */
if (Major < 1)
/* Check the HTTP response header to get the default persistence
state. */
if (Major < 1)
+ Server->Persistent = false;
else
{
if (Major == 1 && Minor == 0)
{
else
{
if (Major == 1 && Minor == 0)
{
+ Server->Persistent = false;
- Persistent = true;
- if (PipelineAllowed)
- Pipeline = true;
+ Server->Persistent = true;
+ if (Server->PipelineAllowed)
+ Server->Pipeline = true;
{
if (stringcasecmp(Val,"close") == 0)
{
{
if (stringcasecmp(Val,"close") == 0)
{
- Persistent = false;
- Pipeline = false;
+ Server->Persistent = false;
+ Server->Pipeline = false;
/* Some servers send error pages (as they are dynamically generated)
for simplicity via a connection close instead of e.g. chunked,
so assuming an always closing server only if we get a file + close */
if (Result >= 200 && Result < 300)
/* Some servers send error pages (as they are dynamically generated)
for simplicity via a connection close instead of e.g. chunked,
so assuming an always closing server only if we get a file + close */
if (Result >= 200 && Result < 300)
- PipelineAllowed = false;
+ Server->PipelineAllowed = false;
}
else if (stringcasecmp(Val,"keep-alive") == 0)
}
else if (stringcasecmp(Val,"keep-alive") == 0)
+ Server->Persistent = true;
std::string ranges = ',' + Val + ',';
ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
if (ranges.find(",bytes,") == std::string::npos)
std::string ranges = ',' + Val + ',';
ranges.erase(std::remove(ranges.begin(), ranges.end(), ' '), ranges.end());
if (ranges.find(",bytes,") == std::string::npos)
+ Server->RangesAllowed = false;
/*}}}*/
// ServerState::ServerState - Constructor /*{{{*/
ServerState::ServerState(URI Srv, ServerMethod *Owner) :
/*}}}*/
// ServerState::ServerState - Constructor /*{{{*/
ServerState::ServerState(URI Srv, ServerMethod *Owner) :
- DownloadSize(0), ServerName(Srv), TimeOut(120), Owner(Owner)
+ ServerName(Srv), TimeOut(120), Owner(Owner)
-bool ServerState::AddPartialFileToHashes(FileFd &File) /*{{{*/
+bool RequestState::AddPartialFileToHashes(FileFd &File) /*{{{*/
{
File.Truncate(StartPos);
{
File.Truncate(StartPos);
- return GetHashes()->AddFD(File, StartPos);
+ return Server->GetHashes()->AddFD(File, StartPos);
-void ServerState::Reset(bool const Everything) /*{{{*/
+void ServerState::Reset() /*{{{*/
- Major = 0; Minor = 0; Result = 0; Code[0] = '\0';
- TotalFileSize = 0; JunkSize = 0; StartPos = 0;
- Encoding = Closes; time(&Date); HaveContent = false;
- State = Header; MaximumSize = 0;
- if (Everything)
- {
- Persistent = false; Pipeline = false; PipelineAllowed = true;
- RangesAllowed = true;
- }
+ Persistent = false;
+ Pipeline = false;
+ PipelineAllowed = true;
+ RangesAllowed = true;
to do. Returns DealWithHeadersResult (see http.h for details).
*/
ServerMethod::DealWithHeadersResult
to do. Returns DealWithHeadersResult (see http.h for details).
*/
ServerMethod::DealWithHeadersResult
-ServerMethod::DealWithHeaders(FetchResult &Res)
+ServerMethod::DealWithHeaders(FetchResult &Res, RequestState &Req)
- if (Server->Result == 304)
{
RemoveFile("server", Queue->DestFile);
Res.IMSHit = true;
{
RemoveFile("server", Queue->DestFile);
Res.IMSHit = true;
* redirect. Pass on those codes so the error handling kicks in.
*/
if (AllowRedirect
* redirect. Pass on those codes so the error handling kicks in.
*/
if (AllowRedirect
- && (Server->Result > 300 && Server->Result < 400)
- && (Server->Result != 300 // Multiple Choices
- && Server->Result != 304 // Not Modified
- && Server->Result != 306)) // (Not part of HTTP/1.1, reserved)
+ && (Req.Result > 300 && Req.Result < 400)
+ && (Req.Result != 300 // Multiple Choices
+ && Req.Result != 304 // Not Modified
+ && Req.Result != 306)) // (Not part of HTTP/1.1, reserved)
- if (Server->Location.empty() == true)
+ if (Req.Location.empty() == true)
- else if (Server->Location[0] == '/' && Queue->Uri.empty() == false)
+ else if (Req.Location[0] == '/' && Queue->Uri.empty() == false)
{
URI Uri = Queue->Uri;
if (Uri.Host.empty() == false)
NextURI = URI::SiteOnly(Uri);
else
NextURI.clear();
{
URI Uri = Queue->Uri;
if (Uri.Host.empty() == false)
NextURI = URI::SiteOnly(Uri);
else
NextURI.clear();
- NextURI.append(DeQuoteString(Server->Location));
+ NextURI.append(DeQuoteString(Req.Location));
if (Queue->Uri == NextURI)
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
if (Queue->Uri == NextURI)
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
- NextURI = DeQuoteString(Server->Location);
+ NextURI = DeQuoteString(Req.Location);
URI tmpURI = NextURI;
if (tmpURI.Access.find('+') != std::string::npos)
{
_error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
URI tmpURI = NextURI;
if (tmpURI.Access.find('+') != std::string::npos)
{
_error->Error("Server tried to trick us into using a specific implementation: %s", tmpURI.Access.c_str());
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
{
SetFailReason("RedirectionLoop");
_error->Error("Redirection loop encountered");
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
/* else pass through for error message */
}
// retry after an invalid range response without partial data
/* else pass through for error message */
}
// retry after an invalid range response without partial data
- else if (Server->Result == 416)
+ else if (Req.Result == 416)
{
struct stat SBuf;
if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
{
struct stat SBuf;
if (stat(Queue->DestFile.c_str(),&SBuf) >= 0 && SBuf.st_size > 0)
{
Hashes resultHashes(Queue->ExpectedHashes);
FileFd file(Queue->DestFile, FileFd::ReadOnly);
{
Hashes resultHashes(Queue->ExpectedHashes);
FileFd file(Queue->DestFile, FileFd::ReadOnly);
- Server->TotalFileSize = file.FileSize();
- Server->Date = file.ModificationTime();
+ Req.TotalFileSize = file.FileSize();
+ Req.Date = file.ModificationTime();
resultHashes.AddFD(file);
HashStringList const hashList = resultHashes.GetHashStringList();
partialHit = (Queue->ExpectedHashes == hashList);
}
resultHashes.AddFD(file);
HashStringList const hashList = resultHashes.GetHashStringList();
partialHit = (Queue->ExpectedHashes == hashList);
}
- else if ((unsigned long long)SBuf.st_size == Server->TotalFileSize)
+ else if ((unsigned long long)SBuf.st_size == Req.TotalFileSize)
partialHit = true;
if (partialHit == true)
{
// the file is completely downloaded, but was not moved
partialHit = true;
if (partialHit == true)
{
// the file is completely downloaded, but was not moved
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
{
// nuke the sent error page
{
// nuke the sent error page
- Server->RunDataToDevNull();
- Server->HaveContent = false;
+ Server->RunDataToDevNull(Req);
+ Req.HaveContent = false;
- Server->StartPos = Server->TotalFileSize;
- Server->Result = 200;
+ Req.StartPos = Req.TotalFileSize;
+ Req.Result = 200;
}
else if (RemoveFile("server", Queue->DestFile))
{
}
else if (RemoveFile("server", Queue->DestFile))
{
/* We have a reply we don't handle. This should indicate a perm server
failure */
/* We have a reply we don't handle. This should indicate a perm server
failure */
- if (Server->Result < 200 || Server->Result >= 300)
+ if (Req.Result < 200 || Req.Result >= 300)
{
if (_error->PendingError() == false)
{
std::string err;
{
if (_error->PendingError() == false)
{
std::string err;
- strprintf(err, "HttpError%u", Server->Result);
+ strprintf(err, "HttpError%u", Req.Result);
- _error->Error("%u %s", Server->Result, Server->Code);
+ _error->Error("%u %s", Req.Result, Req.Code);
- if (Server->HaveContent == true)
+ if (Req.HaveContent == true)
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
// This is some sort of 2xx 'data follows' reply
return ERROR_WITH_CONTENT_PAGE;
return ERROR_UNRECOVERABLE;
}
// This is some sort of 2xx 'data follows' reply
- Res.LastModified = Server->Date;
- Res.Size = Server->TotalFileSize;
+ Res.LastModified = Req.Date;
+ Res.Size = Req.TotalFileSize;
return FILE_IS_OPEN;
}
/*}}}*/
return FILE_IS_OPEN;
}
/*}}}*/
// Fill the pipeline.
Fetch(0);
// Fill the pipeline.
Fetch(0);
+
+ RequestState Req(this, Server.get());
// Fetch the next URL header data from the server.
// Fetch the next URL header data from the server.
- switch (Server->RunHeaders(File, Queue->Uri))
+ switch (Server->RunHeaders(Req, Queue->Uri))
{
case ServerState::RUN_HEADERS_OK:
break;
{
case ServerState::RUN_HEADERS_OK:
break;
// Decide what to do.
FetchResult Res;
Res.Filename = Queue->DestFile;
// Decide what to do.
FetchResult Res;
Res.Filename = Queue->DestFile;
- switch (DealWithHeaders(Res))
+ switch (DealWithHeaders(Res, Req))
{
// Ok, the file is Open
case FILE_IS_OPEN:
{
// Ok, the file is Open
case FILE_IS_OPEN:
// we could do "Server->MaximumSize = Queue->MaximumSize" here
// but that would break the clever pipeline messup detection
// so instead we use the size of the biggest item in the queue
// we could do "Server->MaximumSize = Queue->MaximumSize" here
// but that would break the clever pipeline messup detection
// so instead we use the size of the biggest item in the queue
- Server->MaximumSize = FindMaximumObjectSizeInQueue();
+ Req.MaximumSize = FindMaximumObjectSizeInQueue();
- if (Server->HaveContent)
- Result = Server->RunData(File);
+ if (Req.HaveContent)
+ Result = Server->RunData(Req);
/* If the server is sending back sizeless responses then fill in
the size now */
if (Res.Size == 0)
/* If the server is sending back sizeless responses then fill in
the size now */
if (Res.Size == 0)
- Res.Size = File->Size();
-
+ Res.Size = Req.File.Size();
+
// Close the file, destroy the FD object and timestamp it
FailFd = -1;
// Close the file, destroy the FD object and timestamp it
FailFd = -1;
- delete File;
- File = 0;
-
// Timestamp
struct timeval times[2];
// Timestamp
struct timeval times[2];
- times[0].tv_sec = times[1].tv_sec = Server->Date;
+ times[0].tv_sec = times[1].tv_sec = Req.Date;
times[0].tv_usec = times[1].tv_usec = 0;
utimes(Queue->DestFile.c_str(), times);
times[0].tv_usec = times[1].tv_usec = 0;
utimes(Queue->DestFile.c_str(), times);
// Hard internal error, kill the connection and fail
case ERROR_NOT_FROM_SERVER:
{
// Hard internal error, kill the connection and fail
case ERROR_NOT_FROM_SERVER:
{
- delete File;
- File = 0;
-
Fail();
RotateDNS();
Server->Close();
Fail();
RotateDNS();
Server->Close();
// We need to flush the data, the header is like a 404 w/ error text
case ERROR_WITH_CONTENT_PAGE:
{
// We need to flush the data, the header is like a 404 w/ error text
case ERROR_WITH_CONTENT_PAGE:
{
- Server->RunDataToDevNull();
+ Server->RunDataToDevNull(Req);
case TRY_AGAIN_OR_REDIRECT:
{
// Clear rest of response if there is content
case TRY_AGAIN_OR_REDIRECT:
{
// Clear rest of response if there is content
- if (Server->HaveContent)
- Server->RunDataToDevNull();
+ if (Req.HaveContent)
+ Server->RunDataToDevNull(Req);
Redirect(NextURI);
break;
}
Redirect(NextURI);
break;
}
}
/*}}}*/
ServerMethod::ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
}
/*}}}*/
ServerMethod::ServerMethod(std::string &&Binary, char const * const Ver,unsigned long const Flags) :/*{{{*/
- aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), File(NULL), PipelineDepth(10),
+ aptMethod(std::move(Binary), Ver, Flags), Server(nullptr), PipelineDepth(10),
AllowRedirect(false), Debug(false)
{
}
AllowRedirect(false), Debug(false)
{
}
#define APT_SERVER_H
#include <apt-pkg/strutl.h>
#define APT_SERVER_H
#include <apt-pkg/strutl.h>
+#include <apt-pkg/fileutl.h>
#include "aptmethod.h"
#include <time.h>
#include "aptmethod.h"
#include <time.h>
class Hashes;
class ServerMethod;
class Hashes;
class ServerMethod;
- // This is the last parsed Header Line
- unsigned int Major;
- unsigned int Minor;
- unsigned int Result;
+ unsigned int Major = 0;
+ unsigned int Minor = 0;
+ unsigned int Result = 0;
- // These are some statistics from the last parsed header lines
-
// total size of the usable content (aka: the file)
// total size of the usable content (aka: the file)
- unsigned long long TotalFileSize;
+ unsigned long long TotalFileSize = 0;
// size we actually download (can be smaller than Size if we have partial content)
// size we actually download (can be smaller than Size if we have partial content)
- unsigned long long DownloadSize;
+ unsigned long long DownloadSize = 0;
// size of junk content (aka: server error pages)
// size of junk content (aka: server error pages)
- unsigned long long JunkSize;
+ unsigned long long JunkSize = 0;
// The start of the data (for partial content)
// The start of the data (for partial content)
- unsigned long long StartPos;
+ unsigned long long StartPos = 0;
+
+ unsigned long long MaximumSize = 0;
- bool HaveContent;
- enum {Chunked,Stream,Closes} Encoding;
- enum {Header, Data} State;
+ bool HaveContent = false;
+ enum {Chunked,Stream,Closes} Encoding = Closes;
+ enum {Header, Data} State = Header;
+ std::string Location;
+
+ FileFd File;
+
+ ServerMethod * const Owner;
+ ServerState * const Server;
+
+ bool HeaderLine(std::string const &Line);
+ bool AddPartialFileToHashes(FileFd &File);
+
+ RequestState(ServerMethod * const Owner, ServerState * const Server) :
+ Owner(Owner), Server(Server) { time(&Date); }
+};
+
+struct ServerState
+{
bool Persistent;
bool PipelineAllowed;
bool RangesAllowed;
bool Persistent;
bool PipelineAllowed;
bool RangesAllowed;
- // This is a Persistent attribute of the server itself.
bool Pipeline;
URI ServerName;
URI Proxy;
unsigned long TimeOut;
bool Pipeline;
URI ServerName;
URI Proxy;
unsigned long TimeOut;
- unsigned long long MaximumSize;
-
protected:
ServerMethod *Owner;
virtual bool ReadHeaderLines(std::string &Data) = 0;
protected:
ServerMethod *Owner;
virtual bool ReadHeaderLines(std::string &Data) = 0;
- virtual bool LoadNextResponse(bool const ToFile, FileFd * const File) = 0;
+ virtual bool LoadNextResponse(bool const ToFile, RequestState &Req) = 0;
- bool HeaderLine(std::string Line);
/** \brief Result of the header acquire */
enum RunHeadersResult {
/** \brief Result of the header acquire */
enum RunHeadersResult {
RUN_HEADERS_PARSE_ERROR
};
/** \brief Get the headers before the data */
RUN_HEADERS_PARSE_ERROR
};
/** \brief Get the headers before the data */
- RunHeadersResult RunHeaders(FileFd * const File, const std::string &Uri);
- bool AddPartialFileToHashes(FileFd &File);
+ RunHeadersResult RunHeaders(RequestState &Req, const std::string &Uri);
bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
bool Comp(URI Other) const {return Other.Host == ServerName.Host && Other.Port == ServerName.Port;};
- virtual void Reset(bool const Everything = true);
virtual bool WriteResponse(std::string const &Data) = 0;
/** \brief Transfer the data from the socket */
virtual bool WriteResponse(std::string const &Data) = 0;
/** \brief Transfer the data from the socket */
- virtual bool RunData(FileFd * const File) = 0;
- virtual bool RunDataToDevNull() = 0;
+ virtual bool RunData(RequestState &Req) = 0;
+ virtual bool RunDataToDevNull(RequestState &Req) = 0;
virtual bool Open() = 0;
virtual bool IsOpen() = 0;
virtual bool Close() = 0;
virtual bool InitHashes(HashStringList const &ExpectedHashes) = 0;
virtual bool Open() = 0;
virtual bool IsOpen() = 0;
virtual bool Close() = 0;
virtual bool InitHashes(HashStringList const &ExpectedHashes) = 0;
- virtual Hashes * GetHashes() = 0;
- virtual bool Die(FileFd * const File) = 0;
+ virtual bool Die(RequestState &Req) = 0;
virtual bool Flush(FileFd * const File) = 0;
virtual bool Flush(FileFd * const File) = 0;
- virtual bool Go(bool ToFile, FileFd * const File) = 0;
+ virtual bool Go(bool ToFile, RequestState &Req) = 0;
+ virtual Hashes * GetHashes() = 0;
ServerState(URI Srv, ServerMethod *Owner);
virtual ~ServerState() {};
ServerState(URI Srv, ServerMethod *Owner);
virtual ~ServerState() {};
std::unique_ptr<ServerState> Server;
std::string NextURI;
std::unique_ptr<ServerState> Server;
std::string NextURI;
unsigned long PipelineDepth;
bool AllowRedirect;
unsigned long PipelineDepth;
bool AllowRedirect;
TRY_AGAIN_OR_REDIRECT
};
/** \brief Handle the retrieved header data */
TRY_AGAIN_OR_REDIRECT
};
/** \brief Handle the retrieved header data */
- virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res);
+ virtual DealWithHeadersResult DealWithHeaders(FetchResult &Res, RequestState &Req);
// In the event of a fatal signal this file will be closed and timestamped.
static std::string FailFile;
// In the event of a fatal signal this file will be closed and timestamped.
static std::string FailFile;
static time_t FailTime;
static APT_NORETURN void SigTerm(int);
static time_t FailTime;
static APT_NORETURN void SigTerm(int);
- virtual bool Flush() { return Server->Flush(File); };
-
int Loop();
virtual void SendReq(FetchItem *Itm) = 0;
int Loop();
virtual void SendReq(FetchItem *Itm) = 0;