X-Git-Url: https://git.saurik.com/apt.git/blobdiff_plain/0817301e47d0b1d3ab893039115fa906963af2c9..678bc33eaf95356d1b63090d220b65162478b89a:/apt-pkg/acquire.cc

diff --git a/apt-pkg/acquire.cc b/apt-pkg/acquire.cc
index 07e4fab59..1be8551f4 100644
--- a/apt-pkg/acquire.cc
+++ b/apt-pkg/acquire.cc
@@ -1,6 +1,6 @@
 // -*- mode: cpp; mode: fold -*-
 // Description                                                  /*{{{*/
-// $Id: acquire.cc,v 1.32 1999/04/07 06:02:56 jgg Exp $
+// $Id: acquire.cc,v 1.47 2001/02/20 07:03:17 jgg Exp $
 /* ######################################################################
 
    Acquire - File Acquiration
@@ -23,9 +23,12 @@
 #include 
 #include 
+#include 
+
 #include 
 #include 
 #include 
+#include 
                                                                  /*}}}*/
 
 // Acquire::pkgAcquire - Constructor                             /*{{{*/
@@ -46,6 +49,17 @@ pkgAcquire::pkgAcquire(pkgAcquireStatus *Log) : Log(Log)
       QueueMode = QueueAccess;
 
    Debug = _config->FindB("Debug::pkgAcquire",false);
+
+   // This is really a stupid place for this
+   struct stat St;
+   if (stat((_config->FindDir("Dir::State::lists") + "partial/").c_str(),&St) != 0 ||
+       S_ISDIR(St.st_mode) == 0)
+      _error->Error(_("Lists directory %spartial is missing."),
+                    _config->FindDir("Dir::State::lists").c_str());
+   if (stat((_config->FindDir("Dir::Cache::Archives") + "partial/").c_str(),&St) != 0 ||
+       S_ISDIR(St.st_mode) == 0)
+      _error->Error(_("Archive directory %spartial is missing."),
+                    _config->FindDir("Dir::Cache::Archives").c_str());
 }
                                                                  /*}}}*/
 // Acquire::~pkgAcquire - Destructor                             /*{{{*/
@@ -53,15 +67,23 @@ pkgAcquire::pkgAcquire(pkgAcquireStatus *Log) : Log(Log)
 /* Free our memory, clean up the queues (destroy the workers) */
 pkgAcquire::~pkgAcquire()
 {
-   while (Items.size() != 0)
-      delete Items[0];
-
+   Shutdown();
+
    while (Configs != 0)
    {
       MethodConfig *Jnk = Configs;
      Configs = Configs->Next;
       delete Jnk;
    }
+}
+                                                                 /*}}}*/
+// Acquire::Shutdown - Clean out the acquire object              /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+void pkgAcquire::Shutdown()
+{
+   while (Items.size() != 0)
+      delete Items[0];
 
    while (Queues != 0)
    {
@@ -85,6 +107,8 @@ void pkgAcquire::Add(Item *Itm)
 /* Remove an item from the acquire list. This is usually not used.. */
 void pkgAcquire::Remove(Item *Itm)
 {
+   Dequeue(Itm);
+
    for (vector::iterator I = Items.begin(); I < Items.end(); I++)
    {
       if (*I == Itm)
@@ -125,7 +149,7 @@ void pkgAcquire::Remove(Worker *Work)
 // Acquire::Enqueue - Queue an URI for fetching                  /*{{{*/
 // ---------------------------------------------------------------------
 /* This is the entry point for an item. An item calls this function when
-   it is construction which creates a queue (based on the current queue
+   it is constructed which creates a queue (based on the current queue
    mode) and puts the item in that queue. If the system is running then
    the queue might be started. */
 void pkgAcquire::Enqueue(ItemDesc &Item)
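Note, not part of the patch: with the checks added above, the pkgAcquire constructor now verifies that the lists and archive "partial/" directories exist before any download starts, reporting problems through the global _error stack. A minimal sketch of how a front end might surface that, assuming the usual apt-pkg error API; the SetupFetcher() wrapper is purely illustrative:

#include <apt-pkg/acquire.h>
#include <apt-pkg/error.h>

// Sketch only: construct the downloader and check the new directory tests.
bool SetupFetcher()
{
   pkgAcquire Fetcher(0);            // constructor stats the two partial/ directories
   if (_error->PendingError() == true)
   {
      _error->DumpErrors();          // prints the queued "... partial is missing." message(s)
      return false;
   }
   // Safe to queue items and call Fetcher.Run() from here.
   return true;
}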
@@ -275,7 +299,7 @@ void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
 /* This runs the queues. It manages a select loop for all of the
    Worker tasks. The workers interact with the queues and items to
    manage the actual fetch. */
-bool pkgAcquire::Run()
+pkgAcquire::RunResult pkgAcquire::Run()
 {
    Running = true;
 
@@ -285,6 +309,8 @@ bool pkgAcquire::Run()
    if (Log != 0)
       Log->Start();
 
+   bool WasCancelled = false;
+
    // Run till all things have been acquired
    struct timeval tv;
    tv.tv_sec = 0;
@@ -321,8 +347,11 @@ bool pkgAcquire::Run()
          tv.tv_usec = 500000;
         for (Worker *I = Workers; I != 0; I = I->NextAcquire)
            I->Pulse();
-        if (Log != 0)
-           Log->Pulse(this);
+        if (Log != 0 && Log->Pulse(this) == false)
+        {
+           WasCancelled = true;
+           break;
+        }
       }
    }
 
@@ -332,9 +361,17 @@ bool pkgAcquire::Run()
    // Shut down the acquire bits
    Running = false;
    for (Queue *I = Queues; I != 0; I = I->Next)
-      I->Shutdown();
+      I->Shutdown(false);
 
-   return !_error->PendingError();
+   // Shut down the items
+   for (Item **I = Items.begin(); I != Items.end(); I++)
+      (*I)->Finished();
+
+   if (_error->PendingError())
+      return Failed;
+   if (WasCancelled)
+      return Cancelled;
+   return Continue;
 }
                                                                  /*}}}*/
 // Acquire::Bump - Called when an item is dequeued               /*{{{*/
@@ -363,13 +400,13 @@ bool pkgAcquire::Clean(string Dir)
 {
    DIR *D = opendir(Dir.c_str());
    if (D == 0)
-      return _error->Errno("opendir","Unable to read %s",Dir.c_str());
+      return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());
 
    string StartDir = SafeGetCWD();
    if (chdir(Dir.c_str()) != 0)
    {
      closedir(D);
-      return _error->Errno("chdir","Unable to change to ",Dir.c_str());
+      return _error->Errno("chdir",_("Unable to change to %s"),Dir.c_str());
    }
 
   for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
@@ -400,9 +437,9 @@ bool pkgAcquire::Clean(string Dir)
 // Acquire::TotalNeeded - Number of bytes to fetch               /*{{{*/
 // ---------------------------------------------------------------------
 /* This is the total number of bytes needed */
-unsigned long pkgAcquire::TotalNeeded()
+double pkgAcquire::TotalNeeded()
 {
-   unsigned long Total = 0;
+   double Total = 0;
    for (pkgAcquire::Item **I = ItemsBegin(); I != ItemsEnd(); I++)
       Total += (*I)->FileSize;
    return Total;
@@ -411,9 +448,9 @@ unsigned long pkgAcquire::TotalNeeded()
 // Acquire::FetchNeeded - Number of bytes needed to get          /*{{{*/
 // ---------------------------------------------------------------------
 /* This is the number of bytes that is not local */
-unsigned long pkgAcquire::FetchNeeded()
+double pkgAcquire::FetchNeeded()
 {
-   unsigned long Total = 0;
+   double Total = 0;
    for (pkgAcquire::Item **I = ItemsBegin(); I != ItemsEnd(); I++)
       if ((*I)->Local == false)
         Total += (*I)->FileSize;
@@ -423,16 +460,16 @@ unsigned long pkgAcquire::FetchNeeded()
 // Acquire::PartialPresent - Number of partial bytes we already have /*{{{*/
 // ---------------------------------------------------------------------
 /* This is the number of bytes that is not local */
-unsigned long pkgAcquire::PartialPresent()
+double pkgAcquire::PartialPresent()
 {
-   unsigned long Total = 0;
+   double Total = 0;
    for (pkgAcquire::Item **I = ItemsBegin(); I != ItemsEnd(); I++)
       if ((*I)->Local == false)
         Total += (*I)->PartialSize;
    return Total;
 }
                                                                  /*}}}*/
-// pkgAcquire::UriBegin - Start iterator for the uri list        /*{{{*/
+// Acquire::UriBegin - Start iterator for the uri list           /*{{{*/
 // ---------------------------------------------------------------------
 /* */
 pkgAcquire::UriIterator pkgAcquire::UriBegin()
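Note, not part of the patch: Run() no longer collapses everything into a bool; the enumerators Continue, Failed and Cancelled used in the return statements above belong to pkgAcquire::RunResult. A hedged sketch of a caller dispatching on the new result; the DoFetch() wrapper is illustrative only:

// Sketch only: handle the tri-state result instead of the old bool.
bool DoFetch(pkgAcquire &Fetcher)
{
   pkgAcquire::RunResult Res = Fetcher.Run();

   if (Res == pkgAcquire::Failed)
      return false;        // hard error; the details are already queued on _error
   if (Res == pkgAcquire::Cancelled)
      return false;        // the status callback returned false from Pulse()
   // Continue only means the fetch loop finished; front ends typically still
   // walk ItemsBegin()..ItemsEnd() and check each item's status individually.
   return true;
}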
@@ -440,7 +477,7 @@ pkgAcquire::UriIterator pkgAcquire::UriBegin()
    return UriIterator(Queues);
 }
                                                                  /*}}}*/
-// pkgAcquire::UriEnd - End iterator for the uri list            /*{{{*/
+// Acquire::UriEnd - End iterator for the uri list               /*{{{*/
 // ---------------------------------------------------------------------
 /* */
 pkgAcquire::UriIterator pkgAcquire::UriEnd()
@@ -458,6 +495,7 @@ pkgAcquire::MethodConfig::MethodConfig()
    Pipeline = false;
    SendConfig = false;
    LocalOnly = false;
+   Removable = false;
    Next = 0;
 }
                                                                  /*}}}*/
@@ -480,7 +518,7 @@ pkgAcquire::Queue::Queue(string Name,pkgAcquire *Owner) : Name(Name),
 /* */
 pkgAcquire::Queue::~Queue()
 {
-   Shutdown();
+   Shutdown(true);
 
    while (Items != 0)
    {
@@ -539,44 +577,53 @@ bool pkgAcquire::Queue::Dequeue(Item *Owner)
                                                                  /*}}}*/
 // Queue::Startup - Start the worker processes                   /*{{{*/
 // ---------------------------------------------------------------------
-/* */
+/* It is possible for this to be called with a pre-existing set of
+   workers. */
 bool pkgAcquire::Queue::Startup()
 {
-   Shutdown();
-
-   URI U(Name);
-   pkgAcquire::MethodConfig *Cnf = Owner->GetConfig(U.Access);
-   if (Cnf == 0)
-      return false;
-
-   Workers = new Worker(this,Cnf,Owner->Log);
-   Owner->Add(Workers);
-   if (Workers->Start() == false)
-      return false;
-
-   /* When pipelining we commit 10 items. This needs to change when we
-      added other source retry to have cycle maintain a pipeline depth
-      on its own. */
-   if (Cnf->Pipeline == true)
-      MaxPipeDepth = 10;
-   else
-      MaxPipeDepth = 1;
+   if (Workers == 0)
+   {
+      URI U(Name);
+      pkgAcquire::MethodConfig *Cnf = Owner->GetConfig(U.Access);
+      if (Cnf == 0)
+         return false;
+
+      Workers = new Worker(this,Cnf,Owner->Log);
+      Owner->Add(Workers);
+      if (Workers->Start() == false)
+         return false;
+
+      /* When pipelining we commit 10 items. This needs to change when we
+         added other source retry to have cycle maintain a pipeline depth
+         on its own. */
+      if (Cnf->Pipeline == true)
+         MaxPipeDepth = 10;
+      else
+         MaxPipeDepth = 1;
+   }
 
    return Cycle();
 }
                                                                  /*}}}*/
 // Queue::Shutdown - Shutdown the worker processes               /*{{{*/
 // ---------------------------------------------------------------------
-/* */
-bool pkgAcquire::Queue::Shutdown()
+/* If final is true then all workers are eliminated, otherwise only workers
+   that do not need cleanup are removed */
+bool pkgAcquire::Queue::Shutdown(bool Final)
 {
    // Delete all of the workers
-   while (Workers != 0)
+   pkgAcquire::Worker **Cur = &Workers;
+   while (*Cur != 0)
    {
-      pkgAcquire::Worker *Jnk = Workers;
-      Workers = Workers->NextQueue;
-      Owner->Remove(Jnk);
-      delete Jnk;
+      pkgAcquire::Worker *Jnk = *Cur;
+      if (Final == true || Jnk->GetConf()->NeedsCleanup == false)
+      {
+         *Cur = Jnk->NextQueue;
+         Owner->Remove(Jnk);
+         delete Jnk;
+      }
+      else
+         Cur = &(*Cur)->NextQueue;
   }
 
    return true;
@@ -661,7 +708,7 @@ void pkgAcquire::Queue::Bump()
 // AcquireStatus::pkgAcquireStatus - Constructor                 /*{{{*/
 // ---------------------------------------------------------------------
 /* */
-pkgAcquireStatus::pkgAcquireStatus()
+pkgAcquireStatus::pkgAcquireStatus() : Update(true), MorePulses(false)
 {
    Start();
 }
@@ -671,7 +718,7 @@ pkgAcquireStatus::pkgAcquireStatus()
 /* This computes some internal state variables for the derived classes to
    use. It generates the current downloaded bytes and total bytes to
    download as well as the current CPS estimate. */
-void pkgAcquireStatus::Pulse(pkgAcquire *Owner)
+bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
 {
    TotalBytes = 0;
    CurrentBytes = 0;
@@ -691,7 +738,7 @@ void pkgAcquireStatus::Pulse(pkgAcquire *Owner)
       // Totally ignore local items
       if ((*I)->Local == true)
         continue;
-
+
       TotalBytes += (*I)->FileSize;
       if ((*I)->Complete == true)
         CurrentBytes += (*I)->FileSize;
@@ -700,16 +747,29 @@ void pkgAcquireStatus::Pulse(pkgAcquire *Owner)
    }
 
    // Compute the current completion
+   unsigned long ResumeSize = 0;
   for (pkgAcquire::Worker *I = Owner->WorkersBegin(); I != 0;
       I = Owner->WorkerStep(I))
       if (I->CurrentItem != 0 && I->CurrentItem->Owner->Complete == false)
+      {
         CurrentBytes += I->CurrentSize;
-
+        ResumeSize += I->ResumePoint;
+
+        // Files with unknown size always have 100% completion
+        if (I->CurrentItem->Owner->FileSize == 0 &&
+            I->CurrentItem->Owner->Complete == false)
+           TotalBytes += I->CurrentSize;
+      }
+
    // Normalize the figures and account for unknown size downloads
    if (TotalBytes <= 0)
       TotalBytes = 1;
    if (Unknown == Count)
       TotalBytes = Unknown;
+
+   // Wha?! Is not supposed to happen.
+   if (CurrentBytes > TotalBytes)
+      CurrentBytes = TotalBytes;
 
    // Compute the CPS
    struct timeval NewTime;
@@ -724,11 +784,13 @@ void pkgAcquireStatus::Pulse(pkgAcquire *Owner)
       if (Delta < 0.01)
         CurrentCPS = 0;
       else
-        CurrentCPS = (CurrentBytes - LastBytes)/Delta;
-      LastBytes = CurrentBytes;
+        CurrentCPS = ((CurrentBytes - ResumeSize) - LastBytes)/Delta;
+      LastBytes = CurrentBytes - ResumeSize;
       ElapsedTime = (unsigned long)Delta;
       Time = NewTime;
    }
+
+   return true;
 }
                                                                  /*}}}*/
 // AcquireStatus::Start - Called when the download is started    /*{{{*/
@@ -757,24 +819,16 @@ void pkgAcquireStatus::Stop()
    struct timeval NewTime;
    gettimeofday(&NewTime,0);
 
-   // Compute the delta time with full accuracy
-   long usdiff = NewTime.tv_usec - StartTime.tv_usec;
-   long sdiff = NewTime.tv_sec - StartTime.tv_sec;
+   double Delta = NewTime.tv_sec - StartTime.tv_sec +
+          (NewTime.tv_usec - StartTime.tv_usec)/1000000.0;
-   // Borrow
-   if (usdiff < 0)
-   {
-      usdiff += 1000000;
-      sdiff--;
-   }
-
    // Compute the CPS value
-   if (sdiff == 0 && usdiff == 0)
+   if (Delta < 0.01)
       CurrentCPS = 0;
    else
-      CurrentCPS = FetchedBytes/(sdiff + usdiff/1000000.0);
+      CurrentCPS = FetchedBytes/Delta;
 
    LastBytes = CurrentBytes;
-   ElapsedTime = sdiff;
+   ElapsedTime = (unsigned int)Delta;
 }
                                                                  /*}}}*/
 // AcquireStatus::Fetched - Called when a byte set has been fetched /*{{{*/
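Note, not part of the patch: Pulse() now returns bool, and Run() above stops with RunResult Cancelled as soon as the status object returns false. A hedged sketch of a derived status class using that hook; the class name and the CancelRequested() helper are stand-ins for a real front end's state, not apt API, and a complete status class would also implement the remaining virtual members (e.g. MediaChange):

bool CancelRequested();   // hypothetical UI hook, defined elsewhere by the front end

// Sketch only: abort the download from the progress callback.
class CancelableStatus : public pkgAcquireStatus
{
   public:
   virtual bool Pulse(pkgAcquire *Owner)
   {
      pkgAcquireStatus::Pulse(Owner);   // keep CurrentBytes/CurrentCPS bookkeeping
      if (CancelRequested() == true)
         return false;                  // makes Run() return pkgAcquire::Cancelled
      return true;
   }
};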