git.saurik.com Git - apt.git/blobdiff - apt-pkg/acquire.cc
Fixed --no-download
[apt.git] / apt-pkg / acquire.cc
index 80624f9d3341bbc209123b8ca9b94cecc10490ff..a9a8b3396e0a1706561c477e834a4d8b74226aca 100644 (file)
@@ -1,6 +1,6 @@
 // -*- mode: cpp; mode: fold -*-
 // Description                                                         /*{{{*/
-// $Id: acquire.cc,v 1.29 1999/03/16 00:43:55 jgg Exp $
+// $Id: acquire.cc,v 1.44 1999/12/09 05:22:33 jgg Exp $
 /* ######################################################################
 
    Acquire - File Acquiration
@@ -25,6 +25,8 @@
 
 #include <dirent.h>
 #include <sys/time.h>
+#include <errno.h>
+#include <sys/stat.h>
                                                                        /*}}}*/
 
 // Acquire::pkgAcquire - Constructor                                   /*{{{*/
@@ -45,6 +47,17 @@ pkgAcquire::pkgAcquire(pkgAcquireStatus *Log) : Log(Log)
       QueueMode = QueueAccess;   
 
    Debug = _config->FindB("Debug::pkgAcquire",false);
+   
+   // This is really a stupid place for this
+   struct stat St;
+   if (stat((_config->FindDir("Dir::State::lists") + "partial/").c_str(),&St) != 0 ||
+       S_ISDIR(St.st_mode) == 0)
+      _error->Error("Lists directory %spartial is missing.",
+                   _config->FindDir("Dir::State::lists").c_str());
+   if (stat((_config->FindDir("Dir::Cache::Archives") + "partial/").c_str(),&St) != 0 ||
+       S_ISDIR(St.st_mode) == 0)
+      _error->Error("Archive directory %spartial is missing.",
+                   _config->FindDir("Dir::Cache::Archives").c_str());
 }
                                                                        /*}}}*/
 // Acquire::~pkgAcquire        - Destructor                                    /*{{{*/
@@ -52,15 +65,23 @@ pkgAcquire::pkgAcquire(pkgAcquireStatus *Log) : Log(Log)
 /* Free our memory, clean up the queues (destroy the workers) */
 pkgAcquire::~pkgAcquire()
 {
-   while (Items.size() != 0)
-      delete Items[0];
-
    while (Configs != 0)
    {
       MethodConfig *Jnk = Configs;
       Configs = Configs->Next;
       delete Jnk;
    }   
+   
+   Shutdown();
+}
+                                                                       /*}}}*/
+// Acquire::Shutdown - Clean out the acquire object                    /*{{{*/
+// ---------------------------------------------------------------------
+/* */
+void pkgAcquire::Shutdown()
+{
+   while (Items.size() != 0)
+      delete Items[0];
 
    while (Queues != 0)
    {
@@ -84,6 +105,8 @@ void pkgAcquire::Add(Item *Itm)
 /* Remove an item from the acquire list. This is usually not used.. */
 void pkgAcquire::Remove(Item *Itm)
 {
+   Dequeue(Itm);
+   
    for (vector<Item *>::iterator I = Items.begin(); I < Items.end(); I++)
    {
       if (*I == Itm)
@@ -124,7 +147,7 @@ void pkgAcquire::Remove(Worker *Work)
 // Acquire::Enqueue - Queue an URI for fetching                                /*{{{*/
 // ---------------------------------------------------------------------
 /* This is the entry point for an item. An item calls this function when
-   it is construction which creates a queue (based on the current queue
+   it is constructed which creates a queue (based on the current queue
    mode) and puts the item in that queue. If the system is running then
    the queue might be started. */
 void pkgAcquire::Enqueue(ItemDesc &Item)
@@ -274,7 +297,7 @@ void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
 /* This runs the queues. It manages a select loop for all of the
    Worker tasks. The workers interact with the queues and items to
    manage the actual fetch. */
-bool pkgAcquire::Run()
+pkgAcquire::RunResult pkgAcquire::Run()
 {
    Running = true;
    
@@ -284,6 +307,8 @@ bool pkgAcquire::Run()
    if (Log != 0)
       Log->Start();
    
+   bool WasCancelled = false;
+
    // Run till all things have been acquired
    struct timeval tv;
    tv.tv_sec = 0;
@@ -320,8 +345,11 @@ bool pkgAcquire::Run()
         tv.tv_usec = 500000;
         for (Worker *I = Workers; I != 0; I = I->NextAcquire)
            I->Pulse();
-        if (Log != 0)
-           Log->Pulse(this);
+        if (Log != 0 && Log->Pulse(this) == false)
+        {
+           WasCancelled = true;
+           break;
+        }
       }      
    }   
 
@@ -331,9 +359,17 @@ bool pkgAcquire::Run()
    // Shut down the acquire bits
    Running = false;
    for (Queue *I = Queues; I != 0; I = I->Next)
-      I->Shutdown();
+      I->Shutdown(false);
 
-   return !_error->PendingError();
+   // Shut down the items
+   for (Item **I = Items.begin(); I != Items.end(); I++)
+      (*I)->Finished(); 
+   
+   if (_error->PendingError())
+      return Failed;
+   if (WasCancelled)
+      return Cancelled;
+   return Continue;
 }
                                                                        /*}}}*/
 // Acquire::Bump - Called when an item is dequeued                     /*{{{*/
@@ -419,7 +455,19 @@ unsigned long pkgAcquire::FetchNeeded()
    return Total;
 }
                                                                        /*}}}*/
-// pkgAcquire::UriBegin - Start iterator for the uri list              /*{{{*/
+// Acquire::PartialPresent - Number of partial bytes we already have   /*{{{*/
+// ---------------------------------------------------------------------
+/* This is the number of bytes that is not local */
+unsigned long pkgAcquire::PartialPresent()
+{
+   unsigned long Total = 0;
+   for (pkgAcquire::Item **I = ItemsBegin(); I != ItemsEnd(); I++)
+      if ((*I)->Local == false)
+        Total += (*I)->PartialSize;
+   return Total;
+}
+                                                                       /*}}}*/
+// Acquire::UriBegin - Start iterator for the uri list                 /*{{{*/
 // ---------------------------------------------------------------------
 /* */
 pkgAcquire::UriIterator pkgAcquire::UriBegin()
@@ -427,7 +475,7 @@ pkgAcquire::UriIterator pkgAcquire::UriBegin()
    return UriIterator(Queues);
 }
                                                                        /*}}}*/
-// pkgAcquire::UriEnd - End iterator for the uri list                  /*{{{*/
+// Acquire::UriEnd - End iterator for the uri list                     /*{{{*/
 // ---------------------------------------------------------------------
 /* */
 pkgAcquire::UriIterator pkgAcquire::UriEnd()
@@ -467,7 +515,7 @@ pkgAcquire::Queue::Queue(string Name,pkgAcquire *Owner) : Name(Name),
 /* */
 pkgAcquire::Queue::~Queue()
 {
-   Shutdown();
+   Shutdown(true);
    
    while (Items != 0)
    {
@@ -526,44 +574,53 @@ bool pkgAcquire::Queue::Dequeue(Item *Owner)
                                                                        /*}}}*/
 // Queue::Startup - Start the worker processes                         /*{{{*/
 // ---------------------------------------------------------------------
-/* */
+/* It is possible for this to be called with a pre-existing set of
+   workers. */
 bool pkgAcquire::Queue::Startup()
 {
-   Shutdown();
-   
-   URI U(Name);
-   pkgAcquire::MethodConfig *Cnf = Owner->GetConfig(U.Access);
-   if (Cnf == 0)
-      return false;
-   
-   Workers = new Worker(this,Cnf,Owner->Log);
-   Owner->Add(Workers);
-   if (Workers->Start() == false)
-      return false;
-   
-   /* When pipelining we commit 10 items. This needs to change when we
-      added other source retry to have cycle maintain a pipeline depth
-      on its own. */
-   if (Cnf->Pipeline == true)
-      MaxPipeDepth = 10;
-   else
-      MaxPipeDepth = 1;
+   if (Workers == 0)
+   {
+      URI U(Name);
+      pkgAcquire::MethodConfig *Cnf = Owner->GetConfig(U.Access);
+      if (Cnf == 0)
+        return false;
+      
+      Workers = new Worker(this,Cnf,Owner->Log);
+      Owner->Add(Workers);
+      if (Workers->Start() == false)
+        return false;
+      
+      /* When pipelining we commit 10 items. This needs to change when we
+         added other source retry to have cycle maintain a pipeline depth
+         on its own. */
+      if (Cnf->Pipeline == true)
+        MaxPipeDepth = 10;
+      else
+        MaxPipeDepth = 1;
+   }
    
    return Cycle();
 }
                                                                        /*}}}*/
 // Queue::Shutdown - Shutdown the worker processes                     /*{{{*/
 // ---------------------------------------------------------------------
-/* */
-bool pkgAcquire::Queue::Shutdown()
+/* If final is true then all workers are eliminated, otherwise only workers
+   that do not need cleanup are removed */
+bool pkgAcquire::Queue::Shutdown(bool Final)
 {
    // Delete all of the workers
-   while (Workers != 0)
+   pkgAcquire::Worker **Cur = &Workers;
+   while (*Cur != 0)
    {
-      pkgAcquire::Worker *Jnk = Workers;
-      Workers = Workers->NextQueue;
-      Owner->Remove(Jnk);
-      delete Jnk;
+      pkgAcquire::Worker *Jnk = *Cur;
+      if (Final == true || Jnk->GetConf()->NeedsCleanup == false)
+      {
+        *Cur = Jnk->NextQueue;
+        Owner->Remove(Jnk);
+        delete Jnk;
+      }
+      else
+        Cur = &(*Cur)->NextQueue;      
    }
    
    return true;
@@ -658,7 +715,7 @@ pkgAcquireStatus::pkgAcquireStatus()
 /* This computes some internal state variables for the derived classes to
    use. It generates the current downloaded bytes and total bytes to download
    as well as the current CPS estimate. */
-void pkgAcquireStatus::Pulse(pkgAcquire *Owner)
+bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
 {
    TotalBytes = 0;
    CurrentBytes = 0;
@@ -687,18 +744,29 @@ void pkgAcquireStatus::Pulse(pkgAcquire *Owner)
    }
    
    // Compute the current completion
+   unsigned long ResumeSize = 0;
    for (pkgAcquire::Worker *I = Owner->WorkersBegin(); I != 0;
        I = Owner->WorkerStep(I))
       if (I->CurrentItem != 0 && I->CurrentItem->Owner->Complete == false)
+      {
         CurrentBytes += I->CurrentSize;
-      
+        ResumeSize += I->ResumePoint;
+        
+        // Files with unknown size always have 100% completion
+        if (I->CurrentItem->Owner->FileSize == 0 && 
+            I->CurrentItem->Owner->Complete == false)
+           TotalBytes += I->CurrentSize;
+      }
+   
    // Normalize the figures and account for unknown size downloads
    if (TotalBytes <= 0)
       TotalBytes = 1;
    if (Unknown == Count)
       TotalBytes = Unknown;
-   else
-      TotalBytes += TotalBytes/(Count - Unknown)*Unknown;
+
+   // Wha?! Is not supposed to happen.
+   if (CurrentBytes > TotalBytes)
+      CurrentBytes = TotalBytes;
    
    // Compute the CPS
    struct timeval NewTime;
@@ -713,11 +781,13 @@ void pkgAcquireStatus::Pulse(pkgAcquire *Owner)
       if (Delta < 0.01)
         CurrentCPS = 0;
       else
-        CurrentCPS = (CurrentBytes - LastBytes)/Delta;
-      LastBytes = CurrentBytes;
+        CurrentCPS = ((CurrentBytes - ResumeSize) - LastBytes)/Delta;
+      LastBytes = CurrentBytes - ResumeSize;
       ElapsedTime = (unsigned long)Delta;
       Time = NewTime;
    }
+
+   return true;
 }
                                                                        /*}}}*/
 // AcquireStatus::Start - Called when the download is started          /*{{{*/
@@ -746,24 +816,16 @@ void pkgAcquireStatus::Stop()
    struct timeval NewTime;
    gettimeofday(&NewTime,0);
    
-   // Compute the delta time with full accuracy
-   long usdiff = NewTime.tv_usec - StartTime.tv_usec;
-   long sdiff = NewTime.tv_sec - StartTime.tv_sec;
+   double Delta = NewTime.tv_sec - StartTime.tv_sec + 
+                  (NewTime.tv_usec - StartTime.tv_usec)/1000000.0;
    
-   // Borrow
-   if (usdiff < 0)
-   {    
-      usdiff += 1000000;
-      sdiff--;
-   }
-
    // Compute the CPS value
-   if (sdiff == 0 && usdiff == 0)
+   if (Delta < 0.01)
       CurrentCPS = 0;
    else
-      CurrentCPS = FetchedBytes/(sdiff + usdiff/1000000.0);
+      CurrentCPS = FetchedBytes/Delta;
    LastBytes = CurrentBytes;
-   ElapsedTime = sdiff;
+   ElapsedTime = (unsigned int)Delta;
 }
                                                                        /*}}}*/
 // AcquireStatus::Fetched - Called when a byte set has been fetched    /*{{{*/