// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
// $Id: acquire.cc,v 1.50 2004/03/17 05:17:11 mdz Exp $
/* ######################################################################

   Acquire - File Acquisition

   The core element for the schedule system is the concept of a named
   queue. Each queue is unique and each queue has a name derived from the
   URI. The degree of parallelization can be controlled by how the queue
   name is derived from the URI.

   ##################################################################### */
/*}}}*/
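/* Illustrative sketch (not part of the original sources): with
   Acquire::Queue-Mode "host" the queue name is derived from the access
   method plus the host part of the URI (e.g. "http:deb.example.org"), so
   different hosts can be fetched in parallel while requests to one host
   share a queue. With Queue-Mode "access" all URIs of one method share a
   single queue (e.g. "http"), serializing them. See QueueName() below for
   the exact derivation rules used here. */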
// Include Files /*{{{*/
#include <config.h>

#include <apt-pkg/acquire.h>
#include <apt-pkg/acquire-item.h>
#include <apt-pkg/acquire-worker.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/strutl.h>
#include <apt-pkg/fileutl.h>

#include <algorithm>
#include <numeric>
#include <string>
#include <vector>
#include <iostream>
#include <sstream>
#include <iomanip>
#include <memory>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <pwd.h>
#include <grp.h>
#include <dirent.h>
#include <sys/time.h>
#include <sys/select.h>
#include <errno.h>
#include <sys/stat.h>

#include <apti18n.h>
/*}}}*/

using namespace std;

// Acquire::pkgAcquire - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* We grab some runtime state from the configuration space */
pkgAcquire::pkgAcquire() : LockFD(-1), d(NULL), Queues(0), Workers(0), Configs(0), Log(NULL), ToFetch(0),
      Debug(_config->FindB("Debug::pkgAcquire",false)),
      Running(false)
{
   Initialize();
}
pkgAcquire::pkgAcquire(pkgAcquireStatus *Progress) : LockFD(-1), d(NULL), Queues(0), Workers(0),
      Configs(0), Log(NULL), ToFetch(0),
      Debug(_config->FindB("Debug::pkgAcquire",false)),
      Running(false)
{
   Initialize();
   SetLog(Progress);
}
void pkgAcquire::Initialize()
{
   string const Mode = _config->Find("Acquire::Queue-Mode","host");
   if (strcasecmp(Mode.c_str(),"host") == 0)
      QueueMode = QueueHost;
   if (strcasecmp(Mode.c_str(),"access") == 0)
      QueueMode = QueueAccess;

   // chown the auth.conf file as it will be accessed by our methods
   std::string const SandboxUser = _config->Find("APT::Sandbox::User");
   if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
   {
      struct passwd const * const pw = getpwnam(SandboxUser.c_str());
      struct group const * const gr = getgrnam(ROOT_GROUP);
      if (pw != NULL && gr != NULL)
      {
         std::string const AuthConf = _config->FindFile("Dir::Etc::netrc");
         if(AuthConf.empty() == false && RealFileExists(AuthConf) &&
            chown(AuthConf.c_str(), pw->pw_uid, gr->gr_gid) != 0)
            _error->WarningE("SetupAPTPartialDirectory", "chown to %s:root of file %s failed", SandboxUser.c_str(), AuthConf.c_str());
      }
   }
}
/*}}}*/
// Acquire::GetLock - lock directory and prepare for action /*{{{*/
static bool SetupAPTPartialDirectory(std::string const &grand, std::string const &parent)
{
   std::string const partial = parent + "partial";
   mode_t const mode = umask(S_IWGRP | S_IWOTH);
   bool const creation_fail = (CreateAPTDirectoryIfNeeded(grand, partial) == false &&
         CreateAPTDirectoryIfNeeded(parent, partial) == false);
   umask(mode);
   if (creation_fail == true)
      return false;

   std::string const SandboxUser = _config->Find("APT::Sandbox::User");
   if (getuid() == 0 && SandboxUser.empty() == false && SandboxUser != "root") // if we aren't root, we can't chown, so don't try it
   {
      struct passwd const * const pw = getpwnam(SandboxUser.c_str());
      struct group const * const gr = getgrnam(ROOT_GROUP);
      if (pw != NULL && gr != NULL)
      {
         // chown the partial dir
         if(chown(partial.c_str(), pw->pw_uid, gr->gr_gid) != 0)
            _error->WarningE("SetupAPTPartialDirectory", "chown to %s:root of directory %s failed", SandboxUser.c_str(), partial.c_str());
      }
   }
   if (chmod(partial.c_str(), 0700) != 0)
      _error->WarningE("SetupAPTPartialDirectory", "chmod 0700 of directory %s failed", partial.c_str());

   return true;
}
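/* Illustrative result (assuming the Debian defaults: sandbox user "_apt",
   Dir::State::lists /var/lib/apt/lists/ and Dir::Cache::Archives
   /var/cache/apt/archives/): when run as root, both .../lists/partial and
   .../archives/partial end up owned by _apt:root with mode 0700, so the
   unprivileged download methods can write there while other users cannot
   read half-finished files. */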
bool pkgAcquire::Setup(pkgAcquireStatus *Progress, string const &Lock)
{
   Log = Progress;
   if (Lock.empty())
   {
      string const listDir = _config->FindDir("Dir::State::lists");
      if (SetupAPTPartialDirectory(_config->FindDir("Dir::State"), listDir) == false)
         return _error->Errno("Acquire", _("List directory %spartial is missing."), listDir.c_str());
      string const archivesDir = _config->FindDir("Dir::Cache::Archives");
      if (SetupAPTPartialDirectory(_config->FindDir("Dir::Cache"), archivesDir) == false)
         return _error->Errno("Acquire", _("Archives directory %spartial is missing."), archivesDir.c_str());
      return true;
   }
   return GetLock(Lock);
}
bool pkgAcquire::GetLock(std::string const &Lock)
{
   if (Lock.empty() == true)
      return false;

   // check for existence and possibly create auxiliary directories
   string const listDir = _config->FindDir("Dir::State::lists");
   string const archivesDir = _config->FindDir("Dir::Cache::Archives");

   if (Lock == listDir)
   {
      if (SetupAPTPartialDirectory(_config->FindDir("Dir::State"), listDir) == false)
         return _error->Errno("Acquire", _("List directory %spartial is missing."), listDir.c_str());
   }
   if (Lock == archivesDir)
   {
      if (SetupAPTPartialDirectory(_config->FindDir("Dir::Cache"), archivesDir) == false)
         return _error->Errno("Acquire", _("Archives directory %spartial is missing."), archivesDir.c_str());
   }

   if (_config->FindB("Debug::NoLocking", false) == true)
      return true;

   // Lock the directory this acquire object will work in
   if (LockFD != -1)
      close(LockFD);
   LockFD = ::GetLock(flCombine(Lock, "lock"));
   if (LockFD == -1)
      return _error->Error(_("Unable to lock directory %s"), Lock.c_str());

   return true;
}
/*}}}*/
// Acquire::~pkgAcquire - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* Free our memory, clean up the queues (destroy the workers) */
pkgAcquire::~pkgAcquire()
{
   Shutdown();

   if (LockFD != -1)
      close(LockFD);

   while (Configs != 0)
   {
      MethodConfig *Jnk = Configs;
      Configs = Configs->Next;
      delete Jnk;
   }
}
/*}}}*/
// Acquire::Shutdown - Clean out the acquire object /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcquire::Shutdown()
{
   while (Items.empty() == false)
   {
      if (Items[0]->Status == Item::StatFetching)
         Items[0]->Status = Item::StatError;
      delete Items[0];
   }

   while (Queues != 0)
   {
      Queue *Jnk = Queues;
      Queues = Queues->Next;
      delete Jnk;
   }
}
/*}}}*/
// Acquire::Add - Add a new item /*{{{*/
// ---------------------------------------------------------------------
/* This puts an item on the acquire list. This list is mainly for tracking
   item status */
void pkgAcquire::Add(Item *Itm)
{
   Items.push_back(Itm);
}
/*}}}*/
// Acquire::Remove - Remove an item /*{{{*/
// ---------------------------------------------------------------------
/* Remove an item from the acquire list. This is usually not used. */
void pkgAcquire::Remove(Item *Itm)
{
   Dequeue(Itm);

   for (ItemIterator I = Items.begin(); I != Items.end();)
   {
      if (*I == Itm)
      {
         Items.erase(I);
         I = Items.begin();
      }
      else
         ++I;
   }
}
/*}}}*/
// Acquire::Add - Add a worker /*{{{*/
// ---------------------------------------------------------------------
/* A list of workers is kept so that the select loop can direct their FD
   usage. */
void pkgAcquire::Add(Worker *Work)
{
   Work->NextAcquire = Workers;
   Workers = Work;
}
/*}}}*/
// Acquire::Remove - Remove a worker /*{{{*/
// ---------------------------------------------------------------------
/* A worker has died. This can not be done while the select loop is running
   as it would require that RunFds could handle a changing list state, and
   it can't. */
void pkgAcquire::Remove(Worker *Work)
{
   if (Running == true)
      abort();

   Worker **I = &Workers;
   for (; *I != 0;)
   {
      if (*I == Work)
         *I = (*I)->NextAcquire;
      else
         I = &(*I)->NextAcquire;
   }
}
/*}}}*/
// Acquire::Enqueue - Queue a URI for fetching /*{{{*/
// ---------------------------------------------------------------------
/* This is the entry point for an item. An item calls this function when
   it is constructed, which creates a queue (based on the current queue
   mode) and puts the item in that queue. If the system is running then
   the queue might be started. */
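/* Illustrative client-side flow (simplified, not taken from these sources):
   construct a pkgAcquire with a pkgAcquireStatus, create items (e.g. a
   pkgAcqFile) with the fetcher as owner -- their constructors end up
   calling Enqueue() below -- and finally call Run():

     pkgAcquire Fetcher(&Progress);
     // ... create pkgAcquire::Item subclasses owned by Fetcher ...
     if (Fetcher.Run() == pkgAcquire::Continue)
        ; // each item's Status then tells whether it was fetched successfully
 */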
static bool CheckForBadItemAndFailIt(pkgAcquire::Item * const Item,
      pkgAcquire::MethodConfig const * const Config, pkgAcquireStatus * const Log)
{
   auto SavedDesc = Item->GetItemDesc();
   if (Item->IsRedirectionLoop(SavedDesc.URI))
   {
      std::string const Message = "400 URI Failure"
         "\nURI: " + SavedDesc.URI +
         "\nFilename: " + Item->DestFile +
         "\nFailReason: RedirectionLoop";

      Item->Status = pkgAcquire::Item::StatError;
      Item->Failed(Message, Config);
      if (Log != nullptr)
         Log->Fail(SavedDesc);
      return true;
   }

   HashStringList const hsl = Item->GetExpectedHashes();
   if (hsl.usable() == false && Item->HashesRequired() &&
         _config->Exists("Acquire::ForceHash") == false)
   {
      std::string const Message = "400 URI Failure"
         "\nURI: " + SavedDesc.URI +
         "\nFilename: " + Item->DestFile +
         "\nFailReason: WeakHashSums";

      Item->Status = pkgAcquire::Item::StatAuthError;
      Item->Failed(Message, Config);
      if (Log != nullptr)
         Log->Fail(SavedDesc);
      return true;
   }
   return false;
}
void pkgAcquire::Enqueue(ItemDesc &Item)
{
   // Determine which queue to put the item in
   const MethodConfig *Config;
   string Name = QueueName(Item.URI,Config);
   if (Name.empty() == true)
      return;

   /* the check for Running avoids producing errors in the log before we
      have actually started. Checking here unconditionally would be easier
      to implement but would confuse users/implementations, so the items
      skipped here are checked again in Queue::Startup. */
   if (Running && CheckForBadItemAndFailIt(Item.Owner, Config, Log))
      return;

   // Find the queue structure
   Queue *I = Queues;
   for (; I != 0 && I->Name != Name; I = I->Next);
   if (I == 0)
   {
      I = new Queue(Name,this);
      I->Next = Queues;
      Queues = I;

      if (Running == true)
         I->Startup();
   }

   // See if this is a local only URI
   if (Config->LocalOnly == true && Item.Owner->Complete == false)
      Item.Owner->Local = true;
   Item.Owner->Status = Item::StatIdle;

   // Queue it into the named queue
   if(I->Enqueue(Item))
      ToFetch++;

   // Some trace stuff
   if (Debug == true)
   {
      clog << "Fetching " << Item.URI << endl;
      clog << " to " << Item.Owner->DestFile << endl;
      clog << " Queue is: " << Name << endl;
   }
}
/*}}}*/
// Acquire::Dequeue - Remove an item from all queues /*{{{*/
// ---------------------------------------------------------------------
/* This is called when an item is finished being fetched. It removes it
   from all the queues */
void pkgAcquire::Dequeue(Item *Itm)
{
   Queue *I = Queues;
   bool Res = false;
   if (Debug == true)
      clog << "Dequeuing " << Itm->DestFile << endl;

   for (; I != 0; I = I->Next)
   {
      if (I->Dequeue(Itm))
      {
         Res = true;
         if (Debug == true)
            clog << "Dequeued from " << I->Name << endl;
      }
   }

   if (Res == true)
      ToFetch--;
}
/*}}}*/
// Acquire::QueueName - Return the name of the queue for this URI /*{{{*/
// ---------------------------------------------------------------------
/* The string returned depends on the configuration settings and the
   method parameters. Given something like http://foo.org/bar it can
   return http:foo.org or http */
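/* Illustrative (not from the original sources): with the defaults used in
   this function, http://foo.org/bar first maps to "http:foo.org"; the
   Acquire::http::MaxParallel setting (default 8) may then fold it onto one
   of the rotating "http::0" .. "http::7" queues, while hostless URIs (e.g.
   cdrom:) are spread over "cdrom:0", "cdrom:1", ... up to the
   Acquire::QueueHost::Limit number of queues. */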
string pkgAcquire::QueueName(string Uri,MethodConfig const *&Config)
{
   URI U(Uri);

   Config = GetConfig(U.Access);
   if (Config == 0)
      return string();

   /* Single-Instance methods get exactly one queue per access method.
      The same single queue is used when queueing by access. */
   if (Config->SingleInstance == true || QueueMode == QueueAccess)
      return U.Access;

   string AccessSchema = U.Access + ':';
   string FullQueueName;

   if (U.Host.empty())
   {
      long existing = 0;
      // check how many queues exist already and reuse empty ones
      for (Queue const *I = Queues; I != 0; I = I->Next)
         if (I->Name.compare(0, AccessSchema.length(), AccessSchema) == 0)
         {
            if (I->Items == nullptr)
               return I->Name;
            ++existing;
         }

#ifdef _SC_NPROCESSORS_ONLN
      long cpuCount = sysconf(_SC_NPROCESSORS_ONLN) * 2;
#else
      long cpuCount = 10;
#endif
      cpuCount = _config->FindI("Acquire::QueueHost::Limit", cpuCount);

      if (cpuCount <= 0 || existing < cpuCount)
         strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), existing);
      else
      {
         long const randomQueue = random() % cpuCount;
         strprintf(FullQueueName, "%s%ld", AccessSchema.c_str(), randomQueue);
      }

      if (Debug)
         clog << "Chose random queue " << FullQueueName << " for " << Uri << endl;
   } else
   {
      FullQueueName = AccessSchema + U.Host;

      int parallel(_config->FindI("Acquire::"+U.Access+"::MaxParallel",8));
      if (parallel > 0) {
         typedef map<string, int> indexmap;
         static indexmap indices;

         pair<indexmap::iterator, bool> cache(indices.insert(indexmap::value_type(FullQueueName, -1)));
         if (cache.second || cache.first->second == -1) {
            int &index(indices[U.Access]);
            if (index >= parallel)
               index = 0;
            cache.first->second = index++;
         }

         ostringstream value;
         value << U.Access << "::" << cache.first->second;
         FullQueueName = value.str();
      }
   }
   unsigned int Instances = 0, SchemaLength = AccessSchema.length();

   Queue *I = Queues;
   for (; I != 0; I = I->Next) {
      // if the queue already exists, re-use it
      if (I->Name == FullQueueName)
         return FullQueueName;

      if (I->Name.compare(0, SchemaLength, AccessSchema) == 0)
         Instances++;
   }

   if (Debug) {
      clog << "Found " << Instances << " instances of " << U.Access << endl;
   }

   if (Instances >= (unsigned int)_config->FindI("Acquire::QueueHost::Limit",10))
      return U.Access;

   return FullQueueName;
}
/*}}}*/
// Acquire::GetConfig - Fetch the configuration information /*{{{*/
// ---------------------------------------------------------------------
/* This locates the configuration structure for an access method. If
   a config structure cannot be found a Worker will be created to
   retrieve it */
pkgAcquire::MethodConfig *pkgAcquire::GetConfig(string Access)
{
   // Search for an existing config
   MethodConfig *Conf;
   for (Conf = Configs; Conf != 0; Conf = Conf->Next)
      if (Conf->Access == Access)
         return Conf;

   // Create the new config class
   Conf = new MethodConfig;
   Conf->Access = Access;
   Conf->Next = Configs;
   Configs = Conf;

   // Create the worker to fetch the configuration
   Worker Work(Conf);
   if (Work.Start() == false)
      return 0;

   /* if a method uses Dl-Limit, we switch to SingleInstance mode */
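   /* e.g. (illustrative) an apt.conf snippet such as
         Acquire::http::Dl-Limit "500";
      caps http downloads at roughly 500 KB/s; since bandwidth limiting only
      works with a single connection, the method is forced into
      SingleInstance mode below. */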
   if(_config->FindI("Acquire::"+Access+"::Dl-Limit",0) > 0)
      Conf->SingleInstance = true;

   return Conf;
}
/*}}}*/
// Acquire::SetFds - Deal with readable FDs /*{{{*/
// ---------------------------------------------------------------------
/* Collect FDs that have activity monitors into the fd sets */
void pkgAcquire::SetFds(int &Fd,fd_set *RSet,fd_set *WSet)
{
   for (Worker *I = Workers; I != 0; I = I->NextAcquire)
   {
      if (I->InReady == true && I->InFd >= 0)
      {
         if (Fd < I->InFd)
            Fd = I->InFd;
         FD_SET(I->InFd,RSet);
      }
      if (I->OutReady == true && I->OutFd >= 0)
      {
         if (Fd < I->OutFd)
            Fd = I->OutFd;
         FD_SET(I->OutFd,WSet);
      }
   }
}
/*}}}*/
// Acquire::RunFds - compatibility; remove on next ABI/API break /*{{{*/
void pkgAcquire::RunFds(fd_set *RSet,fd_set *WSet)
{
   RunFdsSane(RSet, WSet);
}
/*}}}*/
// Acquire::RunFdsSane - Deal with active FDs /*{{{*/
// ---------------------------------------------------------------------
/* Dispatch active FDs over to the proper workers. It is very important
   that a worker never be erased while this is running! The queue class
   should never erase a worker except during shutdown processing. */
bool pkgAcquire::RunFdsSane(fd_set *RSet,fd_set *WSet)
{
   bool Res = true;

   for (Worker *I = Workers; I != 0; I = I->NextAcquire)
   {
      if (I->InFd >= 0 && FD_ISSET(I->InFd,RSet) != 0)
         Res &= I->InFdReady();
      if (I->OutFd >= 0 && FD_ISSET(I->OutFd,WSet) != 0)
         Res &= I->OutFdReady();
   }

   return Res;
}
/*}}}*/
// Acquire::Run - Run the fetch sequence /*{{{*/
// ---------------------------------------------------------------------
/* This runs the queues. It manages a select loop for all of the
   Worker tasks. The workers interact with the queues and items to
   manage the actual fetch. */
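/* Illustrative (not part of the original sources): a caller typically runs
   the whole download in one go, e.g.

     pkgAcquire::RunResult const Res = Fetcher.Run();

   The pulse interval is given in microseconds; assuming the usual default
   of 500000 declared in acquire.h, the attached pkgAcquireStatus::Pulse()
   fires about twice per second, while Fetcher.Run(100000) would raise that
   to roughly ten times per second. */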
static bool IsAccessibleBySandboxUser(std::string const &filename, bool const ReadWrite)
{
   // you would think this is easy to answer with faccessat, right? Wrong!
   // It e.g. gets groups wrong, so the only thing which works reliably is
   // trying to open the file we want to open later on…
   if (unlikely(filename.empty()))
      return true;

   if (ReadWrite == false)
   {
      errno = 0;
      // can we read a file? Note that non-existing files are "fine"
      int const fd = open(filename.c_str(), O_RDONLY | O_CLOEXEC);
      if (fd == -1 && errno == EACCES)
         return false;
      close(fd);
      return true;
   }
   else
   {
      // the file might not exist yet and even if it does we will fix the
      // permissions later, so all that matters here is that the directory
      // it lives in allows that
      std::string const dirname = flNotFile(filename);
      if (unlikely(dirname.empty()))
         return true;

      char const * const filetag = ".apt-acquire-privs-test.XXXXXX";
      std::string const tmpfile_tpl = flCombine(dirname, filetag);
      std::unique_ptr<char, decltype(std::free) *> tmpfile { strdup(tmpfile_tpl.c_str()), std::free };
      int const fd = mkstemp(tmpfile.get());
      if (fd == -1 && errno == EACCES)
         return false;
      RemoveFile("IsAccessibleBySandboxUser", tmpfile.get());
      close(fd);
      return true;
   }
}
static void CheckDropPrivsMustBeDisabled(pkgAcquire const &Fetcher)
{
   if(getuid() != 0)
      return;

   std::string const SandboxUser = _config->Find("APT::Sandbox::User");
   if (SandboxUser.empty() || SandboxUser == "root")
      return;

   struct passwd const * const pw = getpwnam(SandboxUser.c_str());
   if (pw == NULL)
   {
      _error->Warning(_("No sandbox user '%s' on the system, can not drop privileges"), SandboxUser.c_str());
      _config->Set("APT::Sandbox::User", "");
      return;
   }

   uid_t const old_euid = geteuid();
   gid_t const old_egid = getegid();

   long const ngroups_max = sysconf(_SC_NGROUPS_MAX);
   std::unique_ptr<gid_t[]> old_gidlist(new gid_t[ngroups_max]);
   if (unlikely(old_gidlist == NULL))
      return;
   ssize_t old_gidlist_nr;
   if ((old_gidlist_nr = getgroups(ngroups_max, old_gidlist.get())) < 0)
   {
      _error->FatalE("getgroups", "getgroups %lu failed", ngroups_max);
      old_gidlist[0] = 0;
      old_gidlist_nr = 1;
   }
   if (setgroups(1, &pw->pw_gid))
      _error->FatalE("setgroups", "setgroups %u failed", pw->pw_gid);

   if (setegid(pw->pw_gid) != 0)
      _error->FatalE("setegid", "setegid %u failed", pw->pw_gid);
   if (seteuid(pw->pw_uid) != 0)
      _error->FatalE("seteuid", "seteuid %u failed", pw->pw_uid);

   for (pkgAcquire::ItemCIterator I = Fetcher.ItemsBegin();
         I != Fetcher.ItemsEnd(); ++I)
   {
      // no need to drop privileges for a complete file
      if ((*I)->Complete == true || (*I)->Status != pkgAcquire::Item::StatIdle)
         continue;

      // if the destination file is inaccessible all hope is lost for privilege dropping
      if (IsAccessibleBySandboxUser((*I)->DestFile, true) == false)
      {
         _error->WarningE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
               (*I)->DestFile.c_str(), SandboxUser.c_str());
         _config->Set("APT::Sandbox::User", "");
         break;
      }

      // if it is the source file (e.g. local sources) we might be lucky
      // and only need to skip the privilege drop for some methods.
      URI const source = (*I)->DescURI();
      if (source.Access == "file" || source.Access == "copy")
      {
         std::string const conf = "Binary::" + source.Access + "::APT::Sandbox::User";
         if (_config->Exists(conf) == true)
            continue;

         if (IsAccessibleBySandboxUser(source.Path, false) == false)
         {
            _error->NoticeE("pkgAcquire::Run", _("Can't drop privileges for downloading as file '%s' couldn't be accessed by user '%s'."),
                  source.Path.c_str(), SandboxUser.c_str());
            _config->CndSet("Binary::file::APT::Sandbox::User", "root");
            _config->CndSet("Binary::copy::APT::Sandbox::User", "root");
         }
      }
   }

   if (seteuid(old_euid) != 0)
      _error->FatalE("seteuid", "seteuid %u failed", old_euid);
   if (setegid(old_egid) != 0)
      _error->FatalE("setegid", "setegid %u failed", old_egid);
   if (setgroups(old_gidlist_nr, old_gidlist.get()))
      _error->FatalE("setgroups", "setgroups %u failed", 0);
}
pkgAcquire::RunResult pkgAcquire::Run(int PulseIntervall)
{
   _error->PushToStack();
   CheckDropPrivsMustBeDisabled(*this);

   Running = true;

   for (Queue *I = Queues; I != 0; I = I->Next)
      I->Startup();

   if (Log != 0)
      Log->Start();

   bool WasCancelled = false;

   // Run till all things have been acquired
   struct timeval tv;
   tv.tv_sec = 0;
   tv.tv_usec = PulseIntervall;
   while (ToFetch > 0)
   {
      fd_set RFds;
      fd_set WFds;
      int Highest = 0;
      FD_ZERO(&RFds);
      FD_ZERO(&WFds);
      SetFds(Highest,&RFds,&WFds);

      int Res;
      do
      {
         Res = select(Highest+1,&RFds,&WFds,0,&tv);
      }
      while (Res < 0 && errno == EINTR);

      if (Res < 0)
      {
         _error->Errno("select","Select has failed");
         break;
      }

      if(RunFdsSane(&RFds,&WFds) == false)
         break;

      // Timeout, notify the log class
      if (Res == 0 || (Log != 0 && Log->Update == true))
      {
         tv.tv_usec = PulseIntervall;
         for (Worker *I = Workers; I != 0; I = I->NextAcquire)
            I->Pulse();
         if (Log != 0 && Log->Pulse(this) == false)
         {
            WasCancelled = true;
            break;
         }
      }
   }

   if (Log != 0)
      Log->Stop();

   // Shut down the acquire bits
   Running = false;
   for (Queue *I = Queues; I != 0; I = I->Next)
      I->Shutdown(false);

   // Shut down the items
   for (ItemIterator I = Items.begin(); I != Items.end(); ++I)
      (*I)->Finished();

   bool const newError = _error->PendingError();
   _error->MergeWithStack();
   if (newError)
      return Failed;
   if (WasCancelled)
      return Cancelled;
   return Continue;
}
/*}}}*/
// Acquire::Bump - Called when an item is dequeued /*{{{*/
// ---------------------------------------------------------------------
/* This routine bumps idle queues in hopes that they will be able to fetch
   the dequeued item */
void pkgAcquire::Bump()
{
   for (Queue *I = Queues; I != 0; I = I->Next)
      I->Bump();
}
/*}}}*/
// Acquire::WorkerStep - Step to the next worker /*{{{*/
// ---------------------------------------------------------------------
/* Not inlined to avoid including acquire-worker.h */
pkgAcquire::Worker *pkgAcquire::WorkerStep(Worker *I)
{
   return I->NextAcquire;
}
/*}}}*/
// Acquire::Clean - Cleans a directory /*{{{*/
// ---------------------------------------------------------------------
/* This is a bit simplistic: it looks at every file in the dir and sees
   if it is part of the download set. */
bool pkgAcquire::Clean(string Dir)
{
   // non-existing directories are by definition clean…
   if (DirectoryExists(Dir) == false)
      return true;

   if(Dir == "/")
      return _error->Error(_("Clean of %s is not supported"), Dir.c_str());

   DIR *D = opendir(Dir.c_str());
   if (D == 0)
      return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());

   string StartDir = SafeGetCWD();
   if (chdir(Dir.c_str()) != 0)
   {
      closedir(D);
      return _error->Errno("chdir",_("Unable to change to %s"),Dir.c_str());
   }

   for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
   {
      // Skip some files..
      if (strcmp(Dir->d_name,"lock") == 0 ||
          strcmp(Dir->d_name,"partial") == 0 ||
          strcmp(Dir->d_name,"lost+found") == 0 ||
          strcmp(Dir->d_name,".") == 0 ||
          strcmp(Dir->d_name,"..") == 0)
         continue;

      // Look in the get list
      ItemCIterator I = Items.begin();
      for (; I != Items.end(); ++I)
         if (flNotDir((*I)->DestFile) == Dir->d_name)
            break;

      // Nothing found, nuke it
      if (I == Items.end())
         RemoveFile("Clean", Dir->d_name);
   }

   closedir(D);
   if (chdir(StartDir.c_str()) != 0)
      return _error->Errno("chdir",_("Unable to change to %s"),StartDir.c_str());
   return true;
}
/*}}}*/
// Acquire::TotalNeeded - Number of bytes to fetch /*{{{*/
// ---------------------------------------------------------------------
/* This is the total number of bytes needed */
APT_PURE unsigned long long pkgAcquire::TotalNeeded()
{
   return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
      [](unsigned long long const T, Item const * const I) {
         return T + I->FileSize;
      });
}
/*}}}*/
// Acquire::FetchNeeded - Number of bytes needed to get /*{{{*/
// ---------------------------------------------------------------------
/* This is the number of bytes that is not local */
APT_PURE unsigned long long pkgAcquire::FetchNeeded()
{
   return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
      [](unsigned long long const T, Item const * const I) {
         if (I->Local == false)
            return T + I->FileSize;
         else
            return T;
      });
}
/*}}}*/
// Acquire::PartialPresent - Number of partial bytes we already have /*{{{*/
// ---------------------------------------------------------------------
/* This is the number of bytes of the non-local items that are already
   present on disk from earlier (partial) downloads */
APT_PURE unsigned long long pkgAcquire::PartialPresent()
{
   return std::accumulate(ItemsBegin(), ItemsEnd(), 0llu,
      [](unsigned long long const T, Item const * const I) {
         if (I->Local == false)
            return T + I->PartialSize;
         else
            return T;
      });
}
/*}}}*/
// Acquire::UriBegin - Start iterator for the uri list /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquire::UriIterator pkgAcquire::UriBegin()
{
   return UriIterator(Queues);
}
/*}}}*/
// Acquire::UriEnd - End iterator for the uri list /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquire::UriIterator pkgAcquire::UriEnd()
{
   return UriIterator(0);
}
/*}}}*/
// Acquire::MethodConfig::MethodConfig - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquire::MethodConfig::MethodConfig() : d(NULL), Next(0), SingleInstance(false),
      Pipeline(false), SendConfig(false), LocalOnly(false), NeedsCleanup(false),
      Removable(false)
{
}
/*}}}*/
// Queue::Queue - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquire::Queue::Queue(string const &name,pkgAcquire * const owner) : d(NULL), Next(0),
      Name(name), Items(0), Workers(0), Owner(owner), PipeDepth(0), MaxPipeDepth(1)
{
}
/*}}}*/
// Queue::~Queue - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquire::Queue::~Queue()
{
   Shutdown(true);

   while (Items != 0)
   {
      QItem *Jnk = Items;
      Items = Items->Next;
      delete Jnk;
   }
}
/*}}}*/
// Queue::Enqueue - Queue an item to the queue /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgAcquire::Queue::Enqueue(ItemDesc &Item)
{
   QItem **OptimalI = &Items;
   QItem **I = &Items;
   // move to the end of the queue and check for duplicates here
   for (; *I != 0; ) {
      if (Item.URI == (*I)->URI)
      {
         if (_config->FindB("Debug::pkgAcquire::Worker",false) == true)
            std::cerr << " @ Queue: Action combined for " << Item.URI << " and " << (*I)->URI << std::endl;
         (*I)->Owners.push_back(Item.Owner);
         Item.Owner->Status = (*I)->Owner->Status;
         return false;
      }
      // Determine the optimal position to insert: before anything with a
      // higher priority.
      int priority = (*I)->GetPriority();

      I = &(*I)->Next;
      if (priority >= Item.Owner->Priority()) {
         OptimalI = I;
      }
   }

   // Create a new item
   QItem *Itm = new QItem;
   *Itm = Item;
   Itm->Next = *OptimalI;
   *OptimalI = Itm;

   Item.Owner->QueueCounter++;
   if (Items->Next == 0)
      Cycle();
   return true;
}
/*}}}*/
// Queue::Dequeue - Remove an item from the queue /*{{{*/
// ---------------------------------------------------------------------
/* We return true if we hit something */
bool pkgAcquire::Queue::Dequeue(Item *Owner)
{
   if (Owner->Status == pkgAcquire::Item::StatFetching)
      return _error->Error("Tried to dequeue a fetching object");

   bool Res = false;

   QItem **I = &Items;
   for (; *I != 0;)
   {
      if (Owner == (*I)->Owner)
      {
         QItem *Jnk = *I;
         *I = (*I)->Next;
         Owner->QueueCounter--;
         delete Jnk;
         Res = true;
      }
      else
         I = &(*I)->Next;
   }

   return Res;
}
/*}}}*/
// Queue::Startup - Start the worker processes /*{{{*/
// ---------------------------------------------------------------------
/* It is possible for this to be called with a pre-existing set of
   workers. */
bool pkgAcquire::Queue::Startup()
{
   if (Workers == 0)
   {
      URI U(Name);
      pkgAcquire::MethodConfig * const Cnf = Owner->GetConfig(U.Access);
      if (unlikely(Cnf == nullptr))
         return false;

      // now-running twin of the pkgAcquire::Enqueue call
      for (QItem *I = Items; I != 0; )
      {
         auto const INext = I->Next;
         for (auto &&O: I->Owners)
            CheckForBadItemAndFailIt(O, Cnf, Owner->Log);
         // if an item failed, it will be auto-dequeued, invalidating our I here
         I = INext;
      }

      Workers = new Worker(this,Cnf,Owner->Log);
      Owner->Add(Workers);
      if (Workers->Start() == false)
         return false;

      /* When pipelining we commit 10 items. This needs to change once we
         add retries from other sources so that Cycle can maintain a
         pipeline depth on its own. */
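      /* e.g. (illustrative) an apt.conf snippet such as
            Acquire::Max-Pipeline-Depth "5";
         would keep at most five requests in flight per pipelining-capable
         method instead of the default of 10 used below. */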
      if (Cnf->Pipeline == true)
         MaxPipeDepth = _config->FindI("Acquire::Max-Pipeline-Depth",10);
      else
         MaxPipeDepth = 1;
   }

   return Cycle();
}
/*}}}*/
// Queue::Shutdown - Shutdown the worker processes /*{{{*/
// ---------------------------------------------------------------------
/* If final is true then all workers are eliminated, otherwise only workers
   that do not need cleanup are removed */
bool pkgAcquire::Queue::Shutdown(bool Final)
{
   // Delete all of the workers
   pkgAcquire::Worker **Cur = &Workers;
   while (*Cur != 0)
   {
      pkgAcquire::Worker *Jnk = *Cur;
      if (Final == true || Jnk->GetConf()->NeedsCleanup == false)
      {
         *Cur = Jnk->NextQueue;
         Owner->Remove(Jnk);
         delete Jnk;
      }
      else
         Cur = &(*Cur)->NextQueue;
   }

   return true;
}
/*}}}*/
// Queue::FindItem - Find a URI in the item list /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquire::Queue::QItem *pkgAcquire::Queue::FindItem(string URI,pkgAcquire::Worker *Owner)
{
   for (QItem *I = Items; I != 0; I = I->Next)
      if (I->URI == URI && I->Worker == Owner)
         return I;
   return 0;
}
/*}}}*/
// Queue::ItemDone - Item has been completed /*{{{*/
// ---------------------------------------------------------------------
/* The worker signals this which causes the item to be removed from the
   queue. If this is the last queue instance then it is removed from the
   main queue too.*/
bool pkgAcquire::Queue::ItemDone(QItem *Itm)
{
   PipeDepth--;
   for (QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
   {
      if ((*O)->Status == pkgAcquire::Item::StatFetching)
         (*O)->Status = pkgAcquire::Item::StatDone;
   }

   if (Itm->Owner->QueueCounter <= 1)
      Owner->Dequeue(Itm->Owner);
   else
   {
      Dequeue(Itm->Owner);
      Owner->Bump();
   }

   return Cycle();
}
/*}}}*/
// Queue::Cycle - Queue new items into the method /*{{{*/
// ---------------------------------------------------------------------
/* This locates a new idle item and sends it to the worker. If pipelining
   is enabled then it keeps the pipe full. */
bool pkgAcquire::Queue::Cycle()
{
   if (Items == 0 || Workers == 0)
      return true;

   if (PipeDepth < 0)
      return _error->Error("Pipedepth failure");

   // Look for a queueable item
   QItem *I = Items;
   int ActivePriority = 0;
   while (PipeDepth < (signed)MaxPipeDepth)
   {
      for (; I != 0; I = I->Next) {
         if (I->Owner->Status == pkgAcquire::Item::StatFetching)
            ActivePriority = std::max(ActivePriority, I->GetPriority());
         if (I->Owner->Status == pkgAcquire::Item::StatIdle)
            break;
      }

      // Nothing to do, queue is idle.
      if (I == 0)
         return true;

      // This item has a lower priority than stuff in the pipeline, pretend
      // the queue is idle
      if (I->GetPriority() < ActivePriority)
         return true;
      I->Worker = Workers;
      for (auto const &O: I->Owners)
         O->Status = pkgAcquire::Item::StatFetching;
      PipeDepth++;
      if (Workers->QueueItem(I) == false)
         return false;
   }

   return true;
}
/*}}}*/
// Queue::Bump - Fetch any pending objects if we are idle /*{{{*/
// ---------------------------------------------------------------------
/* This is called when an item in multiple queues is dequeued */
void pkgAcquire::Queue::Bump()
{
   Cycle();
}
/*}}}*/
HashStringList pkgAcquire::Queue::QItem::GetExpectedHashes() const /*{{{*/
{
   /* each Item can have multiple owners and each owner might have different
      hashes, even if that is unlikely in practice and if so at least some
      owners will later fail. There is one situation though which is not a
      failure and still needs this handling: Two owners who expect the same
      file, but one owner only knows the SHA1 while the other only knows SHA256. */
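   /* Illustrative (not from the original sources): owner A knowing only a
      SHA1 value and owner B knowing only a SHA256 value merge into a list
      carrying both hashes; but if A and B disagree on, say, the SHA256
      value, the merged list is cleared and no hashes are sent at all. */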
   HashStringList superhsl;
   for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O)
   {
      HashStringList const hsl = (*O)->GetExpectedHashes();
      if (hsl.usable() == false)
         continue;
      if (superhsl.usable() == false)
         superhsl = hsl;
      else
      {
         // we merge both lists - if we find disagreement send no hashes
         HashStringList::const_iterator hs = hsl.begin();
         for (; hs != hsl.end(); ++hs)
            if (superhsl.push_back(*hs) == false)
               break;
         if (hs != hsl.end())
         {
            superhsl.clear();
            break;
         }
      }
   }
   return superhsl;
}
/*}}}*/
APT_PURE unsigned long long pkgAcquire::Queue::QItem::GetMaximumSize() const /*{{{*/
{
   unsigned long long Maximum = std::numeric_limits<unsigned long long>::max();
   for (auto const &O: Owners)
   {
      if (O->FileSize == 0)
         continue;
      Maximum = std::min(Maximum, O->FileSize);
   }
   if (Maximum == std::numeric_limits<unsigned long long>::max())
      return 0;
   return Maximum;
}
/*}}}*/
APT_PURE int pkgAcquire::Queue::QItem::GetPriority() const /*{{{*/
{
   int Priority = 0;
   for (auto const &O: Owners)
      Priority = std::max(Priority, O->Priority());

   return Priority;
}
/*}}}*/
void pkgAcquire::Queue::QItem::SyncDestinationFiles() const /*{{{*/
{
   /* ensure that the first owner has the best partial file of all and
      the rest have (potentially dangling) symlinks to it so that
      everything (like progress reporting) finds it easily */
   std::string superfile = Owner->DestFile;
   off_t supersize = 0;
   for (pkgAcquire::Queue::QItem::owner_iterator O = Owners.begin(); O != Owners.end(); ++O)
   {
      if ((*O)->DestFile == superfile)
         continue;
      struct stat file;
      if (lstat((*O)->DestFile.c_str(),&file) == 0)
      {
         if ((file.st_mode & S_IFREG) == 0)
            RemoveFile("SyncDestinationFiles", (*O)->DestFile);
         else if (supersize < file.st_size)
         {
            supersize = file.st_size;
            RemoveFile("SyncDestinationFiles", superfile);
            rename((*O)->DestFile.c_str(), superfile.c_str());
         }
         else
            RemoveFile("SyncDestinationFiles", (*O)->DestFile);
         if (symlink(superfile.c_str(), (*O)->DestFile.c_str()) != 0)
         {
            ; // not a problem per se and no real alternative
         }
      }
   }
}
/*}}}*/
std::string pkgAcquire::Queue::QItem::Custom600Headers() const /*{{{*/
{
   /* The others are relatively easy to merge, but this one?
      Let's not merge and see how far we can run with it…
      Likely, nobody will ever notice as all the items will
      be of the same class and hence generate the same headers. */
   return Owner->Custom600Headers();
}
/*}}}*/

// AcquireStatus::pkgAcquireStatus - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquireStatus::pkgAcquireStatus() : d(NULL), Percent(-1), Update(true), MorePulses(false)
{
   Start();
}
/*}}}*/
// AcquireStatus::Pulse - Called periodically /*{{{*/
// ---------------------------------------------------------------------
/* This computes some internal state variables for the derived classes to
   use. It generates the current downloaded bytes and total bytes to download
   as well as the current CPS estimate. */
bool pkgAcquireStatus::Pulse(pkgAcquire *Owner)
{
   TotalBytes = 0;
   CurrentBytes = 0;
   TotalItems = 0;
   CurrentItems = 0;

   // Compute the total number of bytes to fetch
   unsigned int Unknown = 0;
   unsigned int Count = 0;
   bool ExpectAdditionalItems = false;
   for (pkgAcquire::ItemCIterator I = Owner->ItemsBegin();
         I != Owner->ItemsEnd();
         ++I, ++Count)
   {
      TotalItems++;
      if ((*I)->Status == pkgAcquire::Item::StatDone)
         ++CurrentItems;

      // do we expect to acquire more files than we know of yet?
      if ((*I)->ExpectedAdditionalItems > 0)
         ExpectAdditionalItems = true;

      TotalBytes += (*I)->FileSize;
      if ((*I)->Complete == true)
         CurrentBytes += (*I)->FileSize;
      if ((*I)->FileSize == 0 && (*I)->Complete == false)
         ++Unknown;
   }

   // Compute the current completion
   unsigned long long ResumeSize = 0;
   for (pkgAcquire::Worker *I = Owner->WorkersBegin(); I != 0;
         I = Owner->WorkerStep(I))
   {
      if (I->CurrentItem != 0 && I->CurrentItem->Owner->Complete == false)
      {
         CurrentBytes += I->CurrentSize;
         ResumeSize += I->ResumePoint;

         // Files with unknown size always have 100% completion
         if (I->CurrentItem->Owner->FileSize == 0 &&
             I->CurrentItem->Owner->Complete == false)
            TotalBytes += I->CurrentSize;
      }
   }

   // Normalize the figures and account for unknown size downloads
   if (TotalBytes <= 0)
      TotalBytes = 1;
   if (Unknown == Count)
      TotalBytes = Unknown;

   // Wha?! This is not supposed to happen.
   if (CurrentBytes > TotalBytes)
      CurrentBytes = TotalBytes;

   // Compute the CPS
   struct timeval NewTime;
   gettimeofday(&NewTime,0);
   if ((NewTime.tv_sec - Time.tv_sec == 6 && NewTime.tv_usec > Time.tv_usec) ||
       NewTime.tv_sec - Time.tv_sec > 6)
   {
      double Delta = NewTime.tv_sec - Time.tv_sec +
         (NewTime.tv_usec - Time.tv_usec)/1000000.0;

      // Compute the CPS value
      if (Delta < 0.01)
         CurrentCPS = 0;
      else
         CurrentCPS = ((CurrentBytes - ResumeSize) - LastBytes)/Delta;
      LastBytes = CurrentBytes - ResumeSize;
      ElapsedTime = (unsigned long long)Delta;
      Time = NewTime;
   }

   double const OldPercent = Percent;
   // calculate the percentage; while we still expect additional items a
   // percentage would be meaningless, so report 0 for now
   if (ExpectAdditionalItems)
      Percent = 0;
   else
      // use both files and bytes because bytes can be unreliable
      Percent = (0.8 * (CurrentBytes/float(TotalBytes)*100.0) +
                 0.2 * (CurrentItems/float(TotalItems)*100.0));
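   // Worked example (illustrative): with 50 of 100 MB downloaded and 5 of
   // 20 items done this yields 0.8 * 50% + 0.2 * 25% = 45%.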

   // debug
   if (_config->FindB("Debug::acquire::progress", false) == true)
   {
      std::clog
         << "["
         << std::setw(5) << std::setprecision(4) << std::showpoint << Percent
         << "]"
         << " Bytes: "
         << SizeToStr(CurrentBytes) << " / " << SizeToStr(TotalBytes)
         << " # Files: "
         << CurrentItems << " / " << TotalItems
         << std::endl;
   }

   double const DiffPercent = Percent - OldPercent;
   if (DiffPercent < 0.001 && _config->FindB("Acquire::Progress::Diffpercent", false) == true)
      return true;

   int fd = _config->FindI("APT::Status-Fd",-1);
   if(fd > 0)
   {
      ostringstream status;

      char msg[200];
      long i = CurrentItems < TotalItems ? CurrentItems + 1 : CurrentItems;
      unsigned long long ETA = 0;
      if(CurrentCPS > 0)
         ETA = (TotalBytes - CurrentBytes) / CurrentCPS;

      // only show the ETA if it makes sense
      if (ETA > 0 && ETA < 172800 /* two days */ )
         snprintf(msg,sizeof(msg), _("Retrieving file %li of %li (%s remaining)"), i, TotalItems, TimeToStr(ETA).c_str());
      else
         snprintf(msg,sizeof(msg), _("Retrieving file %li of %li"), i, TotalItems);

      // build the status str
      std::ostringstream str;
      str.imbue(std::locale::classic());
      str.precision(4);
      str << "dlstatus" << ':' << std::fixed << i << ':' << Percent << ':' << msg << '\n';
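      // e.g. (illustrative) a consumer reading APT::Status-Fd would see a
      // line like:  dlstatus:3:45.0000:Retrieving file 3 of 20 (10s remaining)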
      auto const dlstatus = str.str();
      FileFd::Write(fd, dlstatus.data(), dlstatus.size());
   }

   return true;
}
/*}}}*/
// AcquireStatus::Start - Called when the download is started /*{{{*/
// ---------------------------------------------------------------------
/* We just reset the counters */
void pkgAcquireStatus::Start()
{
   gettimeofday(&Time,0);
   gettimeofday(&StartTime,0);
   LastBytes = 0;
   CurrentCPS = 0;
   CurrentBytes = 0;
   TotalBytes = 0;
   FetchedBytes = 0;
   ElapsedTime = 0;
   TotalItems = 0;
   CurrentItems = 0;
}
/*}}}*/
// AcquireStatus::Stop - Finished downloading /*{{{*/
// ---------------------------------------------------------------------
/* This accurately computes the elapsed time and the total overall CPS. */
void pkgAcquireStatus::Stop()
{
   // Compute the CPS and elapsed time
   struct timeval NewTime;
   gettimeofday(&NewTime,0);

   double Delta = NewTime.tv_sec - StartTime.tv_sec +
      (NewTime.tv_usec - StartTime.tv_usec)/1000000.0;

   // Compute the CPS value
   if (Delta < 0.01)
      CurrentCPS = 0;
   else
      CurrentCPS = FetchedBytes/Delta;
   LastBytes = CurrentBytes;
   ElapsedTime = (unsigned long long)Delta;
}
/*}}}*/
// AcquireStatus::Fetched - Called when a byte set has been fetched /*{{{*/
// ---------------------------------------------------------------------
/* This is used to get accurate final transfer rate reporting. */
void pkgAcquireStatus::Fetched(unsigned long long Size,unsigned long long Resume)
{
   FetchedBytes += Size - Resume;
}
/*}}}*/

pkgAcquire::UriIterator::UriIterator(pkgAcquire::Queue *Q) : d(NULL), CurQ(Q), CurItem(0)
{
   while (CurItem == 0 && CurQ != 0)
   {
      CurItem = CurQ->Items;
      CurQ = CurQ->Next;
   }
}

APT_CONST pkgAcquire::UriIterator::~UriIterator() {}
APT_CONST pkgAcquire::MethodConfig::~MethodConfig() {}
APT_CONST pkgAcquireStatus::~pkgAcquireStatus() {}