/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:    kern/task.c
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *          David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */

#include <mach_kdb.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <fast_tas.h>
#include <task_swapper.h>
#include <platforms.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach/mach_types.h>
#include <mach/machine/rpc.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/sf.h>
#include <kern/mk_sp.h>         /*** ??? fix so this can be removed ***/
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#if MACH_KDB
#include <ddb/db_sym.h>
#endif /* MACH_KDB */

#if TASK_SWAPPER
#include <kern/task_swap.h>
#endif /* TASK_SWAPPER */

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>

task_t kernel_task;
zone_t task_zone;

/* Forwards */

void task_hold_locked(
    task_t task);
void task_wait_locked(
    task_t task);
void task_release_locked(
    task_t task);
void task_collect_scan(void);
void task_free(
    task_t task);
void task_synchronizer_destroy_all(
    task_t task);
void task_subsystem_destroy_all(
    task_t task);

kern_return_t task_set_ledger(
    task_t task,
    ledger_t wired,
    ledger_t paged);

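/*
 * task_init:
 *
 * Initialize the task module: create the zone that backs task
 * structures, then bootstrap the kernel task itself.
 */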
void
task_init(void)
{
    task_zone = zinit(
            sizeof(struct task),
            TASK_MAX * sizeof(struct task),
            TASK_CHUNK * sizeof(struct task),
            "tasks");

    eml_init();

    /*
     * Create the kernel task as the first task.
     * Task_create_local must assign to kernel_task as a side effect,
     * for other initialization. (:-()
     */
    if (task_create_local(
            TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
        panic("task_init\n");
    vm_map_deallocate(kernel_task->map);
    kernel_task->map = kernel_map;

#if MACH_ASSERT
    if (watchacts & WA_TASK)
        printf("task_init: kernel_task = %x map=%x\n",
               kernel_task, kernel_map);
#endif /* MACH_ASSERT */
}

#if MACH_HOST
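/*
 * task_freeze / task_unfreeze:
 *
 * Block out (and later re-allow) processor set assignment for the
 * task, so that task->processor_set cannot change while it is frozen.
 */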
void
task_freeze(
    task_t task)
{
    task_lock(task);
    /*
     * If may_assign is false, the task is already being assigned;
     * wait for that to finish.
     */
    while (task->may_assign == FALSE) {
        task->assign_active = TRUE;
        thread_sleep_mutex((event_t) &task->assign_active,
                           &task->lock, THREAD_INTERRUPTIBLE);
        task_lock(task);
    }
    task->may_assign = FALSE;
    task_unlock(task);

    return;
}

void
task_unfreeze(
    task_t task)
{
    task_lock(task);
    assert(task->may_assign == FALSE);
    task->may_assign = TRUE;
    if (task->assign_active == TRUE) {
        task->assign_active = FALSE;
        thread_wakeup((event_t)&task->assign_active);
    }
    task_unlock(task);

    return;
}
#endif /* MACH_HOST */

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size map_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
    task_t parent_task,
    vm_offset_t map_base,
    vm_size_t map_size,
    task_t *child_task)
{
    kern_return_t result;
    task_t new_task;
    vm_map_t old_map;

    /*
     * Create the task.
     */
    result = task_create_local(parent_task, FALSE, TRUE, &new_task);
    if (result != KERN_SUCCESS)
        return (result);

    /*
     * Task_create_local creates the task with a user-space map.
     * We attempt to replace the map and free it afterwards; else
     * task_deallocate will free it (can NOT set map to null before
     * task_deallocate, this impersonates a norma placeholder task).
     * _Mark the memory as pageable_ -- this is what we
     * want for images (like servers) loaded into the kernel.
     */
    if (map_size == 0) {
        vm_map_deallocate(new_task->map);
        new_task->map = kernel_map;
        *child_task = new_task;
    } else {
        old_map = new_task->map;
        if ((result = kmem_suballoc(kernel_map, &map_base,
                                    map_size, TRUE, FALSE,
                                    &new_task->map)) != KERN_SUCCESS) {
            /*
             * The new task was created with a ref count of 2 --
             * decrement by one to force task deletion.
             */
            printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
                   kernel_map, map_base, map_size);
            --new_task->ref_count;
            task_deallocate(new_task);
            return (result);
        }
        vm_map_deallocate(old_map);
        *child_task = new_task;
    }
    return (KERN_SUCCESS);
}
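
/*
 * A minimal usage sketch (illustrative only, not from the original
 * source): a kernel-resident task that shares the kernel map outright
 * can be created by passing a zero-sized map, in which case map_base
 * is ignored:
 *
 *      task_t ktask;
 *
 *      if (kernel_task_create(kernel_task, 0, 0, &ktask) == KERN_SUCCESS)
 *          ... ktask->map is now kernel_map ...
 */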
272 | ||
273 | kern_return_t | |
274 | task_create( | |
275 | task_t parent_task, | |
276 | ledger_port_array_t ledger_ports, | |
277 | mach_msg_type_number_t num_ledger_ports, | |
278 | boolean_t inherit_memory, | |
279 | task_t *child_task) /* OUT */ | |
280 | { | |
281 | if (parent_task == TASK_NULL) | |
282 | return(KERN_INVALID_ARGUMENT); | |
283 | ||
284 | return task_create_local( | |
285 | parent_task, inherit_memory, FALSE, child_task); | |
286 | } | |
287 | ||
288 | kern_return_t | |
289 | host_security_create_task_token( | |
290 | host_security_t host_security, | |
291 | task_t parent_task, | |
292 | security_token_t sec_token, | |
293 | host_priv_t host_priv, | |
294 | ledger_port_array_t ledger_ports, | |
295 | mach_msg_type_number_t num_ledger_ports, | |
296 | boolean_t inherit_memory, | |
297 | task_t *child_task) /* OUT */ | |
298 | { | |
299 | kern_return_t result; | |
300 | ||
301 | if (parent_task == TASK_NULL) | |
302 | return(KERN_INVALID_ARGUMENT); | |
303 | ||
304 | if (host_security == HOST_NULL) | |
305 | return(KERN_INVALID_SECURITY); | |
306 | ||
307 | result = task_create_local( | |
308 | parent_task, inherit_memory, FALSE, child_task); | |
309 | ||
310 | if (result != KERN_SUCCESS) | |
311 | return(result); | |
312 | ||
313 | result = host_security_set_task_token(host_security, | |
314 | *child_task, | |
315 | sec_token, | |
316 | host_priv); | |
317 | ||
318 | if (result != KERN_SUCCESS) | |
319 | return(result); | |
320 | ||
321 | return(result); | |
322 | } | |
323 | ||
324 | kern_return_t | |
325 | task_create_local( | |
326 | task_t parent_task, | |
327 | boolean_t inherit_memory, | |
328 | boolean_t kernel_loaded, | |
329 | task_t *child_task) /* OUT */ | |
330 | { | |
331 | task_t new_task; | |
332 | processor_set_t pset; | |
333 | ||
334 | new_task = (task_t) zalloc(task_zone); | |
335 | ||
336 | if (new_task == TASK_NULL) | |
337 | return(KERN_RESOURCE_SHORTAGE); | |
338 | ||
339 | /* one ref for just being alive; one for our caller */ | |
340 | new_task->ref_count = 2; | |
341 | ||
342 | if (inherit_memory) | |
343 | new_task->map = vm_map_fork(parent_task->map); | |
344 | else | |
345 | new_task->map = vm_map_create(pmap_create(0), | |
346 | round_page(VM_MIN_ADDRESS), | |
347 | trunc_page(VM_MAX_ADDRESS), TRUE); | |
348 | ||
349 | mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW); | |
350 | queue_init(&new_task->subsystem_list); | |
351 | queue_init(&new_task->thr_acts); | |
352 | new_task->suspend_count = 0; | |
353 | new_task->thr_act_count = 0; | |
354 | new_task->res_act_count = 0; | |
355 | new_task->active_act_count = 0; | |
356 | new_task->user_stop_count = 0; | |
357 | new_task->importance = 0; | |
358 | new_task->active = TRUE; | |
359 | new_task->kernel_loaded = kernel_loaded; | |
360 | new_task->user_data = 0; | |
361 | new_task->faults = 0; | |
362 | new_task->cow_faults = 0; | |
363 | new_task->pageins = 0; | |
364 | new_task->messages_sent = 0; | |
365 | new_task->messages_received = 0; | |
366 | new_task->syscalls_mach = 0; | |
367 | new_task->syscalls_unix=0; | |
368 | new_task->csw=0; | |
369 | ||
370 | #ifdef MACH_BSD | |
371 | new_task->bsd_info = 0; | |
372 | #endif /* MACH_BSD */ | |
373 | ||
374 | #if TASK_SWAPPER | |
375 | new_task->swap_state = TASK_SW_IN; | |
376 | new_task->swap_flags = 0; | |
377 | new_task->swap_ast_waiting = 0; | |
378 | new_task->swap_stamp = sched_tick; | |
379 | new_task->swap_rss = 0; | |
380 | new_task->swap_nswap = 0; | |
381 | #endif /* TASK_SWAPPER */ | |
382 | ||
383 | queue_init(&new_task->semaphore_list); | |
384 | queue_init(&new_task->lock_set_list); | |
385 | new_task->semaphores_owned = 0; | |
386 | new_task->lock_sets_owned = 0; | |
387 | ||
388 | #if MACH_HOST | |
389 | new_task->may_assign = TRUE; | |
390 | new_task->assign_active = FALSE; | |
391 | #endif /* MACH_HOST */ | |
392 | eml_task_reference(new_task, parent_task); | |
393 | ||
394 | ipc_task_init(new_task, parent_task); | |
395 | ||
396 | new_task->total_user_time.seconds = 0; | |
397 | new_task->total_user_time.microseconds = 0; | |
398 | new_task->total_system_time.seconds = 0; | |
399 | new_task->total_system_time.microseconds = 0; | |
400 | ||
401 | task_prof_init(new_task); | |
402 | ||
403 | if (parent_task != TASK_NULL) { | |
404 | #if MACH_HOST | |
405 | /* | |
406 | * Freeze the parent, so that parent_task->processor_set | |
407 | * cannot change. | |
408 | */ | |
409 | task_freeze(parent_task); | |
410 | #endif /* MACH_HOST */ | |
411 | pset = parent_task->processor_set; | |
412 | if (!pset->active) | |
413 | pset = &default_pset; | |
414 | ||
415 | new_task->policy = parent_task->policy; | |
416 | ||
417 | new_task->priority = parent_task->priority; | |
418 | new_task->max_priority = parent_task->max_priority; | |
419 | ||
420 | new_task->sec_token = parent_task->sec_token; | |
421 | ||
422 | shared_region_mapping_ref(parent_task->system_shared_region); | |
423 | new_task->system_shared_region = parent_task->system_shared_region; | |
424 | ||
425 | new_task->wired_ledger_port = ledger_copy( | |
426 | convert_port_to_ledger(parent_task->wired_ledger_port)); | |
427 | new_task->paged_ledger_port = ledger_copy( | |
428 | convert_port_to_ledger(parent_task->paged_ledger_port)); | |
429 | } | |
430 | else { | |
431 | pset = &default_pset; | |
432 | ||
433 | if (kernel_task == TASK_NULL) { | |
434 | new_task->policy = POLICY_RR; | |
435 | ||
436 | new_task->priority = MINPRI_KERNBAND; | |
437 | new_task->max_priority = MAXPRI_KERNBAND; | |
438 | } | |
439 | else { | |
440 | new_task->policy = POLICY_TIMESHARE; | |
441 | ||
442 | new_task->priority = BASEPRI_DEFAULT; | |
443 | new_task->max_priority = MAXPRI_HIGHBAND; | |
444 | } | |
445 | ||
446 | new_task->sec_token = KERNEL_SECURITY_TOKEN; | |
447 | new_task->wired_ledger_port = ledger_copy(root_wired_ledger); | |
448 | new_task->paged_ledger_port = ledger_copy(root_paged_ledger); | |
449 | } | |
450 | ||
451 | pset_lock(pset); | |
452 | pset_add_task(pset, new_task); | |
453 | pset_unlock(pset); | |
454 | #if MACH_HOST | |
455 | if (parent_task != TASK_NULL) | |
456 | task_unfreeze(parent_task); | |
457 | #endif /* MACH_HOST */ | |
458 | ||
459 | #if FAST_TAS | |
460 | if (inherit_memory) { | |
461 | new_task->fast_tas_base = parent_task->fast_tas_base; | |
462 | new_task->fast_tas_end = parent_task->fast_tas_end; | |
463 | } else { | |
464 | new_task->fast_tas_base = (vm_offset_t)0; | |
465 | new_task->fast_tas_end = (vm_offset_t)0; | |
466 | } | |
467 | #endif /* FAST_TAS */ | |
468 | ||
469 | ipc_task_enable(new_task); | |
470 | ||
471 | #if TASK_SWAPPER | |
472 | task_swapout_eligible(new_task); | |
473 | #endif /* TASK_SWAPPER */ | |
474 | ||
475 | #if MACH_ASSERT | |
476 | if (watchacts & WA_TASK) | |
477 | printf("*** task_create_local(par=%x inh=%x) == 0x%x\n", | |
478 | parent_task, inherit_memory, new_task); | |
479 | #endif /* MACH_ASSERT */ | |
480 | ||
481 | *child_task = new_task; | |
482 | return(KERN_SUCCESS); | |
483 | } | |
484 | ||
485 | /* | |
486 | * task_free: | |
487 | * | |
488 | * Called by task_deallocate when the task's reference count drops to zero. | |
489 | * Task is locked. | |
490 | */ | |
491 | void | |
492 | task_free( | |
493 | task_t task) | |
494 | { | |
495 | processor_set_t pset; | |
496 | ||
497 | #if MACH_ASSERT | |
498 | assert(task != 0); | |
499 | if (watchacts & (WA_EXIT|WA_TASK)) | |
500 | printf("task_free(%x(%d)) map ref %d\n", task, task->ref_count, | |
501 | task->map->ref_count); | |
502 | #endif /* MACH_ASSERT */ | |
503 | ||
504 | #if TASK_SWAPPER | |
505 | /* task_terminate guarantees that this task is off the list */ | |
506 | assert((task->swap_state & TASK_SW_ELIGIBLE) == 0); | |
507 | #endif /* TASK_SWAPPER */ | |
508 | ||
509 | eml_task_deallocate(task); | |
510 | ||
511 | /* | |
512 | * Temporarily restore the reference we dropped above, then | |
513 | * freeze the task so that the task->processor_set field | |
514 | * cannot change. In the !MACH_HOST case, the logic can be | |
515 | * simplified, since the default_pset is the only pset. | |
516 | */ | |
517 | ++task->ref_count; | |
518 | task_unlock(task); | |
519 | #if MACH_HOST | |
520 | task_freeze(task); | |
521 | #endif /* MACH_HOST */ | |
522 | ||
523 | pset = task->processor_set; | |
524 | pset_lock(pset); | |
525 | task_lock(task); | |
526 | if (--task->ref_count > 0) { | |
527 | /* | |
528 | * A new reference appeared (probably from the pset). | |
529 | * Back out. Must unfreeze inline since we'already | |
530 | * dropped our reference. | |
531 | */ | |
532 | #if MACH_HOST | |
533 | assert(task->may_assign == FALSE); | |
534 | task->may_assign = TRUE; | |
535 | if (task->assign_active == TRUE) { | |
536 | task->assign_active = FALSE; | |
537 | thread_wakeup((event_t)&task->assign_active); | |
538 | } | |
539 | #endif /* MACH_HOST */ | |
540 | task_unlock(task); | |
541 | pset_unlock(pset); | |
542 | return; | |
543 | } | |
544 | pset_remove_task(pset,task); | |
545 | task_unlock(task); | |
546 | pset_unlock(pset); | |
547 | pset_deallocate(pset); | |
548 | ||
549 | ipc_task_terminate(task); | |
550 | shared_region_mapping_dealloc(task->system_shared_region); | |
551 | ||
552 | if (task->kernel_loaded) | |
553 | vm_map_remove(kernel_map, task->map->min_offset, | |
554 | task->map->max_offset, VM_MAP_NO_FLAGS); | |
555 | vm_map_deallocate(task->map); | |
556 | is_release(task->itk_space); | |
557 | task_prof_deallocate(task); | |
558 | zfree(task_zone, (vm_offset_t) task); | |
559 | } | |
560 | ||
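/*
 * task_deallocate:
 *
 * Drop one reference on the task.  When the count reaches zero,
 * task_free reclaims it (and unlocks the task).
 */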
void
task_deallocate(
    task_t task)
{
    if (task != TASK_NULL) {
        int c;

        task_lock(task);
        c = --task->ref_count;
        if (c == 0)
            task_free(task);    /* unlocks task */
        else
            task_unlock(task);
    }
}

void
task_reference(
    task_t task)
{
    if (task != TASK_NULL) {
        task_lock(task);
        task->ref_count++;
        task_unlock(task);
    }
}

boolean_t
task_reference_try(
    task_t task)
{
    if (task != TASK_NULL) {
        if (task_lock_try(task)) {
            task->ref_count++;
            task_unlock(task);
            return TRUE;
        }
    }
    return FALSE;
}

/*
 * task_terminate:
 *
 * Terminate the specified task.  See comments on thread_terminate
 * (kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
    task_t task)
{
    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);
    if (task->bsd_info)
        return (KERN_FAILURE);
    return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
    task_t task)
{
    thread_act_t thr_act, cur_thr_act;
    task_t cur_task;

    assert(task != kernel_task);

    cur_thr_act = current_act();
    cur_task = cur_thr_act->task;

#if TASK_SWAPPER
    /*
     * If the task is not resident (swapped out, or being swapped
     * out), we want to bring it back in (this can block).
     * NOTE: The only way that this can happen in the current
     * system is if the task is swapped while it has a thread
     * in exit(), and the thread does not hit a clean point
     * to swap itself before getting here.
     * Terminating other tasks is another way into this code, but
     * it is not yet fully supported.
     * The task_swapin is unconditional.  It used to be done
     * only if the task is not resident.  Swapping in a
     * resident task will prevent it from being swapped out
     * while it terminates.
     */
    task_swapin(task, TRUE);    /* TRUE means make it unswappable */
#endif /* TASK_SWAPPER */

    /*
     * Get the task locked and make sure that we are not racing
     * with someone else trying to terminate us.
     */
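    /*
     * When two tasks are involved, take the locks in address order
     * so that two concurrent terminations cannot deadlock against
     * each other.
     */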
    if (task == cur_task) {
        task_lock(task);
    } else if (task < cur_task) {
        task_lock(task);
        task_lock(cur_task);
    } else {
        task_lock(cur_task);
        task_lock(task);
    }

    if (!task->active || !cur_thr_act->active) {
        /*
         * The task or the current act is already being terminated.
         * Just return an error.  If we are dying, this will
         * just get us to our AST special handler and that
         * will get us to finalize the termination of ourselves.
         */
        task_unlock(task);
        if (cur_task != task)
            task_unlock(cur_task);
        return (KERN_FAILURE);
    }
    if (cur_task != task)
        task_unlock(cur_task);

    /*
     * Indicate that we want all the threads to stop executing
     * at user space by holding the task (we would have held
     * each thread independently in thread_terminate_internal -
     * but this way we may be more likely to already find it
     * held there).  Mark the task inactive, and prevent
     * further task operations via the task port.
     */
    task_hold_locked(task);
    task->active = FALSE;
    ipc_task_disable(task);

    /*
     * Terminate each activation in the task.
     *
     * Each terminated activation will run its special handler
     * when its current kernel context is unwound.  That will
     * clean up most of the thread resources.  Then it will be
     * handed over to the reaper, who will finally remove the
     * thread from the task list and free the structures.
     *
     * We can't terminate the current activation yet, because
     * it has to wait for the others in an interruptible state.
     * We may also block interruptibly during the rest of the
     * cleanup.  Wait until the very last to terminate ourselves.
     *
     * But if we have virtual machine state, we need to clean
     * that up now, because it may be holding wirings in the task's
     * map that would get stuck in the vm_map_remove() below.
     */
    queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
        if (thr_act != cur_thr_act)
            thread_terminate_internal(thr_act);
        else
            act_virtual_machine_destroy(thr_act);
    }
    task_unlock(task);

    /*
     * Destroy all synchronizers owned by the task.
     */
    task_synchronizer_destroy_all(task);

    /*
     * Deallocate all subsystems owned by the task.
     */
    task_subsystem_destroy_all(task);

    /*
     * Destroy the IPC space, leaving just a reference for it.
     */
    if (!task->kernel_loaded)
        ipc_space_destroy(task->itk_space);

    /*
     * If the current thread is a member of the task
     * being terminated, then the last reference to
     * the task will not be dropped until the thread
     * is finally reaped.  To avoid incurring the
     * expense of removing the address space regions
     * at reap time, we do it explicitly here.
     */
    (void) vm_map_remove(task->map,
                         task->map->min_offset,
                         task->map->max_offset, VM_MAP_NO_FLAGS);

    /*
     * Finally, mark ourselves for termination and then
     * deallocate the task's reference to itself.
     */
    if (task == cur_task)
        thread_terminate(cur_thr_act);
    task_deallocate(task);

    return (KERN_SUCCESS);
}
755 | ||
756 | /* | |
757 | * task_halt - Shut the current task down (except for the current thread) in | |
758 | * preparation for dramatic changes to the task (probably exec). | |
759 | * We hold the task, terminate all other threads in the task and | |
760 | * wait for them to terminate, clean up the portspace, and when | |
761 | * all done, let the current thread go. | |
762 | */ | |
763 | kern_return_t | |
764 | task_halt( | |
765 | task_t task) | |
766 | { | |
767 | thread_act_t thr_act, cur_thr_act; | |
768 | task_t cur_task; | |
769 | ||
770 | assert(task != kernel_task); | |
771 | ||
772 | cur_thr_act = current_act(); | |
773 | cur_task = cur_thr_act->task; | |
774 | ||
775 | if (task != cur_task) { | |
776 | return(KERN_INVALID_ARGUMENT); | |
777 | } | |
778 | ||
779 | #if TASK_SWAPPER | |
780 | /* | |
781 | * If task is not resident (swapped out, or being swapped | |
782 | * out), we want to bring it back in and make it unswappable. | |
783 | * This can block, so do it early. | |
784 | */ | |
785 | task_swapin(task, TRUE); /* TRUE means make it unswappable */ | |
786 | #endif /* TASK_SWAPPER */ | |
787 | ||
788 | task_lock(task); | |
789 | ||
790 | if (!task->active || !cur_thr_act->active) { | |
791 | /* | |
792 | * Task or current thread is already being terminated. | |
793 | * Hurry up and return out of the current kernel context | |
794 | * so that we run our AST special handler to terminate | |
795 | * ourselves. | |
796 | */ | |
797 | task_unlock(task); | |
798 | return(KERN_FAILURE); | |
799 | } | |
800 | ||
801 | if (task->thr_act_count > 1) { | |
802 | /* | |
803 | * Mark all the threads to keep them from starting any more | |
804 | * user-level execution. The thread_terminate_internal code | |
805 | * would do this on a thread by thread basis anyway, but this | |
806 | * gives us a better chance of not having to wait there. | |
807 | */ | |
808 | task_hold_locked(task); | |
809 | ||
810 | /* | |
811 | * Terminate all the other activations in the task. | |
812 | * | |
813 | * Each terminated activation will run it's special handler | |
814 | * when its current kernel context is unwound. That will | |
815 | * clean up most of the thread resources. Then it will be | |
816 | * handed over to the reaper, who will finally remove the | |
817 | * thread from the task list and free the structures. | |
818 | * | |
819 | * If the current thread has any virtual machine state | |
820 | * associated with it, clean that up now before we try | |
821 | * to clean up the task VM and port spaces. | |
822 | */ | |
823 | queue_iterate(&task->thr_acts, thr_act, thread_act_t,thr_acts) { | |
824 | if (thr_act != cur_thr_act) | |
825 | thread_terminate_internal(thr_act); | |
826 | else | |
827 | act_virtual_machine_destroy(thr_act); | |
828 | } | |
829 | task_release_locked(task); | |
830 | } | |
831 | task_unlock(task); | |
832 | ||
833 | /* | |
834 | * Destroy all synchronizers owned by the task. | |
835 | */ | |
836 | task_synchronizer_destroy_all(task); | |
837 | ||
838 | /* | |
839 | * Deallocate all subsystems owned by the task. | |
840 | */ | |
841 | task_subsystem_destroy_all(task); | |
842 | ||
843 | /* | |
844 | * Destroy the IPC space, leaving just a reference for it. | |
845 | */ | |
846 | #if 0 | |
847 | if (!task->kernel_loaded) | |
848 | ipc_space_clean(task->itk_space); | |
849 | #endif | |
850 | ||
851 | /* | |
852 | * Clean out the address space, as we are going to be | |
853 | * getting a new one. | |
854 | */ | |
855 | (void) vm_map_remove(task->map, | |
856 | task->map->min_offset, | |
857 | task->map->max_offset, VM_MAP_NO_FLAGS); | |
858 | ||
859 | return KERN_SUCCESS; | |
860 | } | |
861 | ||
862 | /* | |
863 | * task_hold_locked: | |
864 | * | |
865 | * Suspend execution of the specified task. | |
866 | * This is a recursive-style suspension of the task, a count of | |
867 | * suspends is maintained. | |
868 | * | |
869 | * CONDITIONS: the task is locked and active. | |
870 | */ | |
871 | void | |
872 | task_hold_locked( | |
873 | register task_t task) | |
874 | { | |
875 | register thread_act_t thr_act; | |
876 | ||
877 | assert(task->active); | |
878 | ||
879 | task->suspend_count++; | |
880 | ||
881 | /* | |
882 | * Iterate through all the thread_act's and hold them. | |
883 | */ | |
884 | queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { | |
885 | act_lock_thread(thr_act); | |
886 | thread_hold(thr_act); | |
887 | act_unlock_thread(thr_act); | |
888 | } | |
889 | } | |
890 | ||
891 | /* | |
892 | * task_hold: | |
893 | * | |
894 | * Same as the internal routine above, except that is must lock | |
895 | * and verify that the task is active. This differs from task_suspend | |
896 | * in that it places a kernel hold on the task rather than just a | |
897 | * user-level hold. This keeps users from over resuming and setting | |
898 | * it running out from under the kernel. | |
899 | * | |
900 | * CONDITIONS: the caller holds a reference on the task | |
901 | */ | |
902 | kern_return_t | |
903 | task_hold(task_t task) | |
904 | { | |
905 | kern_return_t kret; | |
906 | ||
907 | if (task == TASK_NULL) | |
908 | return (KERN_INVALID_ARGUMENT); | |
909 | task_lock(task); | |
910 | if (!task->active) { | |
911 | task_unlock(task); | |
912 | return (KERN_FAILURE); | |
913 | } | |
914 | task_hold_locked(task); | |
915 | task_unlock(task); | |
916 | ||
917 | return(KERN_SUCCESS); | |
918 | } | |
919 | ||
920 | /* | |
921 | * Routine: task_wait_locked | |
922 | * Wait for all threads in task to stop. | |
923 | * | |
924 | * Conditions: | |
925 | * Called with task locked, active, and held. | |
926 | */ | |
927 | void | |
928 | task_wait_locked( | |
929 | register task_t task) | |
930 | { | |
931 | register thread_act_t thr_act, cur_thr_act; | |
932 | ||
933 | assert(task->active); | |
934 | assert(task->suspend_count > 0); | |
935 | ||
936 | cur_thr_act = current_act(); | |
937 | /* | |
938 | * Iterate through all the thread's and wait for them to | |
939 | * stop. Do not wait for the current thread if it is within | |
940 | * the task. | |
941 | */ | |
942 | queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { | |
943 | if (thr_act != cur_thr_act) { | |
944 | thread_shuttle_t thr_shuttle; | |
945 | ||
946 | thr_shuttle = act_lock_thread(thr_act); | |
947 | thread_wait(thr_shuttle); | |
948 | act_unlock_thread(thr_act); | |
949 | } | |
950 | } | |
951 | } | |
952 | ||
953 | /* | |
954 | * task_release_locked: | |
955 | * | |
956 | * Release a kernel hold on a task. | |
957 | * | |
958 | * CONDITIONS: the task is locked and active | |
959 | */ | |
960 | void | |
961 | task_release_locked( | |
962 | register task_t task) | |
963 | { | |
964 | register thread_act_t thr_act; | |
965 | ||
966 | assert(task->active); | |
967 | ||
968 | task->suspend_count--; | |
969 | assert(task->suspend_count >= 0); | |
970 | ||
971 | /* | |
972 | * Iterate through all the thread_act's and hold them. | |
973 | * Do not hold the current thread_act if it is within the | |
974 | * task. | |
975 | */ | |
976 | queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) { | |
977 | act_lock_thread(thr_act); | |
978 | thread_release(thr_act); | |
979 | act_unlock_thread(thr_act); | |
980 | } | |
981 | } | |
982 | ||
983 | /* | |
984 | * task_release: | |
985 | * | |
986 | * Same as the internal routine above, except that it must lock | |
987 | * and verify that the task is active. | |
988 | * | |
989 | * CONDITIONS: The caller holds a reference to the task | |
990 | */ | |
991 | kern_return_t | |
992 | task_release(task_t task) | |
993 | { | |
994 | kern_return_t kret; | |
995 | ||
996 | if (task == TASK_NULL) | |
997 | return (KERN_INVALID_ARGUMENT); | |
998 | task_lock(task); | |
999 | if (!task->active) { | |
1000 | task_unlock(task); | |
1001 | return (KERN_FAILURE); | |
1002 | } | |
1003 | task_release_locked(task); | |
1004 | task_unlock(task); | |
1005 | ||
1006 | return(KERN_SUCCESS); | |
1007 | } | |
1008 | ||
1009 | kern_return_t | |
1010 | task_threads( | |
1011 | task_t task, | |
1012 | thread_act_array_t *thr_act_list, | |
1013 | mach_msg_type_number_t *count) | |
1014 | { | |
1015 | unsigned int actual; /* this many thr_acts */ | |
1016 | thread_act_t thr_act; | |
1017 | thread_act_t *thr_acts; | |
1018 | thread_t thread; | |
1019 | int i, j; | |
1020 | ||
1021 | vm_size_t size, size_needed; | |
1022 | vm_offset_t addr; | |
1023 | ||
1024 | if (task == TASK_NULL) | |
1025 | return KERN_INVALID_ARGUMENT; | |
1026 | ||
1027 | size = 0; addr = 0; | |
1028 | ||
    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);
            if (size != 0)
                kfree(addr, size);
            return KERN_FAILURE;
        }

        actual = task->thr_act_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof(mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return KERN_RESOURCE_SHORTAGE;
    }

    /* OK, have memory and the task is locked & active */
    thr_acts = (thread_act_t *) addr;

    for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
         i < actual;
         i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
        act_lock(thr_act);
        if (thr_act->ref_count > 0) {
            act_locked_act_reference(thr_act);
            thr_acts[j++] = thr_act;
        }
        act_unlock(thr_act);
    }
    assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));

    actual = j;
    size_needed = actual * sizeof(mach_port_t);

    /* can unlock task now that we've got the thr_act refs */
    task_unlock(task);

    if (actual == 0) {
        /* no thr_acts, so return null pointer and deallocate memory */

        *thr_act_list = 0;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    } else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            vm_offset_t newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; i++)
                    act_deallocate(thr_acts[i]);
                kfree(addr, size);
                return KERN_RESOURCE_SHORTAGE;
            }

            bcopy((char *) addr, (char *) newaddr, size_needed);
            kfree(addr, size);
            thr_acts = (thread_act_t *) newaddr;
        }

        *thr_act_list = thr_acts;
        *count = actual;

        /* do the conversion that Mig should handle */

        for (i = 0; i < actual; i++)
            ((ipc_port_t *) thr_acts)[i] =
                    convert_act_to_port(thr_acts[i]);
    }

    return KERN_SUCCESS;
}
1119 | ||
1120 | /* | |
1121 | * Routine: task_suspend | |
1122 | * Implement a user-level suspension on a task. | |
1123 | * | |
1124 | * Conditions: | |
1125 | * The caller holds a reference to the task | |
1126 | */ | |
1127 | kern_return_t | |
1128 | task_suspend( | |
1129 | register task_t task) | |
1130 | { | |
1131 | if (task == TASK_NULL) | |
1132 | return (KERN_INVALID_ARGUMENT); | |
1133 | ||
1134 | task_lock(task); | |
1135 | if (!task->active) { | |
1136 | task_unlock(task); | |
1137 | return (KERN_FAILURE); | |
1138 | } | |
1139 | if ((task->user_stop_count)++ > 0) { | |
1140 | /* | |
1141 | * If the stop count was positive, the task is | |
1142 | * already stopped and we can exit. | |
1143 | */ | |
1144 | task_unlock(task); | |
1145 | return (KERN_SUCCESS); | |
1146 | } | |
1147 | ||
1148 | /* | |
1149 | * Put a kernel-level hold on the threads in the task (all | |
1150 | * user-level task suspensions added together represent a | |
1151 | * single kernel-level hold). We then wait for the threads | |
1152 | * to stop executing user code. | |
1153 | */ | |
1154 | task_hold_locked(task); | |
1155 | task_wait_locked(task); | |
1156 | task_unlock(task); | |
1157 | return (KERN_SUCCESS); | |
1158 | } | |
1159 | ||
1160 | /* | |
1161 | * Routine: task_resume | |
1162 | * Release a kernel hold on a task. | |
1163 | * | |
1164 | * Conditions: | |
1165 | * The caller holds a reference to the task | |
1166 | */ | |
1167 | kern_return_t | |
1168 | task_resume(register task_t task) | |
1169 | { | |
1170 | register boolean_t release; | |
1171 | ||
1172 | if (task == TASK_NULL) | |
1173 | return(KERN_INVALID_ARGUMENT); | |
1174 | ||
1175 | release = FALSE; | |
1176 | task_lock(task); | |
1177 | if (!task->active) { | |
1178 | task_unlock(task); | |
1179 | return(KERN_FAILURE); | |
1180 | } | |
1181 | if (task->user_stop_count > 0) { | |
1182 | if (--(task->user_stop_count) == 0) | |
1183 | release = TRUE; | |
1184 | } | |
1185 | else { | |
1186 | task_unlock(task); | |
1187 | return(KERN_FAILURE); | |
1188 | } | |
1189 | ||
1190 | /* | |
1191 | * Release the task if necessary. | |
1192 | */ | |
1193 | if (release) | |
1194 | task_release_locked(task); | |
1195 | ||
1196 | task_unlock(task); | |
1197 | return(KERN_SUCCESS); | |
1198 | } | |
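
/*
 * A minimal usage sketch (illustrative only, not from the original
 * source): task_suspend and task_resume nest via user_stop_count, so
 * every successful suspend must be balanced by a resume before the
 * task runs again:
 *
 *      (void) task_suspend(some_task);  stop count 0 -> 1, threads held
 *      (void) task_suspend(some_task);  stop count 1 -> 2, already held
 *      (void) task_resume(some_task);   stop count 2 -> 1, still held
 *      (void) task_resume(some_task);   stop count 1 -> 0, threads released
 */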
1199 | ||
1200 | kern_return_t | |
1201 | host_security_set_task_token( | |
1202 | host_security_t host_security, | |
1203 | task_t task, | |
1204 | security_token_t sec_token, | |
1205 | host_priv_t host_priv) | |
1206 | { | |
1207 | kern_return_t kr; | |
1208 | ||
1209 | if (task == TASK_NULL) | |
1210 | return(KERN_INVALID_ARGUMENT); | |
1211 | ||
1212 | if (host_security == HOST_NULL) | |
1213 | return(KERN_INVALID_SECURITY); | |
1214 | ||
1215 | task_lock(task); | |
1216 | task->sec_token = sec_token; | |
1217 | task_unlock(task); | |
1218 | ||
1219 | if (host_priv != HOST_PRIV_NULL) { | |
1220 | kr = task_set_special_port(task, | |
1221 | TASK_HOST_PORT, | |
1222 | ipc_port_make_send(realhost.host_priv_self)); | |
1223 | } else { | |
1224 | kr = task_set_special_port(task, | |
1225 | TASK_HOST_PORT, | |
1226 | ipc_port_make_send(realhost.host_self)); | |
1227 | } | |
1228 | return(kr); | |
1229 | } | |
1230 | ||
1231 | /* | |
1232 | * Utility routine to set a ledger | |
1233 | */ | |
1234 | kern_return_t | |
1235 | task_set_ledger( | |
1236 | task_t task, | |
1237 | ledger_t wired, | |
1238 | ledger_t paged) | |
1239 | { | |
1240 | if (task == TASK_NULL) | |
1241 | return(KERN_INVALID_ARGUMENT); | |
1242 | ||
1243 | task_lock(task); | |
1244 | if (wired) { | |
1245 | ipc_port_release_send(task->wired_ledger_port); | |
1246 | task->wired_ledger_port = ledger_copy(wired); | |
1247 | } | |
1248 | if (paged) { | |
1249 | ipc_port_release_send(task->paged_ledger_port); | |
1250 | task->paged_ledger_port = ledger_copy(paged); | |
1251 | } | |
1252 | task_unlock(task); | |
1253 | ||
1254 | return(KERN_SUCCESS); | |
1255 | } | |
1256 | ||
1257 | /* | |
1258 | * This routine was added, pretty much exclusively, for registering the | |
1259 | * RPC glue vector for in-kernel short circuited tasks. Rather than | |
1260 | * removing it completely, I have only disabled that feature (which was | |
1261 | * the only feature at the time). It just appears that we are going to | |
1262 | * want to add some user data to tasks in the future (i.e. bsd info, | |
1263 | * task names, etc...), so I left it in the formal task interface. | |
1264 | */ | |
1265 | kern_return_t | |
1266 | task_set_info( | |
1267 | task_t task, | |
1268 | task_flavor_t flavor, | |
1269 | task_info_t task_info_in, /* pointer to IN array */ | |
1270 | mach_msg_type_number_t task_info_count) | |
1271 | { | |
1272 | vm_map_t map; | |
1273 | ||
1274 | if (task == TASK_NULL) | |
1275 | return(KERN_INVALID_ARGUMENT); | |
1276 | ||
1277 | switch (flavor) { | |
1278 | default: | |
1279 | return (KERN_INVALID_ARGUMENT); | |
1280 | } | |
1281 | return (KERN_SUCCESS); | |
1282 | } | |
1283 | ||
1284 | kern_return_t | |
1285 | task_info( | |
1286 | task_t task, | |
1287 | task_flavor_t flavor, | |
1288 | task_info_t task_info_out, | |
1289 | mach_msg_type_number_t *task_info_count) | |
1290 | { | |
1291 | thread_t thread; | |
1292 | vm_map_t map; | |
1293 | ||
1294 | if (task == TASK_NULL) | |
1295 | return(KERN_INVALID_ARGUMENT); | |
1296 | ||
1297 | switch (flavor) { | |
1298 | ||
1299 | case TASK_BASIC_INFO: | |
1300 | { | |
1301 | register task_basic_info_t basic_info; | |
1302 | ||
1303 | if (*task_info_count < TASK_BASIC_INFO_COUNT) { | |
1304 | return(KERN_INVALID_ARGUMENT); | |
1305 | } | |
1306 | ||
1307 | basic_info = (task_basic_info_t) task_info_out; | |
1308 | ||
1309 | map = (task == kernel_task) ? kernel_map : task->map; | |
1310 | ||
1311 | basic_info->virtual_size = map->size; | |
1312 | basic_info->resident_size = pmap_resident_count(map->pmap) | |
1313 | * PAGE_SIZE; | |
1314 | ||
1315 | task_lock(task); | |
1316 | basic_info->policy = task->policy; | |
1317 | basic_info->suspend_count = task->user_stop_count; | |
1318 | basic_info->user_time.seconds | |
1319 | = task->total_user_time.seconds; | |
1320 | basic_info->user_time.microseconds | |
1321 | = task->total_user_time.microseconds; | |
1322 | basic_info->system_time.seconds | |
1323 | = task->total_system_time.seconds; | |
1324 | basic_info->system_time.microseconds | |
1325 | = task->total_system_time.microseconds; | |
1326 | task_unlock(task); | |
1327 | ||
1328 | *task_info_count = TASK_BASIC_INFO_COUNT; | |
1329 | break; | |
1330 | } | |
1331 | ||
1332 | case TASK_THREAD_TIMES_INFO: | |
1333 | { | |
1334 | register task_thread_times_info_t times_info; | |
1335 | register thread_t thread; | |
1336 | register thread_act_t thr_act; | |
1337 | ||
1338 | if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) { | |
1339 | return (KERN_INVALID_ARGUMENT); | |
1340 | } | |
1341 | ||
1342 | times_info = (task_thread_times_info_t) task_info_out; | |
1343 | times_info->user_time.seconds = 0; | |
1344 | times_info->user_time.microseconds = 0; | |
1345 | times_info->system_time.seconds = 0; | |
1346 | times_info->system_time.microseconds = 0; | |
1347 | ||
1348 | task_lock(task); | |
1349 | queue_iterate(&task->thr_acts, thr_act, | |
1350 | thread_act_t, thr_acts) | |
1351 | { | |
1352 | time_value_t user_time, system_time; | |
1353 | spl_t s; | |
1354 | ||
1355 | thread = act_lock_thread(thr_act); | |
1356 | ||
1357 | /* Skip empty threads and threads that have migrated | |
1358 | * into this task: | |
1359 | */ | |
1360 | if (!thread || thr_act->pool_port) { | |
1361 | act_unlock_thread(thr_act); | |
1362 | continue; | |
1363 | } | |
1364 | assert(thread); /* Must have thread, if no thread_pool*/ | |
1365 | s = splsched(); | |
1366 | thread_lock(thread); | |
1367 | ||
1368 | thread_read_times(thread, &user_time, &system_time); | |
1369 | ||
1370 | thread_unlock(thread); | |
1371 | splx(s); | |
1372 | act_unlock_thread(thr_act); | |
1373 | ||
1374 | time_value_add(×_info->user_time, &user_time); | |
1375 | time_value_add(×_info->system_time, &system_time); | |
1376 | } | |
1377 | task_unlock(task); | |
1378 | ||
1379 | *task_info_count = TASK_THREAD_TIMES_INFO_COUNT; | |
1380 | break; | |
1381 | } | |
1382 | ||
1383 | case TASK_SCHED_FIFO_INFO: | |
1384 | { | |
1385 | register policy_fifo_base_t fifo_base; | |
1386 | ||
1387 | if (*task_info_count < POLICY_FIFO_BASE_COUNT) | |
1388 | return(KERN_INVALID_ARGUMENT); | |
1389 | ||
1390 | fifo_base = (policy_fifo_base_t) task_info_out; | |
1391 | ||
1392 | task_lock(task); | |
1393 | if (task->policy != POLICY_FIFO) { | |
1394 | task_unlock(task); | |
1395 | return(KERN_INVALID_POLICY); | |
1396 | } | |
1397 | ||
1398 | fifo_base->base_priority = task->priority; | |
1399 | task_unlock(task); | |
1400 | ||
1401 | *task_info_count = POLICY_FIFO_BASE_COUNT; | |
1402 | break; | |
1403 | } | |
1404 | ||
1405 | case TASK_SCHED_RR_INFO: | |
1406 | { | |
1407 | register policy_rr_base_t rr_base; | |
1408 | ||
1409 | if (*task_info_count < POLICY_RR_BASE_COUNT) | |
1410 | return(KERN_INVALID_ARGUMENT); | |
1411 | ||
1412 | rr_base = (policy_rr_base_t) task_info_out; | |
1413 | ||
1414 | task_lock(task); | |
1415 | if (task->policy != POLICY_RR) { | |
1416 | task_unlock(task); | |
1417 | return(KERN_INVALID_POLICY); | |
1418 | } | |
1419 | ||
1420 | rr_base->base_priority = task->priority; | |
1421 | task_unlock(task); | |
1422 | ||
1423 | rr_base->quantum = (min_quantum * tick) / 1000; | |
1424 | ||
1425 | *task_info_count = POLICY_RR_BASE_COUNT; | |
1426 | break; | |
1427 | } | |
1428 | ||
1429 | case TASK_SCHED_TIMESHARE_INFO: | |
1430 | { | |
1431 | register policy_timeshare_base_t ts_base; | |
1432 | ||
1433 | if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) | |
1434 | return(KERN_INVALID_ARGUMENT); | |
1435 | ||
1436 | ts_base = (policy_timeshare_base_t) task_info_out; | |
1437 | ||
1438 | task_lock(task); | |
1439 | if (task->policy != POLICY_TIMESHARE) { | |
1440 | task_unlock(task); | |
1441 | return(KERN_INVALID_POLICY); | |
1442 | } | |
1443 | ||
1444 | ts_base->base_priority = task->priority; | |
1445 | task_unlock(task); | |
1446 | ||
1447 | *task_info_count = POLICY_TIMESHARE_BASE_COUNT; | |
1448 | break; | |
1449 | } | |
1450 | ||
1451 | case TASK_SECURITY_TOKEN: | |
1452 | { | |
1453 | register security_token_t *sec_token_p; | |
1454 | ||
1455 | if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) { | |
1456 | return(KERN_INVALID_ARGUMENT); | |
1457 | } | |
1458 | ||
1459 | sec_token_p = (security_token_t *) task_info_out; | |
1460 | ||
1461 | task_lock(task); | |
1462 | *sec_token_p = task->sec_token; | |
1463 | task_unlock(task); | |
1464 | ||
1465 | *task_info_count = TASK_SECURITY_TOKEN_COUNT; | |
1466 | break; | |
1467 | } | |
1468 | ||
1469 | case TASK_SCHED_INFO: | |
1470 | return(KERN_INVALID_ARGUMENT); | |
1471 | ||
1472 | case TASK_EVENTS_INFO: | |
1473 | { | |
1474 | register task_events_info_t events_info; | |
1475 | ||
1476 | if (*task_info_count < TASK_EVENTS_INFO_COUNT) { | |
1477 | return(KERN_INVALID_ARGUMENT); | |
1478 | } | |
1479 | ||
1480 | events_info = (task_events_info_t) task_info_out; | |
1481 | ||
1482 | task_lock(task); | |
1483 | events_info->faults = task->faults; | |
1484 | events_info->pageins = task->pageins; | |
1485 | events_info->cow_faults = task->cow_faults; | |
1486 | events_info->messages_sent = task->messages_sent; | |
1487 | events_info->messages_received = task->messages_received; | |
1488 | events_info->syscalls_mach = task->syscalls_mach; | |
1489 | events_info->syscalls_unix = task->syscalls_unix; | |
1490 | events_info->csw = task->csw; | |
1491 | task_unlock(task); | |
1492 | ||
1493 | *task_info_count = TASK_EVENTS_INFO_COUNT; | |
1494 | break; | |
1495 | } | |
1496 | ||
1497 | default: | |
1498 | return (KERN_INVALID_ARGUMENT); | |
1499 | } | |
1500 | ||
1501 | return(KERN_SUCCESS); | |
1502 | } | |
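
/*
 * A minimal caller-side sketch for the TASK_BASIC_INFO flavor
 * (illustrative only, not from the original source):
 *
 *      task_basic_info_data_t info;
 *      mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
 *
 *      if (task_info(task, TASK_BASIC_INFO,
 *                    (task_info_t) &info, &count) == KERN_SUCCESS)
 *          ... use info.virtual_size, info.resident_size, ...
 */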
1503 | ||
1504 | /* | |
1505 | * task_assign: | |
1506 | * | |
1507 | * Change the assigned processor set for the task | |
1508 | */ | |
1509 | kern_return_t | |
1510 | task_assign( | |
1511 | task_t task, | |
1512 | processor_set_t new_pset, | |
1513 | boolean_t assign_threads) | |
1514 | { | |
1515 | #ifdef lint | |
1516 | task++; new_pset++; assign_threads++; | |
1517 | #endif /* lint */ | |
1518 | return(KERN_FAILURE); | |
1519 | } | |
1520 | ||
1521 | /* | |
1522 | * task_assign_default: | |
1523 | * | |
1524 | * Version of task_assign to assign to default processor set. | |
1525 | */ | |
1526 | kern_return_t | |
1527 | task_assign_default( | |
1528 | task_t task, | |
1529 | boolean_t assign_threads) | |
1530 | { | |
1531 | return (task_assign(task, &default_pset, assign_threads)); | |
1532 | } | |
1533 | ||
1534 | /* | |
1535 | * task_get_assignment | |
1536 | * | |
1537 | * Return name of processor set that task is assigned to. | |
1538 | */ | |
1539 | kern_return_t | |
1540 | task_get_assignment( | |
1541 | task_t task, | |
1542 | processor_set_t *pset) | |
1543 | { | |
1544 | if (!task->active) | |
1545 | return(KERN_FAILURE); | |
1546 | ||
1547 | *pset = task->processor_set; | |
1548 | pset_reference(*pset); | |
1549 | return(KERN_SUCCESS); | |
1550 | } | |
1551 | ||
1552 | ||
1553 | /* | |
1554 | * task_policy | |
1555 | * | |
1556 | * Set scheduling policy and parameters, both base and limit, for | |
1557 | * the given task. Policy must be a policy which is enabled for the | |
1558 | * processor set. Change contained threads if requested. | |
1559 | */ | |
1560 | kern_return_t | |
1561 | task_policy( | |
1562 | task_t task, | |
1563 | policy_t policy_id, | |
1564 | policy_base_t base, | |
1565 | mach_msg_type_number_t count, | |
1566 | boolean_t set_limit, | |
1567 | boolean_t change) | |
1568 | { | |
1569 | return(KERN_FAILURE); | |
1570 | } | |
1571 | ||
1572 | /* | |
1573 | * task_set_policy | |
1574 | * | |
1575 | * Set scheduling policy and parameters, both base and limit, for | |
1576 | * the given task. Policy can be any policy implemented by the | |
1577 | * processor set, whether enabled or not. Change contained threads | |
1578 | * if requested. | |
1579 | */ | |
1580 | kern_return_t | |
1581 | task_set_policy( | |
1582 | task_t task, | |
1583 | processor_set_t pset, | |
1584 | policy_t policy_id, | |
1585 | policy_base_t base, | |
1586 | mach_msg_type_number_t base_count, | |
1587 | policy_limit_t limit, | |
1588 | mach_msg_type_number_t limit_count, | |
1589 | boolean_t change) | |
1590 | { | |
1591 | return(KERN_FAILURE); | |
1592 | } | |
1593 | ||
1594 | /* | |
1595 | * task_collect_scan: | |
1596 | * | |
1597 | * Attempt to free resources owned by tasks. | |
1598 | */ | |
1599 | ||
1600 | void | |
1601 | task_collect_scan(void) | |
1602 | { | |
1603 | register task_t task, prev_task; | |
1604 | processor_set_t pset = &default_pset; | |
1605 | ||
1606 | prev_task = TASK_NULL; | |
1607 | ||
    pset_lock(pset);
    pset->ref_count++;
    task = (task_t) queue_first(&pset->tasks);
    while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
        task_reference(task);
        pset_unlock(pset);

        pmap_collect(task->map->pmap);

        if (prev_task != TASK_NULL)
            task_deallocate(prev_task);
        prev_task = task;

        pset_lock(pset);
        task = (task_t) queue_next(&task->pset_tasks);
    }
    pset_unlock(pset);

    pset_deallocate(pset);

    if (prev_task != TASK_NULL)
        task_deallocate(prev_task);
}

boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;     /* in ticks */

/*
 * consider_task_collect:
 *
 * Called by the pageout daemon when the system needs more free pages.
 */
void
consider_task_collect(void)
{
    /*
     * By default, don't attempt task collection more frequently
     * than once per second.
     */

    if (task_collect_max_rate == 0)
        task_collect_max_rate = (2 << SCHED_TICK_SHIFT);

    if (task_collect_allowed &&
        (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
        task_collect_last_tick = sched_tick;
        task_collect_scan();
    }
}

kern_return_t
task_set_ras_pc(
    task_t task,
    vm_offset_t pc,
    vm_offset_t endpc)
{
#if FAST_TAS
    extern int fast_tas_debug;

    if (fast_tas_debug) {
        printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
               task, pc, endpc);
    }
    task_lock(task);
    task->fast_tas_base = pc;
    task->fast_tas_end = endpc;
    task_unlock(task);
    return KERN_SUCCESS;

#else /* FAST_TAS */
#ifdef lint
    task++;
    pc++;
    endpc++;
#endif /* lint */

    return KERN_FAILURE;

#endif /* FAST_TAS */
}
1690 | ||
1691 | void | |
1692 | task_synchronizer_destroy_all(task_t task) | |
1693 | { | |
1694 | semaphore_t semaphore; | |
1695 | lock_set_t lock_set; | |
1696 | ||
1697 | /* | |
1698 | * Destroy owned semaphores | |
1699 | */ | |
1700 | ||
1701 | while (!queue_empty(&task->semaphore_list)) { | |
1702 | semaphore = (semaphore_t) queue_first(&task->semaphore_list); | |
1703 | (void) semaphore_destroy(task, semaphore); | |
1704 | } | |
1705 | ||
1706 | /* | |
1707 | * Destroy owned lock sets | |
1708 | */ | |
1709 | ||
1710 | while (!queue_empty(&task->lock_set_list)) { | |
1711 | lock_set = (lock_set_t) queue_first(&task->lock_set_list); | |
1712 | (void) lock_set_destroy(task, lock_set); | |
1713 | } | |
1714 | } | |
1715 | ||
1716 | void | |
1717 | task_subsystem_destroy_all(task_t task) | |
1718 | { | |
1719 | subsystem_t subsystem; | |
1720 | ||
1721 | /* | |
1722 | * Destroy owned subsystems | |
1723 | */ | |
1724 | ||
1725 | while (!queue_empty(&task->subsystem_list)) { | |
1726 | subsystem = (subsystem_t) queue_first(&task->subsystem_list); | |
1727 | subsystem_deallocate(subsystem); | |
1728 | } | |
1729 | } | |
1730 | ||
1731 | /* | |
1732 | * task_set_port_space: | |
1733 | * | |
1734 | * Set port name space of task to specified size. | |
1735 | */ | |
1736 | ||
1737 | kern_return_t | |
1738 | task_set_port_space( | |
1739 | task_t task, | |
1740 | int table_entries) | |
1741 | { | |
1742 | kern_return_t kr; | |
1743 | ||
1744 | is_write_lock(task->itk_space); | |
1745 | kr = ipc_entry_grow_table(task->itk_space, table_entries); | |
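    /*
     * Note: on failure, ipc_entry_grow_table is expected to have
     * unlocked the space itself, which is why the space is only
     * unlocked here on success.
     */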
    if (kr == KERN_SUCCESS)
        is_write_unlock(task->itk_space);
    return kr;
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
    if (t == kernel_task)
        return (TRUE);
    else
        return (t->kernel_loaded);
}

#undef current_task
task_t current_task()
{
    return (current_task_fast());
}