apple/xnu (xnu-124.13): osfmk/kern/task.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 * File: kern/task.c
52 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
53 * David Black
54 *
55 * Task management primitives implementation.
56 */
57 /*
58 * Copyright (c) 1993 The University of Utah and
59 * the Computer Systems Laboratory (CSL). All rights reserved.
60 *
61 * Permission to use, copy, modify and distribute this software and its
62 * documentation is hereby granted, provided that both the copyright
63 * notice and this permission notice appear in all copies of the
64 * software, derivative works or modified versions, and any portions
65 * thereof, and that both notices appear in supporting documentation.
66 *
67 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
68 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
69 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
70 *
71 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
72 * improvements that they make and grant CSL redistribution rights.
73 *
74 */
75
76 #include <mach_kdb.h>
77 #include <mach_host.h>
78 #include <mach_prof.h>
79 #include <fast_tas.h>
80 #include <task_swapper.h>
81 #include <platforms.h>
82
83 #include <mach/boolean.h>
84 #include <mach/machine/vm_types.h>
85 #include <mach/vm_param.h>
86 #include <mach/semaphore.h>
87 #include <mach/task_info.h>
88 #include <mach/task_special_ports.h>
89 #include <mach/mach_types.h>
90 #include <mach/machine/rpc.h>
91 #include <ipc/ipc_space.h>
92 #include <ipc/ipc_entry.h>
93 #include <kern/mach_param.h>
94 #include <kern/misc_protos.h>
95 #include <kern/task.h>
96 #include <kern/thread.h>
97 #include <kern/zalloc.h>
98 #include <kern/kalloc.h>
99 #include <kern/processor.h>
100 #include <kern/sched_prim.h> /* for thread_wakeup */
101 #include <kern/sf.h>
102 #include <kern/mk_sp.h> /*** ??? fix so this can be removed ***/
103 #include <kern/ipc_tt.h>
104 #include <kern/ledger.h>
105 #include <kern/host.h>
106 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
107 #include <kern/profile.h>
108 #include <kern/assert.h>
109 #include <kern/sync_lock.h>
110 #if MACH_KDB
111 #include <ddb/db_sym.h>
112 #endif /* MACH_KDB */
113
114 #if TASK_SWAPPER
115 #include <kern/task_swap.h>
116 #endif /* TASK_SWAPPER */
117
118 /*
119 * Exported interfaces
120 */
121
122 #include <mach/task_server.h>
123 #include <mach/mach_host_server.h>
124 #include <mach/host_security_server.h>
125
126 task_t kernel_task;
127 zone_t task_zone;
128
129 /* Forwards */
130
131 void task_hold_locked(
132 task_t task);
133 void task_wait_locked(
134 task_t task);
135 void task_release_locked(
136 task_t task);
137 void task_collect_scan(void);
138 void task_free(
139 task_t task );
140 void task_synchronizer_destroy_all(
141 task_t task);
142 void task_subsystem_destroy_all(
143 task_t task);
144
145 kern_return_t task_set_ledger(
146 task_t task,
147 ledger_t wired,
148 ledger_t paged);
149
150 void
151 task_init(void)
152 {
153 task_zone = zinit(
154 sizeof(struct task),
155 TASK_MAX * sizeof(struct task),
156 TASK_CHUNK * sizeof(struct task),
157 "tasks");
158
159 eml_init();
160
161 /*
162 * Create the kernel task as the first task.
163 * Task_create_local must assign to kernel_task as a side effect,
164 * for other initialization. (:-()
165 */
166 if (task_create_local(
167 TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
168 panic("task_init\n");
169 vm_map_deallocate(kernel_task->map);
170 kernel_task->map = kernel_map;
171
172 #if MACH_ASSERT
173 if (watchacts & WA_TASK)
174 printf("task_init: kernel_task = %x map=%x\n",
175 kernel_task, kernel_map);
176 #endif /* MACH_ASSERT */
177 }
178
179 #if MACH_HOST
180 void
181 task_freeze(
182 task_t task)
183 {
184 task_lock(task);
185 /*
186 * If may_assign is false, task is already being assigned,
187 * wait for that to finish.
188 */
189 while (task->may_assign == FALSE) {
190 task->assign_active = TRUE;
191 thread_sleep_mutex((event_t) &task->assign_active,
192 &task->lock, THREAD_INTERRUPTIBLE);
193 task_lock(task);
194 }
195 task->may_assign = FALSE;
196 task_unlock(task);
197
198 return;
199 }
200
201 void
202 task_unfreeze(
203 task_t task)
204 {
205 task_lock(task);
206 assert(task->may_assign == FALSE);
207 task->may_assign = TRUE;
208 if (task->assign_active == TRUE) {
209 task->assign_active = FALSE;
210 thread_wakeup((event_t)&task->assign_active);
211 }
212 task_unlock(task);
213
214 return;
215 }
216 #endif /* MACH_HOST */
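
/*
 * Illustrative sketch (not part of this file): task_freeze()/task_unfreeze()
 * bracket any region that needs task->processor_set to stay put, exactly as
 * task_create_local() does below around the parent's scheduling state.  The
 * task name "t" is hypothetical.
 */
#if 0	/* example only, MACH_HOST configurations */
	processor_set_t pset;

	task_freeze(t);				/* pset assignment now blocked */
	pset = t->processor_set;		/* safe to cache and use       */
	/* ... work that relies on the pset not changing ... */
	task_unfreeze(t);			/* wake any waiting assignment */
#endif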
217
218 /*
219 * Create a task running in the kernel address space. It may
220 * have its own map of size mem_size and may have ipc privileges.
221 */
222 kern_return_t
223 kernel_task_create(
224 task_t parent_task,
225 vm_offset_t map_base,
226 vm_size_t map_size,
227 task_t *child_task)
228 {
229 kern_return_t result;
230 task_t new_task;
231 vm_map_t old_map;
232
233 /*
234 * Create the task.
235 */
236 result = task_create_local(parent_task, FALSE, TRUE, &new_task);
237 if (result != KERN_SUCCESS)
238 return (result);
239
240 /*
241 * Task_create_local creates the task with a user-space map.
242 * We attempt to replace the map and free it afterwards; else
243 * task_deallocate will free it (can NOT set map to null before
244 * task_deallocate, this impersonates a norma placeholder task).
245 * _Mark the memory as pageable_ -- this is what we
246 * want for images (like servers) loaded into the kernel.
247 */
248 if (map_size == 0) {
249 vm_map_deallocate(new_task->map);
250 new_task->map = kernel_map;
251 *child_task = new_task;
252 } else {
253 old_map = new_task->map;
254 if ((result = kmem_suballoc(kernel_map, &map_base,
255 map_size, TRUE, FALSE,
256 &new_task->map)) != KERN_SUCCESS) {
257 /*
258 * New task created with ref count of 2 -- decrement by
259 * one to force task deletion.
260 */
261 printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
262 kernel_map, map_base, map_size);
263 --new_task->ref_count;
264 task_deallocate(new_task);
265 return (result);
266 }
267 vm_map_deallocate(old_map);
268 *child_task = new_task;
269 }
270 return (KERN_SUCCESS);
271 }
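
/*
 * Illustrative sketch (not part of this file): a caller loading a server
 * into the kernel might ask for a task that simply shares kernel_map by
 * passing map_size == 0; a non-zero size instead carves a pageable submap
 * out of kernel_map at the supplied map_base.  "server_task" is hypothetical.
 */
#if 0	/* example only */
	task_t		server_task;

	if (kernel_task_create(kernel_task, 0, 0, &server_task) != KERN_SUCCESS)
		panic("kernel_task_create");
	/* server_task->map is now kernel_map itself */
#endif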
272
273 kern_return_t
274 task_create(
275 task_t parent_task,
276 ledger_port_array_t ledger_ports,
277 mach_msg_type_number_t num_ledger_ports,
278 boolean_t inherit_memory,
279 task_t *child_task) /* OUT */
280 {
281 if (parent_task == TASK_NULL)
282 return(KERN_INVALID_ARGUMENT);
283
284 return task_create_local(
285 parent_task, inherit_memory, FALSE, child_task);
286 }
287
288 kern_return_t
289 host_security_create_task_token(
290 host_security_t host_security,
291 task_t parent_task,
292 security_token_t sec_token,
293 host_priv_t host_priv,
294 ledger_port_array_t ledger_ports,
295 mach_msg_type_number_t num_ledger_ports,
296 boolean_t inherit_memory,
297 task_t *child_task) /* OUT */
298 {
299 kern_return_t result;
300
301 if (parent_task == TASK_NULL)
302 return(KERN_INVALID_ARGUMENT);
303
304 if (host_security == HOST_NULL)
305 return(KERN_INVALID_SECURITY);
306
307 result = task_create_local(
308 parent_task, inherit_memory, FALSE, child_task);
309
310 if (result != KERN_SUCCESS)
311 return(result);
312
313 result = host_security_set_task_token(host_security,
314 *child_task,
315 sec_token,
316 host_priv);
317
318 if (result != KERN_SUCCESS)
319 return(result);
320
321 return(result);
322 }
323
324 kern_return_t
325 task_create_local(
326 task_t parent_task,
327 boolean_t inherit_memory,
328 boolean_t kernel_loaded,
329 task_t *child_task) /* OUT */
330 {
331 task_t new_task;
332 processor_set_t pset;
333
334 new_task = (task_t) zalloc(task_zone);
335
336 if (new_task == TASK_NULL)
337 return(KERN_RESOURCE_SHORTAGE);
338
339 /* one ref for just being alive; one for our caller */
340 new_task->ref_count = 2;
341
342 if (inherit_memory)
343 new_task->map = vm_map_fork(parent_task->map);
344 else
345 new_task->map = vm_map_create(pmap_create(0),
346 round_page(VM_MIN_ADDRESS),
347 trunc_page(VM_MAX_ADDRESS), TRUE);
348
349 mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
350 queue_init(&new_task->subsystem_list);
351 queue_init(&new_task->thr_acts);
352 new_task->suspend_count = 0;
353 new_task->thr_act_count = 0;
354 new_task->res_act_count = 0;
355 new_task->active_act_count = 0;
356 new_task->user_stop_count = 0;
357 new_task->importance = 0;
358 new_task->active = TRUE;
359 new_task->kernel_loaded = kernel_loaded;
360 new_task->user_data = 0;
361 new_task->faults = 0;
362 new_task->cow_faults = 0;
363 new_task->pageins = 0;
364 new_task->messages_sent = 0;
365 new_task->messages_received = 0;
366 new_task->syscalls_mach = 0;
367 new_task->syscalls_unix=0;
368 new_task->csw=0;
369
370 #ifdef MACH_BSD
371 new_task->bsd_info = 0;
372 #endif /* MACH_BSD */
373
374 #if TASK_SWAPPER
375 new_task->swap_state = TASK_SW_IN;
376 new_task->swap_flags = 0;
377 new_task->swap_ast_waiting = 0;
378 new_task->swap_stamp = sched_tick;
379 new_task->swap_rss = 0;
380 new_task->swap_nswap = 0;
381 #endif /* TASK_SWAPPER */
382
383 queue_init(&new_task->semaphore_list);
384 queue_init(&new_task->lock_set_list);
385 new_task->semaphores_owned = 0;
386 new_task->lock_sets_owned = 0;
387
388 #if MACH_HOST
389 new_task->may_assign = TRUE;
390 new_task->assign_active = FALSE;
391 #endif /* MACH_HOST */
392 eml_task_reference(new_task, parent_task);
393
394 ipc_task_init(new_task, parent_task);
395
396 new_task->total_user_time.seconds = 0;
397 new_task->total_user_time.microseconds = 0;
398 new_task->total_system_time.seconds = 0;
399 new_task->total_system_time.microseconds = 0;
400
401 task_prof_init(new_task);
402
403 if (parent_task != TASK_NULL) {
404 #if MACH_HOST
405 /*
406 * Freeze the parent, so that parent_task->processor_set
407 * cannot change.
408 */
409 task_freeze(parent_task);
410 #endif /* MACH_HOST */
411 pset = parent_task->processor_set;
412 if (!pset->active)
413 pset = &default_pset;
414
415 new_task->policy = parent_task->policy;
416
417 new_task->priority = parent_task->priority;
418 new_task->max_priority = parent_task->max_priority;
419
420 new_task->sec_token = parent_task->sec_token;
421
422 shared_region_mapping_ref(parent_task->system_shared_region);
423 new_task->system_shared_region = parent_task->system_shared_region;
424
425 new_task->wired_ledger_port = ledger_copy(
426 convert_port_to_ledger(parent_task->wired_ledger_port));
427 new_task->paged_ledger_port = ledger_copy(
428 convert_port_to_ledger(parent_task->paged_ledger_port));
429 }
430 else {
431 pset = &default_pset;
432
433 if (kernel_task == TASK_NULL) {
434 new_task->policy = POLICY_RR;
435
436 new_task->priority = MINPRI_KERNBAND;
437 new_task->max_priority = MAXPRI_KERNBAND;
438 }
439 else {
440 new_task->policy = POLICY_TIMESHARE;
441
442 new_task->priority = BASEPRI_DEFAULT;
443 new_task->max_priority = MAXPRI_HIGHBAND;
444 }
445
446 new_task->sec_token = KERNEL_SECURITY_TOKEN;
447 new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
448 new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
449 }
450
451 pset_lock(pset);
452 pset_add_task(pset, new_task);
453 pset_unlock(pset);
454 #if MACH_HOST
455 if (parent_task != TASK_NULL)
456 task_unfreeze(parent_task);
457 #endif /* MACH_HOST */
458
459 #if FAST_TAS
460 if (inherit_memory) {
461 new_task->fast_tas_base = parent_task->fast_tas_base;
462 new_task->fast_tas_end = parent_task->fast_tas_end;
463 } else {
464 new_task->fast_tas_base = (vm_offset_t)0;
465 new_task->fast_tas_end = (vm_offset_t)0;
466 }
467 #endif /* FAST_TAS */
468
469 ipc_task_enable(new_task);
470
471 #if TASK_SWAPPER
472 task_swapout_eligible(new_task);
473 #endif /* TASK_SWAPPER */
474
475 #if MACH_ASSERT
476 if (watchacts & WA_TASK)
477 printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
478 parent_task, inherit_memory, new_task);
479 #endif /* MACH_ASSERT */
480
481 *child_task = new_task;
482 return(KERN_SUCCESS);
483 }
484
485 /*
486 * task_free:
487 *
488 * Called by task_deallocate when the task's reference count drops to zero.
489 * Task is locked.
490 */
491 void
492 task_free(
493 task_t task)
494 {
495 processor_set_t pset;
496
497 #if MACH_ASSERT
498 assert(task != 0);
499 if (watchacts & (WA_EXIT|WA_TASK))
500 printf("task_free(%x(%d)) map ref %d\n", task, task->ref_count,
501 task->map->ref_count);
502 #endif /* MACH_ASSERT */
503
504 #if TASK_SWAPPER
505 /* task_terminate guarantees that this task is off the list */
506 assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
507 #endif /* TASK_SWAPPER */
508
509 eml_task_deallocate(task);
510
511 /*
512 * Temporarily restore the reference we dropped above, then
513 * freeze the task so that the task->processor_set field
514 * cannot change. In the !MACH_HOST case, the logic can be
515 * simplified, since the default_pset is the only pset.
516 */
517 ++task->ref_count;
518 task_unlock(task);
519 #if MACH_HOST
520 task_freeze(task);
521 #endif /* MACH_HOST */
522
523 pset = task->processor_set;
524 pset_lock(pset);
525 task_lock(task);
526 if (--task->ref_count > 0) {
527 /*
528 * A new reference appeared (probably from the pset).
529 * Back out. Must unfreeze inline since we've already
530 * dropped our reference.
531 */
532 #if MACH_HOST
533 assert(task->may_assign == FALSE);
534 task->may_assign = TRUE;
535 if (task->assign_active == TRUE) {
536 task->assign_active = FALSE;
537 thread_wakeup((event_t)&task->assign_active);
538 }
539 #endif /* MACH_HOST */
540 task_unlock(task);
541 pset_unlock(pset);
542 return;
543 }
544 pset_remove_task(pset,task);
545 task_unlock(task);
546 pset_unlock(pset);
547 pset_deallocate(pset);
548
549 ipc_task_terminate(task);
550 shared_region_mapping_dealloc(task->system_shared_region);
551
552 if (task->kernel_loaded)
553 vm_map_remove(kernel_map, task->map->min_offset,
554 task->map->max_offset, VM_MAP_NO_FLAGS);
555 vm_map_deallocate(task->map);
556 is_release(task->itk_space);
557 task_prof_deallocate(task);
558 zfree(task_zone, (vm_offset_t) task);
559 }
560
561 void
562 task_deallocate(
563 task_t task)
564 {
565 if (task != TASK_NULL) {
566 int c;
567
568 task_lock(task);
569 c = --task->ref_count;
570 if (c == 0)
571 task_free(task); /* unlocks task */
572 else
573 task_unlock(task);
574 }
575 }
576
577 void
578 task_reference(
579 task_t task)
580 {
581 if (task != TASK_NULL) {
582 task_lock(task);
583 task->ref_count++;
584 task_unlock(task);
585 }
586 }
587
588 boolean_t
589 task_reference_try(
590 task_t task)
591 {
592 if (task != TASK_NULL) {
593 if (task_lock_try(task)) {
594 task->ref_count++;
595 task_unlock(task);
596 return TRUE;
597 }
598 }
599 return FALSE;
600 }
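
/*
 * Illustrative sketch (not part of this file): the reference-counting
 * discipline the three routines above implement.  Every reference taken
 * with task_reference() (or handed out by task_create_local) must be
 * balanced by a task_deallocate(); the final task_deallocate() drops into
 * task_free().  task_reference_try() is for callers that cannot afford to
 * block on the task lock.  The task "t" is hypothetical.
 */
#if 0	/* example only */
	task_reference(t);		/* keep t alive across unlocked code */
	/* ... use t without holding its lock ... */
	task_deallocate(t);		/* frees t if this was the last reference */

	if (task_reference_try(t)) {	/* non-blocking variant */
		/* ... */
		task_deallocate(t);
	}
#endif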
601
602 /*
603 * task_terminate:
604 *
605 * Terminate the specified task. See comments on thread_terminate
606 * (kern/thread.c) about problems with terminating the "current task."
607 */
608
609 kern_return_t
610 task_terminate(
611 task_t task)
612 {
613 if (task == TASK_NULL)
614 return(KERN_INVALID_ARGUMENT);
615 if (task->bsd_info)
616 return(KERN_FAILURE);
617 return (task_terminate_internal(task));
618 }
619
620 kern_return_t
621 task_terminate_internal(
622 task_t task)
623 {
624 thread_act_t thr_act, cur_thr_act;
625 task_t cur_task;
626 thread_t cur_thread;
627 boolean_t interrupt_save;
628
629 assert(task != kernel_task);
630
631 cur_thr_act = current_act();
632 cur_task = cur_thr_act->task;
633
634 #if TASK_SWAPPER
635 /*
636 * If task is not resident (swapped out, or being swapped
637 * out), we want to bring it back in (this can block).
638 * NOTE: The only way that this can happen in the current
639 * system is if the task is swapped while it has a thread
640 * in exit(), and the thread does not hit a clean point
641 * to swap itself before getting here.
642 * Terminating other tasks is another way into this code, but
643 * it is not yet fully supported.
644 * The task_swapin is unconditional. It used to be done
645 * only if the task is not resident. Swapping in a
646 * resident task will prevent it from being swapped out
647 * while it terminates.
648 */
649 task_swapin(task, TRUE); /* TRUE means make it unswappable */
650 #endif /* TASK_SWAPPER */
651
652 /*
653 * Get the task locked and make sure that we are not racing
654 * with someone else trying to terminate us.
655 */
656 if (task == cur_task) {
657 task_lock(task);
658 } else if (task < cur_task) {
659 task_lock(task);
660 task_lock(cur_task);
661 } else {
662 task_lock(cur_task);
663 task_lock(task);
664 }
665
666 if (!task->active || !cur_thr_act->active) {
667 /*
668 * Task or current act is already being terminated.
669 * Just return an error. If we are dying, this will
670 * just get us to our AST special handler and that
671 * will get us to finalize the termination of ourselves.
672 */
673 task_unlock(task);
674 if (cur_task != task)
675 task_unlock(cur_task);
676 return(KERN_FAILURE);
677 }
678 if (cur_task != task)
679 task_unlock(cur_task);
680
681 /*
682 * Make sure the current thread does not get aborted out of
683 * the waits inside these operations.
684 */
685 cur_thread = current_thread();
686 interrupt_save = cur_thread->interruptible;
687 cur_thread->interruptible = FALSE;
688
689 /*
690 * Indicate that we want all the threads to stop executing
691 * at user space by holding the task (we would have held
692 * each thread independently in thread_terminate_internal -
693 * but this way we may be more likely to already find it
694 * held there). Mark the task inactive, and prevent
695 * further task operations via the task port.
696 */
697 task_hold_locked(task);
698 task->active = FALSE;
699 ipc_task_disable(task);
700
701 /*
702 * Terminate each activation in the task.
703 *
704 * Each terminated activation will run its special handler
705 * when its current kernel context is unwound. That will
706 * clean up most of the thread resources. Then it will be
707 * handed over to the reaper, who will finally remove the
708 * thread from the task list and free the structures.
709 */
710 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
711 thread_terminate_internal(thr_act);
712 }
713
714 /*
715 * Clean up any virtual machine state/resources associated
716 * with the current activation because it may hold wiring
717 * and other references on resources we will be trying to
718 * release below.
719 */
720 if (cur_thr_act->task == task)
721 act_virtual_machine_destroy(cur_thr_act);
722
723 task_unlock(task);
724
725 /*
726 * Destroy all synchronizers owned by the task.
727 */
728 task_synchronizer_destroy_all(task);
729
730 /*
731 * Deallocate all subsystems owned by the task.
732 */
733 task_subsystem_destroy_all(task);
734
735 /*
736 * Destroy the IPC space, leaving just a reference for it.
737 */
738 if (!task->kernel_loaded)
739 ipc_space_destroy(task->itk_space);
740
741 /*
742 * If the current thread is a member of the task
743 * being terminated, then the last reference to
744 * the task will not be dropped until the thread
745 * is finally reaped. To avoid incurring the
746 * expense of removing the address space regions
747 * at reap time, we do it explicitly here.
748 */
749 (void) vm_map_remove(task->map,
750 task->map->min_offset,
751 task->map->max_offset, VM_MAP_NO_FLAGS);
752
753 /*
754 * We no longer need to guard against being aborted, so restore
755 * the previous interruptible state.
756 */
757 cur_thread->interruptible = interrupt_save;
758
759 /*
760 * Get rid of the task active reference on itself.
761 */
762 task_deallocate(task);
763
764 return(KERN_SUCCESS);
765 }
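
/*
 * Illustrative sketch (not part of this file): the task/cur_task locking at
 * the top of task_terminate_internal() is address-ordered lock acquisition.
 * Always taking the lower-addressed lock first means two threads terminating
 * each other's tasks can never deadlock on the pair.
 */
#if 0	/* example only */
	if (task_a == task_b) {
		task_lock(task_a);
	} else if (task_a < task_b) {
		task_lock(task_a);		/* lower address first */
		task_lock(task_b);
	} else {
		task_lock(task_b);
		task_lock(task_a);
	}
#endif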
766
767 /*
768 * task_halt - Shut the current task down (except for the current thread) in
769 * preparation for dramatic changes to the task (probably exec).
770 * We hold the task, terminate all other threads in the task and
771 * wait for them to terminate, clean up the portspace, and when
772 * all done, let the current thread go.
773 */
774 kern_return_t
775 task_halt(
776 task_t task)
777 {
778 thread_act_t thr_act, cur_thr_act;
779 task_t cur_task;
780
781 assert(task != kernel_task);
782
783 cur_thr_act = current_act();
784 cur_task = cur_thr_act->task;
785
786 if (task != cur_task) {
787 return(KERN_INVALID_ARGUMENT);
788 }
789
790 #if TASK_SWAPPER
791 /*
792 * If task is not resident (swapped out, or being swapped
793 * out), we want to bring it back in and make it unswappable.
794 * This can block, so do it early.
795 */
796 task_swapin(task, TRUE); /* TRUE means make it unswappable */
797 #endif /* TASK_SWAPPER */
798
799 task_lock(task);
800
801 if (!task->active || !cur_thr_act->active) {
802 /*
803 * Task or current thread is already being terminated.
804 * Hurry up and return out of the current kernel context
805 * so that we run our AST special handler to terminate
806 * ourselves.
807 */
808 task_unlock(task);
809 return(KERN_FAILURE);
810 }
811
812 if (task->thr_act_count > 1) {
813 /*
814 * Mark all the threads to keep them from starting any more
815 * user-level execution. The thread_terminate_internal code
816 * would do this on a thread by thread basis anyway, but this
817 * gives us a better chance of not having to wait there.
818 */
819 task_hold_locked(task);
820
821 /*
822 * Terminate all the other activations in the task.
823 *
824 * Each terminated activation will run its special handler
825 * when its current kernel context is unwound. That will
826 * clean up most of the thread resources. Then it will be
827 * handed over to the reaper, who will finally remove the
828 * thread from the task list and free the structures.
829 */
830 queue_iterate(&task->thr_acts, thr_act, thread_act_t,thr_acts) {
831 if (thr_act != cur_thr_act)
832 thread_terminate_internal(thr_act);
833 }
834 task_release_locked(task);
835 }
836
837 /*
838 * If the current thread has any virtual machine state
839 * associated with it, we need to explicitly clean that
840 * up now (because we did not terminate the current act)
841 * before we try to clean up the task VM and port spaces.
842 */
843 act_virtual_machine_destroy(cur_thr_act);
844
845 task_unlock(task);
846
847 /*
848 * Destroy all synchronizers owned by the task.
849 */
850 task_synchronizer_destroy_all(task);
851
852 /*
853 * Deallocate all subsystems owned by the task.
854 */
855 task_subsystem_destroy_all(task);
856
857 #if 0
858 /*
859 * Destroy the IPC space, leaving just a reference for it.
860 */
861 /*
862 * Lookupd will break if we enable this cleaning, because it
863 * uses a slimy trick that depends upon the portspace not
864 * being cleaned up across exec (it passes the lookupd server
865 * port to the child after a restart using knowledge of this
866 * bug in past implementations). We need to fix lookupd to
867 * keep from leaking ports across exec.
868 */
869 if (!task->kernel_loaded)
870 ipc_space_clean(task->itk_space);
871 #endif
872
873 /*
874 * Clean out the address space, as we are going to be
875 * getting a new one.
876 */
877 (void) vm_map_remove(task->map,
878 task->map->min_offset,
879 task->map->max_offset, VM_MAP_NO_FLAGS);
880
881 return KERN_SUCCESS;
882 }
883
884 /*
885 * task_hold_locked:
886 *
887 * Suspend execution of the specified task.
888 * This is a recursive-style suspension of the task; a count of
889 * suspends is maintained.
890 *
891 * CONDITIONS: the task is locked and active.
892 */
893 void
894 task_hold_locked(
895 register task_t task)
896 {
897 register thread_act_t thr_act;
898
899 assert(task->active);
900
901 task->suspend_count++;
902
903 /*
904 * Iterate through all the thread_act's and hold them.
905 */
906 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
907 act_lock_thread(thr_act);
908 thread_hold(thr_act);
909 act_unlock_thread(thr_act);
910 }
911 }
912
913 /*
914 * task_hold:
915 *
916 * Same as the internal routine above, except that it must lock
917 * and verify that the task is active. This differs from task_suspend
918 * in that it places a kernel hold on the task rather than just a
919 * user-level hold. This keeps users from over-resuming and setting
920 * it running out from under the kernel.
921 *
922 * CONDITIONS: the caller holds a reference on the task
923 */
924 kern_return_t
925 task_hold(task_t task)
926 {
927 kern_return_t kret;
928
929 if (task == TASK_NULL)
930 return (KERN_INVALID_ARGUMENT);
931 task_lock(task);
932 if (!task->active) {
933 task_unlock(task);
934 return (KERN_FAILURE);
935 }
936 task_hold_locked(task);
937 task_unlock(task);
938
939 return(KERN_SUCCESS);
940 }
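
/*
 * Illustrative sketch (not part of this file): a kernel caller that needs
 * every thread in a task stopped at a known point typically pairs the hold
 * with task_wait_locked() and a later release, exactly as task_suspend()
 * does below.  The task "t" is hypothetical.
 */
#if 0	/* example only */
	task_lock(t);
	task_hold_locked(t);	/* no more user-level execution             */
	task_wait_locked(t);	/* wait for running threads to come to rest */
	/* ... inspect or modify the stopped task ... */
	task_release_locked(t);
	task_unlock(t);
#endif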
941
942 /*
943 * Routine: task_wait_locked
944 * Wait for all threads in task to stop.
945 *
946 * Conditions:
947 * Called with task locked, active, and held.
948 */
949 void
950 task_wait_locked(
951 register task_t task)
952 {
953 register thread_act_t thr_act, cur_thr_act;
954
955 assert(task->active);
956 assert(task->suspend_count > 0);
957
958 cur_thr_act = current_act();
959 /*
960 * Iterate through all the threads and wait for them to
961 * stop. Do not wait for the current thread if it is within
962 * the task.
963 */
964 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
965 if (thr_act != cur_thr_act) {
966 thread_shuttle_t thr_shuttle;
967
968 thr_shuttle = act_lock_thread(thr_act);
969 thread_wait(thr_shuttle);
970 act_unlock_thread(thr_act);
971 }
972 }
973 }
974
975 /*
976 * task_release_locked:
977 *
978 * Release a kernel hold on a task.
979 *
980 * CONDITIONS: the task is locked and active
981 */
982 void
983 task_release_locked(
984 register task_t task)
985 {
986 register thread_act_t thr_act;
987
988 assert(task->active);
989
990 task->suspend_count--;
991 assert(task->suspend_count >= 0);
992
993 /*
994 * Iterate through all the thread_act's and release them,
995 * balancing the hold placed on each by task_hold_locked
996 * above.
997 */
998 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
999 act_lock_thread(thr_act);
1000 thread_release(thr_act);
1001 act_unlock_thread(thr_act);
1002 }
1003 }
1004
1005 /*
1006 * task_release:
1007 *
1008 * Same as the internal routine above, except that it must lock
1009 * and verify that the task is active.
1010 *
1011 * CONDITIONS: The caller holds a reference to the task
1012 */
1013 kern_return_t
1014 task_release(task_t task)
1015 {
1016 kern_return_t kret;
1017
1018 if (task == TASK_NULL)
1019 return (KERN_INVALID_ARGUMENT);
1020 task_lock(task);
1021 if (!task->active) {
1022 task_unlock(task);
1023 return (KERN_FAILURE);
1024 }
1025 task_release_locked(task);
1026 task_unlock(task);
1027
1028 return(KERN_SUCCESS);
1029 }
1030
1031 kern_return_t
1032 task_threads(
1033 task_t task,
1034 thread_act_array_t *thr_act_list,
1035 mach_msg_type_number_t *count)
1036 {
1037 unsigned int actual; /* this many thr_acts */
1038 thread_act_t thr_act;
1039 thread_act_t *thr_acts;
1040 thread_t thread;
1041 int i, j;
1042
1043 vm_size_t size, size_needed;
1044 vm_offset_t addr;
1045
1046 if (task == TASK_NULL)
1047 return KERN_INVALID_ARGUMENT;
1048
1049 size = 0; addr = 0;
1050
1051 for (;;) {
1052 task_lock(task);
1053 if (!task->active) {
1054 task_unlock(task);
1055 if (size != 0)
1056 kfree(addr, size);
1057 return KERN_FAILURE;
1058 }
1059
1060 actual = task->thr_act_count;
1061
1062 /* do we have the memory we need? */
1063 size_needed = actual * sizeof(mach_port_t);
1064 if (size_needed <= size)
1065 break;
1066
1067 /* unlock the task and allocate more memory */
1068 task_unlock(task);
1069
1070 if (size != 0)
1071 kfree(addr, size);
1072
1073 assert(size_needed > 0);
1074 size = size_needed;
1075
1076 addr = kalloc(size);
1077 if (addr == 0)
1078 return KERN_RESOURCE_SHORTAGE;
1079 }
1080
1081 /* OK, have memory and the task is locked & active */
1082 thr_acts = (thread_act_t *) addr;
1083
1084 for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
1085 i < actual;
1086 i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
1087 act_lock(thr_act);
1088 if (thr_act->ref_count > 0) {
1089 act_locked_act_reference(thr_act);
1090 thr_acts[j++] = thr_act;
1091 }
1092 act_unlock(thr_act);
1093 }
1094 assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));
1095
1096 actual = j;
1097 size_needed = actual * sizeof(mach_port_t);
1098
1099 /* can unlock task now that we've got the thr_act refs */
1100 task_unlock(task);
1101
1102 if (actual == 0) {
1103 /* no thr_acts, so return null pointer and deallocate memory */
1104
1105 *thr_act_list = 0;
1106 *count = 0;
1107
1108 if (size != 0)
1109 kfree(addr, size);
1110 } else {
1111 /* if we allocated too much, must copy */
1112
1113 if (size_needed < size) {
1114 vm_offset_t newaddr;
1115
1116 newaddr = kalloc(size_needed);
1117 if (newaddr == 0) {
1118 for (i = 0; i < actual; i++)
1119 act_deallocate(thr_acts[i]);
1120 kfree(addr, size);
1121 return KERN_RESOURCE_SHORTAGE;
1122 }
1123
1124 bcopy((char *) addr, (char *) newaddr, size_needed);
1125 kfree(addr, size);
1126 thr_acts = (thread_act_t *) newaddr;
1127 }
1128
1129 *thr_act_list = thr_acts;
1130 *count = actual;
1131
1132 /* do the conversion that Mig should handle */
1133
1134 for (i = 0; i < actual; i++)
1135 ((ipc_port_t *) thr_acts)[i] =
1136 convert_act_to_port(thr_acts[i]);
1137 }
1138
1139 return KERN_SUCCESS;
1140 }
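
/*
 * Illustrative sketch (not part of this file): what a user-level client of
 * the MIG-generated task_threads() stub is expected to do with the result.
 * The port array is returned out-of-line, so both the individual send
 * rights and the array memory must be released by the caller.
 */
#if 0	/* example only, user-level code */
	thread_act_array_t	acts;
	mach_msg_type_number_t	count;
	unsigned int		i;

	if (task_threads(mach_task_self(), &acts, &count) == KERN_SUCCESS) {
		for (i = 0; i < count; i++)
			mach_port_deallocate(mach_task_self(), acts[i]);
		(void) vm_deallocate(mach_task_self(), (vm_address_t) acts,
				     count * sizeof(acts[0]));
	}
#endif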
1141
1142 /*
1143 * Routine: task_suspend
1144 * Implement a user-level suspension on a task.
1145 *
1146 * Conditions:
1147 * The caller holds a reference to the task
1148 */
1149 kern_return_t
1150 task_suspend(
1151 register task_t task)
1152 {
1153 if (task == TASK_NULL)
1154 return (KERN_INVALID_ARGUMENT);
1155
1156 task_lock(task);
1157 if (!task->active) {
1158 task_unlock(task);
1159 return (KERN_FAILURE);
1160 }
1161 if ((task->user_stop_count)++ > 0) {
1162 /*
1163 * If the stop count was positive, the task is
1164 * already stopped and we can exit.
1165 */
1166 task_unlock(task);
1167 return (KERN_SUCCESS);
1168 }
1169
1170 /*
1171 * Put a kernel-level hold on the threads in the task (all
1172 * user-level task suspensions added together represent a
1173 * single kernel-level hold). We then wait for the threads
1174 * to stop executing user code.
1175 */
1176 task_hold_locked(task);
1177 task_wait_locked(task);
1178 task_unlock(task);
1179 return (KERN_SUCCESS);
1180 }
1181
1182 /*
1183 * Routine: task_resume
1184 * Release a user-level suspension on a task.
1185 *
1186 * Conditions:
1187 * The caller holds a reference to the task
1188 */
1189 kern_return_t
1190 task_resume(register task_t task)
1191 {
1192 register boolean_t release;
1193
1194 if (task == TASK_NULL)
1195 return(KERN_INVALID_ARGUMENT);
1196
1197 release = FALSE;
1198 task_lock(task);
1199 if (!task->active) {
1200 task_unlock(task);
1201 return(KERN_FAILURE);
1202 }
1203 if (task->user_stop_count > 0) {
1204 if (--(task->user_stop_count) == 0)
1205 release = TRUE;
1206 }
1207 else {
1208 task_unlock(task);
1209 return(KERN_FAILURE);
1210 }
1211
1212 /*
1213 * Release the task if necessary.
1214 */
1215 if (release)
1216 task_release_locked(task);
1217
1218 task_unlock(task);
1219 return(KERN_SUCCESS);
1220 }
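
/*
 * Illustrative sketch (not part of this file): user-level suspensions nest.
 * All outstanding task_suspend() calls together amount to one kernel-level
 * hold, and the task runs again only after a matching number of
 * task_resume() calls.  The task "t" is hypothetical.
 */
#if 0	/* example only */
	task_suspend(t);	/* user_stop_count 0 -> 1: threads held      */
	task_suspend(t);	/* user_stop_count 1 -> 2: still held        */
	task_resume(t);		/* 2 -> 1: still held                        */
	task_resume(t);		/* 1 -> 0: kernel hold released, task runs   */
#endif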
1221
1222 kern_return_t
1223 host_security_set_task_token(
1224 host_security_t host_security,
1225 task_t task,
1226 security_token_t sec_token,
1227 host_priv_t host_priv)
1228 {
1229 kern_return_t kr;
1230
1231 if (task == TASK_NULL)
1232 return(KERN_INVALID_ARGUMENT);
1233
1234 if (host_security == HOST_NULL)
1235 return(KERN_INVALID_SECURITY);
1236
1237 task_lock(task);
1238 task->sec_token = sec_token;
1239 task_unlock(task);
1240
1241 if (host_priv != HOST_PRIV_NULL) {
1242 kr = task_set_special_port(task,
1243 TASK_HOST_PORT,
1244 ipc_port_make_send(realhost.host_priv_self));
1245 } else {
1246 kr = task_set_special_port(task,
1247 TASK_HOST_PORT,
1248 ipc_port_make_send(realhost.host_self));
1249 }
1250 return(kr);
1251 }
1252
1253 /*
1254 * Utility routine to set a ledger
1255 */
1256 kern_return_t
1257 task_set_ledger(
1258 task_t task,
1259 ledger_t wired,
1260 ledger_t paged)
1261 {
1262 if (task == TASK_NULL)
1263 return(KERN_INVALID_ARGUMENT);
1264
1265 task_lock(task);
1266 if (wired) {
1267 ipc_port_release_send(task->wired_ledger_port);
1268 task->wired_ledger_port = ledger_copy(wired);
1269 }
1270 if (paged) {
1271 ipc_port_release_send(task->paged_ledger_port);
1272 task->paged_ledger_port = ledger_copy(paged);
1273 }
1274 task_unlock(task);
1275
1276 return(KERN_SUCCESS);
1277 }
1278
1279 /*
1280 * This routine was added, pretty much exclusively, for registering the
1281 * RPC glue vector for in-kernel short circuited tasks. Rather than
1282 * removing it completely, I have only disabled that feature (which was
1283 * the only feature at the time). It just appears that we are going to
1284 * want to add some user data to tasks in the future (i.e. bsd info,
1285 * task names, etc...), so I left it in the formal task interface.
1286 */
1287 kern_return_t
1288 task_set_info(
1289 task_t task,
1290 task_flavor_t flavor,
1291 task_info_t task_info_in, /* pointer to IN array */
1292 mach_msg_type_number_t task_info_count)
1293 {
1294 vm_map_t map;
1295
1296 if (task == TASK_NULL)
1297 return(KERN_INVALID_ARGUMENT);
1298
1299 switch (flavor) {
1300 default:
1301 return (KERN_INVALID_ARGUMENT);
1302 }
1303 return (KERN_SUCCESS);
1304 }
1305
1306 kern_return_t
1307 task_info(
1308 task_t task,
1309 task_flavor_t flavor,
1310 task_info_t task_info_out,
1311 mach_msg_type_number_t *task_info_count)
1312 {
1313 thread_t thread;
1314 vm_map_t map;
1315
1316 if (task == TASK_NULL)
1317 return(KERN_INVALID_ARGUMENT);
1318
1319 switch (flavor) {
1320
1321 case TASK_BASIC_INFO:
1322 {
1323 register task_basic_info_t basic_info;
1324
1325 if (*task_info_count < TASK_BASIC_INFO_COUNT) {
1326 return(KERN_INVALID_ARGUMENT);
1327 }
1328
1329 basic_info = (task_basic_info_t) task_info_out;
1330
1331 map = (task == kernel_task) ? kernel_map : task->map;
1332
1333 basic_info->virtual_size = map->size;
1334 basic_info->resident_size = pmap_resident_count(map->pmap)
1335 * PAGE_SIZE;
1336
1337 task_lock(task);
1338 basic_info->policy = task->policy;
1339 basic_info->suspend_count = task->user_stop_count;
1340 basic_info->user_time.seconds
1341 = task->total_user_time.seconds;
1342 basic_info->user_time.microseconds
1343 = task->total_user_time.microseconds;
1344 basic_info->system_time.seconds
1345 = task->total_system_time.seconds;
1346 basic_info->system_time.microseconds
1347 = task->total_system_time.microseconds;
1348 task_unlock(task);
1349
1350 *task_info_count = TASK_BASIC_INFO_COUNT;
1351 break;
1352 }
1353
1354 case TASK_THREAD_TIMES_INFO:
1355 {
1356 register task_thread_times_info_t times_info;
1357 register thread_t thread;
1358 register thread_act_t thr_act;
1359
1360 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
1361 return (KERN_INVALID_ARGUMENT);
1362 }
1363
1364 times_info = (task_thread_times_info_t) task_info_out;
1365 times_info->user_time.seconds = 0;
1366 times_info->user_time.microseconds = 0;
1367 times_info->system_time.seconds = 0;
1368 times_info->system_time.microseconds = 0;
1369
1370 task_lock(task);
1371 queue_iterate(&task->thr_acts, thr_act,
1372 thread_act_t, thr_acts)
1373 {
1374 time_value_t user_time, system_time;
1375 spl_t s;
1376
1377 thread = act_lock_thread(thr_act);
1378
1379 /* Skip empty threads and threads that have migrated
1380 * into this task:
1381 */
1382 if (!thread || thr_act->pool_port) {
1383 act_unlock_thread(thr_act);
1384 continue;
1385 }
1386 assert(thread); /* Must have thread, if no thread_pool */
1387 s = splsched();
1388 thread_lock(thread);
1389
1390 thread_read_times(thread, &user_time, &system_time);
1391
1392 thread_unlock(thread);
1393 splx(s);
1394 act_unlock_thread(thr_act);
1395
1396 time_value_add(&times_info->user_time, &user_time);
1397 time_value_add(&times_info->system_time, &system_time);
1398 }
1399 task_unlock(task);
1400
1401 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1402 break;
1403 }
1404
1405 case TASK_SCHED_FIFO_INFO:
1406 {
1407 register policy_fifo_base_t fifo_base;
1408
1409 if (*task_info_count < POLICY_FIFO_BASE_COUNT)
1410 return(KERN_INVALID_ARGUMENT);
1411
1412 fifo_base = (policy_fifo_base_t) task_info_out;
1413
1414 task_lock(task);
1415 if (task->policy != POLICY_FIFO) {
1416 task_unlock(task);
1417 return(KERN_INVALID_POLICY);
1418 }
1419
1420 fifo_base->base_priority = task->priority;
1421 task_unlock(task);
1422
1423 *task_info_count = POLICY_FIFO_BASE_COUNT;
1424 break;
1425 }
1426
1427 case TASK_SCHED_RR_INFO:
1428 {
1429 register policy_rr_base_t rr_base;
1430
1431 if (*task_info_count < POLICY_RR_BASE_COUNT)
1432 return(KERN_INVALID_ARGUMENT);
1433
1434 rr_base = (policy_rr_base_t) task_info_out;
1435
1436 task_lock(task);
1437 if (task->policy != POLICY_RR) {
1438 task_unlock(task);
1439 return(KERN_INVALID_POLICY);
1440 }
1441
1442 rr_base->base_priority = task->priority;
1443 task_unlock(task);
1444
1445 rr_base->quantum = (min_quantum * tick) / 1000;
1446
1447 *task_info_count = POLICY_RR_BASE_COUNT;
1448 break;
1449 }
1450
1451 case TASK_SCHED_TIMESHARE_INFO:
1452 {
1453 register policy_timeshare_base_t ts_base;
1454
1455 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
1456 return(KERN_INVALID_ARGUMENT);
1457
1458 ts_base = (policy_timeshare_base_t) task_info_out;
1459
1460 task_lock(task);
1461 if (task->policy != POLICY_TIMESHARE) {
1462 task_unlock(task);
1463 return(KERN_INVALID_POLICY);
1464 }
1465
1466 ts_base->base_priority = task->priority;
1467 task_unlock(task);
1468
1469 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1470 break;
1471 }
1472
1473 case TASK_SECURITY_TOKEN:
1474 {
1475 register security_token_t *sec_token_p;
1476
1477 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1478 return(KERN_INVALID_ARGUMENT);
1479 }
1480
1481 sec_token_p = (security_token_t *) task_info_out;
1482
1483 task_lock(task);
1484 *sec_token_p = task->sec_token;
1485 task_unlock(task);
1486
1487 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
1488 break;
1489 }
1490
1491 case TASK_SCHED_INFO:
1492 return(KERN_INVALID_ARGUMENT);
1493
1494 case TASK_EVENTS_INFO:
1495 {
1496 register task_events_info_t events_info;
1497
1498 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
1499 return(KERN_INVALID_ARGUMENT);
1500 }
1501
1502 events_info = (task_events_info_t) task_info_out;
1503
1504 task_lock(task);
1505 events_info->faults = task->faults;
1506 events_info->pageins = task->pageins;
1507 events_info->cow_faults = task->cow_faults;
1508 events_info->messages_sent = task->messages_sent;
1509 events_info->messages_received = task->messages_received;
1510 events_info->syscalls_mach = task->syscalls_mach;
1511 events_info->syscalls_unix = task->syscalls_unix;
1512 events_info->csw = task->csw;
1513 task_unlock(task);
1514
1515 *task_info_count = TASK_EVENTS_INFO_COUNT;
1516 break;
1517 }
1518
1519 default:
1520 return (KERN_INVALID_ARGUMENT);
1521 }
1522
1523 return(KERN_SUCCESS);
1524 }
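
/*
 * Illustrative sketch (not part of this file): the usual client-side call
 * for one of the flavors handled above.  The count is both an input (size
 * of the caller's buffer) and an output (entries actually filled in).
 */
#if 0	/* example only, user-level code */
	task_basic_info_data_t	info;
	mach_msg_type_number_t	info_count = TASK_BASIC_INFO_COUNT;

	if (task_info(mach_task_self(), TASK_BASIC_INFO,
		      (task_info_t) &info, &info_count) == KERN_SUCCESS)
		printf("virtual size %u, resident size %u\n",
		       info.virtual_size, info.resident_size);
#endif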
1525
1526 /*
1527 * task_assign:
1528 *
1529 * Change the assigned processor set for the task
1530 */
1531 kern_return_t
1532 task_assign(
1533 task_t task,
1534 processor_set_t new_pset,
1535 boolean_t assign_threads)
1536 {
1537 #ifdef lint
1538 task++; new_pset++; assign_threads++;
1539 #endif /* lint */
1540 return(KERN_FAILURE);
1541 }
1542
1543 /*
1544 * task_assign_default:
1545 *
1546 * Version of task_assign to assign to default processor set.
1547 */
1548 kern_return_t
1549 task_assign_default(
1550 task_t task,
1551 boolean_t assign_threads)
1552 {
1553 return (task_assign(task, &default_pset, assign_threads));
1554 }
1555
1556 /*
1557 * task_get_assignment
1558 *
1559 * Return name of processor set that task is assigned to.
1560 */
1561 kern_return_t
1562 task_get_assignment(
1563 task_t task,
1564 processor_set_t *pset)
1565 {
1566 if (!task->active)
1567 return(KERN_FAILURE);
1568
1569 *pset = task->processor_set;
1570 pset_reference(*pset);
1571 return(KERN_SUCCESS);
1572 }
1573
1574
1575 /*
1576 * task_policy
1577 *
1578 * Set scheduling policy and parameters, both base and limit, for
1579 * the given task. Policy must be a policy which is enabled for the
1580 * processor set. Change contained threads if requested.
1581 */
1582 kern_return_t
1583 task_policy(
1584 task_t task,
1585 policy_t policy_id,
1586 policy_base_t base,
1587 mach_msg_type_number_t count,
1588 boolean_t set_limit,
1589 boolean_t change)
1590 {
1591 return(KERN_FAILURE);
1592 }
1593
1594 /*
1595 * task_set_policy
1596 *
1597 * Set scheduling policy and parameters, both base and limit, for
1598 * the given task. Policy can be any policy implemented by the
1599 * processor set, whether enabled or not. Change contained threads
1600 * if requested.
1601 */
1602 kern_return_t
1603 task_set_policy(
1604 task_t task,
1605 processor_set_t pset,
1606 policy_t policy_id,
1607 policy_base_t base,
1608 mach_msg_type_number_t base_count,
1609 policy_limit_t limit,
1610 mach_msg_type_number_t limit_count,
1611 boolean_t change)
1612 {
1613 return(KERN_FAILURE);
1614 }
1615
1616 /*
1617 * task_collect_scan:
1618 *
1619 * Attempt to free resources owned by tasks.
1620 */
1621
1622 void
1623 task_collect_scan(void)
1624 {
1625 register task_t task, prev_task;
1626 processor_set_t pset = &default_pset;
1627
1628 prev_task = TASK_NULL;
1629
1630 pset_lock(pset);
1631 pset->ref_count++;
1632 task = (task_t) queue_first(&pset->tasks);
1633 while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
1634 task_reference(task);
1635 pset_unlock(pset);
1636
1637 pmap_collect(task->map->pmap);
1638
1639 if (prev_task != TASK_NULL)
1640 task_deallocate(prev_task);
1641 prev_task = task;
1642
1643 pset_lock(pset);
1644 task = (task_t) queue_next(&task->pset_tasks);
1645 }
1646 pset_unlock(pset);
1647
1648 pset_deallocate(pset);
1649
1650 if (prev_task != TASK_NULL)
1651 task_deallocate(prev_task);
1652 }
1653
1654 boolean_t task_collect_allowed = FALSE;
1655 unsigned task_collect_last_tick = 0;
1656 unsigned task_collect_max_rate = 0; /* in ticks */
1657
1658 /*
1659 * consider_task_collect:
1660 *
1661 * Called by the pageout daemon when the system needs more free pages.
1662 */
1663
1664 void
1665 consider_task_collect(void)
1666 {
1667 /*
1668 * By default, don't attempt task collection more frequently
1669 * than once per second.
1670 */
1671
1672 if (task_collect_max_rate == 0)
1673 task_collect_max_rate = (2 << SCHED_TICK_SHIFT);
1674
1675 if (task_collect_allowed &&
1676 (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1677 task_collect_last_tick = sched_tick;
1678 task_collect_scan();
1679 }
1680 }
1681
1682 kern_return_t
1683 task_set_ras_pc(
1684 task_t task,
1685 vm_offset_t pc,
1686 vm_offset_t endpc)
1687 {
1688 #if FAST_TAS
1689 extern int fast_tas_debug;
1690
1691 if (fast_tas_debug) {
1692 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
1693 task, pc, endpc);
1694 }
1695 task_lock(task);
1696 task->fast_tas_base = pc;
1697 task->fast_tas_end = endpc;
1698 task_unlock(task);
1699 return KERN_SUCCESS;
1700
1701 #else /* FAST_TAS */
1702 #ifdef lint
1703 task++;
1704 pc++;
1705 endpc++;
1706 #endif /* lint */
1707
1708 return KERN_FAILURE;
1709
1710 #endif /* FAST_TAS */
1711 }
1712
1713 void
1714 task_synchronizer_destroy_all(task_t task)
1715 {
1716 semaphore_t semaphore;
1717 lock_set_t lock_set;
1718
1719 /*
1720 * Destroy owned semaphores
1721 */
1722
1723 while (!queue_empty(&task->semaphore_list)) {
1724 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
1725 (void) semaphore_destroy(task, semaphore);
1726 }
1727
1728 /*
1729 * Destroy owned lock sets
1730 */
1731
1732 while (!queue_empty(&task->lock_set_list)) {
1733 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
1734 (void) lock_set_destroy(task, lock_set);
1735 }
1736 }
1737
1738 void
1739 task_subsystem_destroy_all(task_t task)
1740 {
1741 subsystem_t subsystem;
1742
1743 /*
1744 * Destroy owned subsystems
1745 */
1746
1747 while (!queue_empty(&task->subsystem_list)) {
1748 subsystem = (subsystem_t) queue_first(&task->subsystem_list);
1749 subsystem_deallocate(subsystem);
1750 }
1751 }
1752
1753 /*
1754 * task_set_port_space:
1755 *
1756 * Set port name space of task to specified size.
1757 */
1758
1759 kern_return_t
1760 task_set_port_space(
1761 task_t task,
1762 int table_entries)
1763 {
1764 kern_return_t kr;
1765
1766 is_write_lock(task->itk_space);
1767 kr = ipc_entry_grow_table(task->itk_space, table_entries);
1768 if (kr == KERN_SUCCESS)
1769 is_write_unlock(task->itk_space);
1770 return kr;
1771 }
1772
1773 /*
1774 * We need to export some functions to other components that
1775 * are currently implemented in macros within the osfmk
1776 * component. Just export them as functions of the same name.
1777 */
1778 boolean_t is_kerneltask(task_t t)
1779 {
1780 if (t == kernel_task)
1781 return(TRUE);
1782 else
1783 return((t->kernel_loaded));
1784 }
1785
1786 #undef current_task
1787 task_t current_task()
1788 {
1789 return (current_task_fast());
1790 }