/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
#include <mach_host.h>
#include <mach_prof.h>
#include <task_swapper.h>
#include <platforms.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach/mach_types.h>
#include <mach/machine/rpc.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */
#if	TASK_SWAPPER
#include <kern/task_swap.h>
#endif	/* TASK_SWAPPER */

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <vm/task_working_set.h>
/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_collect_scan(void);
void		task_synchronizer_destroy_all(
			task_t		task);
void		task_subsystem_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 * Task_create_local must assign to kernel_task as a side effect,
	 * for other initialization. (:-()
	 */
	if (task_create_local(
			TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");
	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;

#if	MACH_ASSERT
	if (watchacts & WA_TASK)
		printf("task_init: kernel_task = %x map=%x\n",
		       kernel_task, kernel_map);
#endif	/* MACH_ASSERT */
}
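
/*
 * Illustrative sketch (not compiled): how the zinit() arguments used in
 * task_init() above are typically chosen for a fixed-size object cache --
 * element size, maximum zone size, allocation chunk size, and a name.
 * The type "struct example" and the zone below are hypothetical and not
 * part of this file.
 */
#if 0
struct example {
	int	field;
};

static zone_t	example_zone;

static void
example_zone_init(void)
{
	example_zone = zinit(sizeof(struct example),		/* element size */
			     1024 * sizeof(struct example),	/* max zone size */
			     64 * sizeof(struct example),	/* alloc chunk */
			     "example objects");		/* zone name */
}
#endif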
#if	MACH_HOST
void
task_freeze(
	task_t	task)
{
	task_lock(task);
	/*
	 *	If may_assign is false, task is already being assigned,
	 *	wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		task->assign_active = TRUE;
		thread_sleep_mutex((event_t) &task->assign_active,
					&task->lock, THREAD_INTERRUPTIBLE);
		task_lock(task);
	}
	task->may_assign = FALSE;
	task_unlock(task);
}

void
task_unfreeze(
	task_t	task)
{
	task_lock(task);
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
	task_unlock(task);
}
#endif	/* MACH_HOST */
/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	task_t		parent_task,
	vm_offset_t	map_base,
	vm_size_t	map_size,
	task_t		*child_task)
{
	kern_return_t	result;
	task_t		new_task;
	vm_map_t	old_map;

	result = task_create_local(parent_task, FALSE, TRUE, &new_task);
	if (result != KERN_SUCCESS)
		return (result);

	/*
	 * Task_create_local creates the task with a user-space map.
	 * We attempt to replace the map and free it afterwards; else
	 * task_deallocate will free it (can NOT set map to null before
	 * task_deallocate, this impersonates a norma placeholder task).
	 * _Mark the memory as pageable_ -- this is what we
	 * want for images (like servers) loaded into the kernel.
	 */
	if (map_size == 0) {
		vm_map_deallocate(new_task->map);
		new_task->map = kernel_map;
		*child_task = new_task;
	} else {
		old_map = new_task->map;
		if ((result = kmem_suballoc(kernel_map, &map_base,
					    map_size, TRUE, FALSE,
					    &new_task->map)) != KERN_SUCCESS) {
			/*
			 * New task created with ref count of 2 -- decrement by
			 * one to force task deletion.
			 */
			printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
			       kernel_map, map_base, map_size);
			--new_task->ref_count;
			task_deallocate(new_task);
			return (result);
		}
		vm_map_deallocate(old_map);
		*child_task = new_task;
	}
	return (KERN_SUCCESS);
}
kern_return_t
task_create(
	task_t				parent_task,
	ledger_port_array_t		ledger_ports,
	mach_msg_type_number_t		num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	return task_create_local(
			parent_task, inherit_memory, FALSE, child_task);
}
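
/*
 * Illustrative user-space sketch (not compiled here): how a client would
 * call the task_create() interface exported above through MIG.  Whether
 * the request is honored depends on the host; some configurations refuse
 * user-level task creation.  The program below is an assumption-laden
 * example, not part of this file.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int
main(void)
{
	task_t		child = MACH_PORT_NULL;
	kern_return_t	kr;

	/* No ledger ports; inherit the caller's address space. */
	kr = task_create(mach_task_self(), NULL, 0, TRUE, &child);
	if (kr != KERN_SUCCESS) {
		printf("task_create: %s\n", mach_error_string(kr));
		return 1;
	}
	printf("created task port 0x%x\n", child);
	task_terminate(child);
	return 0;
}
#endif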
kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	security_token_t		sec_token,
	host_priv_t			host_priv,
	ledger_port_array_t		ledger_ports,
	mach_msg_type_number_t		num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_local(
			parent_task, inherit_memory, FALSE, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      host_priv);

	if (result != KERN_SUCCESS)
		return(result);

	return(result);
}
kern_return_t
task_create_local(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	kernel_loaded,
	task_t		*child_task)		/* OUT */
{
	task_t		new_task;
	processor_set_t	pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
					round_page(VM_MIN_ADDRESS),
					trunc_page(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
	queue_init(&new_task->subsystem_list);
	queue_init(&new_task->thr_acts);
	new_task->suspend_count = 0;
	new_task->thr_act_count = 0;
	new_task->res_act_count = 0;
	new_task->active_act_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->kernel_loaded = kernel_loaded;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->syscalls_unix = 0;
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef	MACH_BSD
	new_task->bsd_info = 0;
#endif	/* MACH_BSD */

#if	TASK_SWAPPER
	new_task->swap_state = TASK_SW_IN;
	new_task->swap_flags = 0;
	new_task->swap_ast_waiting = 0;
	new_task->swap_stamp = sched_tick;
	new_task->swap_rss = 0;
	new_task->swap_nswap = 0;
#endif	/* TASK_SWAPPER */

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */
	eml_task_reference(new_task, parent_task);

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time.seconds = 0;
	new_task->total_user_time.microseconds = 0;
	new_task->total_system_time.seconds = 0;
	new_task->total_system_time.microseconds = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->sec_token = parent_task->sec_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region =
			parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
	} else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = MINPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	} else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

#if	FAST_TAS
	if (inherit_memory) {
		new_task->fast_tas_base = parent_task->fast_tas_base;
		new_task->fast_tas_end  = parent_task->fast_tas_end;
	} else {
		new_task->fast_tas_base = (vm_offset_t)0;
		new_task->fast_tas_end  = (vm_offset_t)0;
	}
#endif	/* FAST_TAS */

	ipc_task_enable(new_task);

#if	TASK_SWAPPER
	task_swapout_eligible(new_task);
#endif	/* TASK_SWAPPER */

#if	MACH_ASSERT
	if (watchacts & WA_TASK)
		printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
		       parent_task, inherit_memory, new_task);
#endif	/* MACH_ASSERT */

	*child_task = new_task;
	return(KERN_SUCCESS);
}
/*
 *	task_free:
 *
 *	Called by task_deallocate when the task's reference count drops to zero.
 */
void
task_free(
	task_t		task)
{
	processor_set_t pset;

#if	MACH_ASSERT
	if (watchacts & (WA_EXIT|WA_TASK))
		printf("task_free(%x(%d)) map ref %d\n", task, task->ref_count,
		       task->map->ref_count);
#endif	/* MACH_ASSERT */

#if	TASK_SWAPPER
	/* task_terminate guarantees that this task is off the list */
	assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
#endif	/* TASK_SWAPPER */

	eml_task_deallocate(task);

	/*
	 * Temporarily restore the reference we dropped above, then
	 * freeze the task so that the task->processor_set field
	 * cannot change. In the !MACH_HOST case, the logic can be
	 * simplified, since the default_pset is the only pset.
	 */
	++task->ref_count;
	task_unlock(task);
#if	MACH_HOST
	task_freeze(task);
#endif	/* MACH_HOST */

	pset = task->processor_set;
	pset_lock(pset);
	task_lock(task);
	if (--task->ref_count > 0) {
		/*
		 * A new reference appeared (probably from the pset).
		 * Back out. Must unfreeze inline since we've already
		 * dropped our reference.
		 */
		task_unlock(task);
#if	MACH_HOST
		assert(task->may_assign == FALSE);
		task->may_assign = TRUE;
		if (task->assign_active == TRUE) {
			task->assign_active = FALSE;
			thread_wakeup((event_t)&task->assign_active);
		}
#endif	/* MACH_HOST */
		pset_unlock(pset);
		return;
	}
	pset_remove_task(pset,task);
	task_unlock(task);
	pset_unlock(pset);
	pset_deallocate(pset);
	ipc_task_terminate(task);
	shared_region_mapping_dealloc(task->system_shared_region);

	if (task->kernel_loaded)
		vm_map_remove(kernel_map, task->map->min_offset,
			      task->map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_deallocate(task->map);
	is_release(task->itk_space);
	task_prof_deallocate(task);
	if (task->dynamic_working_set)
		tws_hash_destroy((tws_hash_t)
				 task->dynamic_working_set);
	zfree(task_zone, (vm_offset_t) task);
}

void
task_deallocate(
	task_t		task)
{
	if (task != TASK_NULL) {
		int	c;

		task_lock(task);
		c = --task->ref_count;
		if (c == 0)
			task_free(task);	/* unlocks task */
		else
			task_unlock(task);
	}
}

void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL) {
		task_lock(task);
		task->ref_count++;
		task_unlock(task);
	}
}

boolean_t
task_reference_try(
	task_t		task)
{
	if (task != TASK_NULL) {
		if (task_lock_try(task)) {
			task->ref_count++;
			task_unlock(task);
			return TRUE;
		}
	}
	return FALSE;
}
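
/*
 * Illustrative sketch (not compiled): the reference discipline used above.
 * A task starts with ref_count == 2 (one for being alive, one for its
 * creator); any code that keeps a task pointer across a blocking operation
 * takes its own reference first.  "example_inspect_task" is hypothetical.
 */
#if 0
static void
example_inspect_task(task_t task)
{
	task_reference(task);		/* pin the task while we use it */

	/* ... examine or operate on the task, possibly blocking ... */

	task_deallocate(task);		/* last release ends up in task_free() */
}
#endif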
/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (task->bsd_info)
		return(KERN_FAILURE);
	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;
	thread_t	cur_thread;
	boolean_t	interrupt_save;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in (this can block).
	 *	NOTE: The only way that this can happen in the current
	 *	system is if the task is swapped while it has a thread
	 *	in exit(), and the thread does not hit a clean point
	 *	to swap itself before getting here.
	 *	Terminating other tasks is another way into this code, but
	 *	it is not yet fully supported.
	 *	The task_swapin is unconditional.  It used to be done
	 *	only if the task is not resident.  Swapping in a
	 *	resident task will prevent it from being swapped out
	 *	while it terminates.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
	if (task == cur_task) {
		task_lock(task);
	} else if (task < cur_task) {
		task_lock(task);
		task_lock(cur_task);
	} else {
		task_lock(cur_task);
		task_lock(task);
	}

	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current act is already being terminated.
		 *	Just return an error. If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (cur_task != task)
			task_unlock(cur_task);
		return(KERN_FAILURE);
	}
	if (cur_task != task)
		task_unlock(cur_task);

	/*
	 *	Make sure the current thread does not get aborted out of
	 *	the waits inside these operations.
	 */
	cur_thread = current_thread();
	interrupt_save = cur_thread->interruptible;
	cur_thread->interruptible = FALSE;

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each activation in the task.
	 *
	 *	Each terminated activation will run its special handler
	 *	when its current kernel context is unwound.  That will
	 *	clean up most of the thread resources.  Then it will be
	 *	handed over to the reaper, who will finally remove the
	 *	thread from the task list and free the structures.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		thread_terminate_internal(thr_act);
	}
	task_unlock(task);

	/*
	 *	Clean up any virtual machine state/resources associated
	 *	with the current activation because it may hold wiring
	 *	and other references on resources we will be trying to
	 *	release below.
	 */
	if (cur_thr_act->task == task)
		act_virtual_machine_destroy(cur_thr_act);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Deallocate all subsystems owned by the task.
	 */
	task_subsystem_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	if (!task->kernel_loaded)
		ipc_space_destroy(task->itk_space);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	cur_thread->interruptible = interrupt_save;

	/*
	 * Get rid of the task's active reference on itself.
	 */
	task_deallocate(task);

	return(KERN_SUCCESS);
}
/*
 * task_halt - Shut the current task down (except for the current thread) in
 *	       preparation for dramatic changes to the task (probably exec).
 *	       We hold the task, terminate all other threads in the task and
 *	       wait for them to terminate, clean up the portspace, and when
 *	       all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

	if (task != cur_task) {
		return(KERN_INVALID_ARGUMENT);
	}

#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in and make it unswappable.
	 *	This can block, so do it early.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	task_lock(task);

	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);
		return(KERN_FAILURE);
	}

	if (task->thr_act_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other activations in the task.
		 *
		 *	Each terminated activation will run its special handler
		 *	when its current kernel context is unwound.  That will
		 *	clean up most of the thread resources.  Then it will be
		 *	handed over to the reaper, who will finally remove the
		 *	thread from the task list and free the structures.
		 */
		queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
			if (thr_act != cur_thr_act)
				thread_terminate_internal(thr_act);
		}
		task_release_locked(task);
	}
	task_unlock(task);

	/*
	 *	If the current thread has any virtual machine state
	 *	associated with it, we need to explicitly clean that
	 *	up now (because we did not terminate the current act)
	 *	before we try to clean up the task VM and port spaces.
	 */
	act_virtual_machine_destroy(cur_thr_act);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Deallocate all subsystems owned by the task.
	 */
	task_subsystem_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
#if 0
	/*
	 * Lookupd will break if we enable this cleaning, because it
	 * uses a slimy trick that depends upon the portspace not
	 * being cleaned up across exec (it passes the lookupd server
	 * port to the child after a restart using knowledge of this
	 * bug in past implementations).  We need to fix lookupd to
	 * keep from leaking ports across exec.
	 */
	if (!task->kernel_loaded)
		ipc_space_clean(task->itk_space);
#endif

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	return KERN_SUCCESS;
}
/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 * 	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_act_t	thr_act;

	assert(task->active);

	task->suspend_count++;

	/*
	 *	Iterate through all the thread_act's and hold them.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		act_lock_thread(thr_act);
		thread_hold(thr_act);
		act_unlock_thread(thr_act);
	}
}

/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over-resuming and setting
 *	it running out from under the kernel.
 *
 * 	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_hold_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}
/*
 *	Routine:	task_wait_locked
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_act_t	thr_act, cur_thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	cur_thr_act = current_act();
	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		if (thr_act != cur_thr_act) {
			thread_shuttle_t thr_shuttle;

			thr_shuttle = act_lock_thread(thr_act);
			thread_wait(thr_shuttle);
			act_unlock_thread(thr_act);
		}
	}
}
/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 * 	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_act_t	thr_act;

	assert(task->active);

	task->suspend_count--;
	assert(task->suspend_count >= 0);

	/*
	 *	Iterate through all the thread_act's and release them.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		act_lock_thread(thr_act);
		thread_release(thr_act);
		act_unlock_thread(thr_act);
	}
}

/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 * 	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_release_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}
kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*thr_act_list,
	mach_msg_type_number_t	*count)
{
	unsigned int		actual;	/* this many thr_acts */
	thread_act_t		thr_act;
	thread_act_t		*thr_acts;
	vm_size_t		size, size_needed;
	vm_offset_t		addr;
	int			i, j;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);
			if (size != 0)
				kfree(addr, size);
			return KERN_FAILURE;
		}

		actual = task->thr_act_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the task is locked & active */
	thr_acts = (thread_act_t *) addr;

	for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
	     i < actual;
	     i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
		act_lock(thr_act);
		if (thr_act->ref_count > 0) {
			act_locked_act_reference(thr_act);
			thr_acts[j++] = thr_act;
		}
		act_unlock(thr_act);
	}
	assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));

	actual = j;
	size_needed = actual * sizeof(mach_port_t);

	/* can unlock task now that we've got the thr_act refs */
	task_unlock(task);

	if (actual == 0) {
		/* no thr_acts, so return null pointer and deallocate memory */

		*thr_act_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; i++)
					act_deallocate(thr_acts[i]);
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			thr_acts = (thread_act_t *) newaddr;
		}

		*thr_act_list = thr_acts;
		*count = actual;

		/* do the conversion that Mig should handle */

		for (i = 0; i < actual; i++)
			((ipc_port_t *) thr_acts)[i] =
				convert_act_to_port(thr_acts[i]);
	}

	return KERN_SUCCESS;
}
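
/*
 * Illustrative user-space sketch (not compiled here): consuming the array
 * returned by task_threads() above.  The caller receives one send right
 * per thread plus an out-of-line buffer, and is responsible for releasing
 * both.  "example_list_threads" is hypothetical.
 */
#if 0
#include <mach/mach.h>
#include <stdio.h>

static void
example_list_threads(task_t task)
{
	thread_act_array_t	acts;
	mach_msg_type_number_t	count, i;

	if (task_threads(task, &acts, &count) != KERN_SUCCESS)
		return;
	printf("%u threads\n", count);
	for (i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), acts[i]);
	vm_deallocate(mach_task_self(), (vm_address_t) acts,
		      count * sizeof(acts[0]));
}
#endif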
/*
 *	Routine:	task_suspend
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	if ((task->user_stop_count)++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);
		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	Routine:	task_resume
 *	Release a user-level suspension on a task; the kernel hold is
 *	dropped when the user stop count returns to zero.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(register task_t task)
{
	register boolean_t	release;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	release = FALSE;
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}
	if (task->user_stop_count > 0) {
		if (--(task->user_stop_count) == 0)
			release = TRUE;
	} else {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);
	return(KERN_SUCCESS);
}
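
/*
 * Illustrative user-space sketch (not compiled here): the counted
 * suspend/resume semantics implemented above.  Every task_suspend() must
 * be balanced by a task_resume() before the target runs user code again.
 * "example_pause_task" is hypothetical.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_pause_task(task_t target)
{
	kern_return_t	kr;

	kr = task_suspend(target);	/* user stop count 0 -> 1 */
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect the stopped task ... */

	return task_resume(target);	/* stop count 1 -> 0, threads released */
}
#endif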
kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	host_priv_t		host_priv)
{
	kern_return_t		kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = task_set_special_port(task,
				TASK_HOST_PORT,
				ipc_port_make_send(realhost.host_priv_self));
	} else {
		kr = task_set_special_port(task,
				TASK_HOST_PORT,
				ipc_port_make_send(realhost.host_self));
	}
	return(kr);
}
/*
 * Utility routine to set a ledger.
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}
/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_in,		/* pointer to IN array */
	mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	vm_map_t		map;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO:
	{
		register task_basic_info_t	basic_info;

		if (*task_info_count < TASK_BASIC_INFO_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		basic_info = (task_basic_info_t) task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;

		basic_info->virtual_size = map->size;
		basic_info->resident_size = pmap_resident_count(map->pmap)
						* PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task) ?
						POLICY_TIMESHARE : POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;
		basic_info->user_time.seconds
				= task->total_user_time.seconds;
		basic_info->user_time.microseconds
				= task->total_user_time.microseconds;
		basic_info->system_time.seconds
				= task->total_system_time.seconds;
		basic_info->system_time.microseconds
				= task->total_system_time.microseconds;
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t times_info;
		register thread_t	thread;
		register thread_act_t	thr_act;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
			return (KERN_INVALID_ARGUMENT);
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);
		queue_iterate(&task->thr_acts, thr_act,
			      thread_act_t, thr_acts)
		{
			time_value_t	user_time, system_time;

			thread = act_lock_thread(thr_act);

			/* Skip empty threads and threads that have migrated
			 * into this task:
			 */
			if (!thread || thr_act->pool_port) {
				act_unlock_thread(thr_act);
				continue;
			}
			assert(thread);	/* Must have thread, if no thread_pool*/

			thread_lock(thread);

			thread_read_times(thread, &user_time, &system_time);

			thread_unlock(thread);

			act_unlock_thread(thr_act);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}
		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_SCHED_FIFO_INFO:
	{
		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		return(KERN_INVALID_POLICY);
	}

	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = tick / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return(KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
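
/*
 * Illustrative user-space sketch (not compiled here): querying the
 * TASK_BASIC_INFO flavor handled above for the calling task.
 * "example_print_basic_info" is hypothetical.
 */
#if 0
#include <mach/mach.h>
#include <stdio.h>

static void
example_print_basic_info(void)
{
	struct task_basic_info	info;
	mach_msg_type_number_t	count = TASK_BASIC_INFO_COUNT;

	if (task_info(mach_task_self(), TASK_BASIC_INFO,
		      (task_info_t) &info, &count) == KERN_SUCCESS)
		printf("virtual %lu resident %lu suspend_count %d\n",
		       (unsigned long) info.virtual_size,
		       (unsigned long) info.resident_size,
		       (int) info.suspend_count);
}
#endif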
/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task.
 */
kern_return_t
task_assign(
	task_t		task,
	processor_set_t	new_pset,
	boolean_t	assign_threads)
{
#ifdef	lint
	task++; new_pset++; assign_threads++;
#endif	/* lint */
	return(KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}

/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}
/*
 * 	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 */
kern_return_t
task_policy(
	task_t			task,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	count,
	boolean_t		set_limit,
	boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy can be any policy implemented by the
 *	processor set, whether enabled or not. Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	task_t			task,
	processor_set_t		pset,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	base_count,
	policy_limit_t		limit,
	mach_msg_type_number_t	limit_count,
	boolean_t		change)
{
	return(KERN_FAILURE);
}
/*
 *	task_collect_scan:
 *
 *	Attempt to free resources owned by tasks.
 */
void
task_collect_scan(void)
{
	register task_t		task, prev_task;
	processor_set_t		pset = &default_pset;

	prev_task = TASK_NULL;

	pset_lock(pset);
	pset->ref_count++;
	task = (task_t) queue_first(&pset->tasks);
	while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
		task_reference(task);
		pset_unlock(pset);

		pmap_collect(task->map->pmap);

		if (prev_task != TASK_NULL)
			task_deallocate(prev_task);
		prev_task = task;

		pset_lock(pset);
		task = (task_t) queue_next(&task->pset_tasks);
	}
	pset_unlock(pset);

	pset_deallocate(pset);

	if (prev_task != TASK_NULL)
		task_deallocate(prev_task);
}

/* Also disabled in vm/vm_pageout.c */
boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;		/* in ticks */

/*
 *	consider_task_collect:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_task_collect(void)
{
	/*
	 *	By default, don't attempt task collection more frequently
	 *	than once per second.
	 */
	if (task_collect_max_rate == 0)
		task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;

	if (task_collect_allowed &&
	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
		task_collect_last_tick = sched_tick;
		task_collect_scan();
	}
}
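
/*
 * Illustrative sketch (not compiled): the tick-based rate limit used by
 * consider_task_collect() above, restated as a generic guard.  The
 * function and globals below are hypothetical.
 */
#if 0
static unsigned	example_last_tick = 0;
static unsigned	example_min_interval = 0;	/* in sched ticks */

static boolean_t
example_should_run(unsigned now_tick)
{
	if (example_min_interval == 0)
		example_min_interval = (1 << SCHED_TICK_SHIFT) + 1;

	if (now_tick > example_last_tick + example_min_interval) {
		example_last_tick = now_tick;
		return TRUE;
	}
	return FALSE;
}
#endif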
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
#if	FAST_TAS
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);

	return KERN_SUCCESS;
#else	/* FAST_TAS */
	return KERN_FAILURE;
#endif	/* FAST_TAS */
}
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */
	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */
	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

void
task_subsystem_destroy_all(task_t task)
{
	subsystem_t	subsystem;

	/*
	 *  Destroy owned subsystems
	 */
	while (!queue_empty(&task->subsystem_list)) {
		subsystem = (subsystem_t) queue_first(&task->subsystem_list);
		subsystem_deallocate(subsystem);
	}
}
/*
 * task_set_port_space:
 *
 * Set port name space of task to specified size.
 */
kern_return_t
task_set_port_space(
	task_t		task,
	int		table_entries)
{
	kern_return_t	kr;

	is_write_lock(task->itk_space);
	kr = ipc_entry_grow_table(task->itk_space, table_entries);
	if (kr == KERN_SUCCESS)
		is_write_unlock(task->itk_space);
	return kr;
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return(TRUE);
	else
		return((t->kernel_loaded));
}

#undef current_task
task_t current_task()
{
	return (current_task_fast());
}