/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
#include <mach_host.h>
#include <mach_prof.h>
#include <task_swapper.h>
#include <platforms.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach/mach_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#if	TASK_SWAPPER
#include <kern/task_swap.h>
#endif	/* TASK_SWAPPER */

#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <vm/task_working_set.h>
void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_collect_scan(void);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);
void
task_backing_store_privileged(
			task_t	task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}
void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}
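
/*
 * Note on the sequence above: task_create_internal gives the kernel task a
 * freshly created map, but the kernel task must run in the already existing
 * kernel address space, so that map is released and replaced with kernel_map.
 */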
	/*
	 * If may_assign is false, task is already being assigned,
	 * wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					 &task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
#define thread_freeze(thread)	assert(task->processor_set == &default_pset)
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
#define thread_unfreeze(thread)	assert(task->processor_set == &default_pset)
#endif	/* MACH_HOST */
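
/*
 * When MACH_HOST is not configured, tasks can never leave the default
 * processor set, so the freeze/unfreeze operations above reduce to
 * assertions that the task is still in default_pset.
 */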
/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	task_t		parent_task,
	vm_offset_t	map_base,
	vm_size_t	map_size,
	task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}
kern_return_t
task_create(
	task_t				parent_task,
	ledger_port_array_t		ledger_ports,
	mach_msg_type_number_t		num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	return task_create_internal(
			parent_task, inherit_memory, child_task);
}
kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	security_token_t		sec_token,
	audit_token_t			audit_token,
	host_priv_t			host_priv,
	ledger_port_array_t		ledger_ports,
	mach_msg_type_number_t		num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_internal(
			parent_task, inherit_memory, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      audit_token,
					      host_priv);

	if (result != KERN_SUCCESS)
		return(result);

	return(KERN_SUCCESS);
}
kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	processor_set_t		pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;
	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
					round_page_32(VM_MIN_ADDRESS),
					trunc_page_32(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->res_thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */
	new_task->dynamic_working_set = 0;
	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef MACH_BSD
	new_task->bsd_info = 0;
#endif /* MACH_BSD */
	if (per_proc_info[0].pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#if	TASK_SWAPPER
	new_task->swap_state = TASK_SW_IN;
	new_task->swap_flags = 0;
	new_task->swap_ast_waiting = 0;
	new_task->swap_stamp = sched_tick;
	new_task->swap_rss = 0;
	new_task->swap_nswap = 0;
#endif	/* TASK_SWAPPER */
	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */
	eml_task_reference(new_task, parent_task);

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time.seconds = 0;
	new_task->total_user_time.microseconds = 0;
	new_task->total_system_time.seconds = 0;
	new_task->total_system_time.microseconds = 0;

	task_prof_init(new_task);
	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region = parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
	}
	else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}
	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_add_task(pset, new_task);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}
/*
 *	task_deallocate
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	processor_set_t	pset;
	int		refs;

	if (task == TASK_NULL)
		return;

	task_lock(task);
	refs = --task->ref_count;
	task_unlock(task);

	if (refs > 0)
		return;

#if	TASK_SWAPPER
	/* task_terminate guarantees that this task is off the list */
	assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
#endif	/* TASK_SWAPPER */

	if (task->dynamic_working_set)
		tws_hash_destroy((tws_hash_t)task->dynamic_working_set);

	eml_task_deallocate(task);

	ipc_task_terminate(task);

	pset = task->processor_set;
	pset_remove_task(pset, task);
	pset_deallocate(pset);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);
	task_prof_deallocate(task);
	zfree(task_zone, (vm_offset_t) task);
}
	if (task != TASK_NULL) {
	if (task != TASK_NULL) {
		if (task_lock_try(task)) {
/*
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (task->bsd_info)
		return(KERN_FAILURE);
	return (task_terminate_internal(task));
}
kern_return_t
task_terminate_internal(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;
	boolean_t	interrupt_save;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;
#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in (this can block).
	 *	NOTE: The only way that this can happen in the current
	 *	system is if the task is swapped while it has a thread
	 *	in exit(), and the thread does not hit a clean point
	 *	to swap itself before getting here.
	 *	Terminating other tasks is another way to this code, but
	 *	it is not yet fully supported.
	 *	The task_swapin is unconditional.  It used to be done
	 *	only if the task is not resident.  Swapping in a
	 *	resident task will prevent it from being swapped out
	 *	while it terminates.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */
	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
	if (task == cur_task) {
		task_lock(task);
	} else if (task < cur_task) {
		task_lock(task);
		task_lock(cur_task);
	} else {
		task_lock(cur_task);
		task_lock(task);
	}
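
	/*
	 * When two distinct tasks must both be locked, the locks are taken
	 * in ascending address order; using one global ordering keeps two
	 * threads that are terminating each other's tasks from deadlocking.
	 */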
	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current act is already being terminated.
		 *	Just return an error. If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (cur_task != task)
			task_unlock(cur_task);
		return(KERN_FAILURE);
	}
	if (cur_task != task)
		task_unlock(cur_task);
	/*
	 *	Make sure the current thread does not get aborted out of
	 *	the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);
	/*
	 *	Terminate each activation in the task.
	 *
	 *	Each terminated activation will run its special handler
	 *	when its current kernel context is unwound.  That will
	 *	clean up most of the thread resources.  Then it will be
	 *	handed over to the reaper, who will finally remove the
	 *	thread from the task list and free the structures.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		thread_terminate_internal(thr_act);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	if (cur_thr_act->task == task)
		machine_thread_terminate_self();
	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);
	/*
	 * Flush working set here to avoid I/O in reaper thread
	 */
	if (task->dynamic_working_set)
		tws_hash_ws_flush((tws_hash_t)
				task->dynamic_working_set);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

	perfmon_release_facility(task);	// notify the perfmon facility

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return(KERN_SUCCESS);
}
/*
 * task_halt - Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task, terminate all other threads in the task and
 *	wait for them to terminate, clean up the portspace, and when
 *	all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

	if (task != cur_task) {
		return(KERN_INVALID_ARGUMENT);
	}
#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in and make it unswappable.
	 *	This can block, so do it early.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */
	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		return(KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other activations in the task.
		 *
		 *	Each terminated activation will run its special handler
		 *	when its current kernel context is unwound.  That will
		 *	clean up most of the thread resources.  Then it will be
		 *	handed over to the reaper, who will finally remove the
		 *	thread from the task list and free the structures.
		 */
		queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
			if (thr_act != cur_thr_act)
				thread_terminate_internal(thr_act);
		}
		task_release_locked(task);
	}
	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_thread_terminate_self();

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	return KERN_SUCCESS;
}
/*
 * task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_act_t	thr_act;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the thread_act's and hold them.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		act_lock_thread(thr_act);
		thread_hold(thr_act);
		act_unlock_thread(thr_act);
	}
}
/*
 * task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over resuming and setting
 *	it running out from under the kernel.
 *
 *	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_hold_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}
/*
 * Routine:	task_wait_locked
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_act_t	thr_act, cur_thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	cur_thr_act = current_act();

	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		if (thr_act != cur_thr_act) {
			thread_t	thread;

			thread = act_lock_thread(thr_act);
			thread_wait(thread);
			act_unlock_thread(thr_act);
		}
	}
}
/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 *	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_act_t	thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	/*
	 *	Iterate through all the thread_act's and release them.
	 *	Do not release the current thread_act if it is within the
	 *	task.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		act_lock_thread(thr_act);
		thread_release(thr_act);
		act_unlock_thread(thr_act);
	}
}
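
/*
 * Hold/release are counted: only the first task_hold_locked() actually
 * holds the contained thread_acts, and only the matching final
 * task_release_locked() (suspend_count back to zero) releases them, so
 * nested kernel holds pair up like a recursive lock.
 */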
/*
 * task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 *	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_release_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}
kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*thr_act_list,
	mach_msg_type_number_t	*count)
{
	unsigned int		actual;	/* this many thr_acts */
	thread_act_t		thr_act;
	thread_act_t		*thr_acts;
	thread_t		thread;
	int			i, j;

	vm_size_t		size, size_needed;
	vm_offset_t		addr;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}
	/* OK, have memory and the task is locked & active */
	thr_acts = (thread_act_t *) addr;

	for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->threads);
	     i < actual;
	     i++, thr_act = (thread_act_t) queue_next(&thr_act->task_threads)) {
		act_lock(thr_act);
		if (thr_act->act_ref_count > 0) {
			act_reference_locked(thr_act);
			thr_acts[j++] = thr_act;
		}
		act_unlock(thr_act);
	}
	assert(queue_end(&task->threads, (queue_entry_t) thr_act));
	size_needed = actual * sizeof(mach_port_t);

	/* can unlock task now that we've got the thr_act refs */
	task_unlock(task);

	/* no thr_acts, so return null pointer and deallocate memory */

	/* if we allocated too much, must copy */
	if (size_needed < size) {
		vm_offset_t newaddr;

		newaddr = kalloc(size_needed);
		if (newaddr == 0) {
			for (i = 0; i < actual; i++)
				act_deallocate(thr_acts[i]);
			kfree(addr, size);
			return KERN_RESOURCE_SHORTAGE;
		}

		bcopy((char *) addr, (char *) newaddr, size_needed);
		kfree(addr, size);
		thr_acts = (thread_act_t *) newaddr;
	}

	*thr_act_list = thr_acts;
	/* do the conversion that Mig should handle */

	for (i = 0; i < actual; i++)
		((ipc_port_t *) thr_acts)[i] =
			convert_act_to_port(thr_acts[i]);

	return KERN_SUCCESS;
}
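
/*
 * Ownership note: every array slot now holds a port produced by
 * convert_act_to_port(), i.e. a send right, and the array itself is
 * out-of-line memory returned through MIG.  A user-level caller of
 * task_threads() is expected to mach_port_deallocate() each port and
 * vm_deallocate() the returned buffer when finished with them.
 */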
/*
 * Routine:	task_suspend
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	if ((task->user_stop_count)++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);
		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}
/*
 * Routine:	task_resume
 *	Release a kernel hold on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(register task_t task)
{
	register boolean_t	release;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	release = FALSE;
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}
	if (task->user_stop_count > 0) {
		if (--(task->user_stop_count) == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);
	return(KERN_SUCCESS);
}
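
/*
 * Example of the counting semantics implemented above: two successive
 * task_suspend() calls leave user_stop_count at 2, and the task's threads
 * resume user execution only after two matching task_resume() calls; a
 * further task_resume() with the count already at zero returns
 * KERN_FAILURE instead of "over-resuming" the task.
 */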
kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t		host_port;
	kern_return_t		kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}
/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}
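
/*
 * Each ledger slot in the task holds one send right: the code above first
 * releases the send right currently stored there and then installs a copy
 * of the new ledger's port, keeping the task at exactly one right per slot.
 */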
/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_in,		/* pointer to IN array */
	mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	    default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	    case TASK_BASIC_INFO:
	    {
		register task_basic_info_t	basic_info;
		vm_map_t			map;

		if (*task_info_count < TASK_BASIC_INFO_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		basic_info = (task_basic_info_t) task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;

		basic_info->virtual_size = map->size;
		basic_info->resident_size = pmap_resident_count(map->pmap)
						* PAGE_SIZE;

		basic_info->policy = ((task != kernel_task)?
						POLICY_TIMESHARE : POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;
		basic_info->user_time.seconds
				= task->total_user_time.seconds;
		basic_info->user_time.microseconds
				= task->total_user_time.microseconds;
		basic_info->system_time.seconds
				= task->total_system_time.seconds;
		basic_info->system_time.microseconds
				= task->total_system_time.microseconds;

		*task_info_count = TASK_BASIC_INFO_COUNT;
		break;
	    }
:
1269 register task_thread_times_info_t times_info
;
1270 register thread_t thread
;
1271 register thread_act_t thr_act
;
1273 if (*task_info_count
< TASK_THREAD_TIMES_INFO_COUNT
) {
1274 return (KERN_INVALID_ARGUMENT
);
1277 times_info
= (task_thread_times_info_t
) task_info_out
;
1278 times_info
->user_time
.seconds
= 0;
1279 times_info
->user_time
.microseconds
= 0;
1280 times_info
->system_time
.seconds
= 0;
1281 times_info
->system_time
.microseconds
= 0;
1284 queue_iterate(&task
->threads
, thr_act
,
1285 thread_act_t
, task_threads
)
1287 time_value_t user_time
, system_time
;
1290 thread
= act_lock_thread(thr_act
);
1292 /* JMM - add logic to skip threads that have migrated
1296 assert(thread
); /* Must have thread */
1298 thread_lock(thread
);
1300 thread_read_times(thread
, &user_time
, &system_time
);
1302 thread_unlock(thread
);
1304 act_unlock_thread(thr_act
);
1306 time_value_add(×_info
->user_time
, &user_time
);
1307 time_value_add(×_info
->system_time
, &system_time
);
1311 *task_info_count
= TASK_THREAD_TIMES_INFO_COUNT
;
	    case TASK_SCHED_FIFO_INFO:
	    {
		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		return(KERN_INVALID_POLICY);
	    }
	    case TASK_SCHED_RR_INFO:
	    {
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		if (task != kernel_task) {
			return(KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;

		rr_base->quantum = tick / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	    }
	    case TASK_SCHED_TIMESHARE_INFO:
	    {
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		if (task == kernel_task) {
			return(KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	    }
	    case TASK_SECURITY_TOKEN:
	    {
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		sec_token_p = (security_token_t *) task_info_out;

		*sec_token_p = task->sec_token;

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	    }
	    case TASK_AUDIT_TOKEN:
	    {
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		audit_token_p = (audit_token_t *) task_info_out;

		*audit_token_p = task->audit_token;

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	    }
	    case TASK_SCHED_INFO:
		return(KERN_INVALID_ARGUMENT);
	    case TASK_EVENTS_INFO:
	    {
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		events_info = (task_events_info_t) task_info_out;

		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	    }
	    default:
		return (KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	task_t		task,
	processor_set_t	new_pset,
	boolean_t	assign_threads)
{
	task++; new_pset++; assign_threads++;

	return(KERN_FAILURE);
}
/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}
/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}
/*
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 */
	mach_msg_type_number_t	count,
	boolean_t		set_limit,

	return(KERN_FAILURE);
/*
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy can be any policy implemented by the
 *	processor set, whether enabled or not. Change contained threads
 *	if requested.
 */
	processor_set_t		pset,
	mach_msg_type_number_t	base_count,
	policy_limit_t		limit,
	mach_msg_type_number_t	limit_count,

	return(KERN_FAILURE);
/*
 *	task_collect_scan:
 *
 *	Attempt to free resources owned by tasks.
 */
void
task_collect_scan(void)
{
	register task_t		task, prev_task;
	processor_set_t		pset = &default_pset;

	task = (task_t) queue_first(&pset->tasks);
	while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
		if (task->ref_count > 0) {

			task_reference_locked(task);

			/*
			 *	While we still have the pset locked, freeze the task in
			 *	this pset.  That way, when we get back from collecting
			 *	it, we can dereference the pset_tasks chain for the task
			 *	and be assured that we are still in this chain.
			 */
			task_freeze(task);

			pmap_collect(task->map->pmap);

			prev_task = task;
			task = (task_t) queue_next(&task->pset_tasks);

			task_unfreeze(prev_task);

			task_deallocate(prev_task);
		} else {
			task = (task_t) queue_next(&task->pset_tasks);
		}
	}

	pset_deallocate(pset);
}
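
/*
 * The scan above takes an extra reference on each task before calling
 * pmap_collect(), so the task cannot be freed out from under the scan;
 * that reference is dropped with task_deallocate() only after the
 * iterator has already advanced to the next pset_tasks entry.
 */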
/* Also disabled in vm/vm_pageout.c */
boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;		/* in ticks */
/*
 *	consider_task_collect:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_task_collect(void)
{
	/*
	 *	By default, don't attempt task collection more frequently
	 *	than once per second.
	 */
	if (task_collect_max_rate == 0)
		task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;
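
	/*
	 * Worked out: sched_tick is assumed here to advance 2^SCHED_TICK_SHIFT
	 * times per second (its usual rate), so (1 << SCHED_TICK_SHIFT) + 1
	 * ticks is just over one second, which is what the "once per second"
	 * comment above intends.
	 */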
	if (task_collect_allowed &&
	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
		task_collect_last_tick = sched_tick;
		task_collect_scan();
	}
}
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}

	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;

	return KERN_SUCCESS;

#else	/* FAST_TAS */

	return KERN_FAILURE;

#endif	/* FAST_TAS */
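
/*
 * Background (not spelled out above): task_set_ras_pc records a restartable
 * atomic sequence for the task; [fast_tas_base, fast_tas_end) is a user PC
 * range that is restarted from its beginning if the thread is preempted
 * inside it, which lets user code emulate small atomic operations (e.g.
 * test-and-set) on processors lacking the needed instructions.  Without
 * FAST_TAS configured the call simply fails.
 */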
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */
	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */
	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}
/*
 * task_set_port_space:
 *
 *	Set port name space of task to specified size.
 */
kern_return_t
task_set_port_space(
	task_t		task,
	int		table_entries)
{
	kern_return_t	kr;

	is_write_lock(task->itk_space);
	kr = ipc_entry_grow_table(task->itk_space, table_entries);
	if (kr == KERN_SUCCESS)
		is_write_unlock(task->itk_space);
	return kr;
}
/*
 *	Returns true if the task is a P_CLASSIC task.
 */
	boolean_t result = FALSE;

	struct proc *p = get_bsdtask_info(task);
	result = proc_is_classic(p) ? TRUE : FALSE;

	return result;
/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

task_t current_task()
{
	return (current_task_fast());
}