/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#include <kern/affinity.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#if MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>
#include <mach/security_server.h>

#include <vm/vm_shared_region.h>

#if CONFIG_MACF_MACH
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */
lck_attr_t	task_lck_attr;
lck_grp_t	task_lck_grp;
lck_grp_attr_t	task_lck_grp_attr;

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

int task_max = CONFIG_TASK_MAX;	/* Max number of tasks */
/* externs for BSD kernel */
extern void	proc_getexecutableuuid(void *, unsigned char *, unsigned long);
/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

int		check_for_tasksuspend(
			task_t		task);
void
task_backing_store_privileged(
			task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}
void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
#if defined(__i386__) || defined(__x86_64__)
	thread_t thread;
#endif /* __i386__ */
	int	vm_flags = 0;

	if (is64bit) {
		if (task_has_64BitAddr(task))
			return;

		task_set_64BitAddr(task);
	} else {
		if ( !task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* remove regular VM map entries & pmap mappings */
		(void) vm_map_remove(task->map,
				     (vm_map_offset_t) VM_MAX_ADDRESS,
				     MACH_VM_MAX_ADDRESS,
				     0);
		/* remove the higher VM mappings */
		(void) vm_map_remove(task->map,
				     MACH_VM_MAX_ADDRESS,
				     0xFFFFFFFFFFFFF000ULL,
				     vm_flags);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy.  Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#if defined(__i386__) || defined(__x86_64__)
	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
	task_unlock(task);
#endif /* __i386__ */
}
void
task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
{
	task_lock(task);
	task->all_image_info_addr = addr;
	task->all_image_info_size = size;
	task_unlock(task);
}
void
task_init(void)
{
	lck_grp_attr_setdefault(&task_lck_grp_attr);
	lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
	lck_attr_setdefault(&task_lck_attr);
	lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);

	task_zone = zinit(
			task_max * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	zone_change(task_zone, Z_NOENCRYPT, TRUE);

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
#endif
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}
/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t	map_base,
	__unused vm_size_t	map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}
kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return(KERN_FAILURE);
}
kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	__unused security_token_t	sec_token,
	__unused audit_token_t		audit_token,
	__unused host_priv_t		host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	/*
	 * No longer supported.
	 */
	return(KERN_FAILURE);
}
kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	is_64bit,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	vm_shared_region_t	shared_region;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	/* if inherit_memory is true, parent_task MUST not be NULL */
	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0, is_64bit),
				(vm_map_offset_t)(VM_MIN_ADDRESS),
				(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	/* Inherit memlock limit from parent */
	if (parent_task)
		vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);

	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->halting = FALSE;
	new_task->user_data = NULL;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */

	new_task->tkm_private.alloc = 0;
	new_task->tkm_private.free = 0;
	new_task->tkm_shared.alloc = 0;
	new_task->tkm_shared.free = 0;

	zinfo_task_init(new_task);

#ifdef MACH_BSD
	new_task->bsd_info = NULL;
#endif /* MACH_BSD */

#if defined(__i386__) || defined(__x86_64__)
	new_task->i386_ldt = 0;
	new_task->task_debug = NULL;
#endif

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if CONFIG_MACF_MACH
	new_task->label = labelh_new(1);
	mac_task_label_init(&new_task->maclabel);
#endif

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

#if CONFIG_COUNTERS
	new_task->t_chud = 0U;
#endif

	if (parent_task != TASK_NULL) {
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		/* inherit the parent's shared region */
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		if (task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);
		new_task->all_image_info_addr = parent_task->all_image_info_addr;
		new_task->all_image_info_size = parent_task->all_image_info_size;

#if defined(__i386__) || defined(__x86_64__)
		if (inherit_memory && parent_task->i386_ldt)
			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
		if (inherit_memory && parent_task->affinity_space)
			task_affinity_create(parent_task, new_task);

		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
		new_task->policystate = parent_task->policystate;
		/* inherit the self action state */
		new_task->actionstate = parent_task->actionstate;
		new_task->ext_policystate = parent_task->ext_policystate;
#if 0
		/* till the child lifecycle is cleared do not inherit external action */
		new_task->ext_actionstate = parent_task->ext_actionstate;
#else
		new_task->ext_actionstate = default_task_null_policy;
#endif
	}
	else {
		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
#ifdef __LP64__
		if (is_64bit)
			task_set_64BitAddr(new_task);
#endif
		new_task->all_image_info_addr = (mach_vm_address_t)0;
		new_task->all_image_info_size = (mach_vm_size_t)0;

		new_task->pset_hint = PROCESSOR_SET_NULL;
		new_task->policystate = default_task_proc_policy;
		new_task->ext_policystate = default_task_proc_policy;
		new_task->actionstate = default_task_null_policy;
		new_task->ext_actionstate = default_task_null_policy;
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));

	lck_mtx_lock(&tasks_threads_lock);
	queue_enter(&tasks, new_task, task_t, tasks);
	tasks_count++;
	lck_mtx_unlock(&tasks_threads_lock);

	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}
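
/*
 * Illustrative sketch (not part of the original file): the reference-count
 * convention established above.  task_create_internal() returns with
 * ref_count == 2 -- one reference for "being alive", one for the caller.
 * A caller that is done with its handle drops only its own reference; the
 * "alive" reference is consumed by task termination.
 */
#if 0	/* example only, never compiled */
	task_t child;

	if (task_create_internal(parent, FALSE, is_64bit, &child) != KERN_SUCCESS)
		return (KERN_RESOURCE_SHORTAGE);
	/* use child here; ref_count == 2 at this point */
	task_deallocate(child);		/* drops the caller reference only */
#endif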
/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	if (task == TASK_NULL)
		return;

	if (task_deallocate_internal(task) > 0)
		return;

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&terminated_tasks, task, task_t, tasks);
	lck_mtx_unlock(&tasks_threads_lock);

	ipc_task_terminate(task);

	if (task->affinity_space)
		task_affinity_deallocate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	lck_mtx_destroy(&task->lock, &task_lck_grp);

#if CONFIG_MACF_MACH
	labelh_release(task->label);
#endif

	OSAddAtomic64(task->tkm_private.alloc, (int64_t *)&tasks_tkm_private.alloc);
	OSAddAtomic64(task->tkm_private.free, (int64_t *)&tasks_tkm_private.free);
	OSAddAtomic64(task->tkm_shared.alloc, (int64_t *)&tasks_tkm_shared.alloc);
	OSAddAtomic64(task->tkm_shared.free, (int64_t *)&tasks_tkm_shared.free);
	zinfo_task_free(task);
	zfree(task_zone, task);
}
/*
 *	task_name_deallocate:
 *
 *	Drop a reference on a task name.
 */
void
task_name_deallocate(
	task_name_t		task_name)
{
	return(task_deallocate((task_t)task_name));
}
/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (task->bsd_info)
		return (KERN_FAILURE);

	return (task_terminate_internal(task));
}
kern_return_t
task_terminate_internal(
	task_t			task)
{
	thread_t			thread, self;
	task_t				self_task;
	boolean_t			interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active) {
		/*
		 *	Task is already being terminated.
		 *	Just return an error. If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 *	Make sure the current thread does not get aborted out of
	 *	the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	task_unlock(task);

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	vm_map_remove(task->map,
		      task->map->min_offset,
		      task->map->max_offset,
		      VM_MAP_NO_FLAGS);

	/* release our shared region */
	vm_shared_region_set(task, NULL);

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&tasks, task, task_t, tasks);
	queue_enter(&terminated_tasks, task, task_t, tasks);
	tasks_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 *	We no longer need to guard against being aborted, so restore
	 *	the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

	/*
	 *	Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}
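
/*
 * Illustrative sketch (not part of the original file): the address-ordered
 * double locking at the top of task_terminate_internal() generalizes to any
 * pair of task locks.  A hypothetical helper showing the idiom -- both
 * parties always take the lower-addressed lock first, so neither can hold
 * one lock while waiting on the other:
 */
#if 0	/* example only, never compiled */
static void
task_lock_two(task_t t1, task_t t2)
{
	if (t1 == t2) {
		task_lock(t1);
	} else if (t1 < t2) {		/* lower address first */
		task_lock(t1);
		task_lock(t2);
	} else {
		task_lock(t2);
		task_lock(t1);
	}
}
#endif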
/*
 * task_start_halt:
 *
 *	Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task and mark all other threads in the task for
 *	termination.
 */
kern_return_t
task_start_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (task->halting || !task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task->halting = TRUE;

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}
	task_unlock(task);
	return KERN_SUCCESS;
}
/*
 * task_complete_halt:
 *
 *	Complete task halt by waiting for threads to terminate, then clean
 *	up task resources (VM, port namespace, etc...) and then let the
 *	current thread go in the (practically empty) task context.
 */
void
task_complete_halt(task_t task)
{
	task_lock(task);
	assert(task->halting);
	assert(task == current_task());

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup of task-level resources
	 *	associated with the current thread before
	 *	ripping apart the task.
	 *
	 *	This must be done with the task locked.
	 */
	machine_thread_terminate_self();

	/*
	 *	Wait for the other threads to get shut down.
	 *	When the last other thread is reaped, we'll be
	 *	woken up.
	 */
	if (task->thread_count > 1) {
		assert_wait((event_t)&task->halting, THREAD_UNINT);
		task_unlock(task);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		task_unlock(task);
	}

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	task->halting = FALSE;
}
/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the threads and hold them.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_hold(thread);
		thread_mtx_unlock(thread);
	}
}
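
/*
 * Illustrative sketch (not part of the original file): holds nest through
 * suspend_count, so only the outermost hold/release pair actually touches
 * the threads.
 */
#if 0	/* example only, never compiled */
	task_lock(task);
	task_hold_locked(task);		/* suspend_count 0 -> 1: threads held */
	task_hold_locked(task);		/* 1 -> 2: no thread work */
	task_release_locked(task);	/* 2 -> 1: threads stay held */
	task_release_locked(task);	/* 1 -> 0: threads released */
	task_unlock(task);
#endif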
/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over resuming and setting
 *	it running out from under the kernel.
 *
 *	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}
/*
 *	task_wait_locked:
 *
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_t	thread, self;

	assert(task->active);
	assert(task->suspend_count > 0);

	self = current_thread();

	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread != self)
			thread_wait(thread);
	}
}
/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 *	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_release(thread);
		thread_mtx_unlock(thread);
	}
}
/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 *	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}
kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*thread_list;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that Mig should handle */

		for (i = 0; i < actual; ++i)
			((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
	}

	return (KERN_SUCCESS);
}
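
/*
 * Illustrative sketch (not part of the original file): how a user-space
 * client consumes the result of task_threads().  The port array is returned
 * as out-of-line data, so the caller must deallocate both the individual
 * send rights and the array itself.
 */
#if 0	/* example only, never compiled */
#include <mach/mach.h>

static void
release_thread_list(void)
{
	thread_act_array_t	threads;
	mach_msg_type_number_t	count;
	mach_msg_type_number_t	i;

	if (task_threads(mach_task_self(), &threads, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), threads[i]);

	vm_deallocate(mach_task_self(), (vm_address_t)threads,
		      count * sizeof(threads[0]));
}
#endif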
/*
 *	task_suspend:
 *
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 *	Put a kernel-level hold on the threads in the task (all
	 *	user-level task suspensions added together represent a
	 *	single kernel-level hold).  We then wait for the threads
	 *	to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
/*
 *	task_resume:
 *		Release a kernel hold on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(
	register task_t		task)
{
	register boolean_t	release = FALSE;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count > 0) {
		if (--task->user_stop_count == 0) {
			release = TRUE;
		}
	}
	else {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
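
/*
 * Illustrative sketch (not part of the original file): the user-level stop
 * count as seen from user space.  "target" stands for a task port obtained
 * elsewhere (e.g. via task_for_pid()).  Suspensions nest; the task runs
 * again only after a matching number of resumes.
 */
#if 0	/* example only, never compiled */
	task_suspend(target);	/* user_stop_count 0 -> 1: threads stop */
	task_suspend(target);	/* 1 -> 2: already stopped, returns immediately */

	task_resume(target);	/* 2 -> 1: still stopped */
	task_resume(target);	/* 1 -> 0: threads run again */
	task_resume(target);	/* 0: returns KERN_FAILURE */
#endif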
#if CONFIG_FREEZE

/*
 *	task_freeze:
 *
 *	Freeze a currently suspended task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_freeze(
	register task_t	task,
	uint32_t	*purgeable_count,
	uint32_t	*wired_count,
	uint32_t	*clean_count,
	uint32_t	*dirty_count,
	boolean_t	*shared,
	boolean_t	walk_only)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	if (walk_only) {
		vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, shared);
	} else {
		vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, shared);
	}

	return (KERN_SUCCESS);
}

/*
 *	task_thaw:
 *
 *	Thaw a currently frozen task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_thaw(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	vm_map_thaw(task->map);

	return (KERN_SUCCESS);
}

#endif /* CONFIG_FREEZE */
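
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * freezer/jetsam-style caller could use the walk_only mode above as a dry
 * run to size the job before freezing for real.  "budget" is a placeholder.
 */
#if 0	/* example only, never compiled */
	uint32_t	purgeable, wired, clean, dirty;
	boolean_t	shared;

	/* dry run: count what a freeze would touch without freezing */
	task_freeze(task, &purgeable, &wired, &clean, &dirty, &shared, TRUE);

	if (dirty <= budget)
		task_freeze(task, &purgeable, &wired, &clean, &dirty,
			    &shared, FALSE);
#endif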
kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	host_port;
	kern_return_t	kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}
/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}
/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t				task,
	task_flavor_t			flavor,
	__unused task_info_t		task_info_in,		/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	kern_return_t error = KERN_SUCCESS;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if ((task != current_task()) && (!task->active)) {
		task_unlock(task);
		return (KERN_INVALID_ARGUMENT);
	}

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	case TASK_BASIC2_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t		map;
		clock_sec_t		secs;
		clock_usec_t		usecs;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
		if (flavor == TASK_BASIC2_INFO_32) {
			/*
			 * The "BASIC2" flavor gets the maximum resident
			 * size instead of the current resident size...
			 */
			basic_info->resident_size = pmap_resident_max(map->pmap);
		} else {
			basic_info->resident_size = pmap_resident_count(map->pmap);
		}
		basic_info->resident_size *= PAGE_SIZE;

		basic_info->policy = ((task != kernel_task)?
					POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}
	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t		map;
		clock_sec_t		secs;
		clock_usec_t		usecs;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size  = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap))
			* PAGE_SIZE_64;

		basic_info->policy = ((task != kernel_task)?
					POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}
	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t			thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}
	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t		thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}
	case TASK_DYLD_INFO:
	{
		task_dyld_info_t info;

		/*
		 * We added the format field to TASK_DYLD_INFO output.  For
		 * temporary backward compatibility, accept the fact that
		 * clients may ask for the old version - distinguished by the
		 * size of the expected result structure.
		 */
#define TASK_LEGACY_DYLD_INFO_COUNT \
		offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)

		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_dyld_info_t)task_info_out;
		info->all_image_info_addr = task->all_image_info_addr;
		info->all_image_info_size = task->all_image_info_size;

		/* only set format on output for those expecting it */
		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
			info->all_image_info_format = task_has_64BitAddr(task) ?
				TASK_DYLD_ALL_IMAGE_INFO_64 :
				TASK_DYLD_ALL_IMAGE_INFO_32;
			*task_info_count = TASK_DYLD_INFO_COUNT;
		} else {
			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
		}
		break;
	}
	case TASK_EXTMOD_INFO:
	{
		task_extmod_info_t	info;
		void			*p;

		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_extmod_info_t)task_info_out;

		p = get_bsdtask_info(task);
		if (p) {
			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
		} else {
			bzero(info->task_uuid, sizeof(info->task_uuid));
		}
		info->extmod_statistics = task->extmod_statistics;
		*task_info_count = TASK_EXTMOD_INFO_COUNT;

		break;
	}
	case TASK_KERNELMEMORY_INFO:
	{
		task_kernelmemory_info_t	tkm_info;
		thread_t			thread;

		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		tkm_info = (task_kernelmemory_info_t) task_info_out;

		if (task == kernel_task) {
			/*
			 * All shared allocs/frees from other tasks count against
			 * the kernel private memory usage.  If we are looking up
			 * info for the kernel task, gather from everywhere.
			 */
			task_unlock(task);

			/* start by accounting for all the terminated tasks against the kernel */
			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
			tkm_info->total_salloc = 0;
			tkm_info->total_sfree = 0;

			/* count all other task/thread shared alloc/free against the kernel */
			lck_mtx_lock(&tasks_threads_lock);
			queue_iterate(&tasks, task, task_t, tasks) {
				if (task == kernel_task) {
					tkm_info->total_palloc += task->tkm_private.alloc;
					tkm_info->total_pfree += task->tkm_private.free;
				} else {
					tkm_info->total_palloc += task->tkm_shared.alloc;
					tkm_info->total_pfree += task->tkm_shared.free;
				}
			}
			queue_iterate(&threads, thread, thread_t, threads) {
				if (thread->task == kernel_task) {
					tkm_info->total_palloc += thread->tkm_private.alloc;
					tkm_info->total_pfree += thread->tkm_private.free;
				} else {
					tkm_info->total_palloc += thread->tkm_shared.alloc;
					tkm_info->total_pfree += thread->tkm_shared.free;
				}
			}
			lck_mtx_unlock(&tasks_threads_lock);
		} else {
			/* account for all the terminated threads in the process */
			tkm_info->total_palloc = task->tkm_private.alloc;
			tkm_info->total_pfree = task->tkm_private.free;
			tkm_info->total_salloc = task->tkm_shared.alloc;
			tkm_info->total_sfree = task->tkm_shared.free;

			/* then add in all the running threads */
			queue_iterate(&task->threads, thread, thread_t, task_threads) {
				tkm_info->total_palloc += thread->tkm_private.alloc;
				tkm_info->total_pfree += thread->tkm_private.free;
				tkm_info->total_salloc += thread->tkm_shared.alloc;
				tkm_info->total_sfree += thread->tkm_shared.free;
			}
			task_unlock(task);
		}

		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
		return KERN_SUCCESS;
	}
	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{
		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		error = KERN_INVALID_POLICY;
		break;
	}
	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		if (*task_info_count < POLICY_RR_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		rr_base = (policy_rr_base_t) task_info_out;

		if (task != kernel_task) {
			error = KERN_INVALID_POLICY;
			break;
		}

		rr_base->base_priority = task->priority;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}
	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		ts_base = (policy_timeshare_base_t) task_info_out;

		if (task == kernel_task) {
			error = KERN_INVALID_POLICY;
			break;
		}

		ts_base->base_priority = task->priority;

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}
	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		sec_token_p = (security_token_t *) task_info_out;

		*sec_token_p = task->sec_token;

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}
	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		audit_token_p = (audit_token_t *) task_info_out;

		*audit_token_p = task->audit_token;

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}
	case TASK_SCHED_INFO:
		error = KERN_INVALID_ARGUMENT;
		break;
	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;
		register thread_t		thread;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		events_info = (task_events_info_t) task_info_out;

		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;

		events_info->csw = task->c_switch;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			events_info->csw	   += thread->c_switch;
			events_info->syscalls_mach += thread->syscalls_mach;
			events_info->syscalls_unix += thread->syscalls_unix;
		}

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}
	case TASK_AFFINITY_TAG_INFO:
	{
		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		error = task_affinity_info(task, task_info_out, task_info_count);
		break;
	}
	default:
		error = KERN_INVALID_ARGUMENT;
	}

	task_unlock(task);
	return (error);
}
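
/*
 * Illustrative sketch (not part of the original file): a typical user-space
 * query against the TASK_BASIC_INFO_64 flavor handled above.  The count
 * argument is in/out -- callers pass the buffer capacity and get back the
 * number of natural_t words actually filled.
 */
#if 0	/* example only, never compiled */
#include <mach/mach.h>
#include <stdio.h>

static void
print_my_footprint(void)
{
	task_basic_info_64_data_t	info;
	mach_msg_type_number_t		count = TASK_BASIC_INFO_64_COUNT;

	if (task_info(mach_task_self(), TASK_BASIC_INFO_64,
		      (task_info_t)&info, &count) == KERN_SUCCESS) {
		printf("virtual %llu, resident %llu\n",
		       (unsigned long long)info.virtual_size,
		       (unsigned long long)info.resident_size);
	}
}
#endif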
void
task_vtimer_set(
	task_t		task,
	integer_t	which)
{
	thread_t	thread;

	/* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

	task_lock(task);

	task->vtimers |= which;

	switch (which) {

	case TASK_VTIMER_USER:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_user_save = timer_grab(&thread->user_timer);
		}
		break;

	case TASK_VTIMER_PROF:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
		}
		break;

	case TASK_VTIMER_RLIM:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
		}
		break;
	}

	task_unlock(task);
}

void
task_vtimer_clear(
	task_t		task,
	integer_t	which)
{
	assert(task == current_task());

	task_lock(task);

	task->vtimers &= ~which;

	task_unlock(task);
}
void
task_vtimer_update(
	__unused task_t	task,
	integer_t	which,
	uint32_t	*microsecs)
{
	thread_t	thread = current_thread();
	uint32_t	tdelt;
	clock_sec_t	secs;
	uint64_t	tsum;

	assert(task == current_task());

	assert(task->vtimers & which);

	secs = tdelt = 0;

	switch (which) {

	case TASK_VTIMER_USER:
		tdelt = (uint32_t)timer_delta(&thread->user_timer,
						&thread->vtimer_user_save);
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		/* if the time delta is smaller than a usec, ignore */
		if (*microsecs != 0)
			thread->vtimer_prof_save = tsum;
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
		thread->vtimer_rlim_save = tsum;
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;
	}
}
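
/*
 * Illustrative sketch (not part of the original file): the intended vtimer
 * sequence, roughly what the BSD setitimer(ITIMER_VIRTUAL) support does.
 * The call sites here are schematic, not the actual BSD code.
 */
#if 0	/* example only, never compiled */
	uint32_t usecs;

	/* arm: snapshot the per-thread timers */
	task_vtimer_set(task, TASK_VTIMER_USER);

	/* on each return to user mode: consume the delta since the snapshot */
	task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
	/* credit usecs against the interval timer, post SIGVTALRM when spent */

	/* disarm when the interval timer is cleared */
	task_vtimer_clear(task, TASK_VTIMER_USER);
#endif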
/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	__unused task_t			task,
	__unused processor_set_t	new_pset,
	__unused boolean_t		assign_threads)
{
	return(KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &pset0, assign_threads));
}

/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = &pset0;

	return (KERN_SUCCESS);
}
/*
 *	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 */
kern_return_t
task_policy(
	__unused task_t			task,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		set_limit,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy can be any policy implemented by the
 *	processor set, whether enabled or not. Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	__unused task_t			task,
	__unused processor_set_t	pset,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	base_count,
	__unused policy_limit_t		limit,
	__unused mach_msg_type_number_t	limit_count,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}
#if	FAST_TAS
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
kern_return_t
task_set_ras_pc(
	__unused task_t		task,
	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */
	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */
	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}
/*
 * Install default (machine-dependent) initial thread state
 * on the task.  Subsequent thread creation will have this initial
 * state set on the thread by machine_thread_inherit_taskwide().
 * Flavors and structures are exactly the same as those to thread_set_state()
 */
kern_return_t
task_set_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count)
{
	kern_return_t ret;

	if (task == TASK_NULL) {
		return (KERN_INVALID_ARGUMENT);
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	ret = machine_task_set_state(task, flavor, state, state_count);

	task_unlock(task);
	return ret;
}
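
/*
 * Illustrative sketch (not part of the original file): task_set_state()
 * mirrors thread_set_state() but applies to threads created later.  A
 * hypothetical x86 caller pre-setting a debug register for every future
 * thread might look like this (flavor and field names as in the kernel's
 * x86_debug_state64_t; the dr7 value is a placeholder).
 */
#if 0	/* example only, never compiled */
	x86_debug_state64_t	dbg;

	bzero(&dbg, sizeof(dbg));
	dbg.dr7 = 0x1;		/* placeholder: locally enable breakpoint 0 */
	task_set_state(task, x86_DEBUG_STATE64, (thread_state_t)&dbg,
		       x86_DEBUG_STATE64_COUNT);
#endif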
/*
 * Examine the default (machine-dependent) initial thread state
 * on the task, as set by task_set_state().  Flavors and structures
 * are exactly the same as those passed to thread_get_state().
 */
kern_return_t
task_get_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *state_count)
{
	kern_return_t ret;

	if (task == TASK_NULL) {
		return (KERN_INVALID_ARGUMENT);
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	ret = machine_task_get_state(task, flavor, state, state_count);

	task_unlock(task);
	return ret;
}
/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

int
check_for_tasksuspend(task_t task)
{
	if (task == TASK_NULL)
		return (0);

	return (task->suspend_count > 0);
}
#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}
/*
 * This routine is always called with the task lock held.
 * And it returns a thread handle without reference as the caller
 * operates on it under the task lock held.
 */
thread_t
task_findtid(task_t task, uint64_t tid)
{
	thread_t thread = THREAD_NULL;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->thread_id == tid)
			return (thread);
	}
	return (THREAD_NULL);
}
#if CONFIG_MACF_MACH
/*
 * Protect 2 task labels against modification by adding a reference on
 * both label handles. The locks do not actually have to be held while
 * using the labels as only labels with one reference can be modified
 * in place.
 */

void
tasklabel_lock2(
	task_t a,
	task_t b)
{
	labelh_reference(a->label);
	labelh_reference(b->label);
}

void
tasklabel_unlock2(
	task_t a,
	task_t b)
{
	labelh_release(a->label);
	labelh_release(b->label);
}

void
mac_task_label_update_internal(
	struct label	*pl,
	struct task	*task)
{
	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	mac_task_label_update(pl, &task->maclabel);
	tasklabel_unlock(task);
	ip_lock(task->itk_self);
	mac_port_label_update_cred(pl, &task->itk_self->ip_label);
	ip_unlock(task->itk_self);
}

void
mac_task_label_modify(
	struct task	*task,
	void		*arg,
	void (*f)	(struct label *l, void *arg))
{
	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	(*f)(&task->maclabel, arg);
	tasklabel_unlock(task);
}

struct label *
mac_task_get_label(struct task *task)
{
	return (&task->maclabel);
}
#endif