/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *
 *  Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>    /* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#include <kern/affinity.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>         /* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>
#include <mach/security_server.h>

#include <vm/vm_shared_region.h>

#if CONFIG_MACF_MACH
#include <security/mac_mach_internal.h>
#endif

#if CONFIG_COUNTERS
#include <pmc/pmc.h>
#endif /* CONFIG_COUNTERS */
lck_attr_t      task_lck_attr;
lck_grp_t       task_lck_grp;
lck_grp_attr_t  task_lck_grp_attr;
#if CONFIG_EMBEDDED
lck_mtx_t       task_watch_mtx;
#endif /* CONFIG_EMBEDDED */

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

static ledger_template_t task_ledger_template = NULL;
struct _task_ledger_indices task_ledgers = {-1, -1, -1, -1, -1};
void init_task_ledgers(void);

int task_max = CONFIG_TASK_MAX;     /* Max number of tasks */
/* externs for BSD kernel */
extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);

/* Forward declarations */

void        task_hold_locked(
                task_t      task);
void        task_wait_locked(
                task_t      task,
                boolean_t   until_not_runnable);
void        task_release_locked(
                task_t      task);
void        task_synchronizer_destroy_all(
                task_t      task);
int         check_for_tasksuspend(
                task_t      task);

void
task_backing_store_privileged(
    task_t task)
{
    task_lock(task);
    task->priv_flags |= VM_BACKING_STORE_PRIV;
    task_unlock(task);
    return;
}
void
task_set_64bit(
    task_t      task,
    boolean_t   is64bit)
{
#if defined(__i386__) || defined(__x86_64__)
    thread_t thread;
#endif /* __i386__ */

    task_lock(task);

    if (is64bit) {
        if (task_has_64BitAddr(task))
            goto out;
        task_set_64BitAddr(task);
    } else {
        if ( !task_has_64BitAddr(task))
            goto out;

        /*
         * Deallocate all memory previously allocated
         * above the 32-bit address space, since it won't
         * be accessible anymore.
         */
        /* remove regular VM map entries & pmap mappings */
        (void) vm_map_remove(task->map,
                (vm_map_offset_t) VM_MAX_ADDRESS,
                MACH_VM_MAX_ADDRESS,
                VM_MAP_NO_FLAGS);

        /* remove the higher VM mappings */
        (void) vm_map_remove(task->map,
                MACH_VM_MAX_ADDRESS,
                0xFFFFFFFFFFFFF000ULL,
                VM_MAP_NO_FLAGS);
        task_clear_64BitAddr(task);
    }
    /* FIXME: On x86, the thread save state flavor can diverge from the
     * task's 64-bit feature flag due to the 32-bit/64-bit register save
     * state dichotomy. Since we can be pre-empted in this interval,
     * certain routines may observe the thread as being in an inconsistent
     * state with respect to its task's 64-bitness.
     */
#if defined(__i386__) || defined(__x86_64__)
    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_mtx_lock(thread);
        machine_thread_switch_addrmode(thread);
        thread_mtx_unlock(thread);
    }
#endif /* __i386__ */

out:
    task_unlock(task);
}
void
task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
{
    task_lock(task);
    task->all_image_info_addr = addr;
    task->all_image_info_size = size;
    task_unlock(task);
}
void
task_init(void)
{
    lck_grp_attr_setdefault(&task_lck_grp_attr);
    lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
    lck_attr_setdefault(&task_lck_attr);
    lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
#if CONFIG_EMBEDDED
    lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr);
#endif /* CONFIG_EMBEDDED */

    task_zone = zinit(
            sizeof(struct task),
            task_max * sizeof(struct task),
            TASK_CHUNK * sizeof(struct task),
            "tasks");

    zone_change(task_zone, Z_NOENCRYPT, TRUE);

    init_task_ledgers();

    /*
     * Create the kernel task as the first task.
     */
#ifdef __LP64__
    if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
#else
    if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
#endif
        panic("task_init\n");

    vm_map_deallocate(kernel_task->map);
    kernel_task->map = kernel_map;
}
/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
    __unused task_t         parent_task,
    __unused vm_offset_t    map_base,
    __unused vm_size_t      map_size,
    __unused task_t         *child_task)
{
    return (KERN_INVALID_ARGUMENT);
}
kern_return_t
task_create(
    task_t                          parent_task,
    __unused ledger_port_array_t    ledger_ports,
    __unused mach_msg_type_number_t num_ledger_ports,
    __unused boolean_t              inherit_memory,
    __unused task_t                 *child_task)    /* OUT */
{
    if (parent_task == TASK_NULL)
        return(KERN_INVALID_ARGUMENT);

    /*
     * No longer supported: too many calls assume that a task has a valid
     * process attached.
     */
    return(KERN_FAILURE);
}
kern_return_t
host_security_create_task_token(
    host_security_t                 host_security,
    task_t                          parent_task,
    __unused security_token_t       sec_token,
    __unused audit_token_t          audit_token,
    __unused host_priv_t            host_priv,
    __unused ledger_port_array_t    ledger_ports,
    __unused mach_msg_type_number_t num_ledger_ports,
    __unused boolean_t              inherit_memory,
    __unused task_t                 *child_task)    /* OUT */
{
    if (parent_task == TASK_NULL)
        return(KERN_INVALID_ARGUMENT);

    if (host_security == HOST_NULL)
        return(KERN_INVALID_SECURITY);

    /*
     * No longer supported.
     */
    return(KERN_FAILURE);
}
void
init_task_ledgers(void)
{
    ledger_template_t t;

    assert(task_ledger_template == NULL);
    assert(kernel_task == TASK_NULL);

    if ((t = ledger_template_create("Per-task ledger")) == NULL)
        panic("couldn't create task ledger template");

    task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
    task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
        "physmem", "bytes");
    task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
        "bytes");
    task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
        "bytes");
    task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
        "bytes");

    if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
        (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
        (task_ledgers.wired_mem < 0)) {
        panic("couldn't create entries for task ledger template");
    }

    task_ledger_template = t;
}
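/*
 * Usage sketch (illustrative, not from the original source): once the
 * template is published, per-task ledgers instantiated from it are
 * charged through the entry indices set up above; a hypothetical
 * accounting site would look like:
 *
 *    ledger_credit(task->ledger, task_ledgers.cpu_time, delta_abs);
 *
 * assuming ledger_credit() takes (ledger, entry index, amount) as in
 * the ledger code this file is built with.
 */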
kern_return_t
task_create_internal(
    task_t      parent_task,
    boolean_t   inherit_memory,
    boolean_t   is_64bit,
    task_t      *child_task)        /* OUT */
{
    task_t              new_task;
    vm_shared_region_t  shared_region;
    ledger_t            ledger = NULL;

    new_task = (task_t) zalloc(task_zone);

    if (new_task == TASK_NULL)
        return(KERN_RESOURCE_SHORTAGE);

    /* one ref for just being alive; one for our caller */
    new_task->ref_count = 2;

    /* allocate with active entries */
    assert(task_ledger_template != NULL);
    if ((ledger = ledger_instantiate(task_ledger_template,
            LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
        zfree(task_zone, new_task);
        return(KERN_RESOURCE_SHORTAGE);
    }
    new_task->ledger = ledger;

    /* if inherit_memory is true, parent_task MUST not be NULL */
    if (inherit_memory)
        new_task->map = vm_map_fork(ledger, parent_task->map);
    else
        new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
                (vm_map_offset_t)(VM_MIN_ADDRESS),
                (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

    /* Inherit memlock limit from parent */
    if (parent_task)
        vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);

    lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
    queue_init(&new_task->threads);
    new_task->suspend_count = 0;
    new_task->thread_count = 0;
    new_task->active_thread_count = 0;
    new_task->user_stop_count = 0;
    new_task->role = TASK_UNSPECIFIED;
    new_task->active = TRUE;
    new_task->halting = FALSE;
    new_task->user_data = NULL;
    new_task->faults = 0;
    new_task->cow_faults = 0;
    new_task->pageins = 0;
    new_task->messages_sent = 0;
    new_task->messages_received = 0;
    new_task->syscalls_mach = 0;
    new_task->priv_flags = 0;
    new_task->syscalls_unix = 0;
    new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
    new_task->taskFeatures[0] = 0;      /* Init task features */
    new_task->taskFeatures[1] = 0;      /* Init task features */

    zinfo_task_init(new_task);

#ifdef MACH_BSD
    new_task->bsd_info = NULL;
#endif /* MACH_BSD */

#if defined(__i386__) || defined(__x86_64__)
    new_task->i386_ldt = 0;
    new_task->task_debug = NULL;
#endif

    queue_init(&new_task->semaphore_list);
    queue_init(&new_task->lock_set_list);
    new_task->semaphores_owned = 0;
    new_task->lock_sets_owned = 0;

#if CONFIG_MACF_MACH
    new_task->label = labelh_new(1);
    mac_task_label_init(&new_task->maclabel);
#endif

    ipc_task_init(new_task, parent_task);

    new_task->total_user_time = 0;
    new_task->total_system_time = 0;

    new_task->vtimers = 0;

    new_task->shared_region = NULL;

    new_task->affinity_space = NULL;

#if CONFIG_COUNTERS
    new_task->t_chud = 0U;
#endif

    new_task->pidsuspended = FALSE;
    new_task->frozen = FALSE;
    new_task->rusage_cpu_flags = 0;
    new_task->rusage_cpu_percentage = 0;
    new_task->rusage_cpu_interval = 0;
    new_task->rusage_cpu_deadline = 0;
    new_task->rusage_cpu_callt = NULL;
    new_task->proc_terminate = 0;
#if CONFIG_EMBEDDED
    queue_init(&new_task->task_watchers);
    new_task->appstate = TASK_APPSTATE_ACTIVE;
    new_task->num_taskwatchers = 0;
    new_task->watchapplying = 0;
#endif /* CONFIG_EMBEDDED */

    if (parent_task != TASK_NULL) {
        new_task->sec_token = parent_task->sec_token;
        new_task->audit_token = parent_task->audit_token;

        /* inherit the parent's shared region */
        shared_region = vm_shared_region_get(parent_task);
        vm_shared_region_set(new_task, shared_region);

        if(task_has_64BitAddr(parent_task))
            task_set_64BitAddr(new_task);
        new_task->all_image_info_addr = parent_task->all_image_info_addr;
        new_task->all_image_info_size = parent_task->all_image_info_size;

#if defined(__i386__) || defined(__x86_64__)
        if (inherit_memory && parent_task->i386_ldt)
            new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
        if (inherit_memory && parent_task->affinity_space)
            task_affinity_create(parent_task, new_task);

        new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
        new_task->policystate = parent_task->policystate;
        /* inherit the self action state */
        new_task->appliedstate = parent_task->appliedstate;
        new_task->ext_policystate = parent_task->ext_policystate;
#if NOTYET
        /* till the child lifecycle is cleared do not inherit external action */
        new_task->ext_appliedstate = parent_task->ext_appliedstate;
#else
        new_task->ext_appliedstate = default_task_null_policy;
#endif
    }
    else {
        new_task->sec_token = KERNEL_SECURITY_TOKEN;
        new_task->audit_token = KERNEL_AUDIT_TOKEN;
#ifdef __LP64__
        if(is_64bit)
            task_set_64BitAddr(new_task);
#endif
        new_task->all_image_info_addr = (mach_vm_address_t)0;
        new_task->all_image_info_size = (mach_vm_size_t)0;

        new_task->pset_hint = PROCESSOR_SET_NULL;
        new_task->policystate = default_task_proc_policy;
        new_task->ext_policystate = default_task_proc_policy;
        new_task->appliedstate = default_task_null_policy;
        new_task->ext_appliedstate = default_task_null_policy;
    }

    if (kernel_task == TASK_NULL) {
        new_task->priority = BASEPRI_KERNEL;
        new_task->max_priority = MAXPRI_KERNEL;
    }
    else {
        new_task->priority = BASEPRI_DEFAULT;
        new_task->max_priority = MAXPRI_USER;
    }

    bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));

    lck_mtx_lock(&tasks_threads_lock);
    queue_enter(&tasks, new_task, task_t, tasks);
    tasks_count++;
    lck_mtx_unlock(&tasks_threads_lock);

    if (vm_backing_store_low && parent_task != NULL)
        new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);

    ipc_task_enable(new_task);

    *child_task = new_task;
    return(KERN_SUCCESS);
}
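/*
 * Reference sketch (illustrative, not original commentary): of the two
 * initial references taken above, one belongs to the task itself and is
 * dropped during termination; the other belongs to the caller, so a
 * hypothetical creation path pairs with task_deallocate():
 *
 *    task_t child;
 *    if (task_create_internal(parent, FALSE, is64, &child) == KERN_SUCCESS) {
 *        ... set up child ...
 *        task_deallocate(child);
 *    }
 */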
/*
 *  task_deallocate:
 *
 *  Drop a reference on a task.
 */
void
task_deallocate(
    task_t      task)
{
    ledger_amount_t credit, debit;

    if (task == TASK_NULL)
        return;

    if (task_deallocate_internal(task) > 0)
        return;

    lck_mtx_lock(&tasks_threads_lock);
    queue_remove(&terminated_tasks, task, task_t, tasks);
    lck_mtx_unlock(&tasks_threads_lock);

    /*
     *  Give the machine dependent code a chance
     *  to perform cleanup before ripping apart
     *  the task.
     */
    machine_task_terminate(task);

    ipc_task_terminate(task);

    if (task->affinity_space)
        task_affinity_deallocate(task);

    vm_map_deallocate(task->map);
    is_release(task->itk_space);

    lck_mtx_destroy(&task->lock, &task_lck_grp);

#if CONFIG_MACF_MACH
    labelh_release(task->label);
#endif

    if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
        &debit)) {
        OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
        OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
    }
    if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
        &debit)) {
        OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
        OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
    }
    ledger_dereference(task->ledger);
    zinfo_task_free(task);
    zfree(task_zone, task);
}
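/*
 * Pairing sketch (illustrative): any code that stashes a task pointer
 * outside the task lock brackets its use with a reference, e.g.:
 *
 *    task_reference(t);
 *    ... examine or operate on t ...
 *    task_deallocate(t);
 *
 * The final task_deallocate() above is also what rolls the task's
 * kernel-memory ledger totals into tasks_tkm_private/tasks_tkm_shared.
 */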
/*
 *  task_name_deallocate:
 *
 *  Drop a reference on a task name.
 */
void
task_name_deallocate(
    task_name_t     task_name)
{
    return(task_deallocate((task_t)task_name));
}
/*
 *  task_terminate:
 *
 *  Terminate the specified task.  See comments on thread_terminate
 *  (kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t
task_terminate(
    task_t      task)
{
    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (task->bsd_info)
        return (KERN_FAILURE);

    return (task_terminate_internal(task));
}
kern_return_t
task_terminate_internal(
    task_t          task)
{
    thread_t        thread, self;
    task_t          self_task;
    boolean_t       interrupt_save;

    assert(task != kernel_task);

    self = current_thread();
    self_task = self->task;

    /*
     *  Get the task locked and make sure that we are not racing
     *  with someone else trying to terminate us.
     */
    if (task == self_task)
        task_lock(task);
    else
    if (task < self_task) {
        task_lock(task);
        task_lock(self_task);
    }
    else {
        task_lock(self_task);
        task_lock(task);
    }

    if (!task->active) {
        /*
         *  Task is already being terminated.
         *  Just return an error. If we are dying, this will
         *  just get us to our AST special handler and that
         *  will get us to finalize the termination of ourselves.
         */
        task_unlock(task);
        if (self_task != task)
            task_unlock(self_task);

        return (KERN_FAILURE);
    }

    if (self_task != task)
        task_unlock(self_task);

    /*
     *  Make sure the current thread does not get aborted out of
     *  the waits inside these operations.
     */
    interrupt_save = thread_interrupt_level(THREAD_UNINT);

    /*
     *  Indicate that we want all the threads to stop executing
     *  at user space by holding the task (we would have held
     *  each thread independently in thread_terminate_internal -
     *  but this way we may be more likely to already find it
     *  held there).  Mark the task inactive, and prevent
     *  further task operations via the task port.
     */
    task_hold_locked(task);
    task->active = FALSE;
    ipc_task_disable(task);

    /*
     *  Terminate each thread in the task.
     */
    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_terminate_internal(thread);
    }

    task_unlock(task);

#if CONFIG_EMBEDDED
    /*
     * remove all task watchers
     */
    task_removewatchers(task);
#endif /* CONFIG_EMBEDDED */

    /*
     *  Destroy all synchronizers owned by the task.
     */
    task_synchronizer_destroy_all(task);

    /*
     *  Destroy the IPC space, leaving just a reference for it.
     */
    ipc_space_terminate(task->itk_space);

    if (vm_map_has_4GB_pagezero(task->map))
        vm_map_clear_4GB_pagezero(task->map);

    /*
     * If the current thread is a member of the task
     * being terminated, then the last reference to
     * the task will not be dropped until the thread
     * is finally reaped.  To avoid incurring the
     * expense of removing the address space regions
     * at reap time, we do it explicitly here.
     */
    vm_map_remove(task->map,
              task->map->min_offset,
              task->map->max_offset,
              VM_MAP_NO_FLAGS);

    /* release our shared region */
    vm_shared_region_set(task, NULL);

    lck_mtx_lock(&tasks_threads_lock);
    queue_remove(&tasks, task, task_t, tasks);
    queue_enter(&terminated_tasks, task, task_t, tasks);
    tasks_count--;
    lck_mtx_unlock(&tasks_threads_lock);

    /*
     * We no longer need to guard against being aborted, so restore
     * the previous interruptible state.
     */
    thread_interrupt_level(interrupt_save);

    /*
     * Get rid of the task active reference on itself.
     */
    task_deallocate(task);

    return (KERN_SUCCESS);
}
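/*
 * Note (added explanation, inferred from the code above rather than
 * original commentary): the three-way locking ladder at the top orders
 * the two task locks by ascending kernel address, so two threads
 * concurrently terminating each other's tasks both take
 * min(task, self_task) first and cannot deadlock; in outline:
 *
 *    task_lock(task < self_task ? task : self_task);
 *    task_lock(task < self_task ? self_task : task);
 */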
/*
 *  Shut the current task down (except for the current thread) in
 *  preparation for dramatic changes to the task (probably exec).
 *  We hold the task and mark all other threads in the task for
 *  termination.
 */
kern_return_t
task_start_halt(
    task_t      task)
{
    thread_t    thread, self;

    assert(task != kernel_task);

    self = current_thread();

    if (task != self->task)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if (task->halting || !task->active || !self->active) {
        /*
         *  Task or current thread is already being terminated.
         *  Hurry up and return out of the current kernel context
         *  so that we run our AST special handler to terminate
         *  ourselves.
         */
        task_unlock(task);

        return (KERN_FAILURE);
    }

    task->halting = TRUE;

    if (task->thread_count > 1) {

        /*
         * Mark all the threads to keep them from starting any more
         * user-level execution.  The thread_terminate_internal code
         * would do this on a thread by thread basis anyway, but this
         * gives us a better chance of not having to wait there.
         */
        task_hold_locked(task);

        /*
         *  Terminate all the other threads in the task.
         */
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            if (thread != self)
                thread_terminate_internal(thread);
        }

        task_release_locked(task);
    }
    task_unlock(task);
    return (KERN_SUCCESS);
}
/*
 *  task_complete_halt:
 *
 *  Complete task halt by waiting for threads to terminate, then clean
 *  up task resources (VM, port namespace, etc...) and then let the
 *  current thread go in the (practically empty) task context.
 */
void
task_complete_halt(task_t task)
{
    task_lock(task);
    assert(task->halting);
    assert(task == current_task());

    /*
     *  Wait for the other threads to get shut down.
     *  When the last other thread is reaped, we'll be
     *  woken up.
     */
    if (task->thread_count > 1) {
        assert_wait((event_t)&task->halting, THREAD_UNINT);
        task_unlock(task);
        thread_block(THREAD_CONTINUE_NULL);
    } else {
        task_unlock(task);
    }

    /*
     *  Give the machine dependent code a chance
     *  to perform cleanup of task-level resources
     *  associated with the current thread before
     *  ripping apart the task.
     */
    machine_task_terminate(task);

    /*
     *  Destroy all synchronizers owned by the task.
     */
    task_synchronizer_destroy_all(task);

    /*
     *  Destroy the contents of the IPC space, leaving just
     *  a reference for it.
     */
    ipc_space_clean(task->itk_space);

    /*
     * Clean out the address space, as we are going to be
     * getting a new one.
     */
    vm_map_remove(task->map, task->map->min_offset,
              task->map->max_offset, VM_MAP_NO_FLAGS);

    task->halting = FALSE;
}
/*
 *  task_hold_locked:
 *
 *  Suspend execution of the specified task.
 *  This is a recursive-style suspension of the task, a count of
 *  suspends is maintained.
 *
 *  CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
    register task_t     task)
{
    register thread_t   thread;

    assert(task->active);

    if (task->suspend_count++ > 0)
        return;

    /*
     *  Iterate through all the threads and hold them.
     */
    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_mtx_lock(thread);
        thread_hold(thread);
        thread_mtx_unlock(thread);
    }
}
/*
 *  task_hold:
 *
 *  Same as the internal routine above, except that it must lock
 *  and verify that the task is active.  This differs from task_suspend
 *  in that it places a kernel hold on the task rather than just a
 *  user-level hold.  This keeps users from over resuming and setting
 *  it running out from under the kernel.
 *
 *  CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
    register task_t     task)
{
    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if (!task->active) {
        task_unlock(task);

        return (KERN_FAILURE);
    }

    task_hold_locked(task);
    task_unlock(task);

    return (KERN_SUCCESS);
}
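/*
 * Distinction sketch (illustrative): task_hold()/task_release() adjust
 * the kernel-level suspend_count, while task_suspend()/task_resume()
 * below adjust user_stop_count and map it onto a single kernel hold.
 * A hypothetical kernel client that must keep a task stopped regardless
 * of user-level resumes would pair:
 *
 *    task_hold(task);
 *    task_wait(task, TRUE);
 *    ... operate on the stopped task ...
 *    task_release(task);
 */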
kern_return_t
task_wait(
    task_t      task,
    boolean_t   until_not_runnable)
{
    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if (!task->active) {
        task_unlock(task);

        return (KERN_FAILURE);
    }

    task_wait_locked(task, until_not_runnable);
    task_unlock(task);

    return (KERN_SUCCESS);
}
/*
 *  task_wait_locked:
 *
 *  Wait for all threads in task to stop.
 *
 * Conditions:
 *  Called with task locked, active, and held.
 */
void
task_wait_locked(
    register task_t     task,
    boolean_t           until_not_runnable)
{
    register thread_t   thread, self;

    assert(task->active);
    assert(task->suspend_count > 0);

    self = current_thread();

    /*
     *  Iterate through all the threads and wait for them to
     *  stop.  Do not wait for the current thread if it is within
     *  the task.
     */
    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        if (thread != self)
            thread_wait(thread, until_not_runnable);
    }
}
/*
 *  task_release_locked:
 *
 *  Release a kernel hold on a task.
 *
 *  CONDITIONS: the task is locked and active
 */
void
task_release_locked(
    register task_t     task)
{
    register thread_t   thread;

    assert(task->active);
    assert(task->suspend_count > 0);

    if (--task->suspend_count > 0)
        return;

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_mtx_lock(thread);
        thread_release(thread);
        thread_mtx_unlock(thread);
    }
}
/*
 *  task_release:
 *
 *  Same as the internal routine above, except that it must lock
 *  and verify that the task is active.
 *
 *  CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
    task_t      task)
{
    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if (!task->active) {
        task_unlock(task);

        return (KERN_FAILURE);
    }

    task_release_locked(task);
    task_unlock(task);

    return (KERN_SUCCESS);
}
kern_return_t
task_threads(
    task_t                  task,
    thread_act_array_t      *threads_out,
    mach_msg_type_number_t  *count)
{
    mach_msg_type_number_t  actual;
    thread_t                *thread_list;
    thread_t                thread;
    vm_size_t               size, size_needed;
    void                    *addr;
    unsigned int            i, j;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = NULL;

    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);

            if (size != 0)
                kfree(addr, size);

            return (KERN_FAILURE);
        }

        actual = task->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the task is locked & active */
    thread_list = (thread_t *)addr;

    i = j = 0;

    for (thread = (thread_t)queue_first(&task->threads); i < actual;
                ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
        thread_reference_internal(thread);
        thread_list[j++] = thread;
    }

    assert(queue_end(&task->threads, (queue_entry_t)thread));

    actual = j;
    size_needed = actual * sizeof (mach_port_t);

    /* can unlock task now that we've got the thread refs */
    task_unlock(task);

    if (actual == 0) {
        /* no threads, so return null pointer and deallocate memory */

        *threads_out = NULL;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; ++i)
                    thread_deallocate(thread_list[i]);
                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
            thread_list = (thread_t *)newaddr;
        }

        *threads_out = thread_list;
        *count = actual;

        /* do the conversion that Mig should handle */

        for (i = 0; i < actual; ++i)
            ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
    }

    return (KERN_SUCCESS);
}
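/*
 * Pattern note (added, illustrative): the for(;;) above is the classic
 * Mach "guess, unlock, allocate, relock, re-check" sizing loop; in
 * outline:
 *
 *    for (;;) {
 *        task_lock(task);
 *        size_needed = task->thread_count * sizeof (mach_port_t);
 *        if (size_needed <= size)
 *            break;                 -- still locked, buffer large enough
 *        task_unlock(task);
 *        ... free the old buffer, kalloc(size = size_needed) ...
 *    }
 */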
static kern_return_t
place_task_hold(
    register task_t     task)
{
    if (!task->active) {
        return (KERN_FAILURE);
    }

    if (task->user_stop_count++ > 0) {
        /*
         *  If the stop count was positive, the task is
         *  already stopped and we can exit.
         */
        return (KERN_SUCCESS);
    }

    /*
     *  Put a kernel-level hold on the threads in the task (all
     *  user-level task suspensions added together represent a
     *  single kernel-level hold).  We then wait for the threads
     *  to stop executing user code.
     */
    task_hold_locked(task);
    task_wait_locked(task, TRUE);

    return (KERN_SUCCESS);
}
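/*
 * State sketch (illustrative): user_stop_count counts user-level
 * suspensions; only the 0 -> 1 transition above takes the kernel hold,
 * so N calls to place_task_hold() nest and require N matching
 * release_task_hold() calls before the threads run user code again.
 */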
static kern_return_t
release_task_hold(
    register task_t     task,
    boolean_t           pidresume)
{
    register boolean_t release = FALSE;

    if (!task->active) {
        return (KERN_FAILURE);
    }

    if (pidresume) {
        if (task->pidsuspended == FALSE) {
            return (KERN_FAILURE);
        }
        task->pidsuspended = FALSE;
    }

    if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
        if (--task->user_stop_count == 0) {
            release = TRUE;
        }
    }
    else {
        return (KERN_FAILURE);
    }

    /*
     *  Release the task if necessary.
     */
    if (release)
        task_release_locked(task);

    return (KERN_SUCCESS);
}
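/*
 * Interplay sketch (illustrative): a pidsuspend contributes one unit to
 * user_stop_count; the (task->pidsuspended ? 1 : 0) floor above keeps
 * plain task_resume() calls from consuming that unit, so only
 * task_pidresume() (pidresume == TRUE) can undo a task_pidsuspend().
 */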
/*
 *  task_suspend:
 *
 *  Implement a user-level suspension on a task.
 *
 * Conditions:
 *  The caller holds a reference to the task
 */
kern_return_t
task_suspend(
    register task_t     task)
{
    kern_return_t   kr;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    kr = place_task_hold(task);

    task_unlock(task);

    return (kr);
}
/*
 *  task_resume:
 *
 *  Release a kernel hold on a task.
 *
 * Conditions:
 *  The caller holds a reference to the task
 */
kern_return_t
task_resume(
    register task_t     task)
{
    kern_return_t   kr;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    kr = release_task_hold(task, FALSE);

    task_unlock(task);

    return (kr);
}
kern_return_t
task_pidsuspend_locked(task_t task)
{
    kern_return_t kr;

    if (task->pidsuspended) {
        kr = KERN_FAILURE;
        goto out;
    }

    task->pidsuspended = TRUE;

    kr = place_task_hold(task);
    if (kr != KERN_SUCCESS) {
        task->pidsuspended = FALSE;
    }
out:
    return(kr);
}
/*
 *  task_pidsuspend:
 *
 *  Suspends a task by placing a hold on its threads.
 *
 * Conditions:
 *  The caller holds a reference to the task
 */
kern_return_t
task_pidsuspend(
    register task_t     task)
{
    kern_return_t   kr;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    kr = task_pidsuspend_locked(task);

    task_unlock(task);

    return (kr);
}
1351 #define THAW_ON_RESUME 1
1355 * Resumes a previously suspended task.
1358 * The caller holds a reference to the task
1362 register task_t task
)
1365 #if (CONFIG_FREEZE && THAW_ON_RESUME)
1369 if (task
== TASK_NULL
|| task
== kernel_task
)
1370 return (KERN_INVALID_ARGUMENT
);
1374 #if (CONFIG_FREEZE && THAW_ON_RESUME)
1375 frozen
= task
->frozen
;
1376 task
->frozen
= FALSE
;
1379 kr
= release_task_hold(task
, TRUE
);
1383 #if (CONFIG_FREEZE && THAW_ON_RESUME)
1384 if ((kr
== KERN_SUCCESS
) && (frozen
== TRUE
)) {
1385 kr
= vm_map_thaw(task
->map
);
#if CONFIG_FREEZE

/*
 *  task_freeze:
 *
 * Conditions:
 *  The caller holds a reference to the task
 */
kern_return_t
task_freeze(
    register task_t task,
    uint32_t        *purgeable_count,
    uint32_t        *wired_count,
    uint32_t        *clean_count,
    uint32_t        *dirty_count,
    uint32_t        dirty_budget,
    boolean_t       *shared,
    boolean_t       walk_only)
{
    kern_return_t kr;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if (task->frozen) {
        task_unlock(task);
        return (KERN_FAILURE);
    }

    if (walk_only == FALSE) {
        task->frozen = TRUE;
    }

    task_unlock(task);

    if (walk_only) {
        kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
    } else {
        kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
    }

    return (kr);
}
/*
 *  task_thaw:
 *
 *  Thaw a currently frozen task.
 *
 * Conditions:
 *  The caller holds a reference to the task
 */
kern_return_t
task_thaw(
    register task_t     task)
{
    kern_return_t kr;

    if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if (!task->frozen) {
        task_unlock(task);
        return (KERN_FAILURE);
    }

    task->frozen = FALSE;

    task_unlock(task);

    kr = vm_map_thaw(task->map);

    return (kr);
}

#endif /* CONFIG_FREEZE */
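/*
 * Freeze/thaw pairing sketch (illustrative; the arguments and error
 * handling here are hypothetical):
 *
 *    uint32_t purgeable, wired, clean, dirty;
 *    boolean_t shared;
 *    if (task_freeze(t, &purgeable, &wired, &clean, &dirty,
 *                    dirty_budget, &shared, FALSE) == KERN_SUCCESS) {
 *        ... task stays frozen until ...
 *        (void) task_thaw(t);
 *    }
 *
 * task_pidresume() above performs the thaw implicitly when
 * THAW_ON_RESUME is enabled.
 */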
kern_return_t
host_security_set_task_token(
    host_security_t     host_security,
    task_t              task,
    security_token_t    sec_token,
    audit_token_t       audit_token,
    host_priv_t         host_priv)
{
    ipc_port_t      host_port;
    kern_return_t   kr;

    if (task == TASK_NULL)
        return(KERN_INVALID_ARGUMENT);

    if (host_security == HOST_NULL)
        return(KERN_INVALID_SECURITY);

    task_lock(task);
    task->sec_token = sec_token;
    task->audit_token = audit_token;
    task_unlock(task);

    if (host_priv != HOST_PRIV_NULL) {
        kr = host_get_host_priv_port(host_priv, &host_port);
    } else {
        kr = host_get_host_port(host_priv_self(), &host_port);
    }
    assert(kr == KERN_SUCCESS);
    kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
    return(kr);
}
/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
    task_t          task,
    task_flavor_t   flavor,
    __unused task_info_t            task_info_in,       /* pointer to IN array */
    __unused mach_msg_type_number_t task_info_count)
{
    if (task == TASK_NULL)
        return(KERN_INVALID_ARGUMENT);

    switch (flavor) {
        default:
            return (KERN_INVALID_ARGUMENT);
    }
    return (KERN_SUCCESS);
}
kern_return_t
task_info(
    task_t                  task,
    task_flavor_t           flavor,
    task_info_t             task_info_out,
    mach_msg_type_number_t  *task_info_count)
{
    kern_return_t error = KERN_SUCCESS;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    task_lock(task);

    if ((task != current_task()) && (!task->active)) {
        task_unlock(task);
        return (KERN_INVALID_ARGUMENT);
    }

    switch (flavor) {

    case TASK_BASIC_INFO_32:
    case TASK_BASIC2_INFO_32:
    {
        task_basic_info_32_t    basic_info;
        vm_map_t                map;
        clock_sec_t             secs;
        clock_usec_t            usecs;

        if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        basic_info = (task_basic_info_32_t)task_info_out;

        map = (task == kernel_task)? kernel_map: task->map;
        basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
        if (flavor == TASK_BASIC2_INFO_32) {
            /*
             * The "BASIC2" flavor gets the maximum resident
             * size instead of the current resident size...
             */
            basic_info->resident_size = pmap_resident_max(map->pmap);
        } else {
            basic_info->resident_size = pmap_resident_count(map->pmap);
        }
        basic_info->resident_size *= PAGE_SIZE;

        basic_info->policy = ((task != kernel_task)?
                                POLICY_TIMESHARE: POLICY_RR);
        basic_info->suspend_count = task->user_stop_count;

        absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
        basic_info->user_time.seconds =
            (typeof(basic_info->user_time.seconds))secs;
        basic_info->user_time.microseconds = usecs;

        absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
        basic_info->system_time.seconds =
            (typeof(basic_info->system_time.seconds))secs;
        basic_info->system_time.microseconds = usecs;

        *task_info_count = TASK_BASIC_INFO_32_COUNT;
        break;
    }

    case TASK_BASIC_INFO_64:
    {
        task_basic_info_64_t    basic_info;
        vm_map_t                map;
        clock_sec_t             secs;
        clock_usec_t            usecs;

        if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        basic_info = (task_basic_info_64_t)task_info_out;

        map = (task == kernel_task)? kernel_map: task->map;
        basic_info->virtual_size = map->size;
        basic_info->resident_size =
            (mach_vm_size_t)(pmap_resident_count(map->pmap))
            * PAGE_SIZE_64;

        basic_info->policy = ((task != kernel_task)?
                                POLICY_TIMESHARE: POLICY_RR);
        basic_info->suspend_count = task->user_stop_count;

        absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
        basic_info->user_time.seconds =
            (typeof(basic_info->user_time.seconds))secs;
        basic_info->user_time.microseconds = usecs;

        absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
        basic_info->system_time.seconds =
            (typeof(basic_info->system_time.seconds))secs;
        basic_info->system_time.microseconds = usecs;

        *task_info_count = TASK_BASIC_INFO_64_COUNT;
        break;
    }

    case MACH_TASK_BASIC_INFO:
    {
        mach_task_basic_info_t  basic_info;
        vm_map_t                map;
        clock_sec_t             secs;
        clock_usec_t            usecs;

        if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        basic_info = (mach_task_basic_info_t)task_info_out;

        map = (task == kernel_task) ? kernel_map : task->map;

        basic_info->virtual_size = map->size;

        basic_info->resident_size =
            (mach_vm_size_t)(pmap_resident_count(map->pmap));
        basic_info->resident_size *= PAGE_SIZE_64;

        basic_info->resident_size_max =
            (mach_vm_size_t)(pmap_resident_max(map->pmap));
        basic_info->resident_size_max *= PAGE_SIZE_64;

        basic_info->policy = ((task != kernel_task) ?
                                POLICY_TIMESHARE : POLICY_RR);

        basic_info->suspend_count = task->user_stop_count;

        absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
        basic_info->user_time.seconds =
            (typeof(basic_info->user_time.seconds))secs;
        basic_info->user_time.microseconds = usecs;

        absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
        basic_info->system_time.seconds =
            (typeof(basic_info->system_time.seconds))secs;
        basic_info->system_time.microseconds = usecs;

        *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
        break;
    }

    case TASK_THREAD_TIMES_INFO:
    {
        register task_thread_times_info_t   times_info;
        register thread_t                   thread;

        if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        times_info = (task_thread_times_info_t) task_info_out;
        times_info->user_time.seconds = 0;
        times_info->user_time.microseconds = 0;
        times_info->system_time.seconds = 0;
        times_info->system_time.microseconds = 0;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            time_value_t    user_time, system_time;

            thread_read_times(thread, &user_time, &system_time);

            time_value_add(&times_info->user_time, &user_time);
            time_value_add(&times_info->system_time, &system_time);
        }

        *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
        break;
    }

    case TASK_ABSOLUTETIME_INFO:
    {
        task_absolutetime_info_t    info;
        register thread_t           thread;

        if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (task_absolutetime_info_t)task_info_out;
        info->threads_user = info->threads_system = 0;

        info->total_user = task->total_user_time;
        info->total_system = task->total_system_time;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            uint64_t    tval;
            spl_t       x;

            x = splsched();
            thread_lock(thread);

            tval = timer_grab(&thread->user_timer);
            info->threads_user += tval;
            info->total_user += tval;

            tval = timer_grab(&thread->system_timer);
            if (thread->precise_user_kernel_time) {
                info->threads_system += tval;
                info->total_system += tval;
            } else {
                /* system_timer may represent either sys or user */
                info->threads_user += tval;
                info->total_user += tval;
            }

            thread_unlock(thread);
            splx(x);
        }

        *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
        break;
    }

    case TASK_DYLD_INFO:
    {
        task_dyld_info_t info;

        /*
         * We added the format field to TASK_DYLD_INFO output.  For
         * temporary backward compatibility, accept the fact that
         * clients may ask for the old version - distinguished by the
         * size of the expected result structure.
         */
#define TASK_LEGACY_DYLD_INFO_COUNT \
        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)

        if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (task_dyld_info_t)task_info_out;
        info->all_image_info_addr = task->all_image_info_addr;
        info->all_image_info_size = task->all_image_info_size;

        /* only set format on output for those expecting it */
        if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
            info->all_image_info_format = task_has_64BitAddr(task) ?
                                TASK_DYLD_ALL_IMAGE_INFO_64 :
                                TASK_DYLD_ALL_IMAGE_INFO_32;
            *task_info_count = TASK_DYLD_INFO_COUNT;
        } else {
            *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
        }
        break;
    }

    case TASK_EXTMOD_INFO:
    {
        task_extmod_info_t info;
        void *p;

        if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (task_extmod_info_t)task_info_out;

        p = get_bsdtask_info(task);
        if (p) {
            proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
        } else {
            bzero(info->task_uuid, sizeof(info->task_uuid));
        }
        info->extmod_statistics = task->extmod_statistics;
        *task_info_count = TASK_EXTMOD_INFO_COUNT;
        break;
    }

    case TASK_KERNELMEMORY_INFO:
    {
        task_kernelmemory_info_t    tkm_info;
        ledger_amount_t             credit, debit;

        if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        tkm_info = (task_kernelmemory_info_t) task_info_out;
        tkm_info->total_palloc = 0;
        tkm_info->total_pfree = 0;
        tkm_info->total_salloc = 0;
        tkm_info->total_sfree = 0;

        if (task == kernel_task) {
            /*
             * All shared allocs/frees from other tasks count against
             * the kernel private memory usage.  If we are looking up
             * info for the kernel task, gather from everywhere.
             */
            task_unlock(task);

            /* start by accounting for all the terminated tasks against the kernel */
            tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
            tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;

            /* count all other task/thread shared alloc/free against the kernel */
            lck_mtx_lock(&tasks_threads_lock);

            /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
            queue_iterate(&tasks, task, task_t, tasks) {
                if (task == kernel_task) {
                    if (ledger_get_entries(task->ledger,
                        task_ledgers.tkm_private, &credit,
                        &debit) == KERN_SUCCESS) {
                        tkm_info->total_palloc += credit;
                        tkm_info->total_pfree += debit;
                    }
                }
                if (!ledger_get_entries(task->ledger,
                    task_ledgers.tkm_shared, &credit, &debit)) {
                    tkm_info->total_palloc += credit;
                    tkm_info->total_pfree += debit;
                }
            }
            lck_mtx_unlock(&tasks_threads_lock);
        } else {
            if (!ledger_get_entries(task->ledger,
                task_ledgers.tkm_private, &credit, &debit)) {
                tkm_info->total_palloc = credit;
                tkm_info->total_pfree = debit;
            }
            if (!ledger_get_entries(task->ledger,
                task_ledgers.tkm_shared, &credit, &debit)) {
                tkm_info->total_salloc = credit;
                tkm_info->total_sfree = debit;
            }
            task_unlock(task);
        }

        *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
        return KERN_SUCCESS;
    }

    /* OBSOLETE */
    case TASK_SCHED_FIFO_INFO:
    {
        if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        error = KERN_INVALID_POLICY;
        break;
    }

    /* OBSOLETE */
    case TASK_SCHED_RR_INFO:
    {
        register policy_rr_base_t   rr_base;
        uint32_t quantum_time;
        uint64_t quantum_ns;

        if (*task_info_count < POLICY_RR_BASE_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        rr_base = (policy_rr_base_t) task_info_out;

        if (task != kernel_task) {
            error = KERN_INVALID_POLICY;
            break;
        }

        rr_base->base_priority = task->priority;

        quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
        absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

        rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

        *task_info_count = POLICY_RR_BASE_COUNT;
        break;
    }

    /* OBSOLETE */
    case TASK_SCHED_TIMESHARE_INFO:
    {
        register policy_timeshare_base_t    ts_base;

        if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        ts_base = (policy_timeshare_base_t) task_info_out;

        if (task == kernel_task) {
            error = KERN_INVALID_POLICY;
            break;
        }

        ts_base->base_priority = task->priority;

        *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
        break;
    }

    case TASK_SECURITY_TOKEN:
    {
        register security_token_t   *sec_token_p;

        if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        sec_token_p = (security_token_t *) task_info_out;

        *sec_token_p = task->sec_token;

        *task_info_count = TASK_SECURITY_TOKEN_COUNT;
        break;
    }

    case TASK_AUDIT_TOKEN:
    {
        register audit_token_t  *audit_token_p;

        if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        audit_token_p = (audit_token_t *) task_info_out;

        *audit_token_p = task->audit_token;

        *task_info_count = TASK_AUDIT_TOKEN_COUNT;
        break;
    }

    case TASK_SCHED_INFO:
        error = KERN_INVALID_ARGUMENT;
        break;

    case TASK_EVENTS_INFO:
    {
        register task_events_info_t events_info;
        register thread_t           thread;

        if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        events_info = (task_events_info_t) task_info_out;

        events_info->faults = task->faults;
        events_info->pageins = task->pageins;
        events_info->cow_faults = task->cow_faults;
        events_info->messages_sent = task->messages_sent;
        events_info->messages_received = task->messages_received;
        events_info->syscalls_mach = task->syscalls_mach;
        events_info->syscalls_unix = task->syscalls_unix;

        events_info->csw = task->c_switch;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            events_info->csw           += thread->c_switch;
            events_info->syscalls_mach += thread->syscalls_mach;
            events_info->syscalls_unix += thread->syscalls_unix;
        }

        *task_info_count = TASK_EVENTS_INFO_COUNT;
        break;
    }

    case TASK_AFFINITY_TAG_INFO:
    {
        if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        error = task_affinity_info(task, task_info_out, task_info_count);
        break;
    }

    default:
        error = KERN_INVALID_ARGUMENT;
    }

    task_unlock(task);
    return (error);
}
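/*
 * Caller sketch (illustrative): callers size the output buffer with the
 * flavor's *_COUNT constant and pass the count by reference, which
 * task_info() rewrites to the count actually returned, e.g.:
 *
 *    task_basic_info_64_data_t binfo;
 *    mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
 *    kern_return_t kr = task_info(some_task, TASK_BASIC_INFO_64,
 *                                 (task_info_t)&binfo, &count);
 */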
void
task_vtimer_set(
    task_t      task,
    integer_t   which)
{
    thread_t    thread;
    spl_t       x;

    /* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

    task_lock(task);

    task->vtimers |= which;

    switch (which) {

    case TASK_VTIMER_USER:
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            x = splsched();
            thread_lock(thread);
            if (thread->precise_user_kernel_time)
                thread->vtimer_user_save = timer_grab(&thread->user_timer);
            else
                thread->vtimer_user_save = timer_grab(&thread->system_timer);
            thread_unlock(thread);
            splx(x);
        }
        break;

    case TASK_VTIMER_PROF:
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            x = splsched();
            thread_lock(thread);
            thread->vtimer_prof_save = timer_grab(&thread->user_timer);
            thread->vtimer_prof_save += timer_grab(&thread->system_timer);
            thread_unlock(thread);
            splx(x);
        }
        break;

    case TASK_VTIMER_RLIM:
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            x = splsched();
            thread_lock(thread);
            thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
            thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
            thread_unlock(thread);
            splx(x);
        }
        break;
    }

    task_unlock(task);
}
void
task_vtimer_clear(
    task_t      task,
    integer_t   which)
{
    assert(task == current_task());

    task_lock(task);

    task->vtimers &= ~which;

    task_unlock(task);
}
void
task_vtimer_update(
__unused
    task_t      task,
    integer_t   which,
    uint32_t    *microsecs)
{
    thread_t    thread = current_thread();
    uint32_t    tdelt;
    clock_sec_t secs;
    uint64_t    tsum;

    assert(task == current_task());

    assert(task->vtimers & which);

    secs = tdelt = 0;

    switch (which) {

    case TASK_VTIMER_USER:
        if (thread->precise_user_kernel_time) {
            tdelt = (uint32_t)timer_delta(&thread->user_timer,
                                &thread->vtimer_user_save);
        } else {
            tdelt = (uint32_t)timer_delta(&thread->system_timer,
                                &thread->vtimer_user_save);
        }
        absolutetime_to_microtime(tdelt, &secs, microsecs);
        break;

    case TASK_VTIMER_PROF:
        tsum = timer_grab(&thread->user_timer);
        tsum += timer_grab(&thread->system_timer);
        tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
        absolutetime_to_microtime(tdelt, &secs, microsecs);
        /* if the time delta is smaller than a usec, ignore */
        if (*microsecs != 0)
            thread->vtimer_prof_save = tsum;
        break;

    case TASK_VTIMER_RLIM:
        tsum = timer_grab(&thread->user_timer);
        tsum += timer_grab(&thread->system_timer);
        tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
        thread->vtimer_rlim_save = tsum;
        absolutetime_to_microtime(tdelt, &secs, microsecs);
        break;
    }
}
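/*
 * Flow sketch (illustrative): a vtimer bit is armed once and then
 * polled; task_vtimer_update() yields the microseconds elapsed on the
 * current thread since the per-thread baseline saved by
 * task_vtimer_set():
 *
 *    uint32_t usecs;
 *    task_vtimer_set(task, TASK_VTIMER_PROF);
 *    ...
 *    task_vtimer_update(task, TASK_VTIMER_PROF, &usecs);
 */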
/*
 *  task_assign:
 *
 *  Change the assigned processor set for the task
 */
kern_return_t
task_assign(
    __unused task_t             task,
    __unused processor_set_t    new_pset,
    __unused boolean_t          assign_threads)
{
    return(KERN_FAILURE);
}

/*
 *  task_assign_default:
 *
 *  Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
    task_t      task,
    boolean_t   assign_threads)
{
    return (task_assign(task, &pset0, assign_threads));
}

/*
 *  task_get_assignment
 *
 *  Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
    task_t              task,
    processor_set_t     *pset)
{
    if (!task->active)
        return(KERN_FAILURE);

    *pset = &pset0;

    return (KERN_SUCCESS);
}
/*
 *  task_policy
 *
 *  Set scheduling policy and parameters, both base and limit, for
 *  the given task. Policy must be a policy which is enabled for the
 *  processor set. Change contained threads if requested.
 */
kern_return_t
task_policy(
    __unused task_t                 task,
    __unused policy_t               policy_id,
    __unused policy_base_t          base,
    __unused mach_msg_type_number_t count,
    __unused boolean_t              set_limit,
    __unused boolean_t              change)
{
    return(KERN_FAILURE);
}

/*
 *  task_set_policy
 *
 *  Set scheduling policy and parameters, both base and limit, for
 *  the given task. Policy can be any policy implemented by the
 *  processor set, whether enabled or not. Change contained threads
 *  if requested.
 */
kern_return_t
task_set_policy(
    __unused task_t                 task,
    __unused processor_set_t        pset,
    __unused policy_t               policy_id,
    __unused policy_base_t          base,
    __unused mach_msg_type_number_t base_count,
    __unused policy_limit_t         limit,
    __unused mach_msg_type_number_t limit_count,
    __unused boolean_t              change)
{
    return(KERN_FAILURE);
}
#if FAST_TAS
kern_return_t
task_set_ras_pc(
    task_t      task,
    vm_offset_t pc,
    vm_offset_t endpc)
{
    extern int fast_tas_debug;

    if (fast_tas_debug) {
        printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
               task, pc, endpc);
    }
    task_lock(task);
    task->fast_tas_base = pc;
    task->fast_tas_end = endpc;
    task_unlock(task);
    return KERN_SUCCESS;
}
#else   /* FAST_TAS */
kern_return_t
task_set_ras_pc(
    __unused task_t         task,
    __unused vm_offset_t    pc,
    __unused vm_offset_t    endpc)
{
    return KERN_FAILURE;
}
#endif  /* FAST_TAS */
void
task_synchronizer_destroy_all(task_t task)
{
    semaphore_t semaphore;
    lock_set_t  lock_set;

    /*
     *  Destroy owned semaphores
     */

    while (!queue_empty(&task->semaphore_list)) {
        semaphore = (semaphore_t) queue_first(&task->semaphore_list);
        (void) semaphore_destroy(task, semaphore);
    }

    /*
     *  Destroy owned lock sets
     */

    while (!queue_empty(&task->lock_set_list)) {
        lock_set = (lock_set_t) queue_first(&task->lock_set_list);
        (void) lock_set_destroy(task, lock_set);
    }
}
/*
 * Install default (machine-dependent) initial thread state
 * on the task.  Subsequent thread creation will have this initial
 * state set on the thread by machine_thread_inherit_taskwide().
 * Flavors and structures are exactly the same as those to thread_set_state()
 */
kern_return_t
task_set_state(
    task_t  task,
    int     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  state_count)
{
    kern_return_t ret;

    if (task == TASK_NULL) {
        return (KERN_INVALID_ARGUMENT);
    }

    task_lock(task);

    if (!task->active) {
        task_unlock(task);
        return (KERN_FAILURE);
    }

    ret = machine_task_set_state(task, flavor, state, state_count);

    task_unlock(task);
    return ret;
}
/*
 * Examine the default (machine-dependent) initial thread state
 * on the task, as set by task_set_state().  Flavors and structures
 * are exactly the same as those passed to thread_get_state().
 */
kern_return_t
task_get_state(
    task_t  task,
    int     flavor,
    thread_state_t          state,
    mach_msg_type_number_t  *state_count)
{
    kern_return_t ret;

    if (task == TASK_NULL) {
        return (KERN_INVALID_ARGUMENT);
    }

    task_lock(task);

    if (!task->active) {
        task_unlock(task);
        return (KERN_FAILURE);
    }

    ret = machine_task_get_state(task, flavor, state, state_count);

    task_unlock(task);
    return ret;
}
/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
    if (t == kernel_task)
        return (TRUE);

    return (FALSE);
}

int
check_for_tasksuspend(task_t task)
{

    if (task == TASK_NULL)
        return (0);

    return (task->suspend_count > 0);
}

#undef current_task
task_t current_task(void);
task_t current_task(void)
{
    return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
    task_t      task)
{
    if (task != TASK_NULL)
        task_reference_internal(task);
}
/*
 * This routine is called always with task lock held.
 * And it returns a thread handle without reference as the caller
 * operates on it under the task lock held.
 */
thread_t
task_findtid(task_t task, uint64_t tid)
{
    thread_t thread = THREAD_NULL;

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        if (thread->thread_id == tid)
            return(thread);
    }
    return(THREAD_NULL);
}
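/*
 * Caller sketch (illustrative): per the comment above, the lookup and
 * any use of the result stay under the task lock, e.g.:
 *
 *    task_lock(task);
 *    thread_t th = task_findtid(task, tid);
 *    if (th != THREAD_NULL) {
 *        ... use th ...
 *    }
 *    task_unlock(task);
 */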
#if CONFIG_MACF_MACH
/*
 * Protect 2 task labels against modification by adding a reference on
 * both label handles. The locks do not actually have to be held while
 * using the labels as only labels with one reference can be modified
 * in place.
 */

void
tasklabel_lock2(
    task_t a,
    task_t b)
{
    labelh_reference(a->label);
    labelh_reference(b->label);
}

void
tasklabel_unlock2(
    task_t a,
    task_t b)
{
    labelh_release(a->label);
    labelh_release(b->label);
}

void
mac_task_label_update_internal(
    struct label    *pl,
    struct task     *task)
{

    tasklabel_lock(task);
    task->label = labelh_modify(task->label);
    mac_task_label_update(pl, &task->maclabel);
    tasklabel_unlock(task);
    ip_lock(task->itk_self);
    mac_port_label_update_cred(pl, &task->itk_self->ip_label);
    ip_unlock(task->itk_self);
}

void
mac_task_label_modify(
    struct task     *task,
    void            *arg,
    void (*f)       (struct label *l, void *arg))
{

    tasklabel_lock(task);
    task->label = labelh_modify(task->label);
    (*f)(&task->maclabel, arg);
    tasklabel_unlock(task);
}

struct label *
mac_task_get_label(struct task *task)
{
    return (&task->maclabel);
}
#endif /* CONFIG_MACF_MACH */