1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63/*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
81/*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
88
89#include <fast_tas.h>
90#include <platforms.h>
91
92#include <mach/mach_types.h>
93#include <mach/boolean.h>
94#include <mach/host_priv.h>
95#include <mach/machine/vm_types.h>
96#include <mach/vm_param.h>
97#include <mach/semaphore.h>
98#include <mach/task_info.h>
99#include <mach/task_special_ports.h>
100
101#include <ipc/ipc_types.h>
102#include <ipc/ipc_space.h>
103#include <ipc/ipc_entry.h>
104
105#include <kern/kern_types.h>
106#include <kern/mach_param.h>
107#include <kern/misc_protos.h>
108#include <kern/task.h>
109#include <kern/thread.h>
110#include <kern/zalloc.h>
111#include <kern/kalloc.h>
112#include <kern/processor.h>
113#include <kern/sched_prim.h> /* for thread_wakeup */
114#include <kern/ipc_tt.h>
115#include <kern/host.h>
116#include <kern/clock.h>
117#include <kern/timer.h>
118#include <kern/assert.h>
119#include <kern/sync_lock.h>
120#include <kern/affinity.h>
121
122#include <vm/pmap.h>
123#include <vm/vm_map.h>
124#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
125#include <vm/vm_pageout.h>
126#include <vm/vm_protos.h>
127
128/*
129 * Exported interfaces
130 */
131
132#include <mach/task_server.h>
133#include <mach/mach_host_server.h>
134#include <mach/host_security_server.h>
135#include <mach/mach_port_server.h>
136#include <mach/security_server.h>
137
138#include <vm/vm_shared_region.h>
139
140#if CONFIG_MACF_MACH
141#include <security/mac_mach_internal.h>
142#endif
143
144#if CONFIG_COUNTERS
145#include <pmc/pmc.h>
146#endif /* CONFIG_COUNTERS */
147
148task_t kernel_task;
149zone_t task_zone;
150lck_attr_t task_lck_attr;
151lck_grp_t task_lck_grp;
152lck_grp_attr_t task_lck_grp_attr;
153#if CONFIG_EMBEDDED
154lck_mtx_t task_watch_mtx;
155#endif /* CONFIG_EMBEDDED */
156
157zinfo_usage_store_t tasks_tkm_private;
158zinfo_usage_store_t tasks_tkm_shared;
159
160static ledger_template_t task_ledger_template = NULL;
161struct _task_ledger_indices task_ledgers = {-1, -1, -1, -1, -1};
162void init_task_ledgers(void);
163
164
165int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
166
167/* externs for BSD kernel */
168extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
169
170/* Forwards */
171
172void task_hold_locked(
173 task_t task);
174void task_wait_locked(
175 task_t task,
176 boolean_t until_not_runnable);
177void task_release_locked(
178 task_t task);
179void task_free(
180 task_t task );
181void task_synchronizer_destroy_all(
182 task_t task);
183
184int check_for_tasksuspend(
185 task_t task);
186
187void
188task_backing_store_privileged(
189 task_t task)
190{
191 task_lock(task);
192 task->priv_flags |= VM_BACKING_STORE_PRIV;
193 task_unlock(task);
194 return;
195}
196
197
198void
199task_set_64bit(
200 task_t task,
201 boolean_t is64bit)
202{
203#if defined(__i386__) || defined(__x86_64__)
204 thread_t thread;
205#endif /* defined(__i386__) || defined(__x86_64__) */
206 int vm_flags = 0;
207
208 if (is64bit) {
209 if (task_has_64BitAddr(task))
210 return;
211
212 task_set_64BitAddr(task);
213 } else {
214 if ( !task_has_64BitAddr(task))
215 return;
216
217 /*
218 * Deallocate all memory previously allocated
219 * above the 32-bit address space, since it won't
220 * be accessible anymore.
221 */
222 /* remove regular VM map entries & pmap mappings */
223 (void) vm_map_remove(task->map,
224 (vm_map_offset_t) VM_MAX_ADDRESS,
225 MACH_VM_MAX_ADDRESS,
226 0);
227 /* remove the higher VM mappings */
228 (void) vm_map_remove(task->map,
229 MACH_VM_MAX_ADDRESS,
230 0xFFFFFFFFFFFFF000ULL,
231 vm_flags);
232 task_clear_64BitAddr(task);
233 }
234 /* FIXME: On x86, the thread save state flavor can diverge from the
235 * task's 64-bit feature flag due to the 32-bit/64-bit register save
236 * state dichotomy. Since we can be pre-empted in this interval,
237 * certain routines may observe the thread as being in an inconsistent
238 * state with respect to its task's 64-bitness.
239 */
240#if defined(__i386__) || defined(__x86_64__)
241 task_lock(task);
242 queue_iterate(&task->threads, thread, thread_t, task_threads) {
243 thread_mtx_lock(thread);
244 machine_thread_switch_addrmode(thread);
245 thread_mtx_unlock(thread);
246 }
247 task_unlock(task);
248#endif /* defined(__i386__) || defined(__x86_64__) */
249}
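/*
 * Illustrative caller sketch (not part of this file): the addressing mode is
 * normally flipped by the BSD exec machinery once the width of the new image
 * is known. The variable name below is hypothetical.
 *
 *	boolean_t image_is_64bit;		/* decided by the image activator */
 *
 *	task_set_64bit(task, image_is_64bit);	/* prunes >32-bit mappings if the
 *						   task is dropping to 32-bit */
 */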
250
251
252void
253task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
254{
255 task_lock(task);
256 task->all_image_info_addr = addr;
257 task->all_image_info_size = size;
258 task_unlock(task);
259}
260
261void
262task_init(void)
263{
264
265 lck_grp_attr_setdefault(&task_lck_grp_attr);
266 lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
267 lck_attr_setdefault(&task_lck_attr);
268 lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
269#if CONFIG_EMBEDDED
270 lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr);
271#endif /* CONFIG_EMBEDDED */
272
273 task_zone = zinit(
274 sizeof(struct task),
275 task_max * sizeof(struct task),
276 TASK_CHUNK * sizeof(struct task),
277 "tasks");
278
279 zone_change(task_zone, Z_NOENCRYPT, TRUE);
280
281 init_task_ledgers();
282
283 /*
284 * Create the kernel task as the first task.
285 */
286#ifdef __LP64__
287 if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
288#else
289 if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
290#endif
291 panic("task_init\n");
292
293 vm_map_deallocate(kernel_task->map);
294 kernel_task->map = kernel_map;
295
296}
297
298/*
299 * Create a task running in the kernel address space. It may
300 * have its own map of size mem_size and may have ipc privileges.
301 */
302kern_return_t
303kernel_task_create(
304 __unused task_t parent_task,
305 __unused vm_offset_t map_base,
306 __unused vm_size_t map_size,
307 __unused task_t *child_task)
308{
309 return (KERN_INVALID_ARGUMENT);
310}
311
312kern_return_t
313task_create(
314 task_t parent_task,
315 __unused ledger_port_array_t ledger_ports,
316 __unused mach_msg_type_number_t num_ledger_ports,
317 __unused boolean_t inherit_memory,
318 __unused task_t *child_task) /* OUT */
319{
320 if (parent_task == TASK_NULL)
321 return(KERN_INVALID_ARGUMENT);
322
323 /*
324 * No longer supported: too many calls assume that a task has a valid
325 * process attached.
326 */
327 return(KERN_FAILURE);
328}
329
330kern_return_t
331host_security_create_task_token(
332 host_security_t host_security,
333 task_t parent_task,
334 __unused security_token_t sec_token,
335 __unused audit_token_t audit_token,
336 __unused host_priv_t host_priv,
337 __unused ledger_port_array_t ledger_ports,
338 __unused mach_msg_type_number_t num_ledger_ports,
339 __unused boolean_t inherit_memory,
340 __unused task_t *child_task) /* OUT */
341{
342 if (parent_task == TASK_NULL)
343 return(KERN_INVALID_ARGUMENT);
344
345 if (host_security == HOST_NULL)
346 return(KERN_INVALID_SECURITY);
347
348 /*
349 * No longer supported.
350 */
351 return(KERN_FAILURE);
352}
353
354void
355init_task_ledgers(void)
356{
357 ledger_template_t t;
358
359 assert(task_ledger_template == NULL);
360 assert(kernel_task == TASK_NULL);
361
362 if ((t = ledger_template_create("Per-task ledger")) == NULL)
363 panic("couldn't create task ledger template");
364
365 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
366 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
367 "physmem", "bytes");
368 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
369 "bytes");
370 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
371 "bytes");
372 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
373 "bytes");
374
375 if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
376 (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
377 (task_ledgers.wired_mem < 0)) {
378 panic("couldn't create entries for task ledger template");
379 }
380
381 task_ledger_template = t;
382}
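/*
 * Sketch of how the ledger entries created above are used elsewhere in the
 * kernel (illustrative only; the real charging sites live in the scheduler
 * and VM code, and 'delta'/'net' are placeholder names):
 *
 *	ledger_credit(task->ledger, task_ledgers.cpu_time, delta);
 *
 *	ledger_amount_t credit, debit;
 *	if (ledger_get_entries(task->ledger, task_ledgers.tkm_private,
 *	    &credit, &debit) == KERN_SUCCESS)
 *		net = credit - debit;
 */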
383
384kern_return_t
385task_create_internal(
386 task_t parent_task,
387 boolean_t inherit_memory,
388 boolean_t is_64bit,
389 task_t *child_task) /* OUT */
390{
391 task_t new_task;
392 vm_shared_region_t shared_region;
393 ledger_t ledger = NULL;
394
395 new_task = (task_t) zalloc(task_zone);
396
397 if (new_task == TASK_NULL)
398 return(KERN_RESOURCE_SHORTAGE);
399
400 /* one ref for just being alive; one for our caller */
401 new_task->ref_count = 2;
402
403 /* allocate with active entries */
404 assert(task_ledger_template != NULL);
405 if ((ledger = ledger_instantiate(task_ledger_template,
406 LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
407 zfree(task_zone, new_task);
408 return(KERN_RESOURCE_SHORTAGE);
409 }
410 new_task->ledger = ledger;
411
412 /* if inherit_memory is true, parent_task MUST not be NULL */
413 if (inherit_memory)
414 new_task->map = vm_map_fork(ledger, parent_task->map);
415 else
416 new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
417 (vm_map_offset_t)(VM_MIN_ADDRESS),
418 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
419
420 /* Inherit memlock limit from parent */
421 if (parent_task)
422 vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
423
424 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
425 queue_init(&new_task->threads);
426 new_task->suspend_count = 0;
427 new_task->thread_count = 0;
428 new_task->active_thread_count = 0;
429 new_task->user_stop_count = 0;
430 new_task->role = TASK_UNSPECIFIED;
431 new_task->active = TRUE;
432 new_task->halting = FALSE;
433 new_task->user_data = NULL;
434 new_task->faults = 0;
435 new_task->cow_faults = 0;
436 new_task->pageins = 0;
437 new_task->messages_sent = 0;
438 new_task->messages_received = 0;
439 new_task->syscalls_mach = 0;
440 new_task->priv_flags = 0;
441 new_task->syscalls_unix=0;
442 new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
443 new_task->taskFeatures[0] = 0; /* Init task features */
444 new_task->taskFeatures[1] = 0; /* Init task features */
445
446 zinfo_task_init(new_task);
447
448#ifdef MACH_BSD
449 new_task->bsd_info = NULL;
450#endif /* MACH_BSD */
451
452#if defined(__i386__) || defined(__x86_64__)
453 new_task->i386_ldt = 0;
454 new_task->task_debug = NULL;
455#endif
456
457
458 queue_init(&new_task->semaphore_list);
459 queue_init(&new_task->lock_set_list);
460 new_task->semaphores_owned = 0;
461 new_task->lock_sets_owned = 0;
462
463#if CONFIG_MACF_MACH
464 new_task->label = labelh_new(1);
465 mac_task_label_init (&new_task->maclabel);
466#endif
467
468 ipc_task_init(new_task, parent_task);
469
470 new_task->total_user_time = 0;
471 new_task->total_system_time = 0;
472
473 new_task->vtimers = 0;
474
475 new_task->shared_region = NULL;
476
477 new_task->affinity_space = NULL;
478
479#if CONFIG_COUNTERS
480 new_task->t_chud = 0U;
481#endif
482
483 new_task->pidsuspended = FALSE;
484 new_task->frozen = FALSE;
485 new_task->rusage_cpu_flags = 0;
486 new_task->rusage_cpu_percentage = 0;
487 new_task->rusage_cpu_interval = 0;
488 new_task->rusage_cpu_deadline = 0;
489 new_task->rusage_cpu_callt = NULL;
490 new_task->proc_terminate = 0;
491#if CONFIG_EMBEDDED
492 queue_init(&new_task->task_watchers);
493 new_task->appstate = TASK_APPSTATE_ACTIVE;
494 new_task->num_taskwatchers = 0;
495 new_task->watchapplying = 0;
496#endif /* CONFIG_EMBEDDED */
497
498 if (parent_task != TASK_NULL) {
499 new_task->sec_token = parent_task->sec_token;
500 new_task->audit_token = parent_task->audit_token;
501
502 /* inherit the parent's shared region */
503 shared_region = vm_shared_region_get(parent_task);
504 vm_shared_region_set(new_task, shared_region);
505
506 if(task_has_64BitAddr(parent_task))
507 task_set_64BitAddr(new_task);
508 new_task->all_image_info_addr = parent_task->all_image_info_addr;
509 new_task->all_image_info_size = parent_task->all_image_info_size;
510
511#if defined(__i386__) || defined(__x86_64__)
512 if (inherit_memory && parent_task->i386_ldt)
513 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
514#endif
515 if (inherit_memory && parent_task->affinity_space)
516 task_affinity_create(parent_task, new_task);
517
518 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
519 new_task->policystate = parent_task->policystate;
520 /* inherit the self action state */
521 new_task->appliedstate = parent_task->appliedstate;
522 new_task->ext_policystate = parent_task->ext_policystate;
523#if NOTYET
524	/* until the child lifecycle is cleared, do not inherit external action */
525 new_task->ext_appliedstate = parent_task->ext_appliedstate;
526#else
527 new_task->ext_appliedstate = default_task_null_policy;
528#endif
529 }
530 else {
531 new_task->sec_token = KERNEL_SECURITY_TOKEN;
532 new_task->audit_token = KERNEL_AUDIT_TOKEN;
533#ifdef __LP64__
534 if(is_64bit)
535 task_set_64BitAddr(new_task);
536#endif
537 new_task->all_image_info_addr = (mach_vm_address_t)0;
538 new_task->all_image_info_size = (mach_vm_size_t)0;
539
540 new_task->pset_hint = PROCESSOR_SET_NULL;
541 new_task->policystate = default_task_proc_policy;
542 new_task->ext_policystate = default_task_proc_policy;
543 new_task->appliedstate = default_task_null_policy;
544 new_task->ext_appliedstate = default_task_null_policy;
545 }
546
547 if (kernel_task == TASK_NULL) {
548 new_task->priority = BASEPRI_KERNEL;
549 new_task->max_priority = MAXPRI_KERNEL;
550 }
551 else {
552 new_task->priority = BASEPRI_DEFAULT;
553 new_task->max_priority = MAXPRI_USER;
554 }
555
556 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
557
558 lck_mtx_lock(&tasks_threads_lock);
559 queue_enter(&tasks, new_task, task_t, tasks);
560 tasks_count++;
561 lck_mtx_unlock(&tasks_threads_lock);
562
563 if (vm_backing_store_low && parent_task != NULL)
564 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
565
566 ipc_task_enable(new_task);
567
568 *child_task = new_task;
569 return(KERN_SUCCESS);
570}
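/*
 * Reference-count convention for the task returned above, as a minimal
 * hypothetical in-kernel caller:
 *
 *	task_t child;
 *
 *	if (task_create_internal(parent, FALSE, is_64bit, &child) != KERN_SUCCESS)
 *		return (KERN_RESOURCE_SHORTAGE);
 *	// child->ref_count == 2: one reference for "being alive" (dropped at the
 *	// end of task_terminate_internal), one owned by this caller.
 *	...
 *	task_deallocate(child);		// drop the caller's reference when done
 */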
571
572/*
573 * task_deallocate:
574 *
575 * Drop a reference on a task.
576 */
577void
578task_deallocate(
579 task_t task)
580{
581 ledger_amount_t credit, debit;
582
583 if (task == TASK_NULL)
584 return;
585
586 if (task_deallocate_internal(task) > 0)
587 return;
588
589 lck_mtx_lock(&tasks_threads_lock);
590 queue_remove(&terminated_tasks, task, task_t, tasks);
591 lck_mtx_unlock(&tasks_threads_lock);
592
593 /*
594 * Give the machine dependent code a chance
595 * to perform cleanup before ripping apart
596 * the task.
597 */
598 machine_task_terminate(task);
599
600 ipc_task_terminate(task);
601
602 if (task->affinity_space)
603 task_affinity_deallocate(task);
604
605 vm_map_deallocate(task->map);
606 is_release(task->itk_space);
607
608 lck_mtx_destroy(&task->lock, &task_lck_grp);
609
610#if CONFIG_MACF_MACH
611 labelh_release(task->label);
612#endif
613
614 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
615 &debit)) {
616 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
617 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
618 }
619 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
620 &debit)) {
621 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
622 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
623 }
624 ledger_dereference(task->ledger);
625 zinfo_task_free(task);
626 zfree(task_zone, task);
627}
628
629/*
630 * task_name_deallocate:
631 *
632 * Drop a reference on a task name.
633 */
634void
635task_name_deallocate(
636 task_name_t task_name)
637{
638 return(task_deallocate((task_t)task_name));
639}
640
641
642/*
643 * task_terminate:
644 *
645 * Terminate the specified task. See comments on thread_terminate
646 * (kern/thread.c) about problems with terminating the "current task."
647 */
648
649kern_return_t
650task_terminate(
651 task_t task)
652{
653 if (task == TASK_NULL)
654 return (KERN_INVALID_ARGUMENT);
655
656 if (task->bsd_info)
657 return (KERN_FAILURE);
658
659 return (task_terminate_internal(task));
660}
661
662kern_return_t
663task_terminate_internal(
664 task_t task)
665{
666 thread_t thread, self;
667 task_t self_task;
668 boolean_t interrupt_save;
669
670 assert(task != kernel_task);
671
672 self = current_thread();
673 self_task = self->task;
674
675 /*
676 * Get the task locked and make sure that we are not racing
677 * with someone else trying to terminate us.
678 */
679 if (task == self_task)
680 task_lock(task);
681 else
682 if (task < self_task) {
683 task_lock(task);
684 task_lock(self_task);
685 }
686 else {
687 task_lock(self_task);
688 task_lock(task);
689 }
690
691 if (!task->active) {
692 /*
693 * Task is already being terminated.
694 * Just return an error. If we are dying, this will
695 * just get us to our AST special handler and that
696 * will get us to finalize the termination of ourselves.
697 */
698 task_unlock(task);
699 if (self_task != task)
700 task_unlock(self_task);
701
702 return (KERN_FAILURE);
703 }
704
705 if (self_task != task)
706 task_unlock(self_task);
707
708 /*
709 * Make sure the current thread does not get aborted out of
710 * the waits inside these operations.
711 */
712 interrupt_save = thread_interrupt_level(THREAD_UNINT);
713
714 /*
715 * Indicate that we want all the threads to stop executing
716 * in user space by holding the task (we would have held
717 * each thread independently in thread_terminate_internal -
718 * but this way we may be more likely to already find it
719 * held there). Mark the task inactive, and prevent
720 * further task operations via the task port.
721 */
722 task_hold_locked(task);
723 task->active = FALSE;
724 ipc_task_disable(task);
725
726 /*
727 * Terminate each thread in the task.
728 */
729 queue_iterate(&task->threads, thread, thread_t, task_threads) {
730 thread_terminate_internal(thread);
731 }
732
733 task_unlock(task);
734
735#if CONFIG_EMBEDDED
736 /*
737 * remove all task watchers
738 */
739 task_removewatchers(task);
740#endif /* CONFIG_EMBEDDED */
741
742 /*
743 * Destroy all synchronizers owned by the task.
744 */
745 task_synchronizer_destroy_all(task);
746
747 /*
748 * Destroy the IPC space, leaving just a reference for it.
749 */
750 ipc_space_terminate(task->itk_space);
751
752 if (vm_map_has_4GB_pagezero(task->map))
753 vm_map_clear_4GB_pagezero(task->map);
754
755 /*
756 * If the current thread is a member of the task
757 * being terminated, then the last reference to
758 * the task will not be dropped until the thread
759 * is finally reaped. To avoid incurring the
760 * expense of removing the address space regions
761 * at reap time, we do it explicitly here.
762 */
763 vm_map_remove(task->map,
764 task->map->min_offset,
765 task->map->max_offset,
766 VM_MAP_NO_FLAGS);
767
768 /* release our shared region */
769 vm_shared_region_set(task, NULL);
770
771 lck_mtx_lock(&tasks_threads_lock);
772 queue_remove(&tasks, task, task_t, tasks);
773 queue_enter(&terminated_tasks, task, task_t, tasks);
774 tasks_count--;
775 lck_mtx_unlock(&tasks_threads_lock);
776
777 /*
778 * We no longer need to guard against being aborted, so restore
779 * the previous interruptible state.
780 */
781 thread_interrupt_level(interrupt_save);
782
783 /*
784 * Get rid of the task active reference on itself.
785 */
786 task_deallocate(task);
787
788 return (KERN_SUCCESS);
789}
790
791/*
792 * task_start_halt:
793 *
794 * Shut the current task down (except for the current thread) in
795 * preparation for dramatic changes to the task (probably exec).
796 * We hold the task and mark all other threads in the task for
797 * termination.
798 */
799kern_return_t
800task_start_halt(
801 task_t task)
802{
803 thread_t thread, self;
804
805 assert(task != kernel_task);
806
807 self = current_thread();
808
809 if (task != self->task)
810 return (KERN_INVALID_ARGUMENT);
811
812 task_lock(task);
813
814 if (task->halting || !task->active || !self->active) {
815 /*
816 * Task or current thread is already being terminated.
817 * Hurry up and return out of the current kernel context
818 * so that we run our AST special handler to terminate
819 * ourselves.
820 */
821 task_unlock(task);
822
823 return (KERN_FAILURE);
824 }
825
826 task->halting = TRUE;
827
828 if (task->thread_count > 1) {
829
830 /*
831 * Mark all the threads to keep them from starting any more
832 * user-level execution. The thread_terminate_internal code
833 * would do this on a thread by thread basis anyway, but this
834 * gives us a better chance of not having to wait there.
835 */
836 task_hold_locked(task);
837
838 /*
839 * Terminate all the other threads in the task.
840 */
841 queue_iterate(&task->threads, thread, thread_t, task_threads) {
842 if (thread != self)
843 thread_terminate_internal(thread);
844 }
845
846 task_release_locked(task);
847 }
848 task_unlock(task);
849 return KERN_SUCCESS;
850}
851
852
853/*
854 * task_complete_halt:
855 *
856 * Complete task halt by waiting for threads to terminate, then clean
857 * up task resources (VM, port namespace, etc...) and then let the
858 * current thread go in the (practically empty) task context.
859 */
860void
861task_complete_halt(task_t task)
862{
863 task_lock(task);
864 assert(task->halting);
865 assert(task == current_task());
866
867 /*
868 * Wait for the other threads to get shut down.
869 * When the last other thread is reaped, we'll be
870 * woken up.
871 */
872 if (task->thread_count > 1) {
873 assert_wait((event_t)&task->halting, THREAD_UNINT);
874 task_unlock(task);
875 thread_block(THREAD_CONTINUE_NULL);
876 } else {
877 task_unlock(task);
878 }
879
880 /*
881 * Give the machine dependent code a chance
882 * to perform cleanup of task-level resources
883 * associated with the current thread before
884 * ripping apart the task.
885 */
886 machine_task_terminate(task);
887
888 /*
889 * Destroy all synchronizers owned by the task.
890 */
891 task_synchronizer_destroy_all(task);
892
893 /*
894 * Destroy the contents of the IPC space, leaving just
895 * a reference for it.
896 */
897 ipc_space_clean(task->itk_space);
898
899 /*
900 * Clean out the address space, as we are going to be
901 * getting a new one.
902 */
903 vm_map_remove(task->map, task->map->min_offset,
904 task->map->max_offset, VM_MAP_NO_FLAGS);
905
906 task->halting = FALSE;
907}
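/*
 * Intended pairing of the two halt routines, sketched as an exec-style
 * caller (hedged; the real sequencing lives in the BSD exec path):
 *
 *	if (task_start_halt(task) != KERN_SUCCESS)	// stop the other threads
 *		return (error);
 *	...						// tear down old image state
 *	task_complete_halt(task);	// wait for them, then strip VM and IPC
 */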
908
909/*
910 * task_hold_locked:
911 *
912 * Suspend execution of the specified task.
913 * This is a recursive-style suspension of the task; a count of
914 * suspends is maintained.
915 *
916 * CONDITIONS: the task is locked and active.
917 */
918void
919task_hold_locked(
920 register task_t task)
921{
922 register thread_t thread;
923
924 assert(task->active);
925
926 if (task->suspend_count++ > 0)
927 return;
928
929 /*
930 * Iterate through all the threads and hold them.
931 */
932 queue_iterate(&task->threads, thread, thread_t, task_threads) {
933 thread_mtx_lock(thread);
934 thread_hold(thread);
935 thread_mtx_unlock(thread);
936 }
937}
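/*
 * Worked example of the recursive suspension count (task locked and active
 * around each call):
 *
 *	task_hold_locked(task);		// suspend_count 0 -> 1, threads held
 *	task_hold_locked(task);		// suspend_count 1 -> 2, threads untouched
 *	task_release_locked(task);	// suspend_count 2 -> 1, still held
 *	task_release_locked(task);	// suspend_count 1 -> 0, threads released
 */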
938
939/*
940 * task_hold:
941 *
942 * Same as the internal routine above, except that it must lock
943 * and verify that the task is active. This differs from task_suspend
944 * in that it places a kernel hold on the task rather than just a
945 * user-level hold. This keeps users from over-resuming the task and setting
946 * it running out from under the kernel.
947 *
948 * CONDITIONS: the caller holds a reference on the task
949 */
950kern_return_t
951task_hold(
952 register task_t task)
953{
954 if (task == TASK_NULL)
955 return (KERN_INVALID_ARGUMENT);
956
957 task_lock(task);
958
959 if (!task->active) {
960 task_unlock(task);
961
962 return (KERN_FAILURE);
963 }
964
965 task_hold_locked(task);
966 task_unlock(task);
967
968 return (KERN_SUCCESS);
969}
970
971kern_return_t
972task_wait(
973 task_t task,
974 boolean_t until_not_runnable)
975{
976 if (task == TASK_NULL)
977 return (KERN_INVALID_ARGUMENT);
978
979 task_lock(task);
980
981 if (!task->active) {
982 task_unlock(task);
983
984 return (KERN_FAILURE);
985 }
986
987 task_wait_locked(task, until_not_runnable);
988 task_unlock(task);
989
990 return (KERN_SUCCESS);
991}
992
993/*
994 * task_wait_locked:
995 *
996 * Wait for all threads in task to stop.
997 *
998 * Conditions:
999 * Called with task locked, active, and held.
1000 */
1001void
1002task_wait_locked(
1003 register task_t task,
1004 boolean_t until_not_runnable)
1005{
1006 register thread_t thread, self;
1007
1008 assert(task->active);
1009 assert(task->suspend_count > 0);
1010
1011 self = current_thread();
1012
1013 /*
1014 * Iterate through all the threads and wait for them to
1015 * stop. Do not wait for the current thread if it is within
1016 * the task.
1017 */
1018 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1019 if (thread != self)
1020 thread_wait(thread, until_not_runnable);
1021 }
1022}
1023
1024/*
1025 * task_release_locked:
1026 *
1027 * Release a kernel hold on a task.
1028 *
1029 * CONDITIONS: the task is locked and active
1030 */
1031void
1032task_release_locked(
1033 register task_t task)
1034{
1035 register thread_t thread;
1036
1037 assert(task->active);
1038 assert(task->suspend_count > 0);
1039
1040 if (--task->suspend_count > 0)
1041 return;
1042
1043 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1044 thread_mtx_lock(thread);
1045 thread_release(thread);
1046 thread_mtx_unlock(thread);
1047 }
1048}
1049
1050/*
1051 * task_release:
1052 *
1053 * Same as the internal routine above, except that it must lock
1054 * and verify that the task is active.
1055 *
1056 * CONDITIONS: The caller holds a reference to the task
1057 */
1058kern_return_t
1059task_release(
1060 task_t task)
1061{
1062 if (task == TASK_NULL)
1063 return (KERN_INVALID_ARGUMENT);
1064
1065 task_lock(task);
1066
1067 if (!task->active) {
1068 task_unlock(task);
1069
1070 return (KERN_FAILURE);
1071 }
1072
1073 task_release_locked(task);
1074 task_unlock(task);
1075
1076 return (KERN_SUCCESS);
1077}
1078
1079kern_return_t
1080task_threads(
1081 task_t task,
1082 thread_act_array_t *threads_out,
1083 mach_msg_type_number_t *count)
1084{
1085 mach_msg_type_number_t actual;
1086 thread_t *thread_list;
1087 thread_t thread;
1088 vm_size_t size, size_needed;
1089 void *addr;
1090 unsigned int i, j;
1091
1092 if (task == TASK_NULL)
1093 return (KERN_INVALID_ARGUMENT);
1094
1095 size = 0; addr = NULL;
1096
1097 for (;;) {
1098 task_lock(task);
1099 if (!task->active) {
1100 task_unlock(task);
1101
1102 if (size != 0)
1103 kfree(addr, size);
1104
1105 return (KERN_FAILURE);
1106 }
1107
1108 actual = task->thread_count;
1109
1110 /* do we have the memory we need? */
1111 size_needed = actual * sizeof (mach_port_t);
1112 if (size_needed <= size)
1113 break;
1114
1115 /* unlock the task and allocate more memory */
1116 task_unlock(task);
1117
1118 if (size != 0)
1119 kfree(addr, size);
1120
1121 assert(size_needed > 0);
1122 size = size_needed;
1123
1124 addr = kalloc(size);
1125 if (addr == 0)
1126 return (KERN_RESOURCE_SHORTAGE);
1127 }
1128
1129 /* OK, have memory and the task is locked & active */
1130 thread_list = (thread_t *)addr;
1131
1132 i = j = 0;
1133
1134 for (thread = (thread_t)queue_first(&task->threads); i < actual;
1135 ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
1136 thread_reference_internal(thread);
1137 thread_list[j++] = thread;
1138 }
1139
1140 assert(queue_end(&task->threads, (queue_entry_t)thread));
1141
1142 actual = j;
1143 size_needed = actual * sizeof (mach_port_t);
1144
1145 /* can unlock task now that we've got the thread refs */
1146 task_unlock(task);
1147
1148 if (actual == 0) {
1149 /* no threads, so return null pointer and deallocate memory */
1150
1151 *threads_out = NULL;
1152 *count = 0;
1153
1154 if (size != 0)
1155 kfree(addr, size);
1156 }
1157 else {
1158 /* if we allocated too much, must copy */
1159
1160 if (size_needed < size) {
1161 void *newaddr;
1162
1163 newaddr = kalloc(size_needed);
1164 if (newaddr == 0) {
1165 for (i = 0; i < actual; ++i)
1166 thread_deallocate(thread_list[i]);
1167 kfree(addr, size);
1168 return (KERN_RESOURCE_SHORTAGE);
1169 }
1170
1171 bcopy(addr, newaddr, size_needed);
1172 kfree(addr, size);
1173 thread_list = (thread_t *)newaddr;
1174 }
1175
1176 *threads_out = thread_list;
1177 *count = actual;
1178
1179		/* do the conversion that MIG should handle */
1180
1181 for (i = 0; i < actual; ++i)
1182 ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
1183 }
1184
1185 return (KERN_SUCCESS);
1186}
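/*
 * User-space usage sketch (illustrative; not built as part of this file).
 * The MIG-generated client returns the thread ports in out-of-line memory,
 * so the caller owns one send right per thread plus the array itself:
 *
 *	#include <mach/mach.h>
 *
 *	thread_act_array_t threads;
 *	mach_msg_type_number_t count, i;
 *
 *	if (task_threads(mach_task_self(), &threads, &count) == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++)
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *		    count * sizeof (threads[0]));
 *	}
 */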
1187
1188static kern_return_t
1189place_task_hold (
1190 register task_t task)
1191{
1192 if (!task->active) {
1193 return (KERN_FAILURE);
1194 }
1195
1196 if (task->user_stop_count++ > 0) {
1197 /*
1198 * If the stop count was positive, the task is
1199 * already stopped and we can exit.
1200 */
1201 return (KERN_SUCCESS);
1202 }
1203
1204 /*
1205 * Put a kernel-level hold on the threads in the task (all
1206 * user-level task suspensions added together represent a
1207 * single kernel-level hold). We then wait for the threads
1208 * to stop executing user code.
1209 */
1210 task_hold_locked(task);
1211 task_wait_locked(task, TRUE);
1212
1213 return (KERN_SUCCESS);
1214}
1215
1216static kern_return_t
1217release_task_hold (
1218 register task_t task,
1219 boolean_t pidresume)
1220{
1221 register boolean_t release = FALSE;
1222
1223 if (!task->active) {
1224 return (KERN_FAILURE);
1225 }
1226
1227 if (pidresume) {
1228 if (task->pidsuspended == FALSE) {
1229 return (KERN_FAILURE);
1230 }
1231 task->pidsuspended = FALSE;
1232 }
1233
1234 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
1235 if (--task->user_stop_count == 0) {
1236 release = TRUE;
1237 }
1238 }
1239 else {
1240 return (KERN_FAILURE);
1241 }
1242
1243 /*
1244 * Release the task if necessary.
1245 */
1246 if (release)
1247 task_release_locked(task);
1248
1249 return (KERN_SUCCESS);
1250}
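/*
 * Worked example of how user_stop_count and pidsuspended interact
 * (task locked around each call):
 *
 *	place_task_hold(task);		// task_suspend: count 0 -> 1, task stops
 *	task_pidsuspend_locked(task);	// count 1 -> 2, pidsuspended = TRUE
 *	release_task_hold(task, FALSE);	// task_resume: count 2 -> 1
 *	release_task_hold(task, FALSE);	// fails: count cannot drop below the
 *					// floor of 1 held for the pidsuspend
 *	release_task_hold(task, TRUE);	// task_pidresume: count 1 -> 0, released
 */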
1251
1252/*
1253 * task_suspend:
1254 *
1255 * Implement a user-level suspension on a task.
1256 *
1257 * Conditions:
1258 * The caller holds a reference to the task
1259 */
1260kern_return_t
1261task_suspend(
1262 register task_t task)
1263{
1264 kern_return_t kr;
1265
1266 if (task == TASK_NULL || task == kernel_task)
1267 return (KERN_INVALID_ARGUMENT);
1268
1269 task_lock(task);
1270
1271 kr = place_task_hold(task);
1272
1273 task_unlock(task);
1274
1275 return (kr);
1276}
1277
1278/*
1279 * task_resume:
1280 * Release a user-level suspension on a task.
1281 *
1282 * Conditions:
1283 * The caller holds a reference to the task
1284 */
1285kern_return_t
1286task_resume(
1287 register task_t task)
1288{
1289 kern_return_t kr;
1290
1291 if (task == TASK_NULL || task == kernel_task)
1292 return (KERN_INVALID_ARGUMENT);
1293
1294 task_lock(task);
1295
1296 kr = release_task_hold(task, FALSE);
1297
1298 task_unlock(task);
1299
1300 return (kr);
1301}
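/*
 * User-space usage sketch (illustrative): suspensions are counted, so every
 * successful task_suspend() must be balanced by a task_resume(). The port
 * name 'target_task' is a placeholder.
 *
 *	#include <mach/mach.h>
 *
 *	if (task_suspend(target_task) == KERN_SUCCESS) {
 *		...				// inspect the stopped task
 *		(void) task_resume(target_task);
 *	}
 */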
1302
1303kern_return_t
1304task_pidsuspend_locked(task_t task)
1305{
1306 kern_return_t kr;
1307
1308 if (task->pidsuspended) {
1309 kr = KERN_FAILURE;
1310 goto out;
1311 }
1312
1313 task->pidsuspended = TRUE;
1314
1315 kr = place_task_hold(task);
1316 if (kr != KERN_SUCCESS) {
1317 task->pidsuspended = FALSE;
1318 }
1319out:
1320 return(kr);
1321}
1322
1323
1324/*
1325 * task_pidsuspend:
1326 *
1327 * Suspends a task by placing a hold on its threads.
1328 *
1329 * Conditions:
1330 * The caller holds a reference to the task
1331 */
1332kern_return_t
1333task_pidsuspend(
1334 register task_t task)
1335{
1336 kern_return_t kr;
1337
1338 if (task == TASK_NULL || task == kernel_task)
1339 return (KERN_INVALID_ARGUMENT);
1340
1341 task_lock(task);
1342
1343 kr = task_pidsuspend_locked(task);
1344
1345 task_unlock(task);
1346
1347 return (kr);
1348}
1349
1350/* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */
1351#define THAW_ON_RESUME 1
1352
1353/*
1354 * task_pidresume:
1355 * Resumes a previously suspended task.
1356 *
1357 * Conditions:
1358 * The caller holds a reference to the task
1359 */
1360kern_return_t
1361task_pidresume(
1362 register task_t task)
1363{
1364 kern_return_t kr;
1365#if (CONFIG_FREEZE && THAW_ON_RESUME)
1366 boolean_t frozen;
1367#endif
1368
1369 if (task == TASK_NULL || task == kernel_task)
1370 return (KERN_INVALID_ARGUMENT);
1371
1372 task_lock(task);
1373
1374#if (CONFIG_FREEZE && THAW_ON_RESUME)
1375 frozen = task->frozen;
1376 task->frozen = FALSE;
1377#endif
1378
1379 kr = release_task_hold(task, TRUE);
1380
1381 task_unlock(task);
1382
1383#if (CONFIG_FREEZE && THAW_ON_RESUME)
1384 if ((kr == KERN_SUCCESS) && (frozen == TRUE)) {
1385 kr = vm_map_thaw(task->map);
1386 }
1387#endif
1388
1389 return (kr);
1390}
1391
1392#if CONFIG_FREEZE
1393
1394/*
1395 * task_freeze:
1396 *
1397 * Freeze a task.
1398 *
1399 * Conditions:
1400 * The caller holds a reference to the task
1401 */
1402kern_return_t
1403task_freeze(
1404 register task_t task,
1405 uint32_t *purgeable_count,
1406 uint32_t *wired_count,
1407 uint32_t *clean_count,
1408 uint32_t *dirty_count,
1409 uint32_t dirty_budget,
1410 boolean_t *shared,
1411 boolean_t walk_only)
1412{
1413 kern_return_t kr;
1414
1415 if (task == TASK_NULL || task == kernel_task)
1416 return (KERN_INVALID_ARGUMENT);
1417
1418 task_lock(task);
1419
1420 if (task->frozen) {
1421 task_unlock(task);
1422 return (KERN_FAILURE);
1423 }
1424
1425 if (walk_only == FALSE) {
1426 task->frozen = TRUE;
1427 }
1428
1429 task_unlock(task);
1430
1431 if (walk_only) {
1432 kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
1433 } else {
1434 kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
1435 }
1436
1437 return (kr);
1438}
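/*
 * Hedged caller sketch (the real caller is the BSD memorystatus/freezer
 * code, not shown here; 'budget' is a placeholder page budget): walk first
 * to size the job, then freeze for real if it looks worthwhile.
 *
 *	uint32_t purgeable, wired, clean, dirty;
 *	boolean_t shared;
 *
 *	if (task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    budget, &shared, TRUE) == KERN_SUCCESS && dirty <= budget)
 *		(void) task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *		    budget, &shared, FALSE);
 */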
1439
1440/*
1441 * task_thaw:
1442 *
1443 * Thaw a currently frozen task.
1444 *
1445 * Conditions:
1446 * The caller holds a reference to the task
1447 */
1448kern_return_t
1449task_thaw(
1450 register task_t task)
1451{
1452 kern_return_t kr;
1453
1454 if (task == TASK_NULL || task == kernel_task)
1455 return (KERN_INVALID_ARGUMENT);
1456
1457 task_lock(task);
1458
1459 if (!task->frozen) {
1460 task_unlock(task);
1461 return (KERN_FAILURE);
1462 }
1463
1464 task->frozen = FALSE;
1465
1466 task_unlock(task);
1467
1468 kr = vm_map_thaw(task->map);
1469
1470 return (kr);
1471}
1472
1473#endif /* CONFIG_FREEZE */
1474
1475kern_return_t
1476host_security_set_task_token(
1477 host_security_t host_security,
1478 task_t task,
1479 security_token_t sec_token,
1480 audit_token_t audit_token,
1481 host_priv_t host_priv)
1482{
1483 ipc_port_t host_port;
1484 kern_return_t kr;
1485
1486 if (task == TASK_NULL)
1487 return(KERN_INVALID_ARGUMENT);
1488
1489 if (host_security == HOST_NULL)
1490 return(KERN_INVALID_SECURITY);
1491
1492 task_lock(task);
1493 task->sec_token = sec_token;
1494 task->audit_token = audit_token;
1495 task_unlock(task);
1496
1497 if (host_priv != HOST_PRIV_NULL) {
1498 kr = host_get_host_priv_port(host_priv, &host_port);
1499 } else {
1500 kr = host_get_host_port(host_priv_self(), &host_port);
1501 }
1502 assert(kr == KERN_SUCCESS);
1503 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
1504 return(kr);
1505}
1506
1507/*
1508 * This routine was added, pretty much exclusively, for registering the
1509 * RPC glue vector for in-kernel short circuited tasks. Rather than
1510 * removing it completely, I have only disabled that feature (which was
1511 * the only feature at the time). It just appears that we are going to
1512 * want to add some user data to tasks in the future (i.e. bsd info,
1513 * task names, etc...), so I left it in the formal task interface.
1514 */
1515kern_return_t
1516task_set_info(
1517 task_t task,
1518 task_flavor_t flavor,
1519 __unused task_info_t task_info_in, /* pointer to IN array */
1520 __unused mach_msg_type_number_t task_info_count)
1521{
1522 if (task == TASK_NULL)
1523 return(KERN_INVALID_ARGUMENT);
1524
1525 switch (flavor) {
1526 default:
1527 return (KERN_INVALID_ARGUMENT);
1528 }
1529 return (KERN_SUCCESS);
1530}
1531
1532kern_return_t
1533task_info(
1534 task_t task,
1535 task_flavor_t flavor,
1536 task_info_t task_info_out,
1537 mach_msg_type_number_t *task_info_count)
1538{
1539 kern_return_t error = KERN_SUCCESS;
1540
1541 if (task == TASK_NULL)
1542 return (KERN_INVALID_ARGUMENT);
1543
1544 task_lock(task);
1545
1546 if ((task != current_task()) && (!task->active)) {
1547 task_unlock(task);
1548 return (KERN_INVALID_ARGUMENT);
1549 }
1550
1551 switch (flavor) {
1552
1553 case TASK_BASIC_INFO_32:
1554 case TASK_BASIC2_INFO_32:
1555 {
1556 task_basic_info_32_t basic_info;
1557 vm_map_t map;
1558 clock_sec_t secs;
1559 clock_usec_t usecs;
1560
1561 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
1562 error = KERN_INVALID_ARGUMENT;
1563 break;
1564 }
1565
1566 basic_info = (task_basic_info_32_t)task_info_out;
1567
1568 map = (task == kernel_task)? kernel_map: task->map;
1569 basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
1570 if (flavor == TASK_BASIC2_INFO_32) {
1571 /*
1572 * The "BASIC2" flavor gets the maximum resident
1573 * size instead of the current resident size...
1574 */
1575 basic_info->resident_size = pmap_resident_max(map->pmap);
1576 } else {
1577 basic_info->resident_size = pmap_resident_count(map->pmap);
1578 }
1579 basic_info->resident_size *= PAGE_SIZE;
1580
1581 basic_info->policy = ((task != kernel_task)?
1582 POLICY_TIMESHARE: POLICY_RR);
1583 basic_info->suspend_count = task->user_stop_count;
1584
1585 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1586 basic_info->user_time.seconds =
1587 (typeof(basic_info->user_time.seconds))secs;
1588 basic_info->user_time.microseconds = usecs;
1589
1590 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1591 basic_info->system_time.seconds =
1592 (typeof(basic_info->system_time.seconds))secs;
1593 basic_info->system_time.microseconds = usecs;
1594
1595 *task_info_count = TASK_BASIC_INFO_32_COUNT;
1596 break;
1597 }
1598
1599 case TASK_BASIC_INFO_64:
1600 {
1601 task_basic_info_64_t basic_info;
1602 vm_map_t map;
1603 clock_sec_t secs;
1604 clock_usec_t usecs;
1605
1606 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
1607 error = KERN_INVALID_ARGUMENT;
1608 break;
1609 }
1610
1611 basic_info = (task_basic_info_64_t)task_info_out;
1612
1613 map = (task == kernel_task)? kernel_map: task->map;
1614 basic_info->virtual_size = map->size;
1615 basic_info->resident_size =
1616 (mach_vm_size_t)(pmap_resident_count(map->pmap))
1617 * PAGE_SIZE_64;
1618
1619 basic_info->policy = ((task != kernel_task)?
1620 POLICY_TIMESHARE: POLICY_RR);
1621 basic_info->suspend_count = task->user_stop_count;
1622
1623 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1624 basic_info->user_time.seconds =
1625 (typeof(basic_info->user_time.seconds))secs;
1626 basic_info->user_time.microseconds = usecs;
1627
1628 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1629 basic_info->system_time.seconds =
1630 (typeof(basic_info->system_time.seconds))secs;
1631 basic_info->system_time.microseconds = usecs;
1632
1633 *task_info_count = TASK_BASIC_INFO_64_COUNT;
1634 break;
1635 }
1636
1637 case MACH_TASK_BASIC_INFO:
1638 {
1639 mach_task_basic_info_t basic_info;
1640 vm_map_t map;
1641 clock_sec_t secs;
1642 clock_usec_t usecs;
1643
1644 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
1645 error = KERN_INVALID_ARGUMENT;
1646 break;
1647 }
1648
1649 basic_info = (mach_task_basic_info_t)task_info_out;
1650
1651 map = (task == kernel_task) ? kernel_map : task->map;
1652
1653 basic_info->virtual_size = map->size;
1654
1655 basic_info->resident_size =
1656 (mach_vm_size_t)(pmap_resident_count(map->pmap));
1657 basic_info->resident_size *= PAGE_SIZE_64;
1658
1659 basic_info->resident_size_max =
1660 (mach_vm_size_t)(pmap_resident_max(map->pmap));
1661 basic_info->resident_size_max *= PAGE_SIZE_64;
1662
1663 basic_info->policy = ((task != kernel_task) ?
1664 POLICY_TIMESHARE : POLICY_RR);
1665
1666 basic_info->suspend_count = task->user_stop_count;
1667
1668 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1669 basic_info->user_time.seconds =
1670 (typeof(basic_info->user_time.seconds))secs;
1671 basic_info->user_time.microseconds = usecs;
1672
1673 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1674 basic_info->system_time.seconds =
1675 (typeof(basic_info->system_time.seconds))secs;
1676 basic_info->system_time.microseconds = usecs;
1677
1678 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
1679 break;
1680 }
1681
1682 case TASK_THREAD_TIMES_INFO:
1683 {
1684 register task_thread_times_info_t times_info;
1685 register thread_t thread;
1686
1687 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
1688 error = KERN_INVALID_ARGUMENT;
1689 break;
1690 }
1691
1692 times_info = (task_thread_times_info_t) task_info_out;
1693 times_info->user_time.seconds = 0;
1694 times_info->user_time.microseconds = 0;
1695 times_info->system_time.seconds = 0;
1696 times_info->system_time.microseconds = 0;
1697
1698
1699 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1700 time_value_t user_time, system_time;
1701
1702 thread_read_times(thread, &user_time, &system_time);
1703
1704 time_value_add(&times_info->user_time, &user_time);
1705 time_value_add(&times_info->system_time, &system_time);
1706 }
1707
1708
1709 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1710 break;
1711 }
1712
1713 case TASK_ABSOLUTETIME_INFO:
1714 {
1715 task_absolutetime_info_t info;
1716 register thread_t thread;
1717
1718 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
1719 error = KERN_INVALID_ARGUMENT;
1720 break;
1721 }
1722
1723 info = (task_absolutetime_info_t)task_info_out;
1724 info->threads_user = info->threads_system = 0;
1725
1726
1727 info->total_user = task->total_user_time;
1728 info->total_system = task->total_system_time;
1729
1730 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1731 uint64_t tval;
1732 spl_t x;
1733
1734 x = splsched();
1735 thread_lock(thread);
1736
1737 tval = timer_grab(&thread->user_timer);
1738 info->threads_user += tval;
1739 info->total_user += tval;
1740
1741 tval = timer_grab(&thread->system_timer);
1742 if (thread->precise_user_kernel_time) {
1743 info->threads_system += tval;
1744 info->total_system += tval;
1745 } else {
1746 /* system_timer may represent either sys or user */
1747 info->threads_user += tval;
1748 info->total_user += tval;
1749 }
1750
1751 thread_unlock(thread);
1752 splx(x);
1753 }
1754
1755
1756 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
1757 break;
1758 }
1759
1760 case TASK_DYLD_INFO:
1761 {
1762 task_dyld_info_t info;
1763
1764 /*
1765 * We added the format field to TASK_DYLD_INFO output. For
1766 * temporary backward compatibility, accept the fact that
1767 * clients may ask for the old version - distinguished by the
1768 * size of the expected result structure.
1769 */
1770#define TASK_LEGACY_DYLD_INFO_COUNT \
1771 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
1772
1773 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
1774 error = KERN_INVALID_ARGUMENT;
1775 break;
1776 }
1777
1778 info = (task_dyld_info_t)task_info_out;
1779 info->all_image_info_addr = task->all_image_info_addr;
1780 info->all_image_info_size = task->all_image_info_size;
1781
1782 /* only set format on output for those expecting it */
1783 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
1784 info->all_image_info_format = task_has_64BitAddr(task) ?
1785 TASK_DYLD_ALL_IMAGE_INFO_64 :
1786 TASK_DYLD_ALL_IMAGE_INFO_32 ;
1787 *task_info_count = TASK_DYLD_INFO_COUNT;
1788 } else {
1789 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
1790 }
1791 break;
1792 }
1793
1794 case TASK_EXTMOD_INFO:
1795 {
1796 task_extmod_info_t info;
1797 void *p;
1798
1799 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
1800 error = KERN_INVALID_ARGUMENT;
1801 break;
1802 }
1803
1804 info = (task_extmod_info_t)task_info_out;
1805
1806 p = get_bsdtask_info(task);
1807 if (p) {
1808 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
1809 } else {
1810 bzero(info->task_uuid, sizeof(info->task_uuid));
1811 }
1812 info->extmod_statistics = task->extmod_statistics;
1813 *task_info_count = TASK_EXTMOD_INFO_COUNT;
1814
1815 break;
1816 }
1817
1818 case TASK_KERNELMEMORY_INFO:
1819 {
1820 task_kernelmemory_info_t tkm_info;
1821 ledger_amount_t credit, debit;
1822
1823 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
1824 error = KERN_INVALID_ARGUMENT;
1825 break;
1826 }
1827
1828 tkm_info = (task_kernelmemory_info_t) task_info_out;
1829 tkm_info->total_palloc = 0;
1830 tkm_info->total_pfree = 0;
1831 tkm_info->total_salloc = 0;
1832 tkm_info->total_sfree = 0;
1833
1834 if (task == kernel_task) {
1835 /*
1836 * All shared allocs/frees from other tasks count against
1837 * the kernel private memory usage. If we are looking up
1838 * info for the kernel task, gather from everywhere.
1839 */
1840 task_unlock(task);
1841
1842 /* start by accounting for all the terminated tasks against the kernel */
1843 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
1844 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
1845
1846 /* count all other task/thread shared alloc/free against the kernel */
1847 lck_mtx_lock(&tasks_threads_lock);
1848
1849 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
1850 queue_iterate(&tasks, task, task_t, tasks) {
1851 if (task == kernel_task) {
1852 if (ledger_get_entries(task->ledger,
1853 task_ledgers.tkm_private, &credit,
1854 &debit) == KERN_SUCCESS) {
1855 tkm_info->total_palloc += credit;
1856 tkm_info->total_pfree += debit;
1857 }
1858 }
1859 if (!ledger_get_entries(task->ledger,
1860 task_ledgers.tkm_shared, &credit, &debit)) {
1861 tkm_info->total_palloc += credit;
1862 tkm_info->total_pfree += debit;
1863 }
1864 }
1865 lck_mtx_unlock(&tasks_threads_lock);
1866 } else {
1867 if (!ledger_get_entries(task->ledger,
1868 task_ledgers.tkm_private, &credit, &debit)) {
1869 tkm_info->total_palloc = credit;
1870 tkm_info->total_pfree = debit;
1871 }
1872 if (!ledger_get_entries(task->ledger,
1873 task_ledgers.tkm_shared, &credit, &debit)) {
1874 tkm_info->total_salloc = credit;
1875 tkm_info->total_sfree = debit;
1876 }
1877 task_unlock(task);
1878 }
1879
1880 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
1881 return KERN_SUCCESS;
1882 }
1883
1884 /* OBSOLETE */
1885 case TASK_SCHED_FIFO_INFO:
1886 {
1887
1888 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
1889 error = KERN_INVALID_ARGUMENT;
1890 break;
1891 }
1892
1893 error = KERN_INVALID_POLICY;
1894 break;
1895 }
1896
1897 /* OBSOLETE */
1898 case TASK_SCHED_RR_INFO:
1899 {
1900 register policy_rr_base_t rr_base;
1901 uint32_t quantum_time;
1902 uint64_t quantum_ns;
1903
1904 if (*task_info_count < POLICY_RR_BASE_COUNT) {
1905 error = KERN_INVALID_ARGUMENT;
1906 break;
1907 }
1908
1909 rr_base = (policy_rr_base_t) task_info_out;
1910
1911 if (task != kernel_task) {
1912 error = KERN_INVALID_POLICY;
1913 break;
1914 }
1915
1916 rr_base->base_priority = task->priority;
1917
1918 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
1919 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
1920
1921 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
1922
1923 *task_info_count = POLICY_RR_BASE_COUNT;
1924 break;
1925 }
1926
1927 /* OBSOLETE */
1928 case TASK_SCHED_TIMESHARE_INFO:
1929 {
1930 register policy_timeshare_base_t ts_base;
1931
1932 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
1933 error = KERN_INVALID_ARGUMENT;
1934 break;
1935 }
1936
1937 ts_base = (policy_timeshare_base_t) task_info_out;
1938
1939 if (task == kernel_task) {
1940 error = KERN_INVALID_POLICY;
1941 break;
1942 }
1943
1944 ts_base->base_priority = task->priority;
1945
1946 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1947 break;
1948 }
1949
1950 case TASK_SECURITY_TOKEN:
1951 {
1952 register security_token_t *sec_token_p;
1953
1954 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1955 error = KERN_INVALID_ARGUMENT;
1956 break;
1957 }
1958
1959 sec_token_p = (security_token_t *) task_info_out;
1960
1961 *sec_token_p = task->sec_token;
1962
1963 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
1964 break;
1965 }
1966
1967 case TASK_AUDIT_TOKEN:
1968 {
1969 register audit_token_t *audit_token_p;
1970
1971 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
1972 error = KERN_INVALID_ARGUMENT;
1973 break;
1974 }
1975
1976 audit_token_p = (audit_token_t *) task_info_out;
1977
1978 *audit_token_p = task->audit_token;
1979
1980 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
1981 break;
1982 }
1983
1984 case TASK_SCHED_INFO:
1985 error = KERN_INVALID_ARGUMENT;
1986 break;
1987
1988 case TASK_EVENTS_INFO:
1989 {
1990 register task_events_info_t events_info;
1991 register thread_t thread;
1992
1993 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
1994 error = KERN_INVALID_ARGUMENT;
1995 break;
1996 }
1997
1998 events_info = (task_events_info_t) task_info_out;
1999
2000
2001 events_info->faults = task->faults;
2002 events_info->pageins = task->pageins;
2003 events_info->cow_faults = task->cow_faults;
2004 events_info->messages_sent = task->messages_sent;
2005 events_info->messages_received = task->messages_received;
2006 events_info->syscalls_mach = task->syscalls_mach;
2007 events_info->syscalls_unix = task->syscalls_unix;
2008
2009 events_info->csw = task->c_switch;
2010
2011 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2012 events_info->csw += thread->c_switch;
2013 events_info->syscalls_mach += thread->syscalls_mach;
2014 events_info->syscalls_unix += thread->syscalls_unix;
2015 }
2016
2017
2018 *task_info_count = TASK_EVENTS_INFO_COUNT;
2019 break;
2020 }
2021 case TASK_AFFINITY_TAG_INFO:
2022 {
2023 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
2024 error = KERN_INVALID_ARGUMENT;
2025 break;
2026 }
2027
2028 error = task_affinity_info(task, task_info_out, task_info_count);
2029 break;
2030 }
2031 default:
2032 error = KERN_INVALID_ARGUMENT;
2033 }
2034
2035 task_unlock(task);
2036 return (error);
2037}
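/*
 * User-space usage sketch (illustrative): querying the MACH_TASK_BASIC_INFO
 * flavor handled above for the calling task.
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	mach_task_basic_info_data_t info;
 *	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
 *	    (task_info_t)&info, &count) == KERN_SUCCESS)
 *		printf("resident %llu bytes (max %llu)\n",
 *		    (unsigned long long)info.resident_size,
 *		    (unsigned long long)info.resident_size_max);
 */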
2038
2039void
2040task_vtimer_set(
2041 task_t task,
2042 integer_t which)
2043{
2044 thread_t thread;
2045 spl_t x;
2046
2047 /* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */
2048
2049 task_lock(task);
2050
2051 task->vtimers |= which;
2052
2053 switch (which) {
2054
2055 case TASK_VTIMER_USER:
2056 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2057 x = splsched();
2058 thread_lock(thread);
2059 if (thread->precise_user_kernel_time)
2060 thread->vtimer_user_save = timer_grab(&thread->user_timer);
2061 else
2062 thread->vtimer_user_save = timer_grab(&thread->system_timer);
2063 thread_unlock(thread);
2064 splx(x);
2065 }
2066 break;
2067
2068 case TASK_VTIMER_PROF:
2069 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2070 x = splsched();
2071 thread_lock(thread);
2072 thread->vtimer_prof_save = timer_grab(&thread->user_timer);
2073 thread->vtimer_prof_save += timer_grab(&thread->system_timer);
2074 thread_unlock(thread);
2075 splx(x);
2076 }
2077 break;
2078
2079 case TASK_VTIMER_RLIM:
2080 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2081 x = splsched();
2082 thread_lock(thread);
2083 thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
2084 thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
2085 thread_unlock(thread);
2086 splx(x);
2087 }
2088 break;
2089 }
2090
2091 task_unlock(task);
2092}
2093
2094void
2095task_vtimer_clear(
2096 task_t task,
2097 integer_t which)
2098{
2099 assert(task == current_task());
2100
2101 task_lock(task);
2102
2103 task->vtimers &= ~which;
2104
2105 task_unlock(task);
2106}
2107
2108void
2109task_vtimer_update(
2110__unused
2111 task_t task,
2112 integer_t which,
2113 uint32_t *microsecs)
2114{
2115 thread_t thread = current_thread();
2116 uint32_t tdelt;
2117 clock_sec_t secs;
2118 uint64_t tsum;
2119
2120 assert(task == current_task());
2121
2122 assert(task->vtimers & which);
2123
2124 secs = tdelt = 0;
2125
2126 switch (which) {
2127
2128 case TASK_VTIMER_USER:
2129 if (thread->precise_user_kernel_time) {
2130 tdelt = (uint32_t)timer_delta(&thread->user_timer,
2131 &thread->vtimer_user_save);
2132 } else {
2133 tdelt = (uint32_t)timer_delta(&thread->system_timer,
2134 &thread->vtimer_user_save);
2135 }
2136 absolutetime_to_microtime(tdelt, &secs, microsecs);
2137 break;
2138
2139 case TASK_VTIMER_PROF:
2140 tsum = timer_grab(&thread->user_timer);
2141 tsum += timer_grab(&thread->system_timer);
2142 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
2143 absolutetime_to_microtime(tdelt, &secs, microsecs);
2144 /* if the time delta is smaller than a usec, ignore */
2145 if (*microsecs != 0)
2146 thread->vtimer_prof_save = tsum;
2147 break;
2148
2149 case TASK_VTIMER_RLIM:
2150 tsum = timer_grab(&thread->user_timer);
2151 tsum += timer_grab(&thread->system_timer);
2152 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
2153 thread->vtimer_rlim_save = tsum;
2154 absolutetime_to_microtime(tdelt, &secs, microsecs);
2155 break;
2156 }
2157
2158}
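
/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * task_vtimer_set(), task_vtimer_update() and task_vtimer_clear() as a
 * virtual-timer consumer (for example, a setitimer(ITIMER_VIRTUAL)-style
 * path) might use them. The surrounding bookkeeping is hypothetical; only
 * the three routines and the TASK_VTIMER_* flags come from this file.
 *
 *	task_t task = current_task();
 *	uint32_t delta_usecs;
 *
 *	task_vtimer_set(task, TASK_VTIMER_USER);	// snapshot per-thread timers
 *	...
 *	if (task->vtimers & TASK_VTIMER_USER) {
 *		// microseconds of user time accumulated by the current thread
 *		// since the last snapshot
 *		task_vtimer_update(task, TASK_VTIMER_USER, &delta_usecs);
 *	}
 *	...
 *	task_vtimer_clear(task, TASK_VTIMER_USER);	// stop tracking
 */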
2159
2160/*
2161 * task_assign:
2162 *
2163 * Change the assigned processor set for the task
2164 */
2165kern_return_t
2166task_assign(
2167 __unused task_t task,
2168 __unused processor_set_t new_pset,
2169 __unused boolean_t assign_threads)
2170{
2171 return(KERN_FAILURE);
2172}
2173
2174/*
2175 * task_assign_default:
2176 *
2177 * Version of task_assign to assign to default processor set.
2178 */
2179kern_return_t
2180task_assign_default(
2181 task_t task,
2182 boolean_t assign_threads)
2183{
2184 return (task_assign(task, &pset0, assign_threads));
2185}
2186
2187/*
2188 * task_get_assignment
2189 *
2190 * Return name of processor set that task is assigned to.
2191 */
2192kern_return_t
2193task_get_assignment(
2194 task_t task,
2195 processor_set_t *pset)
2196{
2197 if (!task->active)
2198 return(KERN_FAILURE);
2199
2200 *pset = &pset0;
2201
2202 return (KERN_SUCCESS);
2203}
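
/*
 * Illustrative sketch (not part of this file): since this kernel exposes
 * only the single default processor set, an active task's assignment is
 * always reported as pset0; an inactive task yields KERN_FAILURE.
 *
 *	processor_set_t pset;
 *
 *	if (task_get_assignment(task, &pset) == KERN_SUCCESS)
 *		assert(pset == &pset0);
 */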
2204
2205
2206/*
2207 * task_policy
2208 *
2209 * Set scheduling policy and parameters, both base and limit, for
2210 * the given task. Policy must be a policy which is enabled for the
2211 * processor set. Change contained threads if requested.
2212 */
2213kern_return_t
2214task_policy(
2215 __unused task_t task,
2216 __unused policy_t policy_id,
2217 __unused policy_base_t base,
2218 __unused mach_msg_type_number_t count,
2219 __unused boolean_t set_limit,
2220 __unused boolean_t change)
2221{
2222 return(KERN_FAILURE);
2223}
2224
2225/*
2226 * task_set_policy
2227 *
2228 * Set scheduling policy and parameters, both base and limit, for
2229 * the given task. Policy can be any policy implemented by the
2230 * processor set, whether enabled or not. Change contained threads
2231 * if requested.
2232 */
2233kern_return_t
2234task_set_policy(
2235 __unused task_t task,
2236 __unused processor_set_t pset,
2237 __unused policy_t policy_id,
2238 __unused policy_base_t base,
2239 __unused mach_msg_type_number_t base_count,
2240 __unused policy_limit_t limit,
2241 __unused mach_msg_type_number_t limit_count,
2242 __unused boolean_t change)
2243{
2244 return(KERN_FAILURE);
2245}
2246
2247#if FAST_TAS
2248kern_return_t
2249task_set_ras_pc(
2250 task_t task,
2251 vm_offset_t pc,
2252 vm_offset_t endpc)
2253{
2254 extern int fast_tas_debug;
2255
2256 if (fast_tas_debug) {
2257 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
2258 task, pc, endpc);
2259 }
2260 task_lock(task);
2261 task->fast_tas_base = pc;
2262 task->fast_tas_end = endpc;
2263 task_unlock(task);
2264 return KERN_SUCCESS;
2265}
2266#else /* FAST_TAS */
2267kern_return_t
2268task_set_ras_pc(
2269 __unused task_t task,
2270 __unused vm_offset_t pc,
2271 __unused vm_offset_t endpc)
2272{
2273 return KERN_FAILURE;
2274}
2275#endif /* FAST_TAS */
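
/*
 * Illustrative sketch (not part of this file): registering a restartable
 * atomic sequence (RAS) region for the current task. This only takes
 * effect when the kernel is built with FAST_TAS; otherwise the stub above
 * returns KERN_FAILURE. The ras_start/ras_end values are hypothetical
 * user-space addresses bracketing the restartable sequence.
 *
 *	vm_offset_t ras_start, ras_end;		// hypothetical user addresses
 *	...					// obtain the PC range of the sequence
 *
 *	kern_return_t kr = task_set_ras_pc(current_task(), ras_start, ras_end);
 *	if (kr != KERN_SUCCESS) {
 *		// FAST_TAS not configured: fall back to a conventional atomic primitive
 *	}
 */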
2276
2277void
2278task_synchronizer_destroy_all(task_t task)
2279{
2280 semaphore_t semaphore;
2281 lock_set_t lock_set;
2282
2283 /*
2284 * Destroy owned semaphores
2285 */
2286
2287 while (!queue_empty(&task->semaphore_list)) {
2288 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
2289 (void) semaphore_destroy(task, semaphore);
2290 }
2291
2292 /*
2293 * Destroy owned lock sets
2294 */
2295
2296 while (!queue_empty(&task->lock_set_list)) {
2297 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
2298 (void) lock_set_destroy(task, lock_set);
2299 }
2300}
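
/*
 * Illustrative sketch (not part of this file): a task-owned semaphore that
 * its creator never destroys is reclaimed by the loop above when the task
 * is torn down. semaphore_create() is the standard Mach KPI; error
 * handling is abbreviated.
 *
 *	semaphore_t sem;
 *
 *	if (semaphore_create(task, &sem, SYNC_POLICY_FIFO, 0) == KERN_SUCCESS) {
 *		// sem is now queued on task->semaphore_list; if the owner never
 *		// calls semaphore_destroy(), task_synchronizer_destroy_all()
 *		// does so during teardown.
 *	}
 */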
2301
2302/*
2303 * Install default (machine-dependent) initial thread state
2304 * on the task. Subsequent thread creation will have this initial
2305 * state set on the thread by machine_thread_inherit_taskwide().
2306 * Flavors and structures are exactly the same as those passed to thread_set_state().
2307 */
2308kern_return_t
2309task_set_state(
2310 task_t task,
2311 int flavor,
2312 thread_state_t state,
2313 mach_msg_type_number_t state_count)
2314{
2315 kern_return_t ret;
2316
2317 if (task == TASK_NULL) {
2318 return (KERN_INVALID_ARGUMENT);
2319 }
2320
2321 task_lock(task);
2322
2323 if (!task->active) {
2324 task_unlock(task);
2325 return (KERN_FAILURE);
2326 }
2327
2328 ret = machine_task_set_state(task, flavor, state, state_count);
2329
2330 task_unlock(task);
2331 return ret;
2332}
2333
2334/*
2335 * Examine the default (machine-dependent) initial thread state
2336 * on the task, as set by task_set_state(). Flavors and structures
2337 * are exactly the same as those passed to thread_get_state().
2338 */
2339kern_return_t
2340task_get_state(
2341 task_t task,
2342 int flavor,
2343 thread_state_t state,
2344 mach_msg_type_number_t *state_count)
2345{
2346 kern_return_t ret;
2347
2348 if (task == TASK_NULL) {
2349 return (KERN_INVALID_ARGUMENT);
2350 }
2351
2352 task_lock(task);
2353
2354 if (!task->active) {
2355 task_unlock(task);
2356 return (KERN_FAILURE);
2357 }
2358
2359 ret = machine_task_get_state(task, flavor, state, state_count);
2360
2361 task_unlock(task);
2362 return ret;
2363}
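
/*
 * Illustrative sketch (not part of this file): installing a default debug
 * state on a task so that threads created later inherit it, then reading
 * it back. The x86_DEBUG_STATE64 flavor is an assumption for the example;
 * any flavor accepted by thread_set_state() for the target architecture
 * works the same way.
 *
 *	x86_debug_state64_t dbg = { .dr7 = 0x1 };	// hypothetical contents
 *	mach_msg_type_number_t count = x86_DEBUG_STATE64_COUNT;
 *
 *	kern_return_t kr = task_set_state(task, x86_DEBUG_STATE64,
 *	    (thread_state_t)&dbg, count);
 *	if (kr == KERN_SUCCESS)
 *		kr = task_get_state(task, x86_DEBUG_STATE64,
 *		    (thread_state_t)&dbg, &count);
 */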
2364
2365
2366/*
2367 * Some functions that other components need are currently
2368 * implemented as macros within the osfmk component. Export
2369 * them here as real functions of the same name.
2370 */
2371boolean_t is_kerneltask(task_t t)
2372{
2373 if (t == kernel_task)
2374 return (TRUE);
2375
2376 return (FALSE);
2377}
2378
2379int
2380check_for_tasksuspend(task_t task)
2381{
2382
2383 if (task == TASK_NULL)
2384 return (0);
2385
2386 return (task->suspend_count > 0);
2387}
2388
2389#undef current_task
2390task_t current_task(void);
2391task_t current_task(void)
2392{
2393 return (current_task_fast());
2394}
2395
2396#undef task_reference
2397void task_reference(task_t task);
2398void
2399task_reference(
2400 task_t task)
2401{
2402 if (task != TASK_NULL)
2403 task_reference_internal(task);
2404}
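
/*
 * Illustrative sketch (not part of this file): an outside component that
 * only sees the exported function (not the task_reference_internal()
 * macro) takes and drops a reference around its use of a task it did not
 * create. task_deallocate() is the matching release, defined elsewhere in
 * osfmk.
 *
 *	task_reference(task);		// pin the task while we inspect it
 *	...
 *	task_deallocate(task);		// drop the reference when done
 */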
2405
2406/*
2407 * This routine is always called with the task lock held.
2408 * It returns a thread handle without taking a reference, since the
2409 * caller operates on the thread while holding the task lock.
2410 */
2411thread_t
2412task_findtid(task_t task, uint64_t tid)
2413{
2414 thread_t thread = THREAD_NULL;
2415
2416 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2417 if (thread->thread_id == tid)
2418 return(thread);
2419 }
2420 return(THREAD_NULL);
2421}
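
/*
 * Illustrative sketch (not part of this file): the lookup is only valid
 * while the task lock is held, since no reference is taken on the
 * returned thread.
 *
 *	task_lock(task);
 *	thread_t target = task_findtid(task, tid);
 *	if (target != THREAD_NULL) {
 *		// safe to examine 'target' here: the task lock keeps it on
 *		// the task's thread list
 *	}
 *	task_unlock(task);
 */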
2422
2423
2424#if CONFIG_MACF_MACH
2425/*
2426 * Protect two task labels against modification by taking a reference on
2427 * both label handles. The locks do not actually have to be held while
2428 * using the labels, as only labels with a single reference can be modified
2429 * in place.
2430 */
2431
2432void
2433tasklabel_lock2(
2434 task_t a,
2435 task_t b)
2436{
2437 labelh_reference(a->label);
2438 labelh_reference(b->label);
2439}
2440
2441void
2442tasklabel_unlock2(
2443 task_t a,
2444 task_t b)
2445{
2446 labelh_release(a->label);
2447 labelh_release(b->label);
2448}
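
/*
 * Illustrative sketch (not part of this file): pinning two task labels
 * while a MAC policy compares them. Only the label handles are
 * referenced; per the comment above, no lock needs to be held while the
 * labels are read. mac_policy_compare() is a hypothetical policy hook.
 *
 *	tasklabel_lock2(a, b);
 *	int rc = mac_policy_compare(mac_task_get_label(a), mac_task_get_label(b));
 *	tasklabel_unlock2(a, b);
 */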
2449
2450void
2451mac_task_label_update_internal(
2452 struct label *pl,
2453 struct task *task)
2454{
2455
2456 tasklabel_lock(task);
2457 task->label = labelh_modify(task->label);
2458 mac_task_label_update(pl, &task->maclabel);
2459 tasklabel_unlock(task);
2460 ip_lock(task->itk_self);
2461 mac_port_label_update_cred(pl, &task->itk_self->ip_label);
2462 ip_unlock(task->itk_self);
2463}
2464
2465void
2466mac_task_label_modify(
2467 struct task *task,
2468 void *arg,
2469 void (*f) (struct label *l, void *arg))
2470{
2471
2472 tasklabel_lock(task);
2473 task->label = labelh_modify(task->label);
2474 (*f)(&task->maclabel, arg);
2475 tasklabel_unlock(task);
2476}
2477
2478struct label *
2479mac_task_get_label(struct task *task)
2480{
2481 return (&task->maclabel);
2482}
2483#endif