1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63 /*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
81 /*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
88
89 #include <fast_tas.h>
90 #include <platforms.h>
91
92 #include <mach/mach_types.h>
93 #include <mach/boolean.h>
94 #include <mach/host_priv.h>
95 #include <mach/machine/vm_types.h>
96 #include <mach/vm_param.h>
97 #include <mach/semaphore.h>
98 #include <mach/task_info.h>
99 #include <mach/task_special_ports.h>
100
101 #include <ipc/ipc_types.h>
102 #include <ipc/ipc_space.h>
103 #include <ipc/ipc_entry.h>
104
105 #include <kern/kern_types.h>
106 #include <kern/mach_param.h>
107 #include <kern/misc_protos.h>
108 #include <kern/task.h>
109 #include <kern/thread.h>
110 #include <kern/zalloc.h>
111 #include <kern/kalloc.h>
112 #include <kern/processor.h>
113 #include <kern/sched_prim.h> /* for thread_wakeup */
114 #include <kern/ipc_tt.h>
115 #include <kern/host.h>
116 #include <kern/clock.h>
117 #include <kern/timer.h>
118 #include <kern/assert.h>
119 #include <kern/sync_lock.h>
120 #include <kern/affinity.h>
121
122 #include <vm/pmap.h>
123 #include <vm/vm_map.h>
124 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
125 #include <vm/vm_pageout.h>
126 #include <vm/vm_protos.h>
127
128 /*
129 * Exported interfaces
130 */
131
132 #include <mach/task_server.h>
133 #include <mach/mach_host_server.h>
134 #include <mach/host_security_server.h>
135 #include <mach/mach_port_server.h>
136 #include <mach/security_server.h>
137
138 #include <vm/vm_shared_region.h>
139
140 #if CONFIG_MACF_MACH
141 #include <security/mac_mach_internal.h>
142 #endif
143
144 #if CONFIG_COUNTERS
145 #include <pmc/pmc.h>
146 #endif /* CONFIG_COUNTERS */
147
148 task_t kernel_task;
149 zone_t task_zone;
150 lck_attr_t task_lck_attr;
151 lck_grp_t task_lck_grp;
152 lck_grp_attr_t task_lck_grp_attr;
153 #if CONFIG_EMBEDDED
154 lck_mtx_t task_watch_mtx;
155 #endif /* CONFIG_EMBEDDED */
156
157 zinfo_usage_store_t tasks_tkm_private;
158 zinfo_usage_store_t tasks_tkm_shared;
159
160 static ledger_template_t task_ledger_template = NULL;
161 struct _task_ledger_indices task_ledgers = {-1, -1, -1, -1, -1};
162 void init_task_ledgers(void);
163
164
165 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
166
167 /* externs for BSD kernel */
168 extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
169
170 /* Forwards */
171
172 void task_hold_locked(
173 task_t task);
174 void task_wait_locked(
175 task_t task,
176 boolean_t until_not_runnable);
177 void task_release_locked(
178 task_t task);
179 void task_free(
180 task_t task );
181 void task_synchronizer_destroy_all(
182 task_t task);
183
184 int check_for_tasksuspend(
185 task_t task);
186
187 void
188 task_backing_store_privileged(
189 task_t task)
190 {
191 task_lock(task);
192 task->priv_flags |= VM_BACKING_STORE_PRIV;
193 task_unlock(task);
194 return;
195 }
196
197
198 void
199 task_set_64bit(
200 task_t task,
201 boolean_t is64bit)
202 {
203 #if defined(__i386__) || defined(__x86_64__)
204 thread_t thread;
205 #endif /* defined(__i386__) || defined(__x86_64__) */
206 int vm_flags = 0;
207
208 if (is64bit) {
209 if (task_has_64BitAddr(task))
210 return;
211
212 task_set_64BitAddr(task);
213 } else {
214 if ( !task_has_64BitAddr(task))
215 return;
216
217 /*
218 * Deallocate all memory previously allocated
219 * above the 32-bit address space, since it won't
220 * be accessible anymore.
221 */
222 /* remove regular VM map entries & pmap mappings */
223 (void) vm_map_remove(task->map,
224 (vm_map_offset_t) VM_MAX_ADDRESS,
225 MACH_VM_MAX_ADDRESS,
226 0);
227 /* remove the higher VM mappings */
228 (void) vm_map_remove(task->map,
229 MACH_VM_MAX_ADDRESS,
230 0xFFFFFFFFFFFFF000ULL,
231 vm_flags);
232 task_clear_64BitAddr(task);
233 }
234 /* FIXME: On x86, the thread save state flavor can diverge from the
235 * task's 64-bit feature flag due to the 32-bit/64-bit register save
236 * state dichotomy. Since we can be pre-empted in this interval,
237 * certain routines may observe the thread as being in an inconsistent
238 * state with respect to its task's 64-bitness.
239 */
240 #if defined(__i386__) || defined(__x86_64__)
241 task_lock(task);
242 queue_iterate(&task->threads, thread, thread_t, task_threads) {
243 thread_mtx_lock(thread);
244 machine_thread_switch_addrmode(thread);
245 thread_mtx_unlock(thread);
246 }
247 task_unlock(task);
248 #endif /* defined(__i386__) || defined(__x86_64__) */
249 }
250
251
252 void
253 task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
254 {
255 task_lock(task);
256 task->all_image_info_addr = addr;
257 task->all_image_info_size = size;
258 task_unlock(task);
259 }
260
261 void
262 task_init(void)
263 {
264
265 lck_grp_attr_setdefault(&task_lck_grp_attr);
266 lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
267 lck_attr_setdefault(&task_lck_attr);
268 lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
269 #if CONFIG_EMBEDDED
270 lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr);
271 #endif /* CONFIG_EMBEDDED */
272
273 task_zone = zinit(
274 sizeof(struct task),
275 task_max * sizeof(struct task),
276 TASK_CHUNK * sizeof(struct task),
277 "tasks");
278
279 zone_change(task_zone, Z_NOENCRYPT, TRUE);
280
281 init_task_ledgers();
282
283 /*
284 * Create the kernel task as the first task.
285 */
286 #ifdef __LP64__
287 if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
288 #else
289 if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
290 #endif
291 panic("task_init\n");
292
293 vm_map_deallocate(kernel_task->map);
294 kernel_task->map = kernel_map;
295
296 }
297
298 /*
299 * Create a task running in the kernel address space. It may
300 * have its own map of size mem_size and may have ipc privileges.
301 */
302 kern_return_t
303 kernel_task_create(
304 __unused task_t parent_task,
305 __unused vm_offset_t map_base,
306 __unused vm_size_t map_size,
307 __unused task_t *child_task)
308 {
309 return (KERN_INVALID_ARGUMENT);
310 }
311
312 kern_return_t
313 task_create(
314 task_t parent_task,
315 __unused ledger_port_array_t ledger_ports,
316 __unused mach_msg_type_number_t num_ledger_ports,
317 __unused boolean_t inherit_memory,
318 __unused task_t *child_task) /* OUT */
319 {
320 if (parent_task == TASK_NULL)
321 return(KERN_INVALID_ARGUMENT);
322
323 /*
324 * No longer supported: too many calls assume that a task has a valid
325 * process attached.
326 */
327 return(KERN_FAILURE);
328 }
329
330 kern_return_t
331 host_security_create_task_token(
332 host_security_t host_security,
333 task_t parent_task,
334 __unused security_token_t sec_token,
335 __unused audit_token_t audit_token,
336 __unused host_priv_t host_priv,
337 __unused ledger_port_array_t ledger_ports,
338 __unused mach_msg_type_number_t num_ledger_ports,
339 __unused boolean_t inherit_memory,
340 __unused task_t *child_task) /* OUT */
341 {
342 if (parent_task == TASK_NULL)
343 return(KERN_INVALID_ARGUMENT);
344
345 if (host_security == HOST_NULL)
346 return(KERN_INVALID_SECURITY);
347
348 /*
349 * No longer supported.
350 */
351 return(KERN_FAILURE);
352 }
353
354 void
355 init_task_ledgers(void)
356 {
357 ledger_template_t t;
358
359 assert(task_ledger_template == NULL);
360 assert(kernel_task == TASK_NULL);
361
362 if ((t = ledger_template_create("Per-task ledger")) == NULL)
363 panic("couldn't create task ledger template");
364
365 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
366 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
367 "physmem", "bytes");
368 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
369 "bytes");
370 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
371 "bytes");
372 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
373 "bytes");
374
375 if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
376 (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
377 (task_ledgers.wired_mem < 0)) {
378 panic("couldn't create entries for task ledger template");
379 }
380
381 task_ledger_template = t;
382 }
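/*
 * A minimal sketch of how the indices set up above are meant to be
 * consumed elsewhere in the kernel (illustrative only, assuming the
 * generic ledger_credit()/ledger_debit() interface):
 *
 *	ledger_credit(task->ledger, task_ledgers.phys_mem, PAGE_SIZE);
 *	...
 *	ledger_debit(task->ledger, task_ledgers.phys_mem, PAGE_SIZE);
 *
 * Each index stored in task_ledgers selects one per-task counter in the
 * ledger instantiated from task_ledger_template.
 */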
383
384 kern_return_t
385 task_create_internal(
386 task_t parent_task,
387 boolean_t inherit_memory,
388 boolean_t is_64bit,
389 task_t *child_task) /* OUT */
390 {
391 task_t new_task;
392 vm_shared_region_t shared_region;
393 ledger_t ledger = NULL;
394
395 new_task = (task_t) zalloc(task_zone);
396
397 if (new_task == TASK_NULL)
398 return(KERN_RESOURCE_SHORTAGE);
399
400 /* one ref for just being alive; one for our caller */
401 new_task->ref_count = 2;
402
403 /* allocate with active entries */
404 assert(task_ledger_template != NULL);
405 if ((ledger = ledger_instantiate(task_ledger_template,
406 LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
407 zfree(task_zone, new_task);
408 return(KERN_RESOURCE_SHORTAGE);
409 }
410 new_task->ledger = ledger;
411
412 /* if inherit_memory is true, parent_task MUST not be NULL */
413 if (inherit_memory)
414 new_task->map = vm_map_fork(ledger, parent_task->map);
415 else
416 new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
417 (vm_map_offset_t)(VM_MIN_ADDRESS),
418 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
419
420 /* Inherit memlock limit from parent */
421 if (parent_task)
422 vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
423
424 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
425 queue_init(&new_task->threads);
426 new_task->suspend_count = 0;
427 new_task->thread_count = 0;
428 new_task->active_thread_count = 0;
429 new_task->user_stop_count = 0;
430 new_task->role = TASK_UNSPECIFIED;
431 new_task->active = TRUE;
432 new_task->halting = FALSE;
433 new_task->user_data = NULL;
434 new_task->faults = 0;
435 new_task->cow_faults = 0;
436 new_task->pageins = 0;
437 new_task->messages_sent = 0;
438 new_task->messages_received = 0;
439 new_task->syscalls_mach = 0;
440 new_task->priv_flags = 0;
441 new_task->syscalls_unix=0;
442 new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
443 new_task->taskFeatures[0] = 0; /* Init task features */
444 new_task->taskFeatures[1] = 0; /* Init task features */
445
446 zinfo_task_init(new_task);
447
448 #ifdef MACH_BSD
449 new_task->bsd_info = NULL;
450 #endif /* MACH_BSD */
451
452 #if defined(__i386__) || defined(__x86_64__)
453 new_task->i386_ldt = 0;
454 new_task->task_debug = NULL;
455 #endif
456
457
458 queue_init(&new_task->semaphore_list);
459 queue_init(&new_task->lock_set_list);
460 new_task->semaphores_owned = 0;
461 new_task->lock_sets_owned = 0;
462
463 #if CONFIG_MACF_MACH
464 new_task->label = labelh_new(1);
465 mac_task_label_init (&new_task->maclabel);
466 #endif
467
468 ipc_task_init(new_task, parent_task);
469
470 new_task->total_user_time = 0;
471 new_task->total_system_time = 0;
472
473 new_task->vtimers = 0;
474
475 new_task->shared_region = NULL;
476
477 new_task->affinity_space = NULL;
478
479 #if CONFIG_COUNTERS
480 new_task->t_chud = 0U;
481 #endif
482
483 new_task->pidsuspended = FALSE;
484 new_task->frozen = FALSE;
485 new_task->rusage_cpu_flags = 0;
486 new_task->rusage_cpu_percentage = 0;
487 new_task->rusage_cpu_interval = 0;
488 new_task->rusage_cpu_deadline = 0;
489 new_task->rusage_cpu_callt = NULL;
490 new_task->proc_terminate = 0;
491 #if CONFIG_EMBEDDED
492 queue_init(&new_task->task_watchers);
493 new_task->appstate = TASK_APPSTATE_ACTIVE;
494 new_task->num_taskwatchers = 0;
495 new_task->watchapplying = 0;
496 #endif /* CONFIG_EMBEDDED */
497
498 new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;
499
500 if (parent_task != TASK_NULL) {
501 new_task->sec_token = parent_task->sec_token;
502 new_task->audit_token = parent_task->audit_token;
503
504 /* inherit the parent's shared region */
505 shared_region = vm_shared_region_get(parent_task);
506 vm_shared_region_set(new_task, shared_region);
507
508 if(task_has_64BitAddr(parent_task))
509 task_set_64BitAddr(new_task);
510 new_task->all_image_info_addr = parent_task->all_image_info_addr;
511 new_task->all_image_info_size = parent_task->all_image_info_size;
512
513 #if defined(__i386__) || defined(__x86_64__)
514 if (inherit_memory && parent_task->i386_ldt)
515 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
516 #endif
517 if (inherit_memory && parent_task->affinity_space)
518 task_affinity_create(parent_task, new_task);
519
520 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
521 new_task->policystate = parent_task->policystate;
522 /* inherit the self action state */
523 new_task->appliedstate = parent_task->appliedstate;
524 new_task->ext_policystate = parent_task->ext_policystate;
525 #if NOTYET
526 /* till the child lifecycle is cleared do not inherit external action */
527 new_task->ext_appliedstate = parent_task->ext_appliedstate;
528 #else
529 new_task->ext_appliedstate = default_task_null_policy;
530 #endif
531 }
532 else {
533 new_task->sec_token = KERNEL_SECURITY_TOKEN;
534 new_task->audit_token = KERNEL_AUDIT_TOKEN;
535 #ifdef __LP64__
536 if(is_64bit)
537 task_set_64BitAddr(new_task);
538 #endif
539 new_task->all_image_info_addr = (mach_vm_address_t)0;
540 new_task->all_image_info_size = (mach_vm_size_t)0;
541
542 new_task->pset_hint = PROCESSOR_SET_NULL;
543 new_task->policystate = default_task_proc_policy;
544 new_task->ext_policystate = default_task_proc_policy;
545 new_task->appliedstate = default_task_null_policy;
546 new_task->ext_appliedstate = default_task_null_policy;
547 }
548
549 if (kernel_task == TASK_NULL) {
550 new_task->priority = BASEPRI_KERNEL;
551 new_task->max_priority = MAXPRI_KERNEL;
552 }
553 else {
554 new_task->priority = BASEPRI_DEFAULT;
555 new_task->max_priority = MAXPRI_USER;
556 }
557
558 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
559
560 lck_mtx_lock(&tasks_threads_lock);
561 queue_enter(&tasks, new_task, task_t, tasks);
562 tasks_count++;
563 lck_mtx_unlock(&tasks_threads_lock);
564
565 if (vm_backing_store_low && parent_task != NULL)
566 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
567
568 ipc_task_enable(new_task);
569
570 *child_task = new_task;
571 return(KERN_SUCCESS);
572 }
573
574 /*
575 * task_deallocate:
576 *
577 * Drop a reference on a task.
578 */
579 void
580 task_deallocate(
581 task_t task)
582 {
583 ledger_amount_t credit, debit;
584
585 if (task == TASK_NULL)
586 return;
587
588 if (task_deallocate_internal(task) > 0)
589 return;
590
591 lck_mtx_lock(&tasks_threads_lock);
592 queue_remove(&terminated_tasks, task, task_t, tasks);
593 lck_mtx_unlock(&tasks_threads_lock);
594
595 /*
596 * Give the machine dependent code a chance
597 * to perform cleanup before ripping apart
598 * the task.
599 */
600 machine_task_terminate(task);
601
602 ipc_task_terminate(task);
603
604 if (task->affinity_space)
605 task_affinity_deallocate(task);
606
607 vm_map_deallocate(task->map);
608 is_release(task->itk_space);
609
610 lck_mtx_destroy(&task->lock, &task_lck_grp);
611
612 #if CONFIG_MACF_MACH
613 labelh_release(task->label);
614 #endif
615
616 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
617 &debit)) {
618 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
619 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
620 }
621 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
622 &debit)) {
623 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
624 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
625 }
626 ledger_dereference(task->ledger);
627 zinfo_task_free(task);
628 zfree(task_zone, task);
629 }
630
631 /*
632 * task_name_deallocate:
633 *
634 * Drop a reference on a task name.
635 */
636 void
637 task_name_deallocate(
638 task_name_t task_name)
639 {
640 return(task_deallocate((task_t)task_name));
641 }
642
643
644 /*
645 * task_terminate:
646 *
647 * Terminate the specified task. See comments on thread_terminate
648 * (kern/thread.c) about problems with terminating the "current task."
649 */
650
651 kern_return_t
652 task_terminate(
653 task_t task)
654 {
655 if (task == TASK_NULL)
656 return (KERN_INVALID_ARGUMENT);
657
658 if (task->bsd_info)
659 return (KERN_FAILURE);
660
661 return (task_terminate_internal(task));
662 }
663
664 kern_return_t
665 task_terminate_internal(
666 task_t task)
667 {
668 thread_t thread, self;
669 task_t self_task;
670 boolean_t interrupt_save;
671
672 assert(task != kernel_task);
673
674 self = current_thread();
675 self_task = self->task;
676
677 /*
678 * Get the task locked and make sure that we are not racing
679 * with someone else trying to terminate us.
680 */
681 if (task == self_task)
682 task_lock(task);
683 else
684 if (task < self_task) {
685 task_lock(task);
686 task_lock(self_task);
687 }
688 else {
689 task_lock(self_task);
690 task_lock(task);
691 }
692
693 if (!task->active) {
694 /*
695 * Task is already being terminated.
696 * Just return an error. If we are dying, this will
697 * just get us to our AST special handler and that
698 * will get us to finalize the termination of ourselves.
699 */
700 task_unlock(task);
701 if (self_task != task)
702 task_unlock(self_task);
703
704 return (KERN_FAILURE);
705 }
706
707 if (self_task != task)
708 task_unlock(self_task);
709
710 /*
711 * Make sure the current thread does not get aborted out of
712 * the waits inside these operations.
713 */
714 interrupt_save = thread_interrupt_level(THREAD_UNINT);
715
716 /*
717 * Indicate that we want all the threads to stop executing
718 * in user space by holding the task (we would have held
719 * each thread independently in thread_terminate_internal -
720 * but this way we may be more likely to already find it
721 * held there). Mark the task inactive, and prevent
722 * further task operations via the task port.
723 */
724 task_hold_locked(task);
725 task->active = FALSE;
726 ipc_task_disable(task);
727
728 /*
729 * Terminate each thread in the task.
730 */
731 queue_iterate(&task->threads, thread, thread_t, task_threads) {
732 thread_terminate_internal(thread);
733 }
734
735 task_unlock(task);
736
737 #if CONFIG_EMBEDDED
738 /*
739 * remove all task watchers
740 */
741 task_removewatchers(task);
742 #endif /* CONFIG_EMBEDDED */
743
744 /*
745 * Destroy all synchronizers owned by the task.
746 */
747 task_synchronizer_destroy_all(task);
748
749 /*
750 * Destroy the IPC space, leaving just a reference for it.
751 */
752 ipc_space_terminate(task->itk_space);
753
754 if (vm_map_has_4GB_pagezero(task->map))
755 vm_map_clear_4GB_pagezero(task->map);
756
757 /*
758 * If the current thread is a member of the task
759 * being terminated, then the last reference to
760 * the task will not be dropped until the thread
761 * is finally reaped. To avoid incurring the
762 * expense of removing the address space regions
763 * at reap time, we do it explicitly here.
764 */
765 vm_map_remove(task->map,
766 task->map->min_offset,
767 task->map->max_offset,
768 VM_MAP_NO_FLAGS);
769
770 /* release our shared region */
771 vm_shared_region_set(task, NULL);
772
773 lck_mtx_lock(&tasks_threads_lock);
774 queue_remove(&tasks, task, task_t, tasks);
775 queue_enter(&terminated_tasks, task, task_t, tasks);
776 tasks_count--;
777 lck_mtx_unlock(&tasks_threads_lock);
778
779 /*
780 * We no longer need to guard against being aborted, so restore
781 * the previous interruptible state.
782 */
783 thread_interrupt_level(interrupt_save);
784
785 /*
786 * Get rid of the task active reference on itself.
787 */
788 task_deallocate(task);
789
790 return (KERN_SUCCESS);
791 }
792
793 /*
794 * task_start_halt:
795 *
796 * Shut the current task down (except for the current thread) in
797 * preparation for dramatic changes to the task (probably exec).
798 * We hold the task and mark all other threads in the task for
799 * termination.
800 */
801 kern_return_t
802 task_start_halt(
803 task_t task)
804 {
805 thread_t thread, self;
806
807 assert(task != kernel_task);
808
809 self = current_thread();
810
811 if (task != self->task)
812 return (KERN_INVALID_ARGUMENT);
813
814 task_lock(task);
815
816 if (task->halting || !task->active || !self->active) {
817 /*
818 * Task or current thread is already being terminated.
819 * Hurry up and return out of the current kernel context
820 * so that we run our AST special handler to terminate
821 * ourselves.
822 */
823 task_unlock(task);
824
825 return (KERN_FAILURE);
826 }
827
828 task->halting = TRUE;
829
830 if (task->thread_count > 1) {
831
832 /*
833 * Mark all the threads to keep them from starting any more
834 * user-level execution. The thread_terminate_internal code
835 * would do this on a thread by thread basis anyway, but this
836 * gives us a better chance of not having to wait there.
837 */
838 task_hold_locked(task);
839
840 /*
841 * Terminate all the other threads in the task.
842 */
843 queue_iterate(&task->threads, thread, thread_t, task_threads) {
844 if (thread != self)
845 thread_terminate_internal(thread);
846 }
847
848 task_release_locked(task);
849 }
850 task_unlock(task);
851 return KERN_SUCCESS;
852 }
853
854
855 /*
856 * task_complete_halt:
857 *
858 * Complete task halt by waiting for threads to terminate, then clean
859 * up task resources (VM, port namespace, etc...) and then let the
860 * current thread go in the (practically empty) task context.
861 */
862 void
863 task_complete_halt(task_t task)
864 {
865 task_lock(task);
866 assert(task->halting);
867 assert(task == current_task());
868
869 /*
870 * Wait for the other threads to get shut down.
871 * When the last other thread is reaped, we'll be
872 * woken up.
873 */
874 if (task->thread_count > 1) {
875 assert_wait((event_t)&task->halting, THREAD_UNINT);
876 task_unlock(task);
877 thread_block(THREAD_CONTINUE_NULL);
878 } else {
879 task_unlock(task);
880 }
881
882 /*
883 * Give the machine dependent code a chance
884 * to perform cleanup of task-level resources
885 * associated with the current thread before
886 * ripping apart the task.
887 */
888 machine_task_terminate(task);
889
890 /*
891 * Destroy all synchronizers owned by the task.
892 */
893 task_synchronizer_destroy_all(task);
894
895 /*
896 * Destroy the contents of the IPC space, leaving just
897 * a reference for it.
898 */
899 ipc_space_clean(task->itk_space);
900
901 /*
902 * Clean out the address space, as we are going to be
903 * getting a new one.
904 */
905 vm_map_remove(task->map, task->map->min_offset,
906 task->map->max_offset, VM_MAP_NO_FLAGS);
907
908 task->halting = FALSE;
909 }
910
911 /*
912 * task_hold_locked:
913 *
914 * Suspend execution of the specified task.
915 * This is a recursive-style suspension of the task, a count of
916 * suspends is maintained.
917 *
918 * CONDITIONS: the task is locked and active.
919 */
920 void
921 task_hold_locked(
922 register task_t task)
923 {
924 register thread_t thread;
925
926 assert(task->active);
927
928 if (task->suspend_count++ > 0)
929 return;
930
931 /*
932 * Iterate through all the threads and hold them.
933 */
934 queue_iterate(&task->threads, thread, thread_t, task_threads) {
935 thread_mtx_lock(thread);
936 thread_hold(thread);
937 thread_mtx_unlock(thread);
938 }
939 }
940
941 /*
942 * task_hold:
943 *
944 * Same as the internal routine above, except that it must lock
945 * and verify that the task is active. This differs from task_suspend
946 * in that it places a kernel hold on the task rather than just a
947 * user-level hold. This keeps users from over-resuming and setting
948 * it running out from under the kernel.
949 *
950 * CONDITIONS: the caller holds a reference on the task
951 */
952 kern_return_t
953 task_hold(
954 register task_t task)
955 {
956 if (task == TASK_NULL)
957 return (KERN_INVALID_ARGUMENT);
958
959 task_lock(task);
960
961 if (!task->active) {
962 task_unlock(task);
963
964 return (KERN_FAILURE);
965 }
966
967 task_hold_locked(task);
968 task_unlock(task);
969
970 return (KERN_SUCCESS);
971 }
972
973 kern_return_t
974 task_wait(
975 task_t task,
976 boolean_t until_not_runnable)
977 {
978 if (task == TASK_NULL)
979 return (KERN_INVALID_ARGUMENT);
980
981 task_lock(task);
982
983 if (!task->active) {
984 task_unlock(task);
985
986 return (KERN_FAILURE);
987 }
988
989 task_wait_locked(task, until_not_runnable);
990 task_unlock(task);
991
992 return (KERN_SUCCESS);
993 }
994
995 /*
996 * task_wait_locked:
997 *
998 * Wait for all threads in task to stop.
999 *
1000 * Conditions:
1001 * Called with task locked, active, and held.
1002 */
1003 void
1004 task_wait_locked(
1005 register task_t task,
1006 boolean_t until_not_runnable)
1007 {
1008 register thread_t thread, self;
1009
1010 assert(task->active);
1011 assert(task->suspend_count > 0);
1012
1013 self = current_thread();
1014
1015 /*
1016 * Iterate through all the threads and wait for them to
1017 * stop. Do not wait for the current thread if it is within
1018 * the task.
1019 */
1020 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1021 if (thread != self)
1022 thread_wait(thread, until_not_runnable);
1023 }
1024 }
1025
1026 /*
1027 * task_release_locked:
1028 *
1029 * Release a kernel hold on a task.
1030 *
1031 * CONDITIONS: the task is locked and active
1032 */
1033 void
1034 task_release_locked(
1035 register task_t task)
1036 {
1037 register thread_t thread;
1038
1039 assert(task->active);
1040 assert(task->suspend_count > 0);
1041
1042 if (--task->suspend_count > 0)
1043 return;
1044
1045 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1046 thread_mtx_lock(thread);
1047 thread_release(thread);
1048 thread_mtx_unlock(thread);
1049 }
1050 }
1051
1052 /*
1053 * task_release:
1054 *
1055 * Same as the internal routine above, except that it must lock
1056 * and verify that the task is active.
1057 *
1058 * CONDITIONS: The caller holds a reference to the task
1059 */
1060 kern_return_t
1061 task_release(
1062 task_t task)
1063 {
1064 if (task == TASK_NULL)
1065 return (KERN_INVALID_ARGUMENT);
1066
1067 task_lock(task);
1068
1069 if (!task->active) {
1070 task_unlock(task);
1071
1072 return (KERN_FAILURE);
1073 }
1074
1075 task_release_locked(task);
1076 task_unlock(task);
1077
1078 return (KERN_SUCCESS);
1079 }
1080
1081 kern_return_t
1082 task_threads(
1083 task_t task,
1084 thread_act_array_t *threads_out,
1085 mach_msg_type_number_t *count)
1086 {
1087 mach_msg_type_number_t actual;
1088 thread_t *thread_list;
1089 thread_t thread;
1090 vm_size_t size, size_needed;
1091 void *addr;
1092 unsigned int i, j;
1093
1094 if (task == TASK_NULL)
1095 return (KERN_INVALID_ARGUMENT);
1096
1097 size = 0; addr = NULL;
1098
1099 for (;;) {
1100 task_lock(task);
1101 if (!task->active) {
1102 task_unlock(task);
1103
1104 if (size != 0)
1105 kfree(addr, size);
1106
1107 return (KERN_FAILURE);
1108 }
1109
1110 actual = task->thread_count;
1111
1112 /* do we have the memory we need? */
1113 size_needed = actual * sizeof (mach_port_t);
1114 if (size_needed <= size)
1115 break;
1116
1117 /* unlock the task and allocate more memory */
1118 task_unlock(task);
1119
1120 if (size != 0)
1121 kfree(addr, size);
1122
1123 assert(size_needed > 0);
1124 size = size_needed;
1125
1126 addr = kalloc(size);
1127 if (addr == 0)
1128 return (KERN_RESOURCE_SHORTAGE);
1129 }
1130
1131 /* OK, have memory and the task is locked & active */
1132 thread_list = (thread_t *)addr;
1133
1134 i = j = 0;
1135
1136 for (thread = (thread_t)queue_first(&task->threads); i < actual;
1137 ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
1138 thread_reference_internal(thread);
1139 thread_list[j++] = thread;
1140 }
1141
1142 assert(queue_end(&task->threads, (queue_entry_t)thread));
1143
1144 actual = j;
1145 size_needed = actual * sizeof (mach_port_t);
1146
1147 /* can unlock task now that we've got the thread refs */
1148 task_unlock(task);
1149
1150 if (actual == 0) {
1151 /* no threads, so return null pointer and deallocate memory */
1152
1153 *threads_out = NULL;
1154 *count = 0;
1155
1156 if (size != 0)
1157 kfree(addr, size);
1158 }
1159 else {
1160 /* if we allocated too much, must copy */
1161
1162 if (size_needed < size) {
1163 void *newaddr;
1164
1165 newaddr = kalloc(size_needed);
1166 if (newaddr == 0) {
1167 for (i = 0; i < actual; ++i)
1168 thread_deallocate(thread_list[i]);
1169 kfree(addr, size);
1170 return (KERN_RESOURCE_SHORTAGE);
1171 }
1172
1173 bcopy(addr, newaddr, size_needed);
1174 kfree(addr, size);
1175 thread_list = (thread_t *)newaddr;
1176 }
1177
1178 *threads_out = thread_list;
1179 *count = actual;
1180
1181 /* do the conversion that MIG should handle */
1182
1183 for (i = 0; i < actual; ++i)
1184 ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
1185 }
1186
1187 return (KERN_SUCCESS);
1188 }
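/*
 * Hypothetical user-space caller of task_threads(): the thread array is
 * returned as MIG out-of-line memory, so the caller is expected to drop
 * the port rights and deallocate the buffer when done.
 *
 *	thread_act_array_t acts;
 *	mach_msg_type_number_t n;
 *
 *	if (task_threads(target_task, &acts, &n) == KERN_SUCCESS) {
 *		for (mach_msg_type_number_t i = 0; i < n; i++)
 *			mach_port_deallocate(mach_task_self(), acts[i]);
 *		(void) vm_deallocate(mach_task_self(), (vm_address_t)acts,
 *		    n * sizeof(acts[0]));
 *	}
 */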
1189
1190 static kern_return_t
1191 place_task_hold (
1192 register task_t task)
1193 {
1194 if (!task->active) {
1195 return (KERN_FAILURE);
1196 }
1197
1198 if (task->user_stop_count++ > 0) {
1199 /*
1200 * If the stop count was positive, the task is
1201 * already stopped and we can exit.
1202 */
1203 return (KERN_SUCCESS);
1204 }
1205
1206 /*
1207 * Put a kernel-level hold on the threads in the task (all
1208 * user-level task suspensions added together represent a
1209 * single kernel-level hold). We then wait for the threads
1210 * to stop executing user code.
1211 */
1212 task_hold_locked(task);
1213 task_wait_locked(task, TRUE);
1214
1215 return (KERN_SUCCESS);
1216 }
1217
1218 static kern_return_t
1219 release_task_hold (
1220 register task_t task,
1221 boolean_t pidresume)
1222 {
1223 register boolean_t release = FALSE;
1224
1225 if (!task->active) {
1226 return (KERN_FAILURE);
1227 }
1228
1229 if (pidresume) {
1230 if (task->pidsuspended == FALSE) {
1231 return (KERN_FAILURE);
1232 }
1233 task->pidsuspended = FALSE;
1234 }
1235
1236 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
1237 if (--task->user_stop_count == 0) {
1238 release = TRUE;
1239 }
1240 }
1241 else {
1242 return (KERN_FAILURE);
1243 }
1244
1245 /*
1246 * Release the task if necessary.
1247 */
1248 if (release)
1249 task_release_locked(task);
1250
1251 return (KERN_SUCCESS);
1252 }
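/*
 * Counting behaviour of the two helpers above, sketched for a
 * hypothetical task t with no outstanding pidsuspend: all user-level
 * suspensions aggregate into a single kernel-level hold.
 *
 *	task_suspend(t);	// user_stop_count 0 -> 1, task_hold_locked()
 *	task_suspend(t);	// user_stop_count 1 -> 2, already held
 *	task_resume(t);		// user_stop_count 2 -> 1, still held
 *	task_resume(t);		// user_stop_count 1 -> 0, task_release_locked()
 */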
1253
1254 /*
1255 * task_suspend:
1256 *
1257 * Implement a user-level suspension on a task.
1258 *
1259 * Conditions:
1260 * The caller holds a reference to the task
1261 */
1262 kern_return_t
1263 task_suspend(
1264 register task_t task)
1265 {
1266 kern_return_t kr;
1267
1268 if (task == TASK_NULL || task == kernel_task)
1269 return (KERN_INVALID_ARGUMENT);
1270
1271 task_lock(task);
1272
1273 kr = place_task_hold(task);
1274
1275 task_unlock(task);
1276
1277 return (kr);
1278 }
1279
1280 /*
1281 * task_resume:
1282 * Release a kernel hold on a task.
1283 *
1284 * Conditions:
1285 * The caller holds a reference to the task
1286 */
1287 kern_return_t
1288 task_resume(
1289 register task_t task)
1290 {
1291 kern_return_t kr;
1292
1293 if (task == TASK_NULL || task == kernel_task)
1294 return (KERN_INVALID_ARGUMENT);
1295
1296 task_lock(task);
1297
1298 kr = release_task_hold(task, FALSE);
1299
1300 task_unlock(task);
1301
1302 return (kr);
1303 }
1304
1305 kern_return_t
1306 task_pidsuspend_locked(task_t task)
1307 {
1308 kern_return_t kr;
1309
1310 if (task->pidsuspended) {
1311 kr = KERN_FAILURE;
1312 goto out;
1313 }
1314
1315 task->pidsuspended = TRUE;
1316
1317 kr = place_task_hold(task);
1318 if (kr != KERN_SUCCESS) {
1319 task->pidsuspended = FALSE;
1320 }
1321 out:
1322 return(kr);
1323 }
1324
1325
1326 /*
1327 * task_pidsuspend:
1328 *
1329 * Suspends a task by placing a hold on its threads.
1330 *
1331 * Conditions:
1332 * The caller holds a reference to the task
1333 */
1334 kern_return_t
1335 task_pidsuspend(
1336 register task_t task)
1337 {
1338 kern_return_t kr;
1339
1340 if (task == TASK_NULL || task == kernel_task)
1341 return (KERN_INVALID_ARGUMENT);
1342
1343 task_lock(task);
1344
1345 kr = task_pidsuspend_locked(task);
1346
1347 task_unlock(task);
1348
1349 return (kr);
1350 }
1351
1352 /* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */
1353 #define THAW_ON_RESUME 1
1354
1355 /*
1356 * task_pidresume:
1357 * Resumes a previously suspended task.
1358 *
1359 * Conditions:
1360 * The caller holds a reference to the task
1361 */
1362 kern_return_t
1363 task_pidresume(
1364 register task_t task)
1365 {
1366 kern_return_t kr;
1367 #if (CONFIG_FREEZE && THAW_ON_RESUME)
1368 boolean_t frozen;
1369 #endif
1370
1371 if (task == TASK_NULL || task == kernel_task)
1372 return (KERN_INVALID_ARGUMENT);
1373
1374 task_lock(task);
1375
1376 #if (CONFIG_FREEZE && THAW_ON_RESUME)
1377 frozen = task->frozen;
1378 task->frozen = FALSE;
1379 #endif
1380
1381 kr = release_task_hold(task, TRUE);
1382
1383 task_unlock(task);
1384
1385 #if (CONFIG_FREEZE && THAW_ON_RESUME)
1386 if ((kr == KERN_SUCCESS) && (frozen == TRUE)) {
1387 kr = vm_map_thaw(task->map);
1388 }
1389 #endif
1390
1391 return (kr);
1392 }
1393
1394 #if CONFIG_FREEZE
1395
1396 /*
1397 * task_freeze:
1398 *
1399 * Freeze a task.
1400 *
1401 * Conditions:
1402 * The caller holds a reference to the task
1403 */
1404 kern_return_t
1405 task_freeze(
1406 register task_t task,
1407 uint32_t *purgeable_count,
1408 uint32_t *wired_count,
1409 uint32_t *clean_count,
1410 uint32_t *dirty_count,
1411 uint32_t dirty_budget,
1412 boolean_t *shared,
1413 boolean_t walk_only)
1414 {
1415 kern_return_t kr;
1416
1417 if (task == TASK_NULL || task == kernel_task)
1418 return (KERN_INVALID_ARGUMENT);
1419
1420 task_lock(task);
1421
1422 if (task->frozen) {
1423 task_unlock(task);
1424 return (KERN_FAILURE);
1425 }
1426
1427 if (walk_only == FALSE) {
1428 task->frozen = TRUE;
1429 }
1430
1431 task_unlock(task);
1432
1433 if (walk_only) {
1434 kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
1435 } else {
1436 kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
1437 }
1438
1439 return (kr);
1440 }
1441
1442 /*
1443 * task_thaw:
1444 *
1445 * Thaw a currently frozen task.
1446 *
1447 * Conditions:
1448 * The caller holds a reference to the task
1449 */
1450 kern_return_t
1451 task_thaw(
1452 register task_t task)
1453 {
1454 kern_return_t kr;
1455
1456 if (task == TASK_NULL || task == kernel_task)
1457 return (KERN_INVALID_ARGUMENT);
1458
1459 task_lock(task);
1460
1461 if (!task->frozen) {
1462 task_unlock(task);
1463 return (KERN_FAILURE);
1464 }
1465
1466 task->frozen = FALSE;
1467
1468 task_unlock(task);
1469
1470 kr = vm_map_thaw(task->map);
1471
1472 return (kr);
1473 }
1474
1475 #endif /* CONFIG_FREEZE */
1476
1477 kern_return_t
1478 host_security_set_task_token(
1479 host_security_t host_security,
1480 task_t task,
1481 security_token_t sec_token,
1482 audit_token_t audit_token,
1483 host_priv_t host_priv)
1484 {
1485 ipc_port_t host_port;
1486 kern_return_t kr;
1487
1488 if (task == TASK_NULL)
1489 return(KERN_INVALID_ARGUMENT);
1490
1491 if (host_security == HOST_NULL)
1492 return(KERN_INVALID_SECURITY);
1493
1494 task_lock(task);
1495 task->sec_token = sec_token;
1496 task->audit_token = audit_token;
1497 task_unlock(task);
1498
1499 if (host_priv != HOST_PRIV_NULL) {
1500 kr = host_get_host_priv_port(host_priv, &host_port);
1501 } else {
1502 kr = host_get_host_port(host_priv_self(), &host_port);
1503 }
1504 assert(kr == KERN_SUCCESS);
1505 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
1506 return(kr);
1507 }
1508
1509 /*
1510 * This routine was added, pretty much exclusively, for registering the
1511 * RPC glue vector for in-kernel short circuited tasks. Rather than
1512 * removing it completely, I have only disabled that feature (which was
1513 * the only feature at the time). It just appears that we are going to
1514 * want to add some user data to tasks in the future (e.g. bsd info,
1515 * task names, etc...), so I left it in the formal task interface.
1516 */
1517 kern_return_t
1518 task_set_info(
1519 task_t task,
1520 task_flavor_t flavor,
1521 __unused task_info_t task_info_in, /* pointer to IN array */
1522 __unused mach_msg_type_number_t task_info_count)
1523 {
1524 if (task == TASK_NULL)
1525 return(KERN_INVALID_ARGUMENT);
1526
1527 switch (flavor) {
1528 default:
1529 return (KERN_INVALID_ARGUMENT);
1530 }
1531 return (KERN_SUCCESS);
1532 }
1533
1534 kern_return_t
1535 task_info(
1536 task_t task,
1537 task_flavor_t flavor,
1538 task_info_t task_info_out,
1539 mach_msg_type_number_t *task_info_count)
1540 {
1541 kern_return_t error = KERN_SUCCESS;
1542
1543 if (task == TASK_NULL)
1544 return (KERN_INVALID_ARGUMENT);
1545
1546 task_lock(task);
1547
1548 if ((task != current_task()) && (!task->active)) {
1549 task_unlock(task);
1550 return (KERN_INVALID_ARGUMENT);
1551 }
1552
1553 switch (flavor) {
1554
1555 case TASK_BASIC_INFO_32:
1556 case TASK_BASIC2_INFO_32:
1557 {
1558 task_basic_info_32_t basic_info;
1559 vm_map_t map;
1560 clock_sec_t secs;
1561 clock_usec_t usecs;
1562
1563 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
1564 error = KERN_INVALID_ARGUMENT;
1565 break;
1566 }
1567
1568 basic_info = (task_basic_info_32_t)task_info_out;
1569
1570 map = (task == kernel_task)? kernel_map: task->map;
1571 basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
1572 if (flavor == TASK_BASIC2_INFO_32) {
1573 /*
1574 * The "BASIC2" flavor gets the maximum resident
1575 * size instead of the current resident size...
1576 */
1577 basic_info->resident_size = pmap_resident_max(map->pmap);
1578 } else {
1579 basic_info->resident_size = pmap_resident_count(map->pmap);
1580 }
1581 basic_info->resident_size *= PAGE_SIZE;
1582
1583 basic_info->policy = ((task != kernel_task)?
1584 POLICY_TIMESHARE: POLICY_RR);
1585 basic_info->suspend_count = task->user_stop_count;
1586
1587 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1588 basic_info->user_time.seconds =
1589 (typeof(basic_info->user_time.seconds))secs;
1590 basic_info->user_time.microseconds = usecs;
1591
1592 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1593 basic_info->system_time.seconds =
1594 (typeof(basic_info->system_time.seconds))secs;
1595 basic_info->system_time.microseconds = usecs;
1596
1597 *task_info_count = TASK_BASIC_INFO_32_COUNT;
1598 break;
1599 }
1600
1601 case TASK_BASIC_INFO_64:
1602 {
1603 task_basic_info_64_t basic_info;
1604 vm_map_t map;
1605 clock_sec_t secs;
1606 clock_usec_t usecs;
1607
1608 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
1609 error = KERN_INVALID_ARGUMENT;
1610 break;
1611 }
1612
1613 basic_info = (task_basic_info_64_t)task_info_out;
1614
1615 map = (task == kernel_task)? kernel_map: task->map;
1616 basic_info->virtual_size = map->size;
1617 basic_info->resident_size =
1618 (mach_vm_size_t)(pmap_resident_count(map->pmap))
1619 * PAGE_SIZE_64;
1620
1621 basic_info->policy = ((task != kernel_task)?
1622 POLICY_TIMESHARE: POLICY_RR);
1623 basic_info->suspend_count = task->user_stop_count;
1624
1625 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1626 basic_info->user_time.seconds =
1627 (typeof(basic_info->user_time.seconds))secs;
1628 basic_info->user_time.microseconds = usecs;
1629
1630 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1631 basic_info->system_time.seconds =
1632 (typeof(basic_info->system_time.seconds))secs;
1633 basic_info->system_time.microseconds = usecs;
1634
1635 *task_info_count = TASK_BASIC_INFO_64_COUNT;
1636 break;
1637 }
1638
1639 case MACH_TASK_BASIC_INFO:
1640 {
1641 mach_task_basic_info_t basic_info;
1642 vm_map_t map;
1643 clock_sec_t secs;
1644 clock_usec_t usecs;
1645
1646 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
1647 error = KERN_INVALID_ARGUMENT;
1648 break;
1649 }
1650
1651 basic_info = (mach_task_basic_info_t)task_info_out;
1652
1653 map = (task == kernel_task) ? kernel_map : task->map;
1654
1655 basic_info->virtual_size = map->size;
1656
1657 basic_info->resident_size =
1658 (mach_vm_size_t)(pmap_resident_count(map->pmap));
1659 basic_info->resident_size *= PAGE_SIZE_64;
1660
1661 basic_info->resident_size_max =
1662 (mach_vm_size_t)(pmap_resident_max(map->pmap));
1663 basic_info->resident_size_max *= PAGE_SIZE_64;
1664
1665 basic_info->policy = ((task != kernel_task) ?
1666 POLICY_TIMESHARE : POLICY_RR);
1667
1668 basic_info->suspend_count = task->user_stop_count;
1669
1670 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1671 basic_info->user_time.seconds =
1672 (typeof(basic_info->user_time.seconds))secs;
1673 basic_info->user_time.microseconds = usecs;
1674
1675 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1676 basic_info->system_time.seconds =
1677 (typeof(basic_info->system_time.seconds))secs;
1678 basic_info->system_time.microseconds = usecs;
1679
1680 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
1681 break;
1682 }
1683
1684 case TASK_THREAD_TIMES_INFO:
1685 {
1686 register task_thread_times_info_t times_info;
1687 register thread_t thread;
1688
1689 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
1690 error = KERN_INVALID_ARGUMENT;
1691 break;
1692 }
1693
1694 times_info = (task_thread_times_info_t) task_info_out;
1695 times_info->user_time.seconds = 0;
1696 times_info->user_time.microseconds = 0;
1697 times_info->system_time.seconds = 0;
1698 times_info->system_time.microseconds = 0;
1699
1700
1701 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1702 time_value_t user_time, system_time;
1703
1704 thread_read_times(thread, &user_time, &system_time);
1705
1706 time_value_add(&times_info->user_time, &user_time);
1707 time_value_add(&times_info->system_time, &system_time);
1708 }
1709
1710
1711 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1712 break;
1713 }
1714
1715 case TASK_ABSOLUTETIME_INFO:
1716 {
1717 task_absolutetime_info_t info;
1718 register thread_t thread;
1719
1720 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
1721 error = KERN_INVALID_ARGUMENT;
1722 break;
1723 }
1724
1725 info = (task_absolutetime_info_t)task_info_out;
1726 info->threads_user = info->threads_system = 0;
1727
1728
1729 info->total_user = task->total_user_time;
1730 info->total_system = task->total_system_time;
1731
1732 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1733 uint64_t tval;
1734 spl_t x;
1735
1736 x = splsched();
1737 thread_lock(thread);
1738
1739 tval = timer_grab(&thread->user_timer);
1740 info->threads_user += tval;
1741 info->total_user += tval;
1742
1743 tval = timer_grab(&thread->system_timer);
1744 if (thread->precise_user_kernel_time) {
1745 info->threads_system += tval;
1746 info->total_system += tval;
1747 } else {
1748 /* system_timer may represent either sys or user */
1749 info->threads_user += tval;
1750 info->total_user += tval;
1751 }
1752
1753 thread_unlock(thread);
1754 splx(x);
1755 }
1756
1757
1758 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
1759 break;
1760 }
1761
1762 case TASK_DYLD_INFO:
1763 {
1764 task_dyld_info_t info;
1765
1766 /*
1767 * We added the format field to TASK_DYLD_INFO output. For
1768 * temporary backward compatibility, accept the fact that
1769 * clients may ask for the old version - distinguished by the
1770 * size of the expected result structure.
1771 */
1772 #define TASK_LEGACY_DYLD_INFO_COUNT \
1773 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
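/*
 * With mach_vm_address_t and mach_vm_size_t both 8 bytes and natural_t
 * 4 bytes, the legacy count works out to 16/4 = 4 natural_t's, i.e.
 * only the two 64-bit image-info fields; TASK_DYLD_INFO_COUNT also
 * covers the trailing all_image_info_format field.
 */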
1774
1775 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
1776 error = KERN_INVALID_ARGUMENT;
1777 break;
1778 }
1779
1780 info = (task_dyld_info_t)task_info_out;
1781 info->all_image_info_addr = task->all_image_info_addr;
1782 info->all_image_info_size = task->all_image_info_size;
1783
1784 /* only set format on output for those expecting it */
1785 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
1786 info->all_image_info_format = task_has_64BitAddr(task) ?
1787 TASK_DYLD_ALL_IMAGE_INFO_64 :
1788 TASK_DYLD_ALL_IMAGE_INFO_32 ;
1789 *task_info_count = TASK_DYLD_INFO_COUNT;
1790 } else {
1791 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
1792 }
1793 break;
1794 }
1795
1796 case TASK_EXTMOD_INFO:
1797 {
1798 task_extmod_info_t info;
1799 void *p;
1800
1801 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
1802 error = KERN_INVALID_ARGUMENT;
1803 break;
1804 }
1805
1806 info = (task_extmod_info_t)task_info_out;
1807
1808 p = get_bsdtask_info(task);
1809 if (p) {
1810 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
1811 } else {
1812 bzero(info->task_uuid, sizeof(info->task_uuid));
1813 }
1814 info->extmod_statistics = task->extmod_statistics;
1815 *task_info_count = TASK_EXTMOD_INFO_COUNT;
1816
1817 break;
1818 }
1819
1820 case TASK_KERNELMEMORY_INFO:
1821 {
1822 task_kernelmemory_info_t tkm_info;
1823 ledger_amount_t credit, debit;
1824
1825 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
1826 error = KERN_INVALID_ARGUMENT;
1827 break;
1828 }
1829
1830 tkm_info = (task_kernelmemory_info_t) task_info_out;
1831 tkm_info->total_palloc = 0;
1832 tkm_info->total_pfree = 0;
1833 tkm_info->total_salloc = 0;
1834 tkm_info->total_sfree = 0;
1835
1836 if (task == kernel_task) {
1837 /*
1838 * All shared allocs/frees from other tasks count against
1839 * the kernel private memory usage. If we are looking up
1840 * info for the kernel task, gather from everywhere.
1841 */
1842 task_unlock(task);
1843
1844 /* start by accounting for all the terminated tasks against the kernel */
1845 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
1846 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
1847
1848 /* count all other task/thread shared alloc/free against the kernel */
1849 lck_mtx_lock(&tasks_threads_lock);
1850
1851 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
1852 queue_iterate(&tasks, task, task_t, tasks) {
1853 if (task == kernel_task) {
1854 if (ledger_get_entries(task->ledger,
1855 task_ledgers.tkm_private, &credit,
1856 &debit) == KERN_SUCCESS) {
1857 tkm_info->total_palloc += credit;
1858 tkm_info->total_pfree += debit;
1859 }
1860 }
1861 if (!ledger_get_entries(task->ledger,
1862 task_ledgers.tkm_shared, &credit, &debit)) {
1863 tkm_info->total_palloc += credit;
1864 tkm_info->total_pfree += debit;
1865 }
1866 }
1867 lck_mtx_unlock(&tasks_threads_lock);
1868 } else {
1869 if (!ledger_get_entries(task->ledger,
1870 task_ledgers.tkm_private, &credit, &debit)) {
1871 tkm_info->total_palloc = credit;
1872 tkm_info->total_pfree = debit;
1873 }
1874 if (!ledger_get_entries(task->ledger,
1875 task_ledgers.tkm_shared, &credit, &debit)) {
1876 tkm_info->total_salloc = credit;
1877 tkm_info->total_sfree = debit;
1878 }
1879 task_unlock(task);
1880 }
1881
1882 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
1883 return KERN_SUCCESS;
1884 }
1885
1886 /* OBSOLETE */
1887 case TASK_SCHED_FIFO_INFO:
1888 {
1889
1890 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
1891 error = KERN_INVALID_ARGUMENT;
1892 break;
1893 }
1894
1895 error = KERN_INVALID_POLICY;
1896 break;
1897 }
1898
1899 /* OBSOLETE */
1900 case TASK_SCHED_RR_INFO:
1901 {
1902 register policy_rr_base_t rr_base;
1903 uint32_t quantum_time;
1904 uint64_t quantum_ns;
1905
1906 if (*task_info_count < POLICY_RR_BASE_COUNT) {
1907 error = KERN_INVALID_ARGUMENT;
1908 break;
1909 }
1910
1911 rr_base = (policy_rr_base_t) task_info_out;
1912
1913 if (task != kernel_task) {
1914 error = KERN_INVALID_POLICY;
1915 break;
1916 }
1917
1918 rr_base->base_priority = task->priority;
1919
1920 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
1921 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
1922
1923 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
1924
1925 *task_info_count = POLICY_RR_BASE_COUNT;
1926 break;
1927 }
1928
1929 /* OBSOLETE */
1930 case TASK_SCHED_TIMESHARE_INFO:
1931 {
1932 register policy_timeshare_base_t ts_base;
1933
1934 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
1935 error = KERN_INVALID_ARGUMENT;
1936 break;
1937 }
1938
1939 ts_base = (policy_timeshare_base_t) task_info_out;
1940
1941 if (task == kernel_task) {
1942 error = KERN_INVALID_POLICY;
1943 break;
1944 }
1945
1946 ts_base->base_priority = task->priority;
1947
1948 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1949 break;
1950 }
1951
1952 case TASK_SECURITY_TOKEN:
1953 {
1954 register security_token_t *sec_token_p;
1955
1956 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1957 error = KERN_INVALID_ARGUMENT;
1958 break;
1959 }
1960
1961 sec_token_p = (security_token_t *) task_info_out;
1962
1963 *sec_token_p = task->sec_token;
1964
1965 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
1966 break;
1967 }
1968
1969 case TASK_AUDIT_TOKEN:
1970 {
1971 register audit_token_t *audit_token_p;
1972
1973 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
1974 error = KERN_INVALID_ARGUMENT;
1975 break;
1976 }
1977
1978 audit_token_p = (audit_token_t *) task_info_out;
1979
1980 *audit_token_p = task->audit_token;
1981
1982 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
1983 break;
1984 }
1985
1986 case TASK_SCHED_INFO:
1987 error = KERN_INVALID_ARGUMENT;
1988 break;
1989
1990 case TASK_EVENTS_INFO:
1991 {
1992 register task_events_info_t events_info;
1993 register thread_t thread;
1994
1995 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
1996 error = KERN_INVALID_ARGUMENT;
1997 break;
1998 }
1999
2000 events_info = (task_events_info_t) task_info_out;
2001
2002
2003 events_info->faults = task->faults;
2004 events_info->pageins = task->pageins;
2005 events_info->cow_faults = task->cow_faults;
2006 events_info->messages_sent = task->messages_sent;
2007 events_info->messages_received = task->messages_received;
2008 events_info->syscalls_mach = task->syscalls_mach;
2009 events_info->syscalls_unix = task->syscalls_unix;
2010
2011 events_info->csw = task->c_switch;
2012
2013 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2014 events_info->csw += thread->c_switch;
2015 events_info->syscalls_mach += thread->syscalls_mach;
2016 events_info->syscalls_unix += thread->syscalls_unix;
2017 }
2018
2019
2020 *task_info_count = TASK_EVENTS_INFO_COUNT;
2021 break;
2022 }
2023 case TASK_AFFINITY_TAG_INFO:
2024 {
2025 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
2026 error = KERN_INVALID_ARGUMENT;
2027 break;
2028 }
2029
2030 error = task_affinity_info(task, task_info_out, task_info_count);
2031 break;
2032 }
2033 default:
2034 error = KERN_INVALID_ARGUMENT;
2035 }
2036
2037 task_unlock(task);
2038 return (error);
2039 }
2040
2041 void
2042 task_vtimer_set(
2043 task_t task,
2044 integer_t which)
2045 {
2046 thread_t thread;
2047 spl_t x;
2048
2049 /* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */
2050
2051 task_lock(task);
2052
2053 task->vtimers |= which;
2054
2055 switch (which) {
2056
2057 case TASK_VTIMER_USER:
2058 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2059 x = splsched();
2060 thread_lock(thread);
2061 if (thread->precise_user_kernel_time)
2062 thread->vtimer_user_save = timer_grab(&thread->user_timer);
2063 else
2064 thread->vtimer_user_save = timer_grab(&thread->system_timer);
2065 thread_unlock(thread);
2066 splx(x);
2067 }
2068 break;
2069
2070 case TASK_VTIMER_PROF:
2071 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2072 x = splsched();
2073 thread_lock(thread);
2074 thread->vtimer_prof_save = timer_grab(&thread->user_timer);
2075 thread->vtimer_prof_save += timer_grab(&thread->system_timer);
2076 thread_unlock(thread);
2077 splx(x);
2078 }
2079 break;
2080
2081 case TASK_VTIMER_RLIM:
2082 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2083 x = splsched();
2084 thread_lock(thread);
2085 thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
2086 thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
2087 thread_unlock(thread);
2088 splx(x);
2089 }
2090 break;
2091 }
2092
2093 task_unlock(task);
2094 }
2095
2096 void
2097 task_vtimer_clear(
2098 task_t task,
2099 integer_t which)
2100 {
2101 assert(task == current_task());
2102
2103 task_lock(task);
2104
2105 task->vtimers &= ~which;
2106
2107 task_unlock(task);
2108 }
2109
2110 void
2111 task_vtimer_update(
2112 __unused
2113 task_t task,
2114 integer_t which,
2115 uint32_t *microsecs)
2116 {
2117 thread_t thread = current_thread();
2118 uint32_t tdelt;
2119 clock_sec_t secs;
2120 uint64_t tsum;
2121
2122 assert(task == current_task());
2123
2124 assert(task->vtimers & which);
2125
2126 secs = tdelt = 0;
2127
2128 switch (which) {
2129
2130 case TASK_VTIMER_USER:
2131 if (thread->precise_user_kernel_time) {
2132 tdelt = (uint32_t)timer_delta(&thread->user_timer,
2133 &thread->vtimer_user_save);
2134 } else {
2135 tdelt = (uint32_t)timer_delta(&thread->system_timer,
2136 &thread->vtimer_user_save);
2137 }
2138 absolutetime_to_microtime(tdelt, &secs, microsecs);
2139 break;
2140
2141 case TASK_VTIMER_PROF:
2142 tsum = timer_grab(&thread->user_timer);
2143 tsum += timer_grab(&thread->system_timer);
2144 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
2145 absolutetime_to_microtime(tdelt, &secs, microsecs);
2146 /* if the time delta is smaller than a usec, ignore */
2147 if (*microsecs != 0)
2148 thread->vtimer_prof_save = tsum;
2149 break;
2150
2151 case TASK_VTIMER_RLIM:
2152 tsum = timer_grab(&thread->user_timer);
2153 tsum += timer_grab(&thread->system_timer);
2154 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
2155 thread->vtimer_rlim_save = tsum;
2156 absolutetime_to_microtime(tdelt, &secs, microsecs);
2157 break;
2158 }
2159
2160 }
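
/*
 * Illustrative usage sketch (not part of the original source): the three
 * routines above form a set/poll/clear interface.  A client first calls
 * task_vtimer_set() to snapshot the per-thread timers, then, from a thread
 * inside the task (task_vtimer_update() asserts task == current_task()),
 * periodically polls for the elapsed time, and finally clears the timer.
 * vtimer_expired() below is a hypothetical consumer.
 *
 *	uint32_t usecs = 0;
 *
 *	task_vtimer_set(task, TASK_VTIMER_USER);
 *	...
 *	if (task->vtimers & TASK_VTIMER_USER) {
 *		task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
 *		if (usecs != 0)
 *			vtimer_expired(task, usecs);	/* hypothetical */
 *	}
 *	...
 *	task_vtimer_clear(task, TASK_VTIMER_USER);
 */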
2161
2162 /*
2163 * task_assign:
2164 *
2165 * Change the assigned processor set for the task
2166 */
2167 kern_return_t
2168 task_assign(
2169 __unused task_t task,
2170 __unused processor_set_t new_pset,
2171 __unused boolean_t assign_threads)
2172 {
2173 return(KERN_FAILURE);
2174 }
2175
2176 /*
2177 * task_assign_default:
2178 *
2179 * Version of task_assign to assign to default processor set.
2180 */
2181 kern_return_t
2182 task_assign_default(
2183 task_t task,
2184 boolean_t assign_threads)
2185 {
2186 return (task_assign(task, &pset0, assign_threads));
2187 }
2188
2189 /*
2190 * task_get_assignment
2191 *
2192 * Return name of processor set that task is assigned to.
2193 */
2194 kern_return_t
2195 task_get_assignment(
2196 task_t task,
2197 processor_set_t *pset)
2198 {
2199 if (!task->active)
2200 return(KERN_FAILURE);
2201
2202 *pset = &pset0;
2203
2204 return (KERN_SUCCESS);
2205 }
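
/*
 * Illustrative sketch (not part of the original source): this kernel exposes
 * a single processor set, so the assignment can be queried but not changed.
 *
 *	processor_set_t pset;
 *
 *	if (task_get_assignment(task, &pset) == KERN_SUCCESS)
 *		assert(pset == &pset0);
 *	assert(task_assign_default(task, TRUE) == KERN_FAILURE);
 */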
2206
2207
2208 /*
2209 * task_policy
2210 *
2211 * Set scheduling policy and parameters, both base and limit, for
2212 * the given task. Policy must be a policy which is enabled for the
2213 * processor set. Change contained threads if requested.
2214 */
2215 kern_return_t
2216 task_policy(
2217 __unused task_t task,
2218 __unused policy_t policy_id,
2219 __unused policy_base_t base,
2220 __unused mach_msg_type_number_t count,
2221 __unused boolean_t set_limit,
2222 __unused boolean_t change)
2223 {
2224 return(KERN_FAILURE);
2225 }
2226
2227 /*
2228 * task_set_policy
2229 *
2230 * Set scheduling policy and parameters, both base and limit, for
2231 * the given task. Policy can be any policy implemented by the
2232 * processor set, whether enabled or not. Change contained threads
2233 * if requested.
2234 */
2235 kern_return_t
2236 task_set_policy(
2237 __unused task_t task,
2238 __unused processor_set_t pset,
2239 __unused policy_t policy_id,
2240 __unused policy_base_t base,
2241 __unused mach_msg_type_number_t base_count,
2242 __unused policy_limit_t limit,
2243 __unused mach_msg_type_number_t limit_count,
2244 __unused boolean_t change)
2245 {
2246 return(KERN_FAILURE);
2247 }
2248
2249 #if FAST_TAS
2250 kern_return_t
2251 task_set_ras_pc(
2252 task_t task,
2253 vm_offset_t pc,
2254 vm_offset_t endpc)
2255 {
2256 extern int fast_tas_debug;
2257
2258 if (fast_tas_debug) {
2259 printf("task %p: setting fast_tas to [0x%lx, 0x%lx]\n",
2260 task, (unsigned long)pc, (unsigned long)endpc);
2261 }
2262 task_lock(task);
2263 task->fast_tas_base = pc;
2264 task->fast_tas_end = endpc;
2265 task_unlock(task);
2266 return KERN_SUCCESS;
2267 }
2268 #else /* FAST_TAS */
2269 kern_return_t
2270 task_set_ras_pc(
2271 __unused task_t task,
2272 __unused vm_offset_t pc,
2273 __unused vm_offset_t endpc)
2274 {
2275 return KERN_FAILURE;
2276 }
2277 #endif /* FAST_TAS */
2278
2279 void
2280 task_synchronizer_destroy_all(task_t task)
2281 {
2282 semaphore_t semaphore;
2283 lock_set_t lock_set;
2284
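/*
 * Both drain loops below depend on the destroy call unlinking the object
 * from the task's list; otherwise queue_first() would keep returning the
 * same element.
 */
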
2285 /*
2286 * Destroy owned semaphores
2287 */
2288
2289 while (!queue_empty(&task->semaphore_list)) {
2290 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
2291 (void) semaphore_destroy(task, semaphore);
2292 }
2293
2294 /*
2295 * Destroy owned lock sets
2296 */
2297
2298 while (!queue_empty(&task->lock_set_list)) {
2299 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
2300 (void) lock_set_destroy(task, lock_set);
2301 }
2302 }
2303
2304 /*
2305 * Install default (machine-dependent) initial thread state
2306 * on the task. Subsequent thread creation will have this initial
2307 * state set on the thread by machine_thread_inherit_taskwide().
2308 * Flavors and structures are exactly the same as those passed to thread_set_state().
2309 */
2310 kern_return_t
2311 task_set_state(
2312 task_t task,
2313 int flavor,
2314 thread_state_t state,
2315 mach_msg_type_number_t state_count)
2316 {
2317 kern_return_t ret;
2318
2319 if (task == TASK_NULL) {
2320 return (KERN_INVALID_ARGUMENT);
2321 }
2322
2323 task_lock(task);
2324
2325 if (!task->active) {
2326 task_unlock(task);
2327 return (KERN_FAILURE);
2328 }
2329
2330 ret = machine_task_set_state(task, flavor, state, state_count);
2331
2332 task_unlock(task);
2333 return ret;
2334 }
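
/*
 * Illustrative sketch (not part of the original source): a caller holding
 * the task port can install a default register state that newly created
 * threads inherit.  The x86_64 flavor and field below are only an example,
 * and entry_point/handle_error are hypothetical.
 *
 *	x86_thread_state64_t state = { 0 };
 *	kern_return_t kr;
 *
 *	state.__rip = (uint64_t)entry_point;
 *	kr = task_set_state(task_port, x86_THREAD_STATE64,
 *		(thread_state_t)&state, x86_THREAD_STATE64_COUNT);
 *	if (kr != KERN_SUCCESS)
 *		handle_error(kr);
 */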
2335
2336 /*
2337 * Examine the default (machine-dependent) initial thread state
2338 * on the task, as set by task_set_state(). Flavors and structures
2339 * are exactly the same as those passed to thread_get_state().
2340 */
2341 kern_return_t
2342 task_get_state(
2343 task_t task,
2344 int flavor,
2345 thread_state_t state,
2346 mach_msg_type_number_t *state_count)
2347 {
2348 kern_return_t ret;
2349
2350 if (task == TASK_NULL) {
2351 return (KERN_INVALID_ARGUMENT);
2352 }
2353
2354 task_lock(task);
2355
2356 if (!task->active) {
2357 task_unlock(task);
2358 return (KERN_FAILURE);
2359 }
2360
2361 ret = machine_task_get_state(task, flavor, state, state_count);
2362
2363 task_unlock(task);
2364 return ret;
2365 }
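
/*
 * Illustrative counterpart to the sketch after task_set_state() (not part
 * of the original source): reading the state back uses an in/out count,
 * mirroring thread_get_state().
 *
 *	mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 *	x86_thread_state64_t state;
 *
 *	kr = task_get_state(task_port, x86_THREAD_STATE64,
 *		(thread_state_t)&state, &count);
 */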
2366
2367
2368 /*
2369 * We need to export some functions to other components that
2370 * are currently implemented as macros within the osfmk
2371 * component. Just export them as functions of the same name.
2372 */
2373 boolean_t is_kerneltask(task_t t)
2374 {
2375 if (t == kernel_task)
2376 return (TRUE);
2377
2378 return (FALSE);
2379 }
2380
2381 int
2382 check_for_tasksuspend(task_t task)
2383 {
2384
2385 if (task == TASK_NULL)
2386 return (0);
2387
2388 return (task->suspend_count > 0);
2389 }
2390
2391 #undef current_task
2392 task_t current_task(void);
2393 task_t current_task(void)
2394 {
2395 return (current_task_fast());
2396 }
2397
2398 #undef task_reference
2399 void task_reference(task_t task);
2400 void
2401 task_reference(
2402 task_t task)
2403 {
2404 if (task != TASK_NULL)
2405 task_reference_internal(task);
2406 }
2407
2408 /*
2409 * This routine is always called with the task lock held.
2410 * It returns a thread handle without taking a reference, since the
2411 * caller operates on the thread while the task lock is held.
2412 */
2413 thread_t
2414 task_findtid(task_t task, uint64_t tid)
2415 {
2416 thread_t thread = THREAD_NULL;
2417
2418 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2419 if (thread->thread_id == tid)
2420 return(thread);
2421 }
2422 return(THREAD_NULL);
2423 }
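
/*
 * Illustrative sketch (not part of the original source): since no reference
 * is taken, the caller must hold the task lock for as long as it uses the
 * returned thread.  examine_thread_locked() is hypothetical.
 *
 *	task_lock(task);
 *	thread = task_findtid(task, tid);
 *	if (thread != THREAD_NULL)
 *		examine_thread_locked(thread);
 *	task_unlock(task);
 */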
2424
2425
2426 #if CONFIG_MACF_MACH
2427 /*
2428 * Protect two task labels against modification by adding a reference on
2429 * both label handles. The locks do not actually have to be held while
2430 * using the labels, since only labels with a single reference can be
2431 * modified in place.
2432 */
2433
2434 void
2435 tasklabel_lock2(
2436 task_t a,
2437 task_t b)
2438 {
2439 labelh_reference(a->label);
2440 labelh_reference(b->label);
2441 }
2442
2443 void
2444 tasklabel_unlock2(
2445 task_t a,
2446 task_t b)
2447 {
2448 labelh_release(a->label);
2449 labelh_release(b->label);
2450 }
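
/*
 * Illustrative sketch (not part of the original source): the pair above is
 * meant to bracket code that reads both labels, e.g. a check that compares
 * them.  mac_labels_equal() is hypothetical.
 *
 *	tasklabel_lock2(a, b);
 *	equal = mac_labels_equal(&a->maclabel, &b->maclabel);
 *	tasklabel_unlock2(a, b);
 */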
2451
2452 void
2453 mac_task_label_update_internal(
2454 struct label *pl,
2455 struct task *task)
2456 {
2457
2458 tasklabel_lock(task);
2459 task->label = labelh_modify(task->label);
2460 mac_task_label_update(pl, &task->maclabel);
2461 tasklabel_unlock(task);
2462 ip_lock(task->itk_self);
2463 mac_port_label_update_cred(pl, &task->itk_self->ip_label);
2464 ip_unlock(task->itk_self);
2465 }
2466
2467 void
2468 mac_task_label_modify(
2469 struct task *task,
2470 void *arg,
2471 void (*f) (struct label *l, void *arg))
2472 {
2473
2474 tasklabel_lock(task);
2475 task->label = labelh_modify(task->label);
2476 (*f)(&task->maclabel, arg);
2477 tasklabel_unlock(task);
2478 }
2479
2480 struct label *
2481 mac_task_get_label(struct task *task)
2482 {
2483 return (&task->maclabel);
2484 }
2485 #endif