1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63 /*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
81 /*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
88
89 #include <mach_kdb.h>
90 #include <fast_tas.h>
91 #include <platforms.h>
92
93 #include <mach/mach_types.h>
94 #include <mach/boolean.h>
95 #include <mach/host_priv.h>
96 #include <mach/machine/vm_types.h>
97 #include <mach/vm_param.h>
98 #include <mach/semaphore.h>
99 #include <mach/task_info.h>
100 #include <mach/task_special_ports.h>
101
102 #include <ipc/ipc_types.h>
103 #include <ipc/ipc_space.h>
104 #include <ipc/ipc_entry.h>
105
106 #include <kern/kern_types.h>
107 #include <kern/mach_param.h>
108 #include <kern/misc_protos.h>
109 #include <kern/task.h>
110 #include <kern/thread.h>
111 #include <kern/zalloc.h>
112 #include <kern/kalloc.h>
113 #include <kern/processor.h>
114 #include <kern/sched_prim.h> /* for thread_wakeup */
115 #include <kern/ipc_tt.h>
116 #include <kern/ledger.h>
117 #include <kern/host.h>
118 #include <kern/clock.h>
119 #include <kern/timer.h>
120 #include <kern/assert.h>
121 #include <kern/sync_lock.h>
122 #include <kern/affinity.h>
123
124 #include <vm/pmap.h>
125 #include <vm/vm_map.h>
126 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
127 #include <vm/vm_pageout.h>
128 #include <vm/vm_protos.h>
129
130 #if MACH_KDB
131 #include <ddb/db_sym.h>
132 #endif /* MACH_KDB */
133
134 #ifdef __ppc__
135 #include <ppc/exception.h>
136 #include <ppc/hw_perfmon.h>
137 #endif
138
139
140 /*
141 * Exported interfaces
142 */
143
144 #include <mach/task_server.h>
145 #include <mach/mach_host_server.h>
146 #include <mach/host_security_server.h>
147 #include <mach/mach_port_server.h>
148 #include <mach/security_server.h>
149
150 #include <vm/vm_shared_region.h>
151
152 #if CONFIG_MACF_MACH
153 #include <security/mac_mach_internal.h>
154 #endif
155
156 #if CONFIG_COUNTERS
157 #include <pmc/pmc.h>
158 #endif /* CONFIG_COUNTERS */
159
160 task_t kernel_task;
161 zone_t task_zone;
162 lck_attr_t task_lck_attr;
163 lck_grp_t task_lck_grp;
164 lck_grp_attr_t task_lck_grp_attr;
165
166 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
167
168 /* Forwards */
169
170 void task_hold_locked(
171 task_t task);
172 void task_wait_locked(
173 task_t task);
174 void task_release_locked(
175 task_t task);
176 void task_free(
177 task_t task );
178 void task_synchronizer_destroy_all(
179 task_t task);
180
181 kern_return_t task_set_ledger(
182 task_t task,
183 ledger_t wired,
184 ledger_t paged);
185
186 int check_for_tasksuspend(
187 task_t task);
188
189 void
190 task_backing_store_privileged(
191 task_t task)
192 {
193 task_lock(task);
194 task->priv_flags |= VM_BACKING_STORE_PRIV;
195 task_unlock(task);
196 return;
197 }
198
199
200 void
201 task_set_64bit(
202 task_t task,
203 boolean_t is64bit)
204 {
205 #if defined(__i386__) || defined(__x86_64__)
206 thread_t thread;
207 #endif /* __i386__ || __x86_64__ */
208 int vm_flags = 0;
209
210 if (is64bit) {
211 if (task_has_64BitAddr(task))
212 return;
213
214 task_set_64BitAddr(task);
215 } else {
216 if ( !task_has_64BitAddr(task))
217 return;
218
219 /*
220 * Deallocate all memory previously allocated
221 * above the 32-bit address space, since it won't
222 * be accessible anymore.
223 */
224 /* remove regular VM map entries & pmap mappings */
225 (void) vm_map_remove(task->map,
226 (vm_map_offset_t) VM_MAX_ADDRESS,
227 MACH_VM_MAX_ADDRESS,
228 0);
229 #ifdef __ppc__
230 /*
231 * PPC51: ppc64 is limited to 51-bit addresses.
232 * Memory mapped above that limit is handled specially
233 * at the pmap level, so let pmap clean the commpage mapping
234 * explicitly...
235 */
236 pmap_unmap_sharedpage(task->map->pmap); /* Unmap commpage */
237 /* ... and avoid regular pmap cleanup */
238 vm_flags |= VM_MAP_REMOVE_NO_PMAP_CLEANUP;
239 #endif /* __ppc__ */
240 /* remove the higher VM mappings */
241 (void) vm_map_remove(task->map,
242 MACH_VM_MAX_ADDRESS,
243 0xFFFFFFFFFFFFF000ULL,
244 vm_flags);
245 task_clear_64BitAddr(task);
246 }
247 /* FIXME: On x86, the thread save state flavor can diverge from the
248 * task's 64-bit feature flag due to the 32-bit/64-bit register save
249 * state dichotomy. Since we can be pre-empted in this interval,
250 * certain routines may observe the thread as being in an inconsistent
251 * state with respect to its task's 64-bitness.
252 */
253 #if defined(__i386__) || defined(__x86_64__)
254 task_lock(task);
255 queue_iterate(&task->threads, thread, thread_t, task_threads) {
256 thread_mtx_lock(thread);
257 machine_thread_switch_addrmode(thread);
258 thread_mtx_unlock(thread);
259 }
260 task_unlock(task);
261 #endif /* __i386__ || __x86_64__ */
262 }
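
/*
 * Note: when a task drops back to 32-bit, the two vm_map_remove() calls
 * above tear down everything mapped above the 32-bit VM_MAX_ADDRESS, since
 * those addresses are no longer reachable. On x86 the per-thread loop then
 * switches each thread's saved-state flavor to match the task's new address
 * mode (in either direction).
 */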
263
264
265 void
266 task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
267 {
268 task_lock(task);
269 task->all_image_info_addr = addr;
270 task->all_image_info_size = size;
271 task_unlock(task);
272 }
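
/*
 * Note: the address and size recorded by task_set_dyld_info() above are what
 * the TASK_DYLD_INFO flavor of task_info() reports back to user space;
 * debuggers use them to locate dyld's all_image_infos data for the task.
 */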
273
274 void
275 task_init(void)
276 {
277
278 lck_grp_attr_setdefault(&task_lck_grp_attr);
279 lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
280 lck_attr_setdefault(&task_lck_attr);
281 lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
282
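/*
 * zinit() arguments, in order: element size, maximum memory the zone may
 * consume, allocation chunk size, and a name for debugging. Each element
 * here is one struct task, and the zone is capped at task_max tasks.
 */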
283 task_zone = zinit(
284 sizeof(struct task),
285 task_max * sizeof(struct task),
286 TASK_CHUNK * sizeof(struct task),
287 "tasks");
288
289 /*
290 * Create the kernel task as the first task.
291 */
292 #ifdef __LP64__
293 if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
294 #else
295 if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
296 #endif
297 panic("task_init\n");
298
299 vm_map_deallocate(kernel_task->map);
300 kernel_task->map = kernel_map;
301 }
302
303 /*
304 * Create a task running in the kernel address space. It may
305 * have its own map and may have ipc privileges. (No longer supported; see stub below.)
306 */
307 kern_return_t
308 kernel_task_create(
309 __unused task_t parent_task,
310 __unused vm_offset_t map_base,
311 __unused vm_size_t map_size,
312 __unused task_t *child_task)
313 {
314 return (KERN_INVALID_ARGUMENT);
315 }
316
317 kern_return_t
318 task_create(
319 task_t parent_task,
320 __unused ledger_port_array_t ledger_ports,
321 __unused mach_msg_type_number_t num_ledger_ports,
322 __unused boolean_t inherit_memory,
323 __unused task_t *child_task) /* OUT */
324 {
325 if (parent_task == TASK_NULL)
326 return(KERN_INVALID_ARGUMENT);
327
328 /*
329 * No longer supported: too many calls assume that a task has a valid
330 * process attached.
331 */
332 return(KERN_FAILURE);
333 }
334
335 kern_return_t
336 host_security_create_task_token(
337 host_security_t host_security,
338 task_t parent_task,
339 __unused security_token_t sec_token,
340 __unused audit_token_t audit_token,
341 __unused host_priv_t host_priv,
342 __unused ledger_port_array_t ledger_ports,
343 __unused mach_msg_type_number_t num_ledger_ports,
344 __unused boolean_t inherit_memory,
345 __unused task_t *child_task) /* OUT */
346 {
347 if (parent_task == TASK_NULL)
348 return(KERN_INVALID_ARGUMENT);
349
350 if (host_security == HOST_NULL)
351 return(KERN_INVALID_SECURITY);
352
353 /*
354 * No longer supported.
355 */
356 return(KERN_FAILURE);
357 }
358
359 kern_return_t
360 task_create_internal(
361 task_t parent_task,
362 boolean_t inherit_memory,
363 boolean_t is_64bit,
364 task_t *child_task) /* OUT */
365 {
366 task_t new_task;
367 vm_shared_region_t shared_region;
368
369 new_task = (task_t) zalloc(task_zone);
370
371 if (new_task == TASK_NULL)
372 return(KERN_RESOURCE_SHORTAGE);
373
374 /* one ref for just being alive; one for our caller */
375 new_task->ref_count = 2;
376
377 /* if inherit_memory is true, parent_task MUST not be NULL */
378 if (inherit_memory)
379 new_task->map = vm_map_fork(parent_task->map);
380 else
381 new_task->map = vm_map_create(pmap_create(0, is_64bit),
382 (vm_map_offset_t)(VM_MIN_ADDRESS),
383 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
384
385 /* Inherit memlock limit from parent */
386 if (parent_task)
387 vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
388
389 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
390 queue_init(&new_task->threads);
391 new_task->suspend_count = 0;
392 new_task->thread_count = 0;
393 new_task->active_thread_count = 0;
394 new_task->user_stop_count = 0;
395 new_task->role = TASK_UNSPECIFIED;
396 new_task->active = TRUE;
397 new_task->halting = FALSE;
398 new_task->user_data = NULL;
399 new_task->faults = 0;
400 new_task->cow_faults = 0;
401 new_task->pageins = 0;
402 new_task->messages_sent = 0;
403 new_task->messages_received = 0;
404 new_task->syscalls_mach = 0;
405 new_task->priv_flags = 0;
406 new_task->syscalls_unix=0;
407 new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
408 new_task->taskFeatures[0] = 0; /* Init task features */
409 new_task->taskFeatures[1] = 0; /* Init task features */
410
411 #ifdef MACH_BSD
412 new_task->bsd_info = NULL;
413 #endif /* MACH_BSD */
414
415 #if defined(__i386__) || defined(__x86_64__)
416 new_task->i386_ldt = 0;
417 new_task->task_debug = NULL;
418
419 #endif
420
421 #ifdef __ppc__
422 if(BootProcInfo.pf.Available & pf64Bit) new_task->taskFeatures[0] |= tf64BitData; /* If 64-bit machine, show we have 64-bit registers at least */
423 #endif
424
425 queue_init(&new_task->semaphore_list);
426 queue_init(&new_task->lock_set_list);
427 new_task->semaphores_owned = 0;
428 new_task->lock_sets_owned = 0;
429
430 #if CONFIG_MACF_MACH
431 new_task->label = labelh_new(1);
432 mac_task_label_init (&new_task->maclabel);
433 #endif
434
435 ipc_task_init(new_task, parent_task);
436
437 new_task->total_user_time = 0;
438 new_task->total_system_time = 0;
439
440 new_task->vtimers = 0;
441
442 new_task->shared_region = NULL;
443
444 new_task->affinity_space = NULL;
445
446 #if CONFIG_COUNTERS
447 new_task->t_chud = 0U;
448 #endif
449
450 if (parent_task != TASK_NULL) {
451 new_task->sec_token = parent_task->sec_token;
452 new_task->audit_token = parent_task->audit_token;
453
454 /* inherit the parent's shared region */
455 shared_region = vm_shared_region_get(parent_task);
456 vm_shared_region_set(new_task, shared_region);
457
458 new_task->wired_ledger_port = ledger_copy(
459 convert_port_to_ledger(parent_task->wired_ledger_port));
460 new_task->paged_ledger_port = ledger_copy(
461 convert_port_to_ledger(parent_task->paged_ledger_port));
462 if(task_has_64BitAddr(parent_task))
463 task_set_64BitAddr(new_task);
464 new_task->all_image_info_addr = parent_task->all_image_info_addr;
465 new_task->all_image_info_size = parent_task->all_image_info_size;
466
467 #if defined(__i386__) || defined(__x86_64__)
468 if (inherit_memory && parent_task->i386_ldt)
469 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
470 #endif
471 if (inherit_memory && parent_task->affinity_space)
472 task_affinity_create(parent_task, new_task);
473
474 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
475 }
476 else {
477 new_task->sec_token = KERNEL_SECURITY_TOKEN;
478 new_task->audit_token = KERNEL_AUDIT_TOKEN;
479 new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
480 new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
481 #ifdef __LP64__
482 if(is_64bit)
483 task_set_64BitAddr(new_task);
484 #endif
485
486 new_task->pset_hint = PROCESSOR_SET_NULL;
487 }
488
489 if (kernel_task == TASK_NULL) {
490 new_task->priority = BASEPRI_KERNEL;
491 new_task->max_priority = MAXPRI_KERNEL;
492 }
493 else {
494 new_task->priority = BASEPRI_DEFAULT;
495 new_task->max_priority = MAXPRI_USER;
496 }
497
498 lck_mtx_lock(&tasks_threads_lock);
499 queue_enter(&tasks, new_task, task_t, tasks);
500 tasks_count++;
501 lck_mtx_unlock(&tasks_threads_lock);
502
503 if (vm_backing_store_low && parent_task != NULL)
504 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
505
506 ipc_task_enable(new_task);
507
508 *child_task = new_task;
509 return(KERN_SUCCESS);
510 }
511
512 /*
513 * task_deallocate:
514 *
515 * Drop a reference on a task.
516 */
517 void
518 task_deallocate(
519 task_t task)
520 {
521 if (task == TASK_NULL)
522 return;
523
524 if (task_deallocate_internal(task) > 0)
525 return;
526
527 ipc_task_terminate(task);
528
529 if (task->affinity_space)
530 task_affinity_deallocate(task);
531
532 vm_map_deallocate(task->map);
533 is_release(task->itk_space);
534
535 lck_mtx_destroy(&task->lock, &task_lck_grp);
536
537 #if CONFIG_MACF_MACH
538 labelh_release(task->label);
539 #endif
540 zfree(task_zone, task);
541 }
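
/*
 * Illustrative sketch (hypothetical usage, not part of the original file):
 * kernel code that keeps a task pointer across a blocking operation pairs a
 * reference with a matching deallocate, e.g.
 *
 *	task_reference(task);
 *	...work that may block or drop other locks...
 *	task_deallocate(task);
 *
 * When the last reference goes away, task_deallocate() above performs the
 * actual teardown (IPC, VM map, lock, label, zone element).
 */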
542
543 /*
544 * task_name_deallocate:
545 *
546 * Drop a reference on a task name.
547 */
548 void
549 task_name_deallocate(
550 task_name_t task_name)
551 {
552 return(task_deallocate((task_t)task_name));
553 }
554
555
556 /*
557 * task_terminate:
558 *
559 * Terminate the specified task. See comments on thread_terminate
560 * (kern/thread.c) about problems with terminating the "current task."
561 */
562
563 kern_return_t
564 task_terminate(
565 task_t task)
566 {
567 if (task == TASK_NULL)
568 return (KERN_INVALID_ARGUMENT);
569
570 if (task->bsd_info)
571 return (KERN_FAILURE);
572
573 return (task_terminate_internal(task));
574 }
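
/*
 * Note: the bsd_info check above means a task that is wired to a BSD process
 * cannot be torn down through this Mach interface; such tasks are reclaimed
 * through the BSD exit path, which drives task_terminate_internal() itself.
 */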
575
576 kern_return_t
577 task_terminate_internal(
578 task_t task)
579 {
580 thread_t thread, self;
581 task_t self_task;
582 boolean_t interrupt_save;
583
584 assert(task != kernel_task);
585
586 self = current_thread();
587 self_task = self->task;
588
589 /*
590 * Get the task locked and make sure that we are not racing
591 * with someone else trying to terminate us.
592 */
593 if (task == self_task)
594 task_lock(task);
595 else
596 if (task < self_task) {
597 task_lock(task);
598 task_lock(self_task);
599 }
600 else {
601 task_lock(self_task);
602 task_lock(task);
603 }
604
605 if (!task->active || !self->active) {
606 /*
607 * Task or current act is already being terminated.
608 * Just return an error. If we are dying, this will
609 * just get us to our AST special handler and that
610 * will get us to finalize the termination of ourselves.
611 */
612 task_unlock(task);
613 if (self_task != task)
614 task_unlock(self_task);
615
616 return (KERN_FAILURE);
617 }
618
619 if (self_task != task)
620 task_unlock(self_task);
621
622 /*
623 * Make sure the current thread does not get aborted out of
624 * the waits inside these operations.
625 */
626 interrupt_save = thread_interrupt_level(THREAD_UNINT);
627
628 /*
629 * Indicate that we want all the threads to stop executing
630 * at user space by holding the task (we would have held
631 * each thread independently in thread_terminate_internal -
632 * but this way we may be more likely to already find it
633 * held there). Mark the task inactive, and prevent
634 * further task operations via the task port.
635 */
636 task_hold_locked(task);
637 task->active = FALSE;
638 ipc_task_disable(task);
639
640 /*
641 * Terminate each thread in the task.
642 */
643 queue_iterate(&task->threads, thread, thread_t, task_threads) {
644 thread_terminate_internal(thread);
645 }
646
647 /*
648 * Give the machine dependent code a chance
649 * to perform cleanup before ripping apart
650 * the task.
651 */
652 if (self_task == task)
653 machine_thread_terminate_self();
654
655 task_unlock(task);
656
657 /*
658 * Destroy all synchronizers owned by the task.
659 */
660 task_synchronizer_destroy_all(task);
661
662 /*
663 * Destroy the IPC space, leaving just a reference for it.
664 */
665 ipc_space_destroy(task->itk_space);
666
667 #ifdef __ppc__
668 /*
669 * PPC51: ppc64 is limited to 51-bit addresses.
670 */
671 pmap_unmap_sharedpage(task->map->pmap); /* Unmap commpage */
672 #endif /* __ppc__ */
673
674 if (vm_map_has_4GB_pagezero(task->map))
675 vm_map_clear_4GB_pagezero(task->map);
676
677 /*
678 * If the current thread is a member of the task
679 * being terminated, then the last reference to
680 * the task will not be dropped until the thread
681 * is finally reaped. To avoid incurring the
682 * expense of removing the address space regions
683 * at reap time, we do it explicitly here.
684 */
685 vm_map_remove(task->map,
686 task->map->min_offset,
687 task->map->max_offset,
688 VM_MAP_NO_FLAGS);
689
690 /* release our shared region */
691 vm_shared_region_set(task, NULL);
692
693 lck_mtx_lock(&tasks_threads_lock);
694 queue_remove(&tasks, task, task_t, tasks);
695 tasks_count--;
696 lck_mtx_unlock(&tasks_threads_lock);
697
698 /*
699 * We no longer need to guard against being aborted, so restore
700 * the previous interruptible state.
701 */
702 thread_interrupt_level(interrupt_save);
703
704 #if __ppc__
705 perfmon_release_facility(task); // notify the perfmon facility
706 #endif
707
708 /*
709 * Get rid of the task active reference on itself.
710 */
711 task_deallocate(task);
712
713 return (KERN_SUCCESS);
714 }
715
716 /*
717 * task_start_halt:
718 *
719 * Shut the current task down (except for the current thread) in
720 * preparation for dramatic changes to the task (probably exec).
721 * We hold the task and mark all other threads in the task for
722 * termination.
723 */
724 kern_return_t
725 task_start_halt(
726 task_t task)
727 {
728 thread_t thread, self;
729
730 assert(task != kernel_task);
731
732 self = current_thread();
733
734 if (task != self->task)
735 return (KERN_INVALID_ARGUMENT);
736
737 task_lock(task);
738
739 if (task->halting || !task->active || !self->active) {
740 /*
741 * Task or current thread is already being terminated.
742 * Hurry up and return out of the current kernel context
743 * so that we run our AST special handler to terminate
744 * ourselves.
745 */
746 task_unlock(task);
747
748 return (KERN_FAILURE);
749 }
750
751 task->halting = TRUE;
752
753 if (task->thread_count > 1) {
754
755 /*
756 * Mark all the threads to keep them from starting any more
757 * user-level execution. The thread_terminate_internal code
758 * would do this on a thread by thread basis anyway, but this
759 * gives us a better chance of not having to wait there.
760 */
761 task_hold_locked(task);
762
763 /*
764 * Terminate all the other threads in the task.
765 */
766 queue_iterate(&task->threads, thread, thread_t, task_threads) {
767 if (thread != self)
768 thread_terminate_internal(thread);
769 }
770
771 task_release_locked(task);
772 }
773 task_unlock(task);
774 return KERN_SUCCESS;
775 }
776
777
778 /*
779 * task_complete_halt:
780 *
781 * Complete task halt by waiting for threads to terminate, then clean
782 * up task resources (VM, port namespace, etc...) and then let the
783 * current thread go in the (practically empty) task context.
784 */
785 void
786 task_complete_halt(task_t task)
787 {
788 task_lock(task);
789 assert(task->halting);
790 assert(task == current_task());
791
792 /*
793 * Give the machine dependent code a chance
794 * to perform cleanup of task-level resources
795 * associated with the current thread before
796 * ripping apart the task.
797 *
798 * This must be done with the task locked.
799 */
800 machine_thread_terminate_self();
801
802 /*
803 * Wait for the other threads to get shut down.
804 * When the last other thread is reaped, we'll be
805 * woken up.
806 */
807 if (task->thread_count > 1) {
808 assert_wait((event_t)&task->halting, THREAD_UNINT);
809 task_unlock(task);
810 thread_block(THREAD_CONTINUE_NULL);
811 } else {
812 task_unlock(task);
813 }
814
815 /*
816 * Destroy all synchronizers owned by the task.
817 */
818 task_synchronizer_destroy_all(task);
819
820 /*
821 * Destroy the contents of the IPC space, leaving just
822 * a reference for it.
823 */
824 ipc_space_clean(task->itk_space);
825
826 /*
827 * Clean out the address space, as we are going to be
828 * getting a new one.
829 */
830 vm_map_remove(task->map, task->map->min_offset,
831 task->map->max_offset, VM_MAP_NO_FLAGS);
832
833 task->halting = FALSE;
834 }
835
836 /*
837 * task_hold_locked:
838 *
839 * Suspend execution of the specified task.
840 * This is a recursive-style suspension of the task; a count of
841 * suspends is maintained.
842 *
843 * CONDITIONS: the task is locked and active.
844 */
845 void
846 task_hold_locked(
847 register task_t task)
848 {
849 register thread_t thread;
850
851 assert(task->active);
852
853 if (task->suspend_count++ > 0)
854 return;
855
856 /*
857 * Iterate through all the threads and hold them.
858 */
859 queue_iterate(&task->threads, thread, thread_t, task_threads) {
860 thread_mtx_lock(thread);
861 thread_hold(thread);
862 thread_mtx_unlock(thread);
863 }
864 }
865
866 /*
867 * task_hold:
868 *
869 * Same as the internal routine above, except that it must lock
870 * and verify that the task is active. This differs from task_suspend
871 * in that it places a kernel hold on the task rather than just a
872 * user-level hold. This keeps users from over-resuming the task and
873 * setting it running out from under the kernel.
874 *
875 * CONDITIONS: the caller holds a reference on the task
876 */
877 kern_return_t
878 task_hold(
879 register task_t task)
880 {
881 if (task == TASK_NULL)
882 return (KERN_INVALID_ARGUMENT);
883
884 task_lock(task);
885
886 if (!task->active) {
887 task_unlock(task);
888
889 return (KERN_FAILURE);
890 }
891
892 task_hold_locked(task);
893 task_unlock(task);
894
895 return (KERN_SUCCESS);
896 }
897
898 /*
899 * task_wait_locked:
900 *
901 * Wait for all threads in task to stop.
902 *
903 * Conditions:
904 * Called with task locked, active, and held.
905 */
906 void
907 task_wait_locked(
908 register task_t task)
909 {
910 register thread_t thread, self;
911
912 assert(task->active);
913 assert(task->suspend_count > 0);
914
915 self = current_thread();
916
917 /*
918 * Iterate through all the threads and wait for them to
919 * stop. Do not wait for the current thread if it is within
920 * the task.
921 */
922 queue_iterate(&task->threads, thread, thread_t, task_threads) {
923 if (thread != self)
924 thread_wait(thread);
925 }
926 }
927
928 /*
929 * task_release_locked:
930 *
931 * Release a kernel hold on a task.
932 *
933 * CONDITIONS: the task is locked and active
934 */
935 void
936 task_release_locked(
937 register task_t task)
938 {
939 register thread_t thread;
940
941 assert(task->active);
942 assert(task->suspend_count > 0);
943
944 if (--task->suspend_count > 0)
945 return;
946
947 queue_iterate(&task->threads, thread, thread_t, task_threads) {
948 thread_mtx_lock(thread);
949 thread_release(thread);
950 thread_mtx_unlock(thread);
951 }
952 }
953
954 /*
955 * task_release:
956 *
957 * Same as the internal routine above, except that it must lock
958 * and verify that the task is active.
959 *
960 * CONDITIONS: The caller holds a reference to the task
961 */
962 kern_return_t
963 task_release(
964 task_t task)
965 {
966 if (task == TASK_NULL)
967 return (KERN_INVALID_ARGUMENT);
968
969 task_lock(task);
970
971 if (!task->active) {
972 task_unlock(task);
973
974 return (KERN_FAILURE);
975 }
976
977 task_release_locked(task);
978 task_unlock(task);
979
980 return (KERN_SUCCESS);
981 }
982
983 kern_return_t
984 task_threads(
985 task_t task,
986 thread_act_array_t *threads_out,
987 mach_msg_type_number_t *count)
988 {
989 mach_msg_type_number_t actual;
990 thread_t *thread_list;
991 thread_t thread;
992 vm_size_t size, size_needed;
993 void *addr;
994 unsigned int i, j;
995
996 if (task == TASK_NULL)
997 return (KERN_INVALID_ARGUMENT);
998
999 size = 0; addr = NULL;
1000
1001 for (;;) {
1002 task_lock(task);
1003 if (!task->active) {
1004 task_unlock(task);
1005
1006 if (size != 0)
1007 kfree(addr, size);
1008
1009 return (KERN_FAILURE);
1010 }
1011
1012 actual = task->thread_count;
1013
1014 /* do we have the memory we need? */
1015 size_needed = actual * sizeof (mach_port_t);
1016 if (size_needed <= size)
1017 break;
1018
1019 /* unlock the task and allocate more memory */
1020 task_unlock(task);
1021
1022 if (size != 0)
1023 kfree(addr, size);
1024
1025 assert(size_needed > 0);
1026 size = size_needed;
1027
1028 addr = kalloc(size);
1029 if (addr == 0)
1030 return (KERN_RESOURCE_SHORTAGE);
1031 }
1032
1033 /* OK, have memory and the task is locked & active */
1034 thread_list = (thread_t *)addr;
1035
1036 i = j = 0;
1037
1038 for (thread = (thread_t)queue_first(&task->threads); i < actual;
1039 ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
1040 thread_reference_internal(thread);
1041 thread_list[j++] = thread;
1042 }
1043
1044 assert(queue_end(&task->threads, (queue_entry_t)thread));
1045
1046 actual = j;
1047 size_needed = actual * sizeof (mach_port_t);
1048
1049 /* can unlock task now that we've got the thread refs */
1050 task_unlock(task);
1051
1052 if (actual == 0) {
1053 /* no threads, so return null pointer and deallocate memory */
1054
1055 *threads_out = NULL;
1056 *count = 0;
1057
1058 if (size != 0)
1059 kfree(addr, size);
1060 }
1061 else {
1062 /* if we allocated too much, must copy */
1063
1064 if (size_needed < size) {
1065 void *newaddr;
1066
1067 newaddr = kalloc(size_needed);
1068 if (newaddr == 0) {
1069 for (i = 0; i < actual; ++i)
1070 thread_deallocate(thread_list[i]);
1071 kfree(addr, size);
1072 return (KERN_RESOURCE_SHORTAGE);
1073 }
1074
1075 bcopy(addr, newaddr, size_needed);
1076 kfree(addr, size);
1077 thread_list = (thread_t *)newaddr;
1078 }
1079
1080 *threads_out = thread_list;
1081 *count = actual;
1082
1083 /* do the conversion that MIG should handle */
1084
1085 for (i = 0; i < actual; ++i)
1086 ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
1087 }
1088
1089 return (KERN_SUCCESS);
1090 }
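
/*
 * Illustrative sketch (hypothetical user-space usage, not part of this
 * file): the MIG-generated task_threads() hands back an out-of-line array of
 * thread send rights; the caller is expected to release both the rights and
 * the array, e.g.
 *
 *	thread_act_array_t threads;
 *	mach_msg_type_number_t count, i;
 *
 *	if (task_threads(task, &threads, &count) == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++)
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *		    count * sizeof(threads[0]));
 *	}
 */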
1091
1092 /*
1093 * task_suspend:
1094 *
1095 * Implement a user-level suspension on a task.
1096 *
1097 * Conditions:
1098 * The caller holds a reference to the task
1099 */
1100 kern_return_t
1101 task_suspend(
1102 register task_t task)
1103 {
1104 if (task == TASK_NULL || task == kernel_task)
1105 return (KERN_INVALID_ARGUMENT);
1106
1107 task_lock(task);
1108
1109 if (!task->active) {
1110 task_unlock(task);
1111
1112 return (KERN_FAILURE);
1113 }
1114
1115 if (task->user_stop_count++ > 0) {
1116 /*
1117 * If the stop count was positive, the task is
1118 * already stopped and we can exit.
1119 */
1120 task_unlock(task);
1121
1122 return (KERN_SUCCESS);
1123 }
1124
1125 /*
1126 * Put a kernel-level hold on the threads in the task (all
1127 * user-level task suspensions added together represent a
1128 * single kernel-level hold). We then wait for the threads
1129 * to stop executing user code.
1130 */
1131 task_hold_locked(task);
1132 task_wait_locked(task);
1133
1134 task_unlock(task);
1135
1136 return (KERN_SUCCESS);
1137 }
1138
1139 /*
1140 * task_resume:
1141 * Release a user-level suspension of a task (the complement of task_suspend).
1142 *
1143 * Conditions:
1144 * The caller holds a reference to the task
1145 */
1146 kern_return_t
1147 task_resume(
1148 register task_t task)
1149 {
1150 register boolean_t release = FALSE;
1151
1152 if (task == TASK_NULL || task == kernel_task)
1153 return (KERN_INVALID_ARGUMENT);
1154
1155 task_lock(task);
1156
1157 if (!task->active) {
1158 task_unlock(task);
1159
1160 return (KERN_FAILURE);
1161 }
1162
1163 if (task->user_stop_count > 0) {
1164 if (--task->user_stop_count == 0)
1165 release = TRUE;
1166 }
1167 else {
1168 task_unlock(task);
1169
1170 return (KERN_FAILURE);
1171 }
1172
1173 /*
1174 * Release the task if necessary.
1175 */
1176 if (release)
1177 task_release_locked(task);
1178
1179 task_unlock(task);
1180
1181 return (KERN_SUCCESS);
1182 }
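
/*
 * Note: task_suspend() and task_resume() nest through user_stop_count, so
 * suspensions must be balanced by an equal number of resumes, e.g.
 *
 *	task_suspend(task);	-- user_stop_count 0 -> 1, threads held
 *	task_suspend(task);	-- user_stop_count 1 -> 2, already stopped
 *	task_resume(task);	-- user_stop_count 2 -> 1, still stopped
 *	task_resume(task);	-- user_stop_count 1 -> 0, threads released
 */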
1183
1184 kern_return_t
1185 host_security_set_task_token(
1186 host_security_t host_security,
1187 task_t task,
1188 security_token_t sec_token,
1189 audit_token_t audit_token,
1190 host_priv_t host_priv)
1191 {
1192 ipc_port_t host_port;
1193 kern_return_t kr;
1194
1195 if (task == TASK_NULL)
1196 return(KERN_INVALID_ARGUMENT);
1197
1198 if (host_security == HOST_NULL)
1199 return(KERN_INVALID_SECURITY);
1200
1201 task_lock(task);
1202 task->sec_token = sec_token;
1203 task->audit_token = audit_token;
1204 task_unlock(task);
1205
1206 if (host_priv != HOST_PRIV_NULL) {
1207 kr = host_get_host_priv_port(host_priv, &host_port);
1208 } else {
1209 kr = host_get_host_port(host_priv_self(), &host_port);
1210 }
1211 assert(kr == KERN_SUCCESS);
1212 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
1213 return(kr);
1214 }
1215
1216 /*
1217 * Utility routine to set a ledger
1218 */
1219 kern_return_t
1220 task_set_ledger(
1221 task_t task,
1222 ledger_t wired,
1223 ledger_t paged)
1224 {
1225 if (task == TASK_NULL)
1226 return(KERN_INVALID_ARGUMENT);
1227
1228 task_lock(task);
1229 if (wired) {
1230 ipc_port_release_send(task->wired_ledger_port);
1231 task->wired_ledger_port = ledger_copy(wired);
1232 }
1233 if (paged) {
1234 ipc_port_release_send(task->paged_ledger_port);
1235 task->paged_ledger_port = ledger_copy(paged);
1236 }
1237 task_unlock(task);
1238
1239 return(KERN_SUCCESS);
1240 }
1241
1242 /*
1243 * This routine was added, pretty much exclusively, for registering the
1244 * RPC glue vector for in-kernel short circuited tasks. Rather than
1245 * removing it completely, I have only disabled that feature (which was
1246 * the only feature at the time). It just appears that we are going to
1247 * want to add some user data to tasks in the future (i.e. bsd info,
1248 * task names, etc...), so I left it in the formal task interface.
1249 */
1250 kern_return_t
1251 task_set_info(
1252 task_t task,
1253 task_flavor_t flavor,
1254 __unused task_info_t task_info_in, /* pointer to IN array */
1255 __unused mach_msg_type_number_t task_info_count)
1256 {
1257 if (task == TASK_NULL)
1258 return(KERN_INVALID_ARGUMENT);
1259
1260 switch (flavor) {
1261 default:
1262 return (KERN_INVALID_ARGUMENT);
1263 }
1264 return (KERN_SUCCESS);
1265 }
1266
1267 kern_return_t
1268 task_info(
1269 task_t task,
1270 task_flavor_t flavor,
1271 task_info_t task_info_out,
1272 mach_msg_type_number_t *task_info_count)
1273 {
1274 kern_return_t error = KERN_SUCCESS;
1275
1276 if (task == TASK_NULL)
1277 return (KERN_INVALID_ARGUMENT);
1278
1279 task_lock(task);
1280
1281 if ((task != current_task()) && (!task->active)) {
1282 task_unlock(task);
1283 return (KERN_INVALID_ARGUMENT);
1284 }
1285
1286 switch (flavor) {
1287
1288 case TASK_BASIC_INFO_32:
1289 case TASK_BASIC2_INFO_32:
1290 {
1291 task_basic_info_32_t basic_info;
1292 vm_map_t map;
1293 clock_sec_t secs;
1294 clock_usec_t usecs;
1295
1296 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
1297 error = KERN_INVALID_ARGUMENT;
1298 break;
1299 }
1300
1301 basic_info = (task_basic_info_32_t)task_info_out;
1302
1303 map = (task == kernel_task)? kernel_map: task->map;
1304 basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
1305 if (flavor == TASK_BASIC2_INFO_32) {
1306 /*
1307 * The "BASIC2" flavor gets the maximum resident
1308 * size instead of the current resident size...
1309 */
1310 basic_info->resident_size = pmap_resident_max(map->pmap);
1311 } else {
1312 basic_info->resident_size = pmap_resident_count(map->pmap);
1313 }
1314 basic_info->resident_size *= PAGE_SIZE;
1315
1316 basic_info->policy = ((task != kernel_task)?
1317 POLICY_TIMESHARE: POLICY_RR);
1318 basic_info->suspend_count = task->user_stop_count;
1319
1320 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1321 basic_info->user_time.seconds =
1322 (typeof(basic_info->user_time.seconds))secs;
1323 basic_info->user_time.microseconds = usecs;
1324
1325 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1326 basic_info->system_time.seconds =
1327 (typeof(basic_info->system_time.seconds))secs;
1328 basic_info->system_time.microseconds = usecs;
1329
1330 *task_info_count = TASK_BASIC_INFO_32_COUNT;
1331 break;
1332 }
1333
1334 case TASK_BASIC_INFO_64:
1335 {
1336 task_basic_info_64_t basic_info;
1337 vm_map_t map;
1338 clock_sec_t secs;
1339 clock_usec_t usecs;
1340
1341 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
1342 error = KERN_INVALID_ARGUMENT;
1343 break;
1344 }
1345
1346 basic_info = (task_basic_info_64_t)task_info_out;
1347
1348 map = (task == kernel_task)? kernel_map: task->map;
1349 basic_info->virtual_size = map->size;
1350 basic_info->resident_size =
1351 (mach_vm_size_t)(pmap_resident_count(map->pmap))
1352 * PAGE_SIZE_64;
1353
1354 basic_info->policy = ((task != kernel_task)?
1355 POLICY_TIMESHARE: POLICY_RR);
1356 basic_info->suspend_count = task->user_stop_count;
1357
1358 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1359 basic_info->user_time.seconds =
1360 (typeof(basic_info->user_time.seconds))secs;
1361 basic_info->user_time.microseconds = usecs;
1362
1363 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1364 basic_info->system_time.seconds =
1365 (typeof(basic_info->system_time.seconds))secs;
1366 basic_info->system_time.microseconds = usecs;
1367
1368 *task_info_count = TASK_BASIC_INFO_64_COUNT;
1369 break;
1370 }
1371
1372 case TASK_THREAD_TIMES_INFO:
1373 {
1374 register task_thread_times_info_t times_info;
1375 register thread_t thread;
1376
1377 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
1378 error = KERN_INVALID_ARGUMENT;
1379 break;
1380 }
1381
1382 times_info = (task_thread_times_info_t) task_info_out;
1383 times_info->user_time.seconds = 0;
1384 times_info->user_time.microseconds = 0;
1385 times_info->system_time.seconds = 0;
1386 times_info->system_time.microseconds = 0;
1387
1388
1389 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1390 time_value_t user_time, system_time;
1391
1392 thread_read_times(thread, &user_time, &system_time);
1393
1394 time_value_add(&times_info->user_time, &user_time);
1395 time_value_add(&times_info->system_time, &system_time);
1396 }
1397
1398
1399 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1400 break;
1401 }
1402
1403 case TASK_ABSOLUTETIME_INFO:
1404 {
1405 task_absolutetime_info_t info;
1406 register thread_t thread;
1407
1408 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
1409 error = KERN_INVALID_ARGUMENT;
1410 break;
1411 }
1412
1413 info = (task_absolutetime_info_t)task_info_out;
1414 info->threads_user = info->threads_system = 0;
1415
1416
1417 info->total_user = task->total_user_time;
1418 info->total_system = task->total_system_time;
1419
1420 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1421 uint64_t tval;
1422
1423 tval = timer_grab(&thread->user_timer);
1424 info->threads_user += tval;
1425 info->total_user += tval;
1426
1427 tval = timer_grab(&thread->system_timer);
1428 info->threads_system += tval;
1429 info->total_system += tval;
1430 }
1431
1432
1433 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
1434 break;
1435 }
1436
1437 case TASK_DYLD_INFO:
1438 {
1439 task_dyld_info_t info;
1440
1441 if (*task_info_count < TASK_DYLD_INFO_COUNT) {
1442 error = KERN_INVALID_ARGUMENT;
1443 break;
1444 }
1445 info = (task_dyld_info_t)task_info_out;
1446 info->all_image_info_addr = task->all_image_info_addr;
1447 info->all_image_info_size = task->all_image_info_size;
1448 *task_info_count = TASK_DYLD_INFO_COUNT;
1449 break;
1450 }
1451
1452 /* OBSOLETE */
1453 case TASK_SCHED_FIFO_INFO:
1454 {
1455
1456 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
1457 error = KERN_INVALID_ARGUMENT;
1458 break;
1459 }
1460
1461 error = KERN_INVALID_POLICY;
break;
1462 }
1463
1464 /* OBSOLETE */
1465 case TASK_SCHED_RR_INFO:
1466 {
1467 register policy_rr_base_t rr_base;
1468
1469 if (*task_info_count < POLICY_RR_BASE_COUNT) {
1470 error = KERN_INVALID_ARGUMENT;
1471 break;
1472 }
1473
1474 rr_base = (policy_rr_base_t) task_info_out;
1475
1476 if (task != kernel_task) {
1477 error = KERN_INVALID_POLICY;
1478 break;
1479 }
1480
1481 rr_base->base_priority = task->priority;
1482
1483 rr_base->quantum = std_quantum_us / 1000;
1484
1485 *task_info_count = POLICY_RR_BASE_COUNT;
1486 break;
1487 }
1488
1489 /* OBSOLETE */
1490 case TASK_SCHED_TIMESHARE_INFO:
1491 {
1492 register policy_timeshare_base_t ts_base;
1493
1494 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
1495 error = KERN_INVALID_ARGUMENT;
1496 break;
1497 }
1498
1499 ts_base = (policy_timeshare_base_t) task_info_out;
1500
1501 if (task == kernel_task) {
1502 error = KERN_INVALID_POLICY;
1503 break;
1504 }
1505
1506 ts_base->base_priority = task->priority;
1507
1508 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1509 break;
1510 }
1511
1512 case TASK_SECURITY_TOKEN:
1513 {
1514 register security_token_t *sec_token_p;
1515
1516 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1517 error = KERN_INVALID_ARGUMENT;
1518 break;
1519 }
1520
1521 sec_token_p = (security_token_t *) task_info_out;
1522
1523 *sec_token_p = task->sec_token;
1524
1525 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
1526 break;
1527 }
1528
1529 case TASK_AUDIT_TOKEN:
1530 {
1531 register audit_token_t *audit_token_p;
1532
1533 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
1534 error = KERN_INVALID_ARGUMENT;
1535 break;
1536 }
1537
1538 audit_token_p = (audit_token_t *) task_info_out;
1539
1540 *audit_token_p = task->audit_token;
1541
1542 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
1543 break;
1544 }
1545
1546 case TASK_SCHED_INFO:
1547 error = KERN_INVALID_ARGUMENT;
break;
1548 
1549 case TASK_EVENTS_INFO:
1550 {
1551 register task_events_info_t events_info;
1552 register thread_t thread;
1553
1554 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
1555 error = KERN_INVALID_ARGUMENT;
1556 break;
1557 }
1558
1559 events_info = (task_events_info_t) task_info_out;
1560
1561
1562 events_info->faults = task->faults;
1563 events_info->pageins = task->pageins;
1564 events_info->cow_faults = task->cow_faults;
1565 events_info->messages_sent = task->messages_sent;
1566 events_info->messages_received = task->messages_received;
1567 events_info->syscalls_mach = task->syscalls_mach;
1568 events_info->syscalls_unix = task->syscalls_unix;
1569
1570 events_info->csw = task->c_switch;
1571
1572 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1573 events_info->csw += thread->c_switch;
1574 }
1575
1576
1577 *task_info_count = TASK_EVENTS_INFO_COUNT;
1578 break;
1579 }
1580 case TASK_AFFINITY_TAG_INFO:
1581 {
1582 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
1583 error = KERN_INVALID_ARGUMENT;
1584 break;
1585 }
1586
1587 error = task_affinity_info(task, task_info_out, task_info_count);
break;
1588 }
1589
1590 default:
1591 error = KERN_INVALID_ARGUMENT;
1592 }
1593
1594 task_unlock(task);
1595 return (error);
1596 }
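
/*
 * Illustrative sketch (hypothetical user-space usage, not part of this
 * file): a typical query of the 64-bit basic-info flavor looks like
 *
 *	task_basic_info_64_data_t info;
 *	mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
 *
 *	kr = task_info(mach_task_self(), TASK_BASIC_INFO_64,
 *	    (task_info_t)&info, &count);
 *
 * Note that count is in/out: callers pass the capacity of their buffer and
 * task_info() writes back the number of integers actually filled in.
 */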
1597
1598 void
1599 task_vtimer_set(
1600 task_t task,
1601 integer_t which)
1602 {
1603 thread_t thread;
1604
1605 /* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */
1606
1607 task_lock(task);
1608
1609 task->vtimers |= which;
1610
1611 switch (which) {
1612
1613 case TASK_VTIMER_USER:
1614 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1615 thread->vtimer_user_save = timer_grab(&thread->user_timer);
1616 }
1617 break;
1618
1619 case TASK_VTIMER_PROF:
1620 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1621 thread->vtimer_prof_save = timer_grab(&thread->user_timer);
1622 thread->vtimer_prof_save += timer_grab(&thread->system_timer);
1623 }
1624 break;
1625
1626 case TASK_VTIMER_RLIM:
1627 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1628 thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
1629 thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
1630 }
1631 break;
1632 }
1633
1634 task_unlock(task);
1635 }
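
/*
 * Note: TASK_VTIMER_USER snapshots only the per-thread user timer, while
 * TASK_VTIMER_PROF and TASK_VTIMER_RLIM snapshot user plus system time;
 * these roughly back the BSD interval-timer and CPU-limit machinery.
 */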
1636
1637 void
1638 task_vtimer_clear(
1639 task_t task,
1640 integer_t which)
1641 {
1642 assert(task == current_task());
1643
1644 task_lock(task);
1645
1646 task->vtimers &= ~which;
1647
1648 task_unlock(task);
1649 }
1650
1651 void
1652 task_vtimer_update(
1653 __unused
1654 task_t task,
1655 integer_t which,
1656 uint32_t *microsecs)
1657 {
1658 thread_t thread = current_thread();
1659 uint32_t tdelt;
1660 clock_sec_t secs;
1661 uint64_t tsum;
1662
1663 assert(task == current_task());
1664
1665 assert(task->vtimers & which);
1666
1667 secs = tdelt = 0;
1668
1669 switch (which) {
1670
1671 case TASK_VTIMER_USER:
1672 tdelt = (uint32_t)timer_delta(&thread->user_timer,
1673 &thread->vtimer_user_save);
1674 absolutetime_to_microtime(tdelt, &secs, microsecs);
1675 break;
1676
1677 case TASK_VTIMER_PROF:
1678 tsum = timer_grab(&thread->user_timer);
1679 tsum += timer_grab(&thread->system_timer);
1680 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
1681 absolutetime_to_microtime(tdelt, &secs, microsecs);
1682 /* if the time delta is smaller than a usec, ignore */
1683 if (*microsecs != 0)
1684 thread->vtimer_prof_save = tsum;
1685 break;
1686
1687 case TASK_VTIMER_RLIM:
1688 tsum = timer_grab(&thread->user_timer);
1689 tsum += timer_grab(&thread->system_timer);
1690 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
1691 thread->vtimer_rlim_save = tsum;
1692 absolutetime_to_microtime(tdelt, &secs, microsecs);
1693 break;
1694 }
1695
1696 }
1697
1698 /*
1699 * task_assign:
1700 *
1701 * Change the assigned processor set for the task
1702 */
1703 kern_return_t
1704 task_assign(
1705 __unused task_t task,
1706 __unused processor_set_t new_pset,
1707 __unused boolean_t assign_threads)
1708 {
1709 return(KERN_FAILURE);
1710 }
1711
1712 /*
1713 * task_assign_default:
1714 *
1715 * Version of task_assign to assign to default processor set.
1716 */
1717 kern_return_t
1718 task_assign_default(
1719 task_t task,
1720 boolean_t assign_threads)
1721 {
1722 return (task_assign(task, &pset0, assign_threads));
1723 }
1724
1725 /*
1726 * task_get_assignment
1727 *
1728 * Return name of processor set that task is assigned to.
1729 */
1730 kern_return_t
1731 task_get_assignment(
1732 task_t task,
1733 processor_set_t *pset)
1734 {
1735 if (!task->active)
1736 return(KERN_FAILURE);
1737
1738 *pset = &pset0;
1739
1740 return (KERN_SUCCESS);
1741 }
1742
1743
1744 /*
1745 * task_policy
1746 *
1747 * Set scheduling policy and parameters, both base and limit, for
1748 * the given task. Policy must be a policy which is enabled for the
1749 * processor set. Change contained threads if requested.
1750 */
1751 kern_return_t
1752 task_policy(
1753 __unused task_t task,
1754 __unused policy_t policy_id,
1755 __unused policy_base_t base,
1756 __unused mach_msg_type_number_t count,
1757 __unused boolean_t set_limit,
1758 __unused boolean_t change)
1759 {
1760 return(KERN_FAILURE);
1761 }
1762
1763 /*
1764 * task_set_policy
1765 *
1766 * Set scheduling policy and parameters, both base and limit, for
1767 * the given task. Policy can be any policy implemented by the
1768 * processor set, whether enabled or not. Change contained threads
1769 * if requested.
1770 */
1771 kern_return_t
1772 task_set_policy(
1773 __unused task_t task,
1774 __unused processor_set_t pset,
1775 __unused policy_t policy_id,
1776 __unused policy_base_t base,
1777 __unused mach_msg_type_number_t base_count,
1778 __unused policy_limit_t limit,
1779 __unused mach_msg_type_number_t limit_count,
1780 __unused boolean_t change)
1781 {
1782 return(KERN_FAILURE);
1783 }
1784
1785 #if FAST_TAS
1786 kern_return_t
1787 task_set_ras_pc(
1788 task_t task,
1789 vm_offset_t pc,
1790 vm_offset_t endpc)
1791 {
1792 extern int fast_tas_debug;
1793
1794 if (fast_tas_debug) {
1795 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
1796 task, pc, endpc);
1797 }
1798 task_lock(task);
1799 task->fast_tas_base = pc;
1800 task->fast_tas_end = endpc;
1801 task_unlock(task);
1802 return KERN_SUCCESS;
1803 }
1804 #else /* FAST_TAS */
1805 kern_return_t
1806 task_set_ras_pc(
1807 __unused task_t task,
1808 __unused vm_offset_t pc,
1809 __unused vm_offset_t endpc)
1810 {
1811 return KERN_FAILURE;
1812 }
1813 #endif /* FAST_TAS */
1814
1815 void
1816 task_synchronizer_destroy_all(task_t task)
1817 {
1818 semaphore_t semaphore;
1819 lock_set_t lock_set;
1820
1821 /*
1822 * Destroy owned semaphores
1823 */
1824
1825 while (!queue_empty(&task->semaphore_list)) {
1826 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
1827 (void) semaphore_destroy(task, semaphore);
1828 }
1829
1830 /*
1831 * Destroy owned lock sets
1832 */
1833
1834 while (!queue_empty(&task->lock_set_list)) {
1835 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
1836 (void) lock_set_destroy(task, lock_set);
1837 }
1838 }
1839
1840 /*
1841 * Install default (machine-dependent) initial thread state
1842 * on the task. Subsequent thread creation will have this initial
1843 * state set on the thread by machine_thread_inherit_taskwide().
1844 * Flavors and structures are exactly the same as those to thread_set_state()
1845 */
1846 kern_return_t
1847 task_set_state(
1848 task_t task,
1849 int flavor,
1850 thread_state_t state,
1851 mach_msg_type_number_t state_count)
1852 {
1853 kern_return_t ret;
1854
1855 if (task == TASK_NULL) {
1856 return (KERN_INVALID_ARGUMENT);
1857 }
1858
1859 task_lock(task);
1860
1861 if (!task->active) {
1862 task_unlock(task);
1863 return (KERN_FAILURE);
1864 }
1865
1866 ret = machine_task_set_state(task, flavor, state, state_count);
1867
1868 task_unlock(task);
1869 return ret;
1870 }
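
/*
 * Illustrative sketch (hypothetical usage, not part of this file): a
 * debugger-style caller could seed every future thread of a task with, for
 * example, an x86 debug-register state, assuming the machine layer accepts
 * that flavor:
 *
 *	x86_debug_state64_t dbg;
 *
 *	bzero(&dbg, sizeof(dbg));
 *	dbg.dr7 = ...;		-- hypothetical breakpoint setup
 *	kr = task_set_state(task, x86_DEBUG_STATE64,
 *	    (thread_state_t)&dbg, x86_DEBUG_STATE64_COUNT);
 */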
1871
1872 /*
1873 * Examine the default (machine-dependent) initial thread state
1874 * on the task, as set by task_set_state(). Flavors and structures
1875 * are exactly the same as those passed to thread_get_state().
1876 */
1877 kern_return_t
1878 task_get_state(
1879 task_t task,
1880 int flavor,
1881 thread_state_t state,
1882 mach_msg_type_number_t *state_count)
1883 {
1884 kern_return_t ret;
1885
1886 if (task == TASK_NULL) {
1887 return (KERN_INVALID_ARGUMENT);
1888 }
1889
1890 task_lock(task);
1891
1892 if (!task->active) {
1893 task_unlock(task);
1894 return (KERN_FAILURE);
1895 }
1896
1897 ret = machine_task_get_state(task, flavor, state, state_count);
1898
1899 task_unlock(task);
1900 return ret;
1901 }
1902
1903
1904 /*
1905 * We need to export some functions to other components that
1906 * are currently implemented in macros within the osfmk
1907 * component. Just export them as functions of the same name.
1908 */
1909 boolean_t is_kerneltask(task_t t)
1910 {
1911 if (t == kernel_task)
1912 return (TRUE);
1913
1914 return (FALSE);
1915 }
1916
1917 int
1918 check_for_tasksuspend(task_t task)
1919 {
1920
1921 if (task == TASK_NULL)
1922 return (0);
1923
1924 return (task->suspend_count > 0);
1925 }
1926
1927 #undef current_task
1928 task_t current_task(void);
1929 task_t current_task(void)
1930 {
1931 return (current_task_fast());
1932 }
1933
1934 #undef task_reference
1935 void task_reference(task_t task);
1936 void
1937 task_reference(
1938 task_t task)
1939 {
1940 if (task != TASK_NULL)
1941 task_reference_internal(task);
1942 }
1943
1944 #if CONFIG_MACF_MACH
1945 /*
1946 * Protect 2 task labels against modification by adding a reference on
1947 * both label handles. The locks do not actually have to be held while
1948 * using the labels as only labels with one reference can be modified
1949 * in place.
1950 */
1951
1952 void
1953 tasklabel_lock2(
1954 task_t a,
1955 task_t b)
1956 {
1957 labelh_reference(a->label);
1958 labelh_reference(b->label);
1959 }
1960
1961 void
1962 tasklabel_unlock2(
1963 task_t a,
1964 task_t b)
1965 {
1966 labelh_release(a->label);
1967 labelh_release(b->label);
1968 }
1969
1970 void
1971 mac_task_label_update_internal(
1972 struct label *pl,
1973 struct task *task)
1974 {
1975
1976 tasklabel_lock(task);
1977 task->label = labelh_modify(task->label);
1978 mac_task_label_update(pl, &task->maclabel);
1979 tasklabel_unlock(task);
1980 ip_lock(task->itk_self);
1981 mac_port_label_update_cred(pl, &task->itk_self->ip_label);
1982 ip_unlock(task->itk_self);
1983 }
1984
1985 void
1986 mac_task_label_modify(
1987 struct task *task,
1988 void *arg,
1989 void (*f) (struct label *l, void *arg))
1990 {
1991
1992 tasklabel_lock(task);
1993 task->label = labelh_modify(task->label);
1994 (*f)(&task->maclabel, arg);
1995 tasklabel_unlock(task);
1996 }
1997
1998 struct label *
1999 mac_task_get_label(struct task *task)
2000 {
2001 return (&task->maclabel);
2002 }
2003 #endif