/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach_kdb.h>
#include <fast_tas.h>
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#include <kern/affinity.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#if MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#ifdef __ppc__
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#endif

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>
#include <mach/security_server.h>

#include <vm/vm_shared_region.h>

#if CONFIG_MACF_MACH
#include <security/mac_mach_internal.h>
#endif

task_t		kernel_task;
zone_t		task_zone;

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_free(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

void
task_backing_store_privileged(
	task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}


void
task_set_64bit(
	task_t		task,
	boolean_t	is64bit)
{
#ifdef __i386__
	thread_t	thread;
#endif /* __i386__ */
	int		vm_flags = 0;

	if (is64bit) {
		if (task_has_64BitAddr(task))
			return;

		task_set_64BitAddr(task);
	} else {
		if ( !task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* remove regular VM map entries & pmap mappings */
		(void) vm_map_remove(task->map,
				     (vm_map_offset_t) VM_MAX_ADDRESS,
				     MACH_VM_MAX_ADDRESS,
				     0);
#ifdef __ppc__
		/* LP64todo - make this clean */
		/*
		 * PPC51: ppc64 is limited to 51-bit addresses.
		 * Memory mapped above that limit is handled specially
		 * at the pmap level, so let pmap clean the commpage mapping
		 * explicitly...
		 */
		pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
		/* ... and avoid regular pmap cleanup */
		vm_flags |= VM_MAP_REMOVE_NO_PMAP_CLEANUP;
#endif /* __ppc__ */
		/* remove the higher VM mappings */
		(void) vm_map_remove(task->map,
				     MACH_VM_MAX_ADDRESS,
				     0xFFFFFFFFFFFFF000ULL,
				     vm_flags);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#ifdef __i386__
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		machine_thread_switch_addrmode(thread);
	}
#endif /* __i386__ */
}

void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t	map_base,
	__unused vm_size_t	map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return(KERN_FAILURE);
}

kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	__unused security_token_t	sec_token,
	__unused audit_token_t		audit_token,
	__unused host_priv_t		host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	/*
	 * No longer supported.
	 */
	return(KERN_FAILURE);
}

kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	is_64bit,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	vm_shared_region_t	shared_region;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0, is_64bit),
					(vm_map_offset_t)(VM_MIN_ADDRESS),
					(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	/* Inherit memlock limit from parent */
	if (parent_task)
		vm_map_set_user_wire_limit(new_task->map, parent_task->map->user_wire_limit);

	mutex_init(&new_task->lock, 0);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = NULL;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */

#ifdef MACH_BSD
	new_task->bsd_info = NULL;
#endif /* MACH_BSD */

#ifdef __i386__
	new_task->i386_ldt = 0;
#endif

#ifdef __ppc__
	if (BootProcInfo.pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#endif

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if CONFIG_MACF_MACH
	/*mutex_init(&new_task->labellock, ETAP_NO_TRACE);*/
	new_task->label = labelh_new(1);
	mac_task_label_init(&new_task->maclabel);
#endif

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

	if (parent_task != TASK_NULL) {
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		/* inherit the parent's shared region */
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		if (task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);

#ifdef __i386__
		if (inherit_memory && parent_task->i386_ldt)
			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
		if (inherit_memory && parent_task->affinity_space)
			task_affinity_create(parent_task, new_task);
	}
	else {
		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	mutex_lock(&tasks_threads_lock);
	queue_enter(&tasks, new_task, task_t, tasks);
	tasks_count++;
	mutex_unlock(&tasks_threads_lock);

	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}

/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	if (task == TASK_NULL)
		return;

	if (task_deallocate_internal(task) > 0)
		return;

	ipc_task_terminate(task);

	if (task->affinity_space)
		task_affinity_deallocate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

#if CONFIG_MACF_MACH
	labelh_release(task->label);
#endif
	zfree(task_zone, task);
}
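
/*
 * Editor's note: a minimal sketch (not part of the original source) of the
 * reference discipline that task_reference()/task_deallocate() assume.  Any
 * code that stashes a task_t beyond the call in which it received the
 * pointer must take its own reference first; the example names below are
 * hypothetical.
 */
#if 0	/* illustrative only */
static task_t example_cached_task;

static void
example_cache_task(task_t task)
{
	task_reference(task);			/* keep the task alive while cached */
	example_cached_task = task;
}

static void
example_drop_cached_task(void)
{
	task_deallocate(example_cached_task);	/* last release frees the task */
	example_cached_task = TASK_NULL;
}
#endif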

/*
 *	task_name_deallocate:
 *
 *	Drop a reference on a task name.
 */
void
task_name_deallocate(
	task_name_t		task_name)
{
	return(task_deallocate((task_t)task_name));
}


/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (task->bsd_info)
		return (KERN_FAILURE);

	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t		task)
{
	thread_t	thread, self;
	task_t		self_task;
	boolean_t	interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
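	/*
	 *	(When the two tasks are distinct, their locks are taken in
	 *	ascending address order below, so two threads terminating
	 *	each other cannot deadlock on the pair of locks.)
	 */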
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Just return an error.  If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 *	Make sure the current thread does not get aborted out of
	 *	the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

#ifdef __ppc__
	/* LP64todo - make this clean */
	/*
	 * PPC51: ppc64 is limited to 51-bit addresses.
	 */
	pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif /* __ppc__ */

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 *	If the current thread is a member of the task
	 *	being terminated, then the last reference to
	 *	the task will not be dropped until the thread
	 *	is finally reaped.  To avoid incurring the
	 *	expense of removing the address space regions
	 *	at reap time, we do it explicitly here.
	 */
	vm_map_remove(task->map,
		      task->map->min_offset,
		      task->map->max_offset,
		      VM_MAP_NO_FLAGS);

	/* release our shared region */
	vm_shared_region_set(task, NULL);

	mutex_lock(&tasks_threads_lock);
	queue_remove(&tasks, task, task_t, tasks);
	tasks_count--;
	mutex_unlock(&tasks_threads_lock);

	/*
	 *	We no longer need to guard against being aborted, so restore
	 *	the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);		// notify the perfmon facility
#endif

	/*
	 *	Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}

/*
 *	task_halt:
 *
 *	Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task, terminate all other threads in the task and
 *	wait for them to terminate, clean up the portspace, and when
 *	all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	return (KERN_SUCCESS);
}

/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the threads and hold them.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_hold(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over-resuming and setting
 *	it running out from under the kernel.
 *
 *	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_wait_locked:
 *
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_t	thread, self;

	assert(task->active);
	assert(task->suspend_count > 0);

	self = current_thread();

	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread != self)
			thread_wait(thread);
	}
}

/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 *	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_release(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 *	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*thread_list;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that MIG should handle */

		for (i = 0; i < actual; ++i)
			((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
	}

	return (KERN_SUCCESS);
}
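
/*
 * Editor's note: a hedged sketch of how a user-space client consumes the
 * MIG-exposed form of task_threads() above.  The caller receives one send
 * right per thread plus an out-of-line array, and must release both.
 */
#if 0	/* illustrative user-space code, not part of the original source */
#include <mach/mach.h>

static kern_return_t
example_count_threads(task_t task, mach_msg_type_number_t *nthreads)
{
	thread_act_array_t	threads;
	mach_msg_type_number_t	i, count;
	kern_return_t		kr;

	kr = task_threads(task, &threads, &count);
	if (kr != KERN_SUCCESS)
		return (kr);

	for (i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), threads[i]);	/* drop each send right */
	vm_deallocate(mach_task_self(), (vm_address_t)threads,
		      count * sizeof (*threads));			/* drop the OOL array */

	*nthreads = count;
	return (KERN_SUCCESS);
}
#endif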

/*
 *	task_suspend:
 *
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 *	Put a kernel-level hold on the threads in the task (all
	 *	user-level task suspensions added together represent a
	 *	single kernel-level hold).  We then wait for the threads
	 *	to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_resume:
 *		Release a user-level suspension on a task; the underlying
 *		kernel hold is released when the suspension count drops
 *		to zero.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(
	register task_t		task)
{
	register boolean_t	release = FALSE;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count > 0) {
		if (--task->user_stop_count == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
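
/*
 * Editor's note: a hedged user-space sketch of the user_stop_count
 * semantics implemented above -- user-level suspensions nest, so every
 * task_suspend() must be balanced by a task_resume() before the target
 * runs again.
 */
#if 0	/* illustrative user-space code, not part of the original source */
static void
example_nested_suspend(task_t task)
{
	task_suspend(task);	/* user_stop_count 0 -> 1: threads held and waited for */
	task_suspend(task);	/* user_stop_count 1 -> 2: already stopped, returns at once */

	task_resume(task);	/* 2 -> 1: task stays suspended */
	task_resume(task);	/* 1 -> 0: kernel hold released, task runs again */
}
#endif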

kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	host_port;
	kern_return_t	kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}

/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (e.g. bsd info,
 * task names, etc.), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	__unused task_info_t		task_info_in,		/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	case TASK_BASIC2_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = CAST_DOWN(vm_offset_t, map->size);
		if (flavor == TASK_BASIC2_INFO_32) {
			/*
			 * The "BASIC2" flavor gets the maximum resident
			 * size instead of the current resident size...
			 */
			basic_info->resident_size = pmap_resident_max(map->pmap);
		} else {
			basic_info->resident_size = pmap_resident_count(map->pmap);
		}
		basic_info->resident_size *= PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time,
					  (unsigned *)&basic_info->user_time.seconds,
					  (unsigned *)&basic_info->user_time.microseconds);
		absolutetime_to_microtime(task->total_system_time,
					  (unsigned *)&basic_info->system_time.seconds,
					  (unsigned *)&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap))
			* PAGE_SIZE_64;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time,
					  (unsigned *)&basic_info->user_time.seconds,
					  (unsigned *)&basic_info->user_time.microseconds);
		absolutetime_to_microtime(task->total_system_time,
					  (unsigned *)&basic_info->system_time.seconds,
					  (unsigned *)&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t			thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t		thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		task_lock(task);

		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		task_unlock(task);

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{

		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = std_quantum_us / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return (KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;
		register thread_t		thread;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);

		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;

		events_info->csw = task->c_switch;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			events_info->csw += thread->c_switch;
		}

		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}
	case TASK_AFFINITY_TAG_INFO:
	{
		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return task_affinity_info(task, task_info_out, task_info_count);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
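
/*
 * Editor's note: a hedged user-space sketch of querying the
 * TASK_BASIC_INFO_64 flavor handled above.
 */
#if 0	/* illustrative user-space code, not part of the original source */
#include <mach/mach.h>
#include <stdio.h>

static void
example_print_basic_info(task_t task)
{
	struct task_basic_info_64	info;
	mach_msg_type_number_t		count = TASK_BASIC_INFO_64_COUNT;

	if (task_info(task, TASK_BASIC_INFO_64,
		      (task_info_t)&info, &count) == KERN_SUCCESS) {
		printf("virtual %llu bytes, resident %llu bytes, suspend count %d\n",
		       (unsigned long long)info.virtual_size,
		       (unsigned long long)info.resident_size,
		       info.suspend_count);
	}
}
#endif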

void
task_vtimer_set(
	task_t		task,
	integer_t	which)
{
	thread_t	thread;

	/* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

	task_lock(task);

	task->vtimers |= which;

	switch (which) {

	case TASK_VTIMER_USER:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_user_save = timer_grab(&thread->user_timer);
		}
		break;

	case TASK_VTIMER_PROF:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
		}
		break;

	case TASK_VTIMER_RLIM:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
		}
		break;
	}

	task_unlock(task);
}

void
task_vtimer_clear(
	task_t		task,
	integer_t	which)
{
	assert(task == current_task());

	task_lock(task);

	task->vtimers &= ~which;

	task_unlock(task);
}

void
task_vtimer_update(
__unused
	task_t		task,
	integer_t	which,
	uint32_t	*microsecs)
{
	thread_t	thread = current_thread();
	uint32_t	tdelt, secs;
	uint64_t	tsum;

	assert(task == current_task());

	assert(task->vtimers & which);

	tdelt = secs = 0;

	switch (which) {

	case TASK_VTIMER_USER:
		tdelt = timer_delta(&thread->user_timer,
					&thread->vtimer_user_save);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = tsum - thread->vtimer_prof_save;
		thread->vtimer_prof_save = tsum;
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = tsum - thread->vtimer_rlim_save;
		thread->vtimer_rlim_save = tsum;
		break;
	}

	absolutetime_to_microtime(tdelt, &secs, microsecs);
}
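
/*
 * Editor's note: the vtimer hooks above are intended for kernel clients
 * implementing interval timers (for example the BSD setitimer() path).
 * The expected pattern, roughly: task_vtimer_set() arms a timer class and
 * snapshots the per-thread timers; task_vtimer_update(), called on the
 * current thread, then reports the microseconds accumulated in that class
 * since the last snapshot.
 */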

/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	__unused task_t			task,
	__unused processor_set_t	new_pset,
	__unused boolean_t		assign_threads)
{
	return(KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &pset0, assign_threads));
}

/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = &pset0;

	return (KERN_SUCCESS);
}


/*
 *	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
task_policy(
	__unused task_t			task,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		set_limit,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.  Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	__unused task_t			task,
	__unused processor_set_t	pset,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	base_count,
	__unused policy_limit_t		limit,
	__unused mach_msg_type_number_t	limit_count,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}

#if FAST_TAS
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
kern_return_t
task_set_ras_pc(
	__unused task_t		task,
	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */

void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t	task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}

#if CONFIG_MACF_MACH
/*
 * Protect 2 task labels against modification by adding a reference on
 * both label handles.  The locks do not actually have to be held while
 * using the labels as only labels with one reference can be modified
 * in place.
 */

void
tasklabel_lock2(
	task_t a,
	task_t b)
{
	labelh_reference(a->label);
	labelh_reference(b->label);
}

void
tasklabel_unlock2(
	task_t a,
	task_t b)
{
	labelh_release(a->label);
	labelh_release(b->label);
}

void
mac_task_label_update_internal(
	struct label	*pl,
	struct task	*task)
{

	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	mac_task_label_update(pl, &task->maclabel);
	tasklabel_unlock(task);
	ip_lock(task->itk_self);
	mac_port_label_update_cred(pl, &task->itk_self->ip_label);
	ip_unlock(task->itk_self);
}

void
mac_task_label_modify(
	struct task	*task,
	void		*arg,
	void (*f)	(struct label *l, void *arg))
{

	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	(*f)(&task->maclabel, arg);
	tasklabel_unlock(task);
}

struct label *
mac_task_get_label(struct task *task)
{
	return (&task->maclabel);
}
#endif