/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
#include <mach_kdb.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <fast_tas.h>
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>	/* for vm_map_remove_commpage */

#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#ifdef __ppc__
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#endif

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>

#include <vm/task_working_set.h>
#include <vm/vm_shared_memory_server.h>

task_t		kernel_task;
zone_t		task_zone;

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_free(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

void
task_backing_store_privileged(
			task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}

void
task_working_set_disable(task_t task)
{
	struct tws_hash *ws;

	task_lock(task);
	ws = task->dynamic_working_set;
	task->dynamic_working_set = NULL;
	task_unlock(task);
	if (ws) {
		tws_hash_ws_flush(ws);
		tws_hash_destroy(ws);
	}
}

void
task_set_64bit(
		task_t		task,
		boolean_t	is64bit)
{
	thread_t	thread;

	if (is64bit) {
		if (task_has_64BitAddr(task))
			return;

		/* LP64todo - no task working set for 64-bit */
		task_set_64BitAddr(task);
		task_working_set_disable(task);
	} else {
		if (!task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* LP64todo - make this clean */
		vm_map_remove_commpage(task->map);
		pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
		(void) vm_map_remove(task->map,
				     (vm_map_offset_t) VM_MAX_ADDRESS,
				     MACH_VM_MAX_ADDRESS,
				     VM_MAP_NO_FLAGS);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy.  Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#ifdef __i386__
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		machine_thread_switch_addrmode(thread, !is64bit);
	}
#endif
}

void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

#if	MACH_HOST

#if 0
static void
task_freeze(
	task_t task)
{
	task_lock(task);
	/*
	 *	If may_assign is false, task is already being assigned,
	 *	wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		wait_result_t res;

		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					 &task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
	task_unlock(task);
	return;
}
#else
#define task_freeze(task)	assert(task->processor_set == &default_pset)
#endif

#if 0
static void
task_unfreeze(
	task_t task)
{
	task_lock(task);
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
	task_unlock(task);
	return;
}
#else
#define task_unfreeze(task)	assert(task->processor_set == &default_pset)
#endif

#endif	/* MACH_HOST */

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t	map_base,
	__unused vm_size_t	map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	return task_create_internal(
			parent_task, inherit_memory, task_has_64BitAddr(parent_task), child_task);
}
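
/*
 * Illustrative sketch (not part of the build): a caller that already
 * holds a reference on parent_task could create a memory-inheriting
 * child this way.  The ledger-port arguments are unused above, so
 * NULL/0 suffice:
 *
 *	task_t		child;
 *	kern_return_t	kr;
 *
 *	kr = task_create(parent_task, NULL, 0, TRUE, &child);
 *	if (kr == KERN_SUCCESS) {
 *		... use child ...
 *		task_deallocate(child);	 (drop the creation reference)
 *	}
 */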

kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	security_token_t		sec_token,
	audit_token_t			audit_token,
	host_priv_t			host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_internal(
			parent_task, inherit_memory, task_has_64BitAddr(parent_task), child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      audit_token,
					      host_priv);

	return(result);
}

kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	is_64bit,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	processor_set_t		pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0, is_64bit),
					(vm_map_offset_t)(VM_MIN_ADDRESS),
					(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, 0);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->csw = 0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef MACH_BSD
	new_task->bsd_info = 0;
#endif /* MACH_BSD */

#ifdef __i386__
	new_task->i386_ldt = 0;
#endif

#ifdef __ppc__
	if (BootProcInfo.pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#endif

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif /* MACH_HOST */

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif /* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region = parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		if (task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);

#ifdef __i386__
		if (inherit_memory && parent_task->i386_ldt)
			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
	}
	else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif /* MACH_HOST */

	if (vm_backing_store_low && parent_task != TASK_NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}

/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	processor_set_t		pset;

	if (task == TASK_NULL)
		return;

	if (task_deallocate_internal(task) > 0)
		return;

	pset = task->processor_set;
	pset_deallocate(pset);

	if (task->dynamic_working_set)
		tws_hash_destroy(task->dynamic_working_set);

	ipc_task_terminate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	task_prof_deallocate(task);
	zfree(task_zone, task);
}
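
/*
 * Illustrative sketch (not part of the build): every reference taken
 * with task_reference() must be balanced by a task_deallocate(); the
 * final release is what actually tears the task structure down:
 *
 *	task_reference(task);		(pin the task across a blocking op)
 *	... operate on the task ...
 *	task_deallocate(task);		(frees the task if this was the last ref)
 */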

/*
 *	task_name_deallocate:
 *
 *	Drop a reference on a task name.
 */
void
task_name_deallocate(
	task_name_t		task_name)
{
	task_deallocate((task_t)task_name);
}

/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (task->bsd_info)
		return (KERN_FAILURE);

	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t			task)
{
	processor_set_t		pset;
	thread_t		thread, self;
	task_t			self_task;
	boolean_t		interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.  The two locks
	 *	are taken in address order to avoid deadlocking against
	 *	another thread concurrently terminating us.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Just return an error.  If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 *	Make sure the current thread does not get aborted out of
	 *	the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

	/* LP64todo - make this clean */
	vm_map_remove_commpage(task->map);
	pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 *	If the current thread is a member of the task
	 *	being terminated, then the last reference to
	 *	the task will not be dropped until the thread
	 *	is finally reaped.  To avoid incurring the
	 *	expense of removing the address space regions
	 *	at reap time, we do it explicitly here.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);

	/*
	 * Flush working set here to avoid I/O in reaper thread
	 */
	if (task->dynamic_working_set)
		tws_hash_ws_flush(task->dynamic_working_set);

	pset = task->processor_set;
	pset_lock(pset);
	pset_remove_task(pset, task);
	pset_unlock(pset);

	/*
	 *	We no longer need to guard against being aborted, so restore
	 *	the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);	/* notify the perfmon facility */
#endif

	/*
	 *	Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}

/*
 *	task_halt:
 *
 *	Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task, terminate all other threads in the task and
 *	wait for them to terminate, clean up the portspace, and when
 *	all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	return (KERN_SUCCESS);
}

/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the threads and hold them.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_hold(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over-resuming and setting
 *	it running out from under the kernel.
 *
 *	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_wait_locked:
 *
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_t	thread, self;

	assert(task->active);
	assert(task->suspend_count > 0);

	self = current_thread();

	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread != self)
			thread_wait(thread);
	}
}

/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 *	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_release(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 *	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}
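
/*
 * Illustrative sketch (not part of the build): a kernel client pairs
 * task_hold with task_release, optionally waiting in between for the
 * threads to actually stop (the same sequence task_suspend performs
 * below with the locked variants):
 *
 *	if (task_hold(task) == KERN_SUCCESS) {
 *		task_lock(task);
 *		task_wait_locked(task);
 *		task_unlock(task);
 *		... inspect the stopped task ...
 *		(void) task_release(task);
 *	}
 */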

kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*threads;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	threads = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		threads[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(threads[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			threads = (thread_t *)newaddr;
		}

		*threads_out = threads;
		*count = actual;

		/* do the conversion that Mig should handle */

		for (i = 0; i < actual; ++i)
			((ipc_port_t *) threads)[i] = convert_thread_to_port(threads[i]);
	}

	return (KERN_SUCCESS);
}
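
/*
 * Illustrative sketch (not part of the build): a user-space caller of
 * the MIG-generated task_threads() receives one send right per thread
 * plus an out-of-line array, and is responsible for releasing both:
 *
 *	thread_act_array_t	threads;
 *	mach_msg_type_number_t	count, i;
 *
 *	if (task_threads(task, &threads, &count) == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++)
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *			      count * sizeof(threads[0]));
 *	}
 */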

/*
 *	task_suspend:
 *
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 *	Put a kernel-level hold on the threads in the task (all
	 *	user-level task suspensions added together represent a
	 *	single kernel-level hold).  We then wait for the threads
	 *	to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_resume:
 *
 *	Release a user-level suspension on a task; the kernel hold is
 *	dropped when the last user-level suspension is removed.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(
	register task_t		task)
{
	register boolean_t	release = FALSE;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count > 0) {
		if (--task->user_stop_count == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
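
/*
 * Illustrative sketch (not part of the build): user-level suspensions
 * nest (see user_stop_count above), so every successful task_suspend()
 * must eventually be matched by a task_resume() or the target stays
 * stopped:
 *
 *	if (task_suspend(task) == KERN_SUCCESS) {
 *		... examine or modify the stopped task ...
 *		(void) task_resume(task);
 *	}
 */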

kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	host_port;
	kern_return_t	kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}

/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t				task,
	task_flavor_t			flavor,
	__unused task_info_t		task_info_in,	/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = CAST_DOWN(vm_offset_t, map->size);
		basic_info->resident_size = pmap_resident_count(map->pmap)
						 * PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(
			task->total_user_time,
			&basic_info->user_time.seconds,
			&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
			task->total_system_time,
			&basic_info->system_time.seconds,
			&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = map->size;
		basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)
						 * PAGE_SIZE);

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(
			task->total_user_time,
			&basic_info->user_time.seconds,
			&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
			task->total_system_time,
			&basic_info->system_time.seconds,
			&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t			thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t		thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		task_lock(task);

		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		task_unlock(task);

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{
		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = std_quantum_us / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return (KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
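
/*
 * Illustrative sketch (not part of the build): a caller querying the
 * 32-bit basic-info flavor passes a count sized for that flavor and
 * reads the (possibly smaller) count back on return:
 *
 *	task_basic_info_32_data_t	info;
 *	mach_msg_type_number_t		count = TASK_BASIC_INFO_32_COUNT;
 *
 *	if (task_info(task, TASK_BASIC_INFO_32,
 *		      (task_info_t)&info, &count) == KERN_SUCCESS)
 *		printf("resident size: %u bytes\n", info.resident_size);
 */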

/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	__unused task_t			task,
	__unused processor_set_t	new_pset,
	__unused boolean_t		assign_threads)
{
	return(KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}

/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t			task,
	processor_set_t		*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}

/*
 *	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
task_policy(
	__unused task_t			task,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		set_limit,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.  Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	__unused task_t			task,
	__unused processor_set_t	pset,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	base_count,
	__unused policy_limit_t		limit,
	__unused mach_msg_type_number_t	limit_count,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}

#if	FAST_TAS
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
kern_return_t
task_set_ras_pc(
	__unused task_t		task,
	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */

void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t	task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}