1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 * File: kern/task.c
52 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
53 * David Black
54 *
55 * Task management primitives implementation.
56 */
57 /*
58 * Copyright (c) 1993 The University of Utah and
59 * the Computer Systems Laboratory (CSL). All rights reserved.
60 *
61 * Permission to use, copy, modify and distribute this software and its
62 * documentation is hereby granted, provided that both the copyright
63 * notice and this permission notice appear in all copies of the
64 * software, derivative works or modified versions, and any portions
65 * thereof, and that both notices appear in supporting documentation.
66 *
67 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
68 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
69 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
70 *
71 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
72 * improvements that they make and grant CSL redistribution rights.
73 *
74 */
75
76 #include <mach_kdb.h>
77 #include <mach_host.h>
78 #include <mach_prof.h>
79 #include <fast_tas.h>
80 #include <platforms.h>
81
82 #include <mach/mach_types.h>
83 #include <mach/boolean.h>
84 #include <mach/host_priv.h>
85 #include <mach/machine/vm_types.h>
86 #include <mach/vm_param.h>
87 #include <mach/semaphore.h>
88 #include <mach/task_info.h>
89 #include <mach/task_special_ports.h>
90
91 #include <ipc/ipc_types.h>
92 #include <ipc/ipc_space.h>
93 #include <ipc/ipc_entry.h>
94
95 #include <kern/kern_types.h>
96 #include <kern/mach_param.h>
97 #include <kern/misc_protos.h>
98 #include <kern/task.h>
99 #include <kern/thread.h>
100 #include <kern/zalloc.h>
101 #include <kern/kalloc.h>
102 #include <kern/processor.h>
103 #include <kern/sched_prim.h> /* for thread_wakeup */
104 #include <kern/ipc_tt.h>
105 #include <kern/ledger.h>
106 #include <kern/host.h>
107 #include <kern/clock.h>
108 #include <kern/timer.h>
109 #include <kern/profile.h>
110 #include <kern/assert.h>
111 #include <kern/sync_lock.h>
112
113 #include <vm/pmap.h>
114 #include <vm/vm_map.h>
115 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
116 #include <vm/vm_pageout.h>
117 #include <vm/vm_protos.h> /* for vm_map_remove_commpage */
118
119 #if MACH_KDB
120 #include <ddb/db_sym.h>
121 #endif /* MACH_KDB */
122
123 #ifdef __ppc__
124 #include <ppc/exception.h>
125 #include <ppc/hw_perfmon.h>
126 #endif
127
128 /*
129 * Exported interfaces
130 */
131
132 #include <mach/task_server.h>
133 #include <mach/mach_host_server.h>
134 #include <mach/host_security_server.h>
135 #include <mach/mach_port_server.h>
136
137 #include <vm/task_working_set.h>
138 #include <vm/vm_shared_memory_server.h>
139
140 task_t kernel_task;
141 zone_t task_zone;
142
143 /* Forwards */
144
145 void task_hold_locked(
146 task_t task);
147 void task_wait_locked(
148 task_t task);
149 void task_release_locked(
150 task_t task);
151 void task_free(
152 task_t task );
153 void task_synchronizer_destroy_all(
154 task_t task);
155
156 kern_return_t task_set_ledger(
157 task_t task,
158 ledger_t wired,
159 ledger_t paged);
160
161 void
162 task_backing_store_privileged(
163 task_t task)
164 {
165 task_lock(task);
166 task->priv_flags |= VM_BACKING_STORE_PRIV;
167 task_unlock(task);
168 return;
169 }
170
171 void
172 task_working_set_disable(task_t task)
173 {
174 struct tws_hash *ws;
175
176 task_lock(task);
177 ws = task->dynamic_working_set;
178 task->dynamic_working_set = NULL;
179 task_unlock(task);
180 if (ws) {
181 tws_hash_ws_flush(ws);
182 tws_hash_destroy(ws);
183 }
184 }
185
186 void
187 task_set_64bit(
188 task_t task,
189 boolean_t is64bit)
190 {
191 thread_t thread;
192
193 if (is64bit) {
194 if (task_has_64BitAddr(task))
195 return;
196
197 /* LP64todo - no task working set for 64-bit */
198 task_set_64BitAddr(task);
199 task_working_set_disable(task);
200 } else {
201 if ( !task_has_64BitAddr(task))
202 return;
203
204 /*
205 * Deallocate all memory previously allocated
206 * above the 32-bit address space, since it won't
207 * be accessible anymore.
208 */
209 /* LP64todo - make this clean */
210 vm_map_remove_commpage(task->map);
211 pmap_unmap_sharedpage(task->map->pmap); /* Unmap commpage */
212 (void) vm_map_remove(task->map,
213 (vm_map_offset_t) VM_MAX_ADDRESS,
214 MACH_VM_MAX_ADDRESS,
215 VM_MAP_NO_FLAGS);
216 task_clear_64BitAddr(task);
217 }
218 /* FIXME: On x86, the thread save state flavor can diverge from the
219 * task's 64-bit feature flag due to the 32-bit/64-bit register save
220 * state dichotomy. Since we can be pre-empted in this interval,
221 * certain routines may observe the thread as being in an inconsistent
222 * state with respect to its task's 64-bitness.
223 */
224 #ifdef __i386__
225 queue_iterate(&task->threads, thread, thread_t, task_threads) {
226 machine_thread_switch_addrmode(thread, !is64bit);
227 }
228 #endif
229 }
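/*
 * Usage sketch (editorial addition, not part of the original source): an
 * exec-style path might switch the current task's addressing width like
 * this.  example_adopt_lp64() is a hypothetical helper; only
 * task_set_64bit() is real.
 */
#if 0
static void
example_adopt_lp64(task_t task)
{
	/* widen the address space; this also disables the task working set */
	task_set_64bit(task, TRUE);
}
#endif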
230
231 void
232 task_init(void)
233 {
234 task_zone = zinit(
235 sizeof(struct task),
236 TASK_MAX * sizeof(struct task),
237 TASK_CHUNK * sizeof(struct task),
238 "tasks");
239
240 /*
241 * Create the kernel task as the first task.
242 */
243 if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
244 panic("task_init\n");
245
246 vm_map_deallocate(kernel_task->map);
247 kernel_task->map = kernel_map;
248 }
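/*
 * For reference (editorial note): zinit(size, max, alloc, name) takes its
 * max and alloc arguments in bytes, so the call above caps the zone at
 * TASK_MAX tasks and grows it TASK_CHUNK tasks at a time.  A zone for some
 * other fixed-size object would follow the same pattern; struct example,
 * EXAMPLE_MAX and EXAMPLE_CHUNK below are hypothetical.
 */
#if 0
static zone_t example_zone;

static void
example_zone_init(void)
{
	example_zone = zinit(
		sizeof(struct example),
		EXAMPLE_MAX * sizeof(struct example),	/* zone maximum, in bytes */
		EXAMPLE_CHUNK * sizeof(struct example),	/* growth increment, in bytes */
		"examples");
}
#endif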
249
250 #if MACH_HOST
251
252 #if 0
253 static void
254 task_freeze(
255 task_t task)
256 {
257 task_lock(task);
258 /*
259 * If may_assign is false, task is already being assigned,
260 * wait for that to finish.
261 */
262 while (task->may_assign == FALSE) {
263 wait_result_t res;
264
265 task->assign_active = TRUE;
266 res = thread_sleep_mutex((event_t) &task->assign_active,
267 &task->lock, THREAD_UNINT);
268 assert(res == THREAD_AWAKENED);
269 }
270 task->may_assign = FALSE;
271 task_unlock(task);
272 return;
273 }
274 #else
275 #define task_freeze(task)	assert(task->processor_set == &default_pset)
276 #endif
277
278 #if 0
279 static void
280 task_unfreeze(
281 task_t task)
282 {
283 task_lock(task);
284 assert(task->may_assign == FALSE);
285 task->may_assign = TRUE;
286 if (task->assign_active == TRUE) {
287 task->assign_active = FALSE;
288 thread_wakeup((event_t)&task->assign_active);
289 }
290 task_unlock(task);
291 return;
292 }
293 #else
294 #define task_unfreeze(task)	assert(task->processor_set == &default_pset)
295 #endif
296
297 #endif /* MACH_HOST */
298
299 /*
300  * Create a task running in the kernel address space.  This interface
301  * is disabled in this kernel and always returns KERN_INVALID_ARGUMENT.
302 */
303 kern_return_t
304 kernel_task_create(
305 __unused task_t parent_task,
306 __unused vm_offset_t map_base,
307 __unused vm_size_t map_size,
308 __unused task_t *child_task)
309 {
310 return (KERN_INVALID_ARGUMENT);
311 }
312
313 kern_return_t
314 task_create(
315 task_t parent_task,
316 __unused ledger_port_array_t ledger_ports,
317 __unused mach_msg_type_number_t num_ledger_ports,
318 boolean_t inherit_memory,
319 task_t *child_task) /* OUT */
320 {
321 if (parent_task == TASK_NULL)
322 return(KERN_INVALID_ARGUMENT);
323
324 return task_create_internal(
325 parent_task, inherit_memory, task_has_64BitAddr(parent_task), child_task);
326 }
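/*
 * Usage sketch (user space; editorial addition): creating a child task that
 * inherits the caller's address space.  No ledger ports are passed; the new
 * task has no threads until thread_create() is used on it.
 */
#if 0
static kern_return_t
example_create_child(task_t *child)
{
	return task_create(mach_task_self(),
	    (ledger_array_t)0, 0,	/* no ledger ports */
	    TRUE,			/* inherit memory */
	    child);			/* caller owns the returned send right */
}
#endif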
327
328 kern_return_t
329 host_security_create_task_token(
330 host_security_t host_security,
331 task_t parent_task,
332 security_token_t sec_token,
333 audit_token_t audit_token,
334 host_priv_t host_priv,
335 __unused ledger_port_array_t ledger_ports,
336 __unused mach_msg_type_number_t num_ledger_ports,
337 boolean_t inherit_memory,
338 task_t *child_task) /* OUT */
339 {
340 kern_return_t result;
341
342 if (parent_task == TASK_NULL)
343 return(KERN_INVALID_ARGUMENT);
344
345 if (host_security == HOST_NULL)
346 return(KERN_INVALID_SECURITY);
347
348 result = task_create_internal(
349 parent_task, inherit_memory, task_has_64BitAddr(parent_task), child_task);
350
351 if (result != KERN_SUCCESS)
352 return(result);
353
354 result = host_security_set_task_token(host_security,
355 *child_task,
356 sec_token,
357 audit_token,
358 host_priv);
359
360 if (result != KERN_SUCCESS)
361 return(result);
362
363 return(result);
364 }
365
366 kern_return_t
367 task_create_internal(
368 task_t parent_task,
369 boolean_t inherit_memory,
370 boolean_t is_64bit,
371 task_t *child_task) /* OUT */
372 {
373 task_t new_task;
374 processor_set_t pset;
375
376 new_task = (task_t) zalloc(task_zone);
377
378 if (new_task == TASK_NULL)
379 return(KERN_RESOURCE_SHORTAGE);
380
381 /* one ref for just being alive; one for our caller */
382 new_task->ref_count = 2;
383
384 if (inherit_memory)
385 new_task->map = vm_map_fork(parent_task->map);
386 else
387 new_task->map = vm_map_create(pmap_create(0, is_64bit),
388 (vm_map_offset_t)(VM_MIN_ADDRESS),
389 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
390
391 mutex_init(&new_task->lock, 0);
392 queue_init(&new_task->threads);
393 new_task->suspend_count = 0;
394 new_task->thread_count = 0;
395 new_task->active_thread_count = 0;
396 new_task->user_stop_count = 0;
397 new_task->role = TASK_UNSPECIFIED;
398 new_task->active = TRUE;
399 new_task->user_data = 0;
400 new_task->faults = 0;
401 new_task->cow_faults = 0;
402 new_task->pageins = 0;
403 new_task->messages_sent = 0;
404 new_task->messages_received = 0;
405 new_task->syscalls_mach = 0;
406 new_task->priv_flags = 0;
407 new_task->syscalls_unix=0;
408 new_task->csw=0;
409 new_task->taskFeatures[0] = 0; /* Init task features */
410 new_task->taskFeatures[1] = 0; /* Init task features */
411 new_task->dynamic_working_set = 0;
412
413 task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
414 0, TWS_HASH_STYLE_DEFAULT);
415
416 #ifdef MACH_BSD
417 new_task->bsd_info = 0;
418 #endif /* MACH_BSD */
419
420 #ifdef __i386__
421 new_task->i386_ldt = 0;
422 #endif
423
424 #ifdef __ppc__
425 if(BootProcInfo.pf.Available & pf64Bit) new_task->taskFeatures[0] |= tf64BitData; /* If 64-bit machine, show we have 64-bit registers at least */
426 #endif
427
428 queue_init(&new_task->semaphore_list);
429 queue_init(&new_task->lock_set_list);
430 new_task->semaphores_owned = 0;
431 new_task->lock_sets_owned = 0;
432
433 #if MACH_HOST
434 new_task->may_assign = TRUE;
435 new_task->assign_active = FALSE;
436 #endif /* MACH_HOST */
437
438 ipc_task_init(new_task, parent_task);
439
440 new_task->total_user_time = 0;
441 new_task->total_system_time = 0;
442
443 task_prof_init(new_task);
444
445 if (parent_task != TASK_NULL) {
446 #if MACH_HOST
447 /*
448 * Freeze the parent, so that parent_task->processor_set
449 * cannot change.
450 */
451 task_freeze(parent_task);
452 #endif /* MACH_HOST */
453 pset = parent_task->processor_set;
454 if (!pset->active)
455 pset = &default_pset;
456
457 new_task->sec_token = parent_task->sec_token;
458 new_task->audit_token = parent_task->audit_token;
459
460 shared_region_mapping_ref(parent_task->system_shared_region);
461 new_task->system_shared_region = parent_task->system_shared_region;
462
463 new_task->wired_ledger_port = ledger_copy(
464 convert_port_to_ledger(parent_task->wired_ledger_port));
465 new_task->paged_ledger_port = ledger_copy(
466 convert_port_to_ledger(parent_task->paged_ledger_port));
467 if(task_has_64BitAddr(parent_task))
468 task_set_64BitAddr(new_task);
469
470 #ifdef __i386__
471 if (inherit_memory && parent_task->i386_ldt)
472 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
473 #endif
474 }
475 else {
476 pset = &default_pset;
477
478 new_task->sec_token = KERNEL_SECURITY_TOKEN;
479 new_task->audit_token = KERNEL_AUDIT_TOKEN;
480 new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
481 new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
482 }
483
484 if (kernel_task == TASK_NULL) {
485 new_task->priority = BASEPRI_KERNEL;
486 new_task->max_priority = MAXPRI_KERNEL;
487 }
488 else {
489 new_task->priority = BASEPRI_DEFAULT;
490 new_task->max_priority = MAXPRI_USER;
491 }
492
493 pset_lock(pset);
494 pset_add_task(pset, new_task);
495 pset_unlock(pset);
496 #if MACH_HOST
497 if (parent_task != TASK_NULL)
498 task_unfreeze(parent_task);
499 #endif /* MACH_HOST */
500
501 	if (vm_backing_store_low && parent_task != TASK_NULL)
502 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
503
504 ipc_task_enable(new_task);
505
506 *child_task = new_task;
507 return(KERN_SUCCESS);
508 }
509
510 /*
511 * task_deallocate:
512 *
513 * Drop a reference on a task.
514 */
515 void
516 task_deallocate(
517 task_t task)
518 {
519 processor_set_t pset;
520
521 if (task == TASK_NULL)
522 return;
523
524 if (task_deallocate_internal(task) > 0)
525 return;
526
527 pset = task->processor_set;
528 pset_deallocate(pset);
529
530 if(task->dynamic_working_set)
531 tws_hash_destroy(task->dynamic_working_set);
532
533 ipc_task_terminate(task);
534
535 vm_map_deallocate(task->map);
536 is_release(task->itk_space);
537
538 task_prof_deallocate(task);
539 zfree(task_zone, task);
540 }
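/*
 * Usage sketch (editorial addition): task references are counted, so every
 * task_reference() must eventually be balanced by a task_deallocate(); the
 * final release tears the task down as above.
 */
#if 0
static void
example_use_task(task_t task)
{
	task_reference(task);		/* pin the task while we use it */
	/* ... operate on the task ... */
	task_deallocate(task);		/* drop the ref; last one frees it */
}
#endif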
541
542 /*
543 * task_name_deallocate:
544 *
545 * Drop a reference on a task name.
546 */
547 void
548 task_name_deallocate(
549 task_name_t task_name)
550 {
551 	task_deallocate((task_t)task_name);
552 }
553
554
555 /*
556 * task_terminate:
557 *
558 * Terminate the specified task. See comments on thread_terminate
559 * (kern/thread.c) about problems with terminating the "current task."
560 */
561
562 kern_return_t
563 task_terminate(
564 task_t task)
565 {
566 if (task == TASK_NULL)
567 return (KERN_INVALID_ARGUMENT);
568
569 if (task->bsd_info)
570 return (KERN_FAILURE);
571
572 return (task_terminate_internal(task));
573 }
574
575 kern_return_t
576 task_terminate_internal(
577 task_t task)
578 {
579 processor_set_t pset;
580 thread_t thread, self;
581 task_t self_task;
582 boolean_t interrupt_save;
583
584 assert(task != kernel_task);
585
586 self = current_thread();
587 self_task = self->task;
588
589 /*
590 * Get the task locked and make sure that we are not racing
591 * with someone else trying to terminate us.
592 */
593 if (task == self_task)
594 task_lock(task);
595 else
596 if (task < self_task) {
597 task_lock(task);
598 task_lock(self_task);
599 }
600 else {
601 task_lock(self_task);
602 task_lock(task);
603 }
604
605 if (!task->active || !self->active) {
606 /*
607 	 *	Task or current thread is already being terminated.
608 * Just return an error. If we are dying, this will
609 * just get us to our AST special handler and that
610 * will get us to finalize the termination of ourselves.
611 */
612 task_unlock(task);
613 if (self_task != task)
614 task_unlock(self_task);
615
616 return (KERN_FAILURE);
617 }
618
619 if (self_task != task)
620 task_unlock(self_task);
621
622 /*
623 * Make sure the current thread does not get aborted out of
624 * the waits inside these operations.
625 */
626 interrupt_save = thread_interrupt_level(THREAD_UNINT);
627
628 /*
629 * Indicate that we want all the threads to stop executing
630 	 *	in user space by holding the task (thread_terminate_internal
631 	 *	would hold each thread individually anyway, but holding the
632 	 *	task first makes it more likely the hold is already in
633 	 *	place there).  Mark the task inactive, and prevent
634 * further task operations via the task port.
635 */
636 task_hold_locked(task);
637 task->active = FALSE;
638 ipc_task_disable(task);
639
640 /*
641 * Terminate each thread in the task.
642 */
643 queue_iterate(&task->threads, thread, thread_t, task_threads) {
644 thread_terminate_internal(thread);
645 }
646
647 /*
648 * Give the machine dependent code a chance
649 * to perform cleanup before ripping apart
650 * the task.
651 */
652 if (self_task == task)
653 machine_thread_terminate_self();
654
655 task_unlock(task);
656
657 /*
658 * Destroy all synchronizers owned by the task.
659 */
660 task_synchronizer_destroy_all(task);
661
662 /*
663 * Destroy the IPC space, leaving just a reference for it.
664 */
665 ipc_space_destroy(task->itk_space);
666
667 /* LP64todo - make this clean */
668 vm_map_remove_commpage(task->map);
669 pmap_unmap_sharedpage(task->map->pmap); /* Unmap commpage */
670
671 if (vm_map_has_4GB_pagezero(task->map))
672 vm_map_clear_4GB_pagezero(task->map);
673
674 /*
675 * If the current thread is a member of the task
676 * being terminated, then the last reference to
677 * the task will not be dropped until the thread
678 * is finally reaped. To avoid incurring the
679 * expense of removing the address space regions
680 	 *	at reap time, we do it explicitly here.
681 */
682 vm_map_remove(task->map, task->map->min_offset,
683 task->map->max_offset, VM_MAP_NO_FLAGS);
684
685 shared_region_mapping_dealloc(task->system_shared_region);
686
687 /*
688 * Flush working set here to avoid I/O in reaper thread
689 */
690 if (task->dynamic_working_set)
691 tws_hash_ws_flush(task->dynamic_working_set);
692
693 pset = task->processor_set;
694 pset_lock(pset);
695 pset_remove_task(pset,task);
696 pset_unlock(pset);
697
698 /*
699 * We no longer need to guard against being aborted, so restore
700 * the previous interruptible state.
701 */
702 thread_interrupt_level(interrupt_save);
703
704 #if __ppc__
705 perfmon_release_facility(task); // notify the perfmon facility
706 #endif
707
708 /*
709 * Get rid of the task active reference on itself.
710 */
711 task_deallocate(task);
712
713 return (KERN_SUCCESS);
714 }
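/*
 * Editorial note on the locking discipline above: when two task locks must
 * be held at once, they are always taken in ascending address order, so two
 * tasks terminating each other acquire the locks in the same order and
 * cannot deadlock.  Schematically:
 */
#if 0
static void
example_lock_two_tasks(task_t task, task_t self_task)
{
	if (task < self_task) {		/* lower-addressed lock first */
		task_lock(task);
		task_lock(self_task);
	} else {
		task_lock(self_task);
		task_lock(task);
	}
}
#endif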
715
716 /*
717 * task_halt:
718 *
719 * Shut the current task down (except for the current thread) in
720 * preparation for dramatic changes to the task (probably exec).
721 * We hold the task, terminate all other threads in the task and
722  * wait for them to terminate, clean up the port space, and when
723 * all done, let the current thread go.
724 */
725 kern_return_t
726 task_halt(
727 task_t task)
728 {
729 thread_t thread, self;
730
731 assert(task != kernel_task);
732
733 self = current_thread();
734
735 if (task != self->task)
736 return (KERN_INVALID_ARGUMENT);
737
738 task_lock(task);
739
740 if (!task->active || !self->active) {
741 /*
742 * Task or current thread is already being terminated.
743 * Hurry up and return out of the current kernel context
744 * so that we run our AST special handler to terminate
745 * ourselves.
746 */
747 task_unlock(task);
748
749 return (KERN_FAILURE);
750 }
751
752 if (task->thread_count > 1) {
753 /*
754 * Mark all the threads to keep them from starting any more
755 * user-level execution. The thread_terminate_internal code
756 * would do this on a thread by thread basis anyway, but this
757 * gives us a better chance of not having to wait there.
758 */
759 task_hold_locked(task);
760
761 /*
762 * Terminate all the other threads in the task.
763 */
764 queue_iterate(&task->threads, thread, thread_t, task_threads) {
765 if (thread != self)
766 thread_terminate_internal(thread);
767 }
768
769 task_release_locked(task);
770 }
771
772 /*
773 * Give the machine dependent code a chance
774 * to perform cleanup before ripping apart
775 * the task.
776 */
777 machine_thread_terminate_self();
778
779 task_unlock(task);
780
781 /*
782 * Destroy all synchronizers owned by the task.
783 */
784 task_synchronizer_destroy_all(task);
785
786 /*
787 * Destroy the contents of the IPC space, leaving just
788 * a reference for it.
789 */
790 ipc_space_clean(task->itk_space);
791
792 /*
793 * Clean out the address space, as we are going to be
794 * getting a new one.
795 */
796 vm_map_remove(task->map, task->map->min_offset,
797 task->map->max_offset, VM_MAP_NO_FLAGS);
798
799 return (KERN_SUCCESS);
800 }
801
802 /*
803 * task_hold_locked:
804 *
805 * Suspend execution of the specified task.
806  *	This is a recursive-style suspension of the task; a count of
807 * suspends is maintained.
808 *
809 * CONDITIONS: the task is locked and active.
810 */
811 void
812 task_hold_locked(
813 register task_t task)
814 {
815 register thread_t thread;
816
817 assert(task->active);
818
819 if (task->suspend_count++ > 0)
820 return;
821
822 /*
823 * Iterate through all the threads and hold them.
824 */
825 queue_iterate(&task->threads, thread, thread_t, task_threads) {
826 thread_mtx_lock(thread);
827 thread_hold(thread);
828 thread_mtx_unlock(thread);
829 }
830 }
831
832 /*
833 * task_hold:
834 *
835  *	Same as the internal routine above, except that it must lock
836 * and verify that the task is active. This differs from task_suspend
837 * in that it places a kernel hold on the task rather than just a
838  *	user-level hold.  This keeps users from over-resuming and
839 * it running out from under the kernel.
840 *
841 * CONDITIONS: the caller holds a reference on the task
842 */
843 kern_return_t
844 task_hold(
845 register task_t task)
846 {
847 if (task == TASK_NULL)
848 return (KERN_INVALID_ARGUMENT);
849
850 task_lock(task);
851
852 if (!task->active) {
853 task_unlock(task);
854
855 return (KERN_FAILURE);
856 }
857
858 task_hold_locked(task);
859 task_unlock(task);
860
861 return (KERN_SUCCESS);
862 }
863
864 /*
865 * task_wait_locked:
866 *
867 * Wait for all threads in task to stop.
868 *
869 * Conditions:
870 * Called with task locked, active, and held.
871 */
872 void
873 task_wait_locked(
874 register task_t task)
875 {
876 register thread_t thread, self;
877
878 assert(task->active);
879 assert(task->suspend_count > 0);
880
881 self = current_thread();
882
883 /*
884 * Iterate through all the threads and wait for them to
885 * stop. Do not wait for the current thread if it is within
886 * the task.
887 */
888 queue_iterate(&task->threads, thread, thread_t, task_threads) {
889 if (thread != self)
890 thread_wait(thread);
891 }
892 }
893
894 /*
895 * task_release_locked:
896 *
897 * Release a kernel hold on a task.
898 *
899 * CONDITIONS: the task is locked and active
900 */
901 void
902 task_release_locked(
903 register task_t task)
904 {
905 register thread_t thread;
906
907 assert(task->active);
908 assert(task->suspend_count > 0);
909
910 if (--task->suspend_count > 0)
911 return;
912
913 queue_iterate(&task->threads, thread, thread_t, task_threads) {
914 thread_mtx_lock(thread);
915 thread_release(thread);
916 thread_mtx_unlock(thread);
917 }
918 }
919
920 /*
921 * task_release:
922 *
923 * Same as the internal routine above, except that it must lock
924 * and verify that the task is active.
925 *
926 * CONDITIONS: The caller holds a reference to the task
927 */
928 kern_return_t
929 task_release(
930 task_t task)
931 {
932 if (task == TASK_NULL)
933 return (KERN_INVALID_ARGUMENT);
934
935 task_lock(task);
936
937 if (!task->active) {
938 task_unlock(task);
939
940 return (KERN_FAILURE);
941 }
942
943 task_release_locked(task);
944 task_unlock(task);
945
946 return (KERN_SUCCESS);
947 }
948
949 kern_return_t
950 task_threads(
951 task_t task,
952 thread_act_array_t *threads_out,
953 mach_msg_type_number_t *count)
954 {
955 mach_msg_type_number_t actual;
956 thread_t *threads;
957 thread_t thread;
958 vm_size_t size, size_needed;
959 void *addr;
960 unsigned int i, j;
961
962 if (task == TASK_NULL)
963 return (KERN_INVALID_ARGUMENT);
964
965 size = 0; addr = 0;
966
967 for (;;) {
968 task_lock(task);
969 if (!task->active) {
970 task_unlock(task);
971
972 if (size != 0)
973 kfree(addr, size);
974
975 return (KERN_FAILURE);
976 }
977
978 actual = task->thread_count;
979
980 /* do we have the memory we need? */
981 size_needed = actual * sizeof (mach_port_t);
982 if (size_needed <= size)
983 break;
984
985 /* unlock the task and allocate more memory */
986 task_unlock(task);
987
988 if (size != 0)
989 kfree(addr, size);
990
991 assert(size_needed > 0);
992 size = size_needed;
993
994 addr = kalloc(size);
995 if (addr == 0)
996 return (KERN_RESOURCE_SHORTAGE);
997 }
998
999 /* OK, have memory and the task is locked & active */
1000 threads = (thread_t *)addr;
1001
1002 i = j = 0;
1003
1004 for (thread = (thread_t)queue_first(&task->threads); i < actual;
1005 ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
1006 thread_reference_internal(thread);
1007 threads[j++] = thread;
1008 }
1009
1010 assert(queue_end(&task->threads, (queue_entry_t)thread));
1011
1012 actual = j;
1013 size_needed = actual * sizeof (mach_port_t);
1014
1015 /* can unlock task now that we've got the thread refs */
1016 task_unlock(task);
1017
1018 if (actual == 0) {
1019 /* no threads, so return null pointer and deallocate memory */
1020
1021 *threads_out = 0;
1022 *count = 0;
1023
1024 if (size != 0)
1025 kfree(addr, size);
1026 }
1027 else {
1028 /* if we allocated too much, must copy */
1029
1030 if (size_needed < size) {
1031 void *newaddr;
1032
1033 newaddr = kalloc(size_needed);
1034 if (newaddr == 0) {
1035 for (i = 0; i < actual; ++i)
1036 thread_deallocate(threads[i]);
1037 kfree(addr, size);
1038 return (KERN_RESOURCE_SHORTAGE);
1039 }
1040
1041 bcopy(addr, newaddr, size_needed);
1042 kfree(addr, size);
1043 threads = (thread_t *)newaddr;
1044 }
1045
1046 *threads_out = threads;
1047 *count = actual;
1048
1049 	/* do the conversion that MIG should handle */
1050
1051 for (i = 0; i < actual; ++i)
1052 ((ipc_port_t *) threads)[i] = convert_thread_to_port(threads[i]);
1053 }
1054
1055 return (KERN_SUCCESS);
1056 }
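/*
 * Usage sketch (user space; editorial addition): the caller of task_threads()
 * receives the array as out-of-line memory and each element as a port right,
 * and owns both.
 */
#if 0
static void
example_release_thread_list(task_t target)
{
	thread_act_array_t acts;
	mach_msg_type_number_t count;
	unsigned int i;

	if (task_threads(target, &acts, &count) != KERN_SUCCESS)
		return;
	for (i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), acts[i]);	/* drop each right */
	vm_deallocate(mach_task_self(), (vm_address_t)acts,
	    count * sizeof(acts[0]));				/* free the OOL array */
}
#endif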
1057
1058 /*
1059 * task_suspend:
1060 *
1061 * Implement a user-level suspension on a task.
1062 *
1063 * Conditions:
1064 * The caller holds a reference to the task
1065 */
1066 kern_return_t
1067 task_suspend(
1068 register task_t task)
1069 {
1070 if (task == TASK_NULL || task == kernel_task)
1071 return (KERN_INVALID_ARGUMENT);
1072
1073 task_lock(task);
1074
1075 if (!task->active) {
1076 task_unlock(task);
1077
1078 return (KERN_FAILURE);
1079 }
1080
1081 if (task->user_stop_count++ > 0) {
1082 /*
1083 * If the stop count was positive, the task is
1084 * already stopped and we can exit.
1085 */
1086 task_unlock(task);
1087
1088 return (KERN_SUCCESS);
1089 }
1090
1091 /*
1092 * Put a kernel-level hold on the threads in the task (all
1093 * user-level task suspensions added together represent a
1094 * single kernel-level hold). We then wait for the threads
1095 * to stop executing user code.
1096 */
1097 task_hold_locked(task);
1098 task_wait_locked(task);
1099
1100 task_unlock(task);
1101
1102 return (KERN_SUCCESS);
1103 }
1104
1105 /*
1106 * task_resume:
1107 * Release a kernel hold on a task.
1108 *
1109 * Conditions:
1110 * The caller holds a reference to the task
1111 */
1112 kern_return_t
1113 task_resume(
1114 register task_t task)
1115 {
1116 register boolean_t release = FALSE;
1117
1118 if (task == TASK_NULL || task == kernel_task)
1119 return (KERN_INVALID_ARGUMENT);
1120
1121 task_lock(task);
1122
1123 if (!task->active) {
1124 task_unlock(task);
1125
1126 return (KERN_FAILURE);
1127 }
1128
1129 if (task->user_stop_count > 0) {
1130 if (--task->user_stop_count == 0)
1131 release = TRUE;
1132 }
1133 else {
1134 task_unlock(task);
1135
1136 return (KERN_FAILURE);
1137 }
1138
1139 /*
1140 * Release the task if necessary.
1141 */
1142 if (release)
1143 task_release_locked(task);
1144
1145 task_unlock(task);
1146
1147 return (KERN_SUCCESS);
1148 }
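/*
 * Usage sketch (user space; editorial addition): user-level suspensions
 * nest, so each successful task_suspend() must be matched by a
 * task_resume() before the target runs again.
 */
#if 0
static kern_return_t
example_stop_and_resume(task_t target)
{
	kern_return_t kr;

	kr = task_suspend(target);	/* user_stop_count 0 -> 1; threads stop */
	if (kr != KERN_SUCCESS)
		return kr;
	/* ... inspect or modify the stopped task ... */
	return task_resume(target);	/* user_stop_count 1 -> 0 */
}
#endif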
1149
1150 kern_return_t
1151 host_security_set_task_token(
1152 host_security_t host_security,
1153 task_t task,
1154 security_token_t sec_token,
1155 audit_token_t audit_token,
1156 host_priv_t host_priv)
1157 {
1158 ipc_port_t host_port;
1159 kern_return_t kr;
1160
1161 if (task == TASK_NULL)
1162 return(KERN_INVALID_ARGUMENT);
1163
1164 if (host_security == HOST_NULL)
1165 return(KERN_INVALID_SECURITY);
1166
1167 task_lock(task);
1168 task->sec_token = sec_token;
1169 task->audit_token = audit_token;
1170 task_unlock(task);
1171
1172 if (host_priv != HOST_PRIV_NULL) {
1173 kr = host_get_host_priv_port(host_priv, &host_port);
1174 } else {
1175 kr = host_get_host_port(host_priv_self(), &host_port);
1176 }
1177 assert(kr == KERN_SUCCESS);
1178 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
1179 return(kr);
1180 }
1181
1182 /*
1183 * Utility routine to set a ledger
1184 */
1185 kern_return_t
1186 task_set_ledger(
1187 task_t task,
1188 ledger_t wired,
1189 ledger_t paged)
1190 {
1191 if (task == TASK_NULL)
1192 return(KERN_INVALID_ARGUMENT);
1193
1194 task_lock(task);
1195 if (wired) {
1196 ipc_port_release_send(task->wired_ledger_port);
1197 task->wired_ledger_port = ledger_copy(wired);
1198 }
1199 if (paged) {
1200 ipc_port_release_send(task->paged_ledger_port);
1201 task->paged_ledger_port = ledger_copy(paged);
1202 }
1203 task_unlock(task);
1204
1205 return(KERN_SUCCESS);
1206 }
1207
1208 /*
1209 * This routine was added, pretty much exclusively, for registering the
1210  * RPC glue vector for in-kernel short-circuited tasks.  Rather than
1211  * removing it completely, I have only disabled that feature (which was
1212  * the only feature at the time).  It just appears that we are going to
1213  * want to add some user data to tasks in the future (e.g. bsd info,
1214  * task names, etc.), so I left it in the formal task interface.
1215 */
1216 kern_return_t
1217 task_set_info(
1218 task_t task,
1219 task_flavor_t flavor,
1220 __unused task_info_t task_info_in, /* pointer to IN array */
1221 __unused mach_msg_type_number_t task_info_count)
1222 {
1223 if (task == TASK_NULL)
1224 return(KERN_INVALID_ARGUMENT);
1225
1226 switch (flavor) {
1227 default:
1228 return (KERN_INVALID_ARGUMENT);
1229 }
1230 return (KERN_SUCCESS);
1231 }
1232
1233 kern_return_t
1234 task_info(
1235 task_t task,
1236 task_flavor_t flavor,
1237 task_info_t task_info_out,
1238 mach_msg_type_number_t *task_info_count)
1239 {
1240 if (task == TASK_NULL)
1241 return (KERN_INVALID_ARGUMENT);
1242
1243 switch (flavor) {
1244
1245 case TASK_BASIC_INFO_32:
1246 {
1247 task_basic_info_32_t basic_info;
1248 vm_map_t map;
1249
1250 if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
1251 return (KERN_INVALID_ARGUMENT);
1252
1253 basic_info = (task_basic_info_32_t)task_info_out;
1254
1255 map = (task == kernel_task)? kernel_map: task->map;
1256 basic_info->virtual_size = CAST_DOWN(vm_offset_t,map->size);
1257 basic_info->resident_size = pmap_resident_count(map->pmap)
1258 * PAGE_SIZE;
1259
1260 task_lock(task);
1261 basic_info->policy = ((task != kernel_task)?
1262 POLICY_TIMESHARE: POLICY_RR);
1263 basic_info->suspend_count = task->user_stop_count;
1264
1265 absolutetime_to_microtime(
1266 task->total_user_time,
1267 &basic_info->user_time.seconds,
1268 &basic_info->user_time.microseconds);
1269 absolutetime_to_microtime(
1270 task->total_system_time,
1271 &basic_info->system_time.seconds,
1272 &basic_info->system_time.microseconds);
1273 task_unlock(task);
1274
1275 *task_info_count = TASK_BASIC_INFO_32_COUNT;
1276 break;
1277 }
1278
1279 case TASK_BASIC_INFO_64:
1280 {
1281 task_basic_info_64_t basic_info;
1282 vm_map_t map;
1283
1284 if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
1285 return (KERN_INVALID_ARGUMENT);
1286
1287 basic_info = (task_basic_info_64_t)task_info_out;
1288
1289 map = (task == kernel_task)? kernel_map: task->map;
1290 basic_info->virtual_size = map->size;
1291 basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)
1292 * PAGE_SIZE);
1293
1294 task_lock(task);
1295 basic_info->policy = ((task != kernel_task)?
1296 POLICY_TIMESHARE: POLICY_RR);
1297 basic_info->suspend_count = task->user_stop_count;
1298
1299 absolutetime_to_microtime(
1300 task->total_user_time,
1301 &basic_info->user_time.seconds,
1302 &basic_info->user_time.microseconds);
1303 absolutetime_to_microtime(
1304 task->total_system_time,
1305 &basic_info->system_time.seconds,
1306 &basic_info->system_time.microseconds);
1307 task_unlock(task);
1308
1309 *task_info_count = TASK_BASIC_INFO_64_COUNT;
1310 break;
1311 }
1312
1313 case TASK_THREAD_TIMES_INFO:
1314 {
1315 register task_thread_times_info_t times_info;
1316 register thread_t thread;
1317
1318 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
1319 return (KERN_INVALID_ARGUMENT);
1320
1321 times_info = (task_thread_times_info_t) task_info_out;
1322 times_info->user_time.seconds = 0;
1323 times_info->user_time.microseconds = 0;
1324 times_info->system_time.seconds = 0;
1325 times_info->system_time.microseconds = 0;
1326
1327 task_lock(task);
1328
1329 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1330 time_value_t user_time, system_time;
1331
1332 thread_read_times(thread, &user_time, &system_time);
1333
1334 time_value_add(&times_info->user_time, &user_time);
1335 time_value_add(&times_info->system_time, &system_time);
1336 }
1337
1338 task_unlock(task);
1339
1340 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1341 break;
1342 }
1343
1344 case TASK_ABSOLUTETIME_INFO:
1345 {
1346 task_absolutetime_info_t info;
1347 register thread_t thread;
1348
1349 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
1350 return (KERN_INVALID_ARGUMENT);
1351
1352 info = (task_absolutetime_info_t)task_info_out;
1353 info->threads_user = info->threads_system = 0;
1354
1355 task_lock(task);
1356
1357 info->total_user = task->total_user_time;
1358 info->total_system = task->total_system_time;
1359
1360 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1361 uint64_t tval;
1362
1363 tval = timer_grab(&thread->user_timer);
1364 info->threads_user += tval;
1365 info->total_user += tval;
1366
1367 tval = timer_grab(&thread->system_timer);
1368 info->threads_system += tval;
1369 info->total_system += tval;
1370 }
1371
1372 task_unlock(task);
1373
1374 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
1375 break;
1376 }
1377
1378 /* OBSOLETE */
1379 case TASK_SCHED_FIFO_INFO:
1380 {
1381
1382 if (*task_info_count < POLICY_FIFO_BASE_COUNT)
1383 return (KERN_INVALID_ARGUMENT);
1384
1385 return (KERN_INVALID_POLICY);
1386 }
1387
1388 /* OBSOLETE */
1389 case TASK_SCHED_RR_INFO:
1390 {
1391 register policy_rr_base_t rr_base;
1392
1393 if (*task_info_count < POLICY_RR_BASE_COUNT)
1394 return (KERN_INVALID_ARGUMENT);
1395
1396 rr_base = (policy_rr_base_t) task_info_out;
1397
1398 task_lock(task);
1399 if (task != kernel_task) {
1400 task_unlock(task);
1401 return (KERN_INVALID_POLICY);
1402 }
1403
1404 rr_base->base_priority = task->priority;
1405 task_unlock(task);
1406
1407 rr_base->quantum = std_quantum_us / 1000;
1408
1409 *task_info_count = POLICY_RR_BASE_COUNT;
1410 break;
1411 }
1412
1413 /* OBSOLETE */
1414 case TASK_SCHED_TIMESHARE_INFO:
1415 {
1416 register policy_timeshare_base_t ts_base;
1417
1418 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
1419 return (KERN_INVALID_ARGUMENT);
1420
1421 ts_base = (policy_timeshare_base_t) task_info_out;
1422
1423 task_lock(task);
1424 if (task == kernel_task) {
1425 task_unlock(task);
1426 return (KERN_INVALID_POLICY);
1427 }
1428
1429 ts_base->base_priority = task->priority;
1430 task_unlock(task);
1431
1432 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1433 break;
1434 }
1435
1436 case TASK_SECURITY_TOKEN:
1437 {
1438 register security_token_t *sec_token_p;
1439
1440 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
1441 return (KERN_INVALID_ARGUMENT);
1442
1443 sec_token_p = (security_token_t *) task_info_out;
1444
1445 task_lock(task);
1446 *sec_token_p = task->sec_token;
1447 task_unlock(task);
1448
1449 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
1450 break;
1451 }
1452
1453 case TASK_AUDIT_TOKEN:
1454 {
1455 register audit_token_t *audit_token_p;
1456
1457 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
1458 return (KERN_INVALID_ARGUMENT);
1459
1460 audit_token_p = (audit_token_t *) task_info_out;
1461
1462 task_lock(task);
1463 *audit_token_p = task->audit_token;
1464 task_unlock(task);
1465
1466 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
1467 break;
1468 }
1469
1470 case TASK_SCHED_INFO:
1471 return (KERN_INVALID_ARGUMENT);
1472
1473 case TASK_EVENTS_INFO:
1474 {
1475 register task_events_info_t events_info;
1476
1477 if (*task_info_count < TASK_EVENTS_INFO_COUNT)
1478 return (KERN_INVALID_ARGUMENT);
1479
1480 events_info = (task_events_info_t) task_info_out;
1481
1482 task_lock(task);
1483 events_info->faults = task->faults;
1484 events_info->pageins = task->pageins;
1485 events_info->cow_faults = task->cow_faults;
1486 events_info->messages_sent = task->messages_sent;
1487 events_info->messages_received = task->messages_received;
1488 events_info->syscalls_mach = task->syscalls_mach;
1489 events_info->syscalls_unix = task->syscalls_unix;
1490 events_info->csw = task->csw;
1491 task_unlock(task);
1492
1493 *task_info_count = TASK_EVENTS_INFO_COUNT;
1494 break;
1495 }
1496
1497 default:
1498 return (KERN_INVALID_ARGUMENT);
1499 }
1500
1501 return (KERN_SUCCESS);
1502 }
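/*
 * Usage sketch (user space; editorial addition): querying the 32-bit basic
 * info flavor.  The count argument is in/out: capacity on entry, number of
 * integers actually filled in on return.
 */
#if 0
static kern_return_t
example_basic_info(task_t target, task_basic_info_32_data_t *info)
{
	mach_msg_type_number_t count = TASK_BASIC_INFO_32_COUNT;

	return task_info(target, TASK_BASIC_INFO_32,
	    (task_info_t)info, &count);
}
#endif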
1503
1504 /*
1505 * task_assign:
1506 *
1507 * Change the assigned processor set for the task
1508 */
1509 kern_return_t
1510 task_assign(
1511 __unused task_t task,
1512 __unused processor_set_t new_pset,
1513 __unused boolean_t assign_threads)
1514 {
1515 return(KERN_FAILURE);
1516 }
1517
1518 /*
1519 * task_assign_default:
1520 *
1521 * Version of task_assign to assign to default processor set.
1522 */
1523 kern_return_t
1524 task_assign_default(
1525 task_t task,
1526 boolean_t assign_threads)
1527 {
1528 return (task_assign(task, &default_pset, assign_threads));
1529 }
1530
1531 /*
1532 * task_get_assignment
1533 *
1534 * Return name of processor set that task is assigned to.
1535 */
1536 kern_return_t
1537 task_get_assignment(
1538 task_t task,
1539 processor_set_t *pset)
1540 {
1541 if (!task->active)
1542 return(KERN_FAILURE);
1543
1544 *pset = task->processor_set;
1545 pset_reference(*pset);
1546 return(KERN_SUCCESS);
1547 }
1548
1549
1550 /*
1551 * task_policy
1552 *
1553 * Set scheduling policy and parameters, both base and limit, for
1554 * the given task. Policy must be a policy which is enabled for the
1555 * processor set. Change contained threads if requested.
1556 */
1557 kern_return_t
1558 task_policy(
1559 __unused task_t task,
1560 __unused policy_t policy_id,
1561 __unused policy_base_t base,
1562 __unused mach_msg_type_number_t count,
1563 __unused boolean_t set_limit,
1564 __unused boolean_t change)
1565 {
1566 return(KERN_FAILURE);
1567 }
1568
1569 /*
1570 * task_set_policy
1571 *
1572 * Set scheduling policy and parameters, both base and limit, for
1573 * the given task. Policy can be any policy implemented by the
1574 * processor set, whether enabled or not. Change contained threads
1575 * if requested.
1576 */
1577 kern_return_t
1578 task_set_policy(
1579 __unused task_t task,
1580 __unused processor_set_t pset,
1581 __unused policy_t policy_id,
1582 __unused policy_base_t base,
1583 __unused mach_msg_type_number_t base_count,
1584 __unused policy_limit_t limit,
1585 __unused mach_msg_type_number_t limit_count,
1586 __unused boolean_t change)
1587 {
1588 return(KERN_FAILURE);
1589 }
1590
1591 #if FAST_TAS
1592 kern_return_t
1593 task_set_ras_pc(
1594 task_t task,
1595 vm_offset_t pc,
1596 vm_offset_t endpc)
1597 {
1598 extern int fast_tas_debug;
1599
1600 if (fast_tas_debug) {
1601 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
1602 task, pc, endpc);
1603 }
1604 task_lock(task);
1605 task->fast_tas_base = pc;
1606 task->fast_tas_end = endpc;
1607 task_unlock(task);
1608 return KERN_SUCCESS;
1609 }
1610 #else /* FAST_TAS */
1611 kern_return_t
1612 task_set_ras_pc(
1613 __unused task_t task,
1614 __unused vm_offset_t pc,
1615 __unused vm_offset_t endpc)
1616 {
1617 return KERN_FAILURE;
1618 }
1619 #endif /* FAST_TAS */
1620
1621 void
1622 task_synchronizer_destroy_all(task_t task)
1623 {
1624 semaphore_t semaphore;
1625 lock_set_t lock_set;
1626
1627 /*
1628 * Destroy owned semaphores
1629 */
1630
1631 while (!queue_empty(&task->semaphore_list)) {
1632 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
1633 (void) semaphore_destroy(task, semaphore);
1634 }
1635
1636 /*
1637 * Destroy owned lock sets
1638 */
1639
1640 while (!queue_empty(&task->lock_set_list)) {
1641 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
1642 (void) lock_set_destroy(task, lock_set);
1643 }
1644 }
1645
1646 /*
1647 * We need to export some functions to other components that
1648 * are currently implemented in macros within the osfmk
1649 * component. Just export them as functions of the same name.
1650 */
1651 boolean_t is_kerneltask(task_t t)
1652 {
1653 if (t == kernel_task)
1654 return (TRUE);
1655
1656 return (FALSE);
1657 }
1658
1659 #undef current_task
1660 task_t current_task(void);
1661 task_t current_task(void)
1662 {
1663 return (current_task_fast());
1664 }
1665
1666 #undef task_reference
1667 void task_reference(task_t task);
1668 void
1669 task_reference(
1670 task_t task)
1671 {
1672 if (task != TASK_NULL)
1673 task_reference_internal(task);
1674 }