1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 * File: kern/task.c
52 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
53 * David Black
54 *
55 * Task management primitives implementation.
56 */
57 /*
58 * Copyright (c) 1993 The University of Utah and
59 * the Computer Systems Laboratory (CSL). All rights reserved.
60 *
61 * Permission to use, copy, modify and distribute this software and its
62 * documentation is hereby granted, provided that both the copyright
63 * notice and this permission notice appear in all copies of the
64 * software, derivative works or modified versions, and any portions
65 * thereof, and that both notices appear in supporting documentation.
66 *
67 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
68 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
69 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
70 *
71 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
72 * improvements that they make and grant CSL redistribution rights.
73 *
74 */
75
76 #include <mach_kdb.h>
77 #include <mach_host.h>
78 #include <mach_prof.h>
79 #include <fast_tas.h>
80 #include <task_swapper.h>
81 #include <platforms.h>
82
83 #include <mach/boolean.h>
84 #include <mach/machine/vm_types.h>
85 #include <mach/vm_param.h>
86 #include <mach/semaphore.h>
87 #include <mach/task_info.h>
88 #include <mach/task_special_ports.h>
89 #include <mach/mach_types.h>
90 #include <ipc/ipc_space.h>
91 #include <ipc/ipc_entry.h>
92 #include <kern/mach_param.h>
93 #include <kern/misc_protos.h>
94 #include <kern/task.h>
95 #include <kern/thread.h>
96 #include <kern/zalloc.h>
97 #include <kern/kalloc.h>
98 #include <kern/processor.h>
99 #include <kern/sched_prim.h> /* for thread_wakeup */
100 #include <kern/ipc_tt.h>
101 #include <kern/ledger.h>
102 #include <kern/host.h>
103 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
104 #include <kern/profile.h>
105 #include <kern/assert.h>
106 #include <kern/sync_lock.h>
107 #if MACH_KDB
108 #include <ddb/db_sym.h>
109 #endif /* MACH_KDB */
110
111 #if TASK_SWAPPER
112 #include <kern/task_swap.h>
113 #endif /* TASK_SWAPPER */
114
115 #ifdef __ppc__
116 #include <ppc/exception.h>
117 #include <ppc/hw_perfmon.h>
118 #endif
119
120 /*
121 * Exported interfaces
122 */
123
124 #include <mach/task_server.h>
125 #include <mach/mach_host_server.h>
126 #include <mach/host_security_server.h>
127 #include <vm/task_working_set.h>
128
129 task_t kernel_task;
130 zone_t task_zone;
131
132 /* Forwards */
133
134 void task_hold_locked(
135 task_t task);
136 void task_wait_locked(
137 task_t task);
138 void task_release_locked(
139 task_t task);
140 void task_collect_scan(void);
141 void task_free(
142 task_t task );
143 void task_synchronizer_destroy_all(
144 task_t task);
145
146 kern_return_t task_set_ledger(
147 task_t task,
148 ledger_t wired,
149 ledger_t paged);
150
151 void
152 task_backing_store_privileged(
153 task_t task)
154 {
155 task_lock(task);
156 task->priv_flags |= VM_BACKING_STORE_PRIV;
157 task_unlock(task);
158 return;
159 }
160
161 void
162 task_init(void)
163 {
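	/*
	 * Carve out a zone for task structures: the arguments to zinit
	 * are the element size, the maximum memory the zone may use, the
	 * allocation chunk size, and a name for debugging.
	 */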
164 task_zone = zinit(
165 sizeof(struct task),
166 TASK_MAX * sizeof(struct task),
167 TASK_CHUNK * sizeof(struct task),
168 "tasks");
169
170 eml_init();
171
172 /*
173 * Create the kernel task as the first task.
174 */
175 if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS)
176 panic("task_init\n");
177
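	/*
	 * task_create_internal gave the kernel task a private pmap and
	 * map; throw that map away and point the task at the real
	 * kernel_map instead.
	 */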
178 vm_map_deallocate(kernel_task->map);
179 kernel_task->map = kernel_map;
180 }
181
182 #if MACH_HOST
183
184 #if 0
185 static void
186 task_freeze(
187 task_t task)
188 {
189 task_lock(task);
190 /*
191 * If may_assign is false, task is already being assigned,
192 * wait for that to finish.
193 */
194 while (task->may_assign == FALSE) {
195 wait_result_t res;
196
197 task->assign_active = TRUE;
198 res = thread_sleep_mutex((event_t) &task->assign_active,
199 &task->lock, THREAD_UNINT);
200 assert(res == THREAD_AWAKENED);
201 }
202 task->may_assign = FALSE;
203 task_unlock(task);
204 return;
205 }
206 #else
207 #define task_freeze(task) assert((task)->processor_set == &default_pset)
208 #endif
209
210 #if 0
211 static void
212 task_unfreeze(
213 task_t task)
214 {
215 task_lock(task);
216 assert(task->may_assign == FALSE);
217 task->may_assign = TRUE;
218 if (task->assign_active == TRUE) {
219 task->assign_active = FALSE;
220 thread_wakeup((event_t)&task->assign_active);
221 }
222 task_unlock(task);
223 return;
224 }
225 #else
226 #define task_unfreeze(task) assert((task)->processor_set == &default_pset)
227 #endif
228
229 #endif /* MACH_HOST */
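/*
 * With task re-assignment disabled (the #if 0 paths above), freezing a
 * task degenerates to asserting that it never leaves default_pset, so
 * callers may still bracket pset-dependent work with task_freeze() and
 * task_unfreeze() unconditionally.
 */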
230
231 /*
232 * Create a task running in the kernel address space. It may
233 * have its own map of size map_size and may have ipc privileges.
234 */
235 kern_return_t
236 kernel_task_create(
237 task_t parent_task,
238 vm_offset_t map_base,
239 vm_size_t map_size,
240 task_t *child_task)
241 {
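	/*
	 * Creating additional kernel-resident tasks is not supported;
	 * this stub fails unconditionally.
	 */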
242 return (KERN_INVALID_ARGUMENT);
243 }
244
245 kern_return_t
246 task_create(
247 task_t parent_task,
248 ledger_port_array_t ledger_ports,
249 mach_msg_type_number_t num_ledger_ports,
250 boolean_t inherit_memory,
251 task_t *child_task) /* OUT */
252 {
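	/*
	 * The ledger port arguments are currently ignored; the child
	 * simply inherits copies of its parent's ledgers in
	 * task_create_internal().
	 */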
253 if (parent_task == TASK_NULL)
254 return(KERN_INVALID_ARGUMENT);
255
256 return task_create_internal(
257 parent_task, inherit_memory, child_task);
258 }
259
260 kern_return_t
261 host_security_create_task_token(
262 host_security_t host_security,
263 task_t parent_task,
264 security_token_t sec_token,
265 audit_token_t audit_token,
266 host_priv_t host_priv,
267 ledger_port_array_t ledger_ports,
268 mach_msg_type_number_t num_ledger_ports,
269 boolean_t inherit_memory,
270 task_t *child_task) /* OUT */
271 {
272 kern_return_t result;
273
274 if (parent_task == TASK_NULL)
275 return(KERN_INVALID_ARGUMENT);
276
277 if (host_security == HOST_NULL)
278 return(KERN_INVALID_SECURITY);
279
280 result = task_create_internal(
281 parent_task, inherit_memory, child_task);
282
283 if (result != KERN_SUCCESS)
284 return(result);
285
286 result = host_security_set_task_token(host_security,
287 *child_task,
288 sec_token,
289 audit_token,
290 host_priv);
291
292 if (result != KERN_SUCCESS)
293 return(result);
294
295 return(result);
296 }
297
298 kern_return_t
299 task_create_internal(
300 task_t parent_task,
301 boolean_t inherit_memory,
302 task_t *child_task) /* OUT */
303 {
304 task_t new_task;
305 processor_set_t pset;
306
307 new_task = (task_t) zalloc(task_zone);
308
309 if (new_task == TASK_NULL)
310 return(KERN_RESOURCE_SHORTAGE);
311
312 /* one ref for just being alive; one for our caller */
313 new_task->ref_count = 2;
314
315 if (inherit_memory)
316 new_task->map = vm_map_fork(parent_task->map);
317 else
318 new_task->map = vm_map_create(pmap_create(0),
319 round_page_32(VM_MIN_ADDRESS),
320 trunc_page_32(VM_MAX_ADDRESS), TRUE);
321
322 mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
323 queue_init(&new_task->threads);
324 new_task->suspend_count = 0;
325 new_task->thread_count = 0;
326 new_task->res_thread_count = 0;
327 new_task->active_thread_count = 0;
328 new_task->user_stop_count = 0;
329 new_task->role = TASK_UNSPECIFIED;
330 new_task->active = TRUE;
331 new_task->user_data = 0;
332 new_task->faults = 0;
333 new_task->cow_faults = 0;
334 new_task->pageins = 0;
335 new_task->messages_sent = 0;
336 new_task->messages_received = 0;
337 new_task->syscalls_mach = 0;
338 new_task->priv_flags = 0;
339 new_task->syscalls_unix = 0;
340 new_task->csw = 0;
341 new_task->taskFeatures[0] = 0; /* Init task features */
342 new_task->taskFeatures[1] = 0; /* Init task features */
343 new_task->dynamic_working_set = 0;
344
345 task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
346 0, TWS_HASH_STYLE_DEFAULT);
347
348 #ifdef MACH_BSD
349 new_task->bsd_info = 0;
350 #endif /* MACH_BSD */
351
352 #ifdef __ppc__
353 if(per_proc_info[0].pf.Available & pf64Bit) new_task->taskFeatures[0] |= tf64BitData; /* If 64-bit machine, show we have 64-bit registers at least */
354 #endif
355
356 #if TASK_SWAPPER
357 new_task->swap_state = TASK_SW_IN;
358 new_task->swap_flags = 0;
359 new_task->swap_ast_waiting = 0;
360 new_task->swap_stamp = sched_tick;
361 new_task->swap_rss = 0;
362 new_task->swap_nswap = 0;
363 #endif /* TASK_SWAPPER */
364
365 queue_init(&new_task->semaphore_list);
366 queue_init(&new_task->lock_set_list);
367 new_task->semaphores_owned = 0;
368 new_task->lock_sets_owned = 0;
369
370 #if MACH_HOST
371 new_task->may_assign = TRUE;
372 new_task->assign_active = FALSE;
373 #endif /* MACH_HOST */
374 eml_task_reference(new_task, parent_task);
375
376 ipc_task_init(new_task, parent_task);
377
378 new_task->total_user_time.seconds = 0;
379 new_task->total_user_time.microseconds = 0;
380 new_task->total_system_time.seconds = 0;
381 new_task->total_system_time.microseconds = 0;
382
383 task_prof_init(new_task);
384
385 if (parent_task != TASK_NULL) {
386 #if MACH_HOST
387 /*
388 * Freeze the parent, so that parent_task->processor_set
389 * cannot change.
390 */
391 task_freeze(parent_task);
392 #endif /* MACH_HOST */
393 pset = parent_task->processor_set;
394 if (!pset->active)
395 pset = &default_pset;
396
397 new_task->sec_token = parent_task->sec_token;
398 new_task->audit_token = parent_task->audit_token;
399
400 shared_region_mapping_ref(parent_task->system_shared_region);
401 new_task->system_shared_region = parent_task->system_shared_region;
402
403 new_task->wired_ledger_port = ledger_copy(
404 convert_port_to_ledger(parent_task->wired_ledger_port));
405 new_task->paged_ledger_port = ledger_copy(
406 convert_port_to_ledger(parent_task->paged_ledger_port));
407 }
408 else {
409 pset = &default_pset;
410
411 new_task->sec_token = KERNEL_SECURITY_TOKEN;
412 new_task->audit_token = KERNEL_AUDIT_TOKEN;
413 new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
414 new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
415 }
416
417 if (kernel_task == TASK_NULL) {
418 new_task->priority = BASEPRI_KERNEL;
419 new_task->max_priority = MAXPRI_KERNEL;
420 }
421 else {
422 new_task->priority = BASEPRI_DEFAULT;
423 new_task->max_priority = MAXPRI_USER;
424 }
425
426 pset_lock(pset);
427 pset_add_task(pset, new_task);
428 pset_unlock(pset);
429 #if MACH_HOST
430 if (parent_task != TASK_NULL)
431 task_unfreeze(parent_task);
432 #endif /* MACH_HOST */
433
434 if (vm_backing_store_low && parent_task != NULL)
435 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
436
437 ipc_task_enable(new_task);
438
439 *child_task = new_task;
440 return(KERN_SUCCESS);
441 }
442
443 /*
444 * task_deallocate
445 *
446 * Drop a reference on a task.
447 * The caller must not hold the task lock; it is taken internally.
448 */
449 void
450 task_deallocate(
451 task_t task)
452 {
453 processor_set_t pset;
454 int refs;
455
456 if (task == TASK_NULL)
457 return;
458
459 task_lock(task);
460 refs = --task->ref_count;
461 task_unlock(task);
462
463 if (refs > 0)
464 return;
465
466 #if TASK_SWAPPER
467 /* task_terminate guarantees that this task is off the list */
468 assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
469 #endif /* TASK_SWAPPER */
470
471 if(task->dynamic_working_set)
472 tws_hash_destroy((tws_hash_t)task->dynamic_working_set);
473
474 eml_task_deallocate(task);
475
476 ipc_task_terminate(task);
477
478 #if MACH_HOST
479 task_freeze(task);
480 #endif
481
482 pset = task->processor_set;
483 pset_lock(pset);
484 pset_remove_task(pset, task);
485 pset_unlock(pset);
486 pset_deallocate(pset);
487
488 #if MACH_HOST
489 task_unfreeze(task);
490 #endif
491
492 vm_map_deallocate(task->map);
493 is_release(task->itk_space);
494 task_prof_deallocate(task);
495 zfree(task_zone, (vm_offset_t) task);
496 }
497
498
499 void
500 task_reference(
501 task_t task)
502 {
503 if (task != TASK_NULL) {
504 task_lock(task);
505 task->ref_count++;
506 task_unlock(task);
507 }
508 }
509
510 boolean_t
511 task_reference_try(
512 task_t task)
513 {
514 if (task != TASK_NULL) {
515 if (task_lock_try(task)) {
516 task->ref_count++;
517 task_unlock(task);
518 return TRUE;
519 }
520 }
521 return FALSE;
522 }
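/*
 * A minimal usage sketch (the helper below is hypothetical, not part of
 * the task interface): take a reference only when the task lock can be
 * acquired without blocking, and balance it with task_deallocate().
 */
static void
example_probe_task(
	task_t		task)
{
	if (task_reference_try(task)) {
		/* ... safely inspect the task here ... */
		task_deallocate(task);
	}
}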
523
524 /*
525 * task_terminate:
526 *
527 * Terminate the specified task. See comments on thread_terminate
528 * (kern/thread.c) about problems with terminating the "current task."
529 */
530
531 kern_return_t
532 task_terminate(
533 task_t task)
534 {
535 if (task == TASK_NULL)
536 return(KERN_INVALID_ARGUMENT);
537 if (task->bsd_info)
538 return(KERN_FAILURE);
539 return (task_terminate_internal(task));
540 }
541
542 kern_return_t
543 task_terminate_internal(
544 task_t task)
545 {
546 thread_act_t thr_act, cur_thr_act;
547 task_t cur_task;
548 boolean_t interrupt_save;
549
550 assert(task != kernel_task);
551
552 cur_thr_act = current_act();
553 cur_task = cur_thr_act->task;
554
555 #if TASK_SWAPPER
556 /*
557 * If task is not resident (swapped out, or being swapped
558 * out), we want to bring it back in (this can block).
559 * NOTE: The only way that this can happen in the current
560 * system is if the task is swapped while it has a thread
561 * in exit(), and the thread does not hit a clean point
562 * to swap itself before getting here.
564 * Terminating other tasks is another way to reach this code, but
564 * it is not yet fully supported.
565 * The task_swapin is unconditional. It used to be done
566 * only if the task is not resident. Swapping in a
567 * resident task will prevent it from being swapped out
568 * while it terminates.
569 */
570 task_swapin(task, TRUE); /* TRUE means make it unswappable */
571 #endif /* TASK_SWAPPER */
572
573 /*
574 * Get the task locked and make sure that we are not racing
575 * with someone else trying to terminate us.
576 */
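	/*
	 * When both tasks must be held, the locks are taken in ascending
	 * address order so that two threads terminating each other's
	 * tasks cannot deadlock.
	 */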
577 if (task == cur_task) {
578 task_lock(task);
579 } else if (task < cur_task) {
580 task_lock(task);
581 task_lock(cur_task);
582 } else {
583 task_lock(cur_task);
584 task_lock(task);
585 }
586
587 if (!task->active || !cur_thr_act->active) {
588 /*
589 * Task or current act is already being terminated.
590 * Just return an error. If we are dying, this will
591 * just get us to our AST special handler and that
592 * will get us to finalize the termination of ourselves.
593 */
594 task_unlock(task);
595 if (cur_task != task)
596 task_unlock(cur_task);
597 return(KERN_FAILURE);
598 }
599 if (cur_task != task)
600 task_unlock(cur_task);
601
602 /*
603 * Make sure the current thread does not get aborted out of
604 * the waits inside these operations.
605 */
606 interrupt_save = thread_interrupt_level(THREAD_UNINT);
607
608 /*
609 * Indicate that we want all the threads to stop executing
610 * at user space by holding the task (we would have held
611 * each thread independently in thread_terminate_internal -
612 * but this way we may be more likely to already find it
613 * held there). Mark the task inactive, and prevent
614 * further task operations via the task port.
615 */
616 task_hold_locked(task);
617 task->active = FALSE;
618 ipc_task_disable(task);
619
620 /*
621 * Terminate each activation in the task.
622 *
623 * Each terminated activation will run its special handler
624 * when its current kernel context is unwound. That will
625 * clean up most of the thread resources. Then it will be
626 * handed over to the reaper, who will finally remove the
627 * thread from the task list and free the structures.
628 */
629 queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
630 thread_terminate_internal(thr_act);
631 }
632
633 /*
634 * Give the machine dependent code a chance
635 * to perform cleanup before ripping apart
636 * the task.
637 */
638 if (cur_thr_act->task == task)
639 machine_thread_terminate_self();
640
641 task_unlock(task);
642
643 /*
644 * Destroy all synchronizers owned by the task.
645 */
646 task_synchronizer_destroy_all(task);
647
648 /*
649 * Destroy the IPC space, leaving just a reference for it.
650 */
651 ipc_space_destroy(task->itk_space);
652
653 /*
654 * If the current thread is a member of the task
655 * being terminated, then the last reference to
656 * the task will not be dropped until the thread
657 * is finally reaped. To avoid incurring the
658 * expense of removing the address space regions
659 * at reap time, we do it explicitly here.
660 */
661 (void) vm_map_remove(task->map,
662 task->map->min_offset,
663 task->map->max_offset, VM_MAP_NO_FLAGS);
664
665 shared_region_mapping_dealloc(task->system_shared_region);
666
667 /*
668 * Flush working set here to avoid I/O in reaper thread
669 */
670 if(task->dynamic_working_set)
671 tws_hash_ws_flush((tws_hash_t)
672 task->dynamic_working_set);
673
674 /*
675 * We no longer need to guard against being aborted, so restore
676 * the previous interruptible state.
677 */
678 thread_interrupt_level(interrupt_save);
679
680 #if __ppc__
681 perfmon_release_facility(task); // notify the perfmon facility
682 #endif
683
684 /*
685 * Get rid of the task active reference on itself.
686 */
687 task_deallocate(task);
688
689 return(KERN_SUCCESS);
690 }
691
692 /*
693 * task_halt - Shut the current task down (except for the current thread) in
694 * preparation for dramatic changes to the task (probably exec).
695 * We hold the task, terminate all other threads in the task and
696 * wait for them to terminate, clean up the port space, and when
697 * all done, let the current thread go.
698 */
699 kern_return_t
700 task_halt(
701 task_t task)
702 {
703 thread_act_t thr_act, cur_thr_act;
704 task_t cur_task;
705
706 assert(task != kernel_task);
707
708 cur_thr_act = current_act();
709 cur_task = cur_thr_act->task;
710
711 if (task != cur_task) {
712 return(KERN_INVALID_ARGUMENT);
713 }
714
715 #if TASK_SWAPPER
716 /*
717 * If task is not resident (swapped out, or being swapped
718 * out), we want to bring it back in and make it unswappable.
719 * This can block, so do it early.
720 */
721 task_swapin(task, TRUE); /* TRUE means make it unswappable */
722 #endif /* TASK_SWAPPER */
723
724 task_lock(task);
725
726 if (!task->active || !cur_thr_act->active) {
727 /*
728 * Task or current thread is already being terminated.
729 * Hurry up and return out of the current kernel context
730 * so that we run our AST special handler to terminate
731 * ourselves.
732 */
733 task_unlock(task);
734 return(KERN_FAILURE);
735 }
736
737 if (task->thread_count > 1) {
738 /*
739 * Mark all the threads to keep them from starting any more
740 * user-level execution. The thread_terminate_internal code
741 * would do this on a thread by thread basis anyway, but this
742 * gives us a better chance of not having to wait there.
743 */
744 task_hold_locked(task);
745
746 /*
747 * Terminate all the other activations in the task.
748 *
749 * Each terminated activation will run its special handler
750 * when its current kernel context is unwound. That will
751 * clean up most of the thread resources. Then it will be
752 * handed over to the reaper, who will finally remove the
753 * thread from the task list and free the structures.
754 */
755 queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
756 if (thr_act != cur_thr_act)
757 thread_terminate_internal(thr_act);
758 }
759 task_release_locked(task);
760 }
761
762 /*
763 * Give the machine dependent code a chance
764 * to perform cleanup before ripping apart
765 * the task.
766 */
767 machine_thread_terminate_self();
768
769 task_unlock(task);
770
771 /*
772 * Destroy all synchronizers owned by the task.
773 */
774 task_synchronizer_destroy_all(task);
775
776 /*
777 * Destroy the contents of the IPC space, leaving just
778 * a reference for it.
779 */
780 ipc_space_clean(task->itk_space);
781
782 /*
783 * Clean out the address space, as we are going to be
784 * getting a new one.
785 */
786 (void) vm_map_remove(task->map,
787 task->map->min_offset,
788 task->map->max_offset, VM_MAP_NO_FLAGS);
789
790 return KERN_SUCCESS;
791 }
792
793 /*
794 * task_hold_locked:
795 *
796 * Suspend execution of the specified task.
797 * This is a recursive-style suspension of the task, a count of
798 * suspends is maintained.
799 *
800 * CONDITIONS: the task is locked and active.
801 */
802 void
803 task_hold_locked(
804 register task_t task)
805 {
806 register thread_act_t thr_act;
807
808 assert(task->active);
809
810 if (task->suspend_count++ > 0)
811 return;
812
813 /*
814 * Iterate through all the thread_act's and hold them.
815 */
816 queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
817 act_lock_thread(thr_act);
818 thread_hold(thr_act);
819 act_unlock_thread(thr_act);
820 }
821 }
822
823 /*
824 * task_hold:
825 *
826 * Same as the internal routine above, except that it must lock
827 * and verify that the task is active. This differs from task_suspend
828 * in that it places a kernel hold on the task rather than just a
829 * user-level hold. This keeps users from over-resuming and setting
830 * it running out from under the kernel.
831 *
832 * CONDITIONS: the caller holds a reference on the task
833 */
834 kern_return_t
835 task_hold(task_t task)
836 {
837 kern_return_t kret;
838
839 if (task == TASK_NULL)
840 return (KERN_INVALID_ARGUMENT);
841 task_lock(task);
842 if (!task->active) {
843 task_unlock(task);
844 return (KERN_FAILURE);
845 }
846 task_hold_locked(task);
847 task_unlock(task);
848
849 return(KERN_SUCCESS);
850 }
851
852 /*
853 * Routine: task_wait_locked
854 * Wait for all threads in task to stop.
855 *
856 * Conditions:
857 * Called with task locked, active, and held.
858 */
859 void
860 task_wait_locked(
861 register task_t task)
862 {
863 register thread_act_t thr_act, cur_thr_act;
864
865 assert(task->active);
866 assert(task->suspend_count > 0);
867
868 cur_thr_act = current_act();
869 /*
870 * Iterate through all the threads and wait for them to
871 * stop. Do not wait for the current thread if it is within
872 * the task.
873 */
874 queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
875 if (thr_act != cur_thr_act) {
876 thread_t thread;
877
878 thread = act_lock_thread(thr_act);
879 thread_wait(thread);
880 act_unlock_thread(thr_act);
881 }
882 }
883 }
884
885 /*
886 * task_release_locked:
887 *
888 * Release a kernel hold on a task.
889 *
890 * CONDITIONS: the task is locked and active
891 */
892 void
893 task_release_locked(
894 register task_t task)
895 {
896 register thread_act_t thr_act;
897
898 assert(task->active);
899 assert(task->suspend_count > 0);
900
901 if (--task->suspend_count > 0)
902 return;
903
904 /*
905 * Iterate through all the thread_act's and release them,
906 * undoing the hold that task_hold_locked placed on each
907 * one.
908 */
909 queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
910 act_lock_thread(thr_act);
911 thread_release(thr_act);
912 act_unlock_thread(thr_act);
913 }
914 }
915
916 /*
917 * task_release:
918 *
919 * Same as the internal routine above, except that it must lock
920 * and verify that the task is active.
921 *
922 * CONDITIONS: The caller holds a reference to the task
923 */
924 kern_return_t
925 task_release(task_t task)
926 {
927 kern_return_t kret;
928
929 if (task == TASK_NULL)
930 return (KERN_INVALID_ARGUMENT);
931 task_lock(task);
932 if (!task->active) {
933 task_unlock(task);
934 return (KERN_FAILURE);
935 }
936 task_release_locked(task);
937 task_unlock(task);
938
939 return(KERN_SUCCESS);
940 }
941
942 kern_return_t
943 task_threads(
944 task_t task,
945 thread_act_array_t *thr_act_list,
946 mach_msg_type_number_t *count)
947 {
948 unsigned int actual; /* this many thr_acts */
949 thread_act_t thr_act;
950 thread_act_t *thr_acts;
951 thread_t thread;
952 int i, j;
953
954 vm_size_t size, size_needed;
955 vm_offset_t addr;
956
957 if (task == TASK_NULL)
958 return KERN_INVALID_ARGUMENT;
959
960 size = 0; addr = 0;
961
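	/*
	 * Allocate-and-retry: size the buffer with the lock dropped, then
	 * re-check the thread count under the task lock; if more threads
	 * appeared in the meantime, drop the lock, grow the buffer and
	 * try again.
	 */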
962 for (;;) {
963 task_lock(task);
964 if (!task->active) {
965 task_unlock(task);
966 if (size != 0)
967 kfree(addr, size);
968 return KERN_FAILURE;
969 }
970
971 actual = task->thread_count;
972
973 /* do we have the memory we need? */
974 size_needed = actual * sizeof(mach_port_t);
975 if (size_needed <= size)
976 break;
977
978 /* unlock the task and allocate more memory */
979 task_unlock(task);
980
981 if (size != 0)
982 kfree(addr, size);
983
984 assert(size_needed > 0);
985 size = size_needed;
986
987 addr = kalloc(size);
988 if (addr == 0)
989 return KERN_RESOURCE_SHORTAGE;
990 }
991
992 /* OK, have memory and the task is locked & active */
993 thr_acts = (thread_act_t *) addr;
994
995 for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->threads);
996 i < actual;
997 i++, thr_act = (thread_act_t) queue_next(&thr_act->task_threads)) {
998 act_lock(thr_act);
999 if (thr_act->act_ref_count > 0) {
1000 act_reference_locked(thr_act);
1001 thr_acts[j++] = thr_act;
1002 }
1003 act_unlock(thr_act);
1004 }
1005 assert(queue_end(&task->threads, (queue_entry_t) thr_act));
1006
1007 actual = j;
1008 size_needed = actual * sizeof(mach_port_t);
1009
1010 /* can unlock task now that we've got the thr_act refs */
1011 task_unlock(task);
1012
1013 if (actual == 0) {
1014 /* no thr_acts, so return null pointer and deallocate memory */
1015
1016 *thr_act_list = 0;
1017 *count = 0;
1018
1019 if (size != 0)
1020 kfree(addr, size);
1021 } else {
1022 /* if we allocated too much, must copy */
1023
1024 if (size_needed < size) {
1025 vm_offset_t newaddr;
1026
1027 newaddr = kalloc(size_needed);
1028 if (newaddr == 0) {
1029 for (i = 0; i < actual; i++)
1030 act_deallocate(thr_acts[i]);
1031 kfree(addr, size);
1032 return KERN_RESOURCE_SHORTAGE;
1033 }
1034
1035 bcopy((char *) addr, (char *) newaddr, size_needed);
1036 kfree(addr, size);
1037 thr_acts = (thread_act_t *) newaddr;
1038 }
1039
1040 *thr_act_list = thr_acts;
1041 *count = actual;
1042
1043 /* do the conversion that Mig should handle */
1044
1045 for (i = 0; i < actual; i++)
1046 ((ipc_port_t *) thr_acts)[i] =
1047 convert_act_to_port(thr_acts[i]);
1048 }
1049
1050 return KERN_SUCCESS;
1051 }
1052
1053 /*
1054 * Routine: task_suspend
1055 * Implement a user-level suspension on a task.
1056 *
1057 * Conditions:
1058 * The caller holds a reference to the task
1059 */
1060 kern_return_t
1061 task_suspend(
1062 register task_t task)
1063 {
1064 if (task == TASK_NULL)
1065 return (KERN_INVALID_ARGUMENT);
1066
1067 task_lock(task);
1068 if (!task->active) {
1069 task_unlock(task);
1070 return (KERN_FAILURE);
1071 }
1072 if ((task->user_stop_count)++ > 0) {
1073 /*
1074 * If the stop count was positive, the task is
1075 * already stopped and we can exit.
1076 */
1077 task_unlock(task);
1078 return (KERN_SUCCESS);
1079 }
1080
1081 /*
1082 * Put a kernel-level hold on the threads in the task (all
1083 * user-level task suspensions added together represent a
1084 * single kernel-level hold). We then wait for the threads
1085 * to stop executing user code.
1086 */
1087 task_hold_locked(task);
1088 task_wait_locked(task);
1089 task_unlock(task);
1090 return (KERN_SUCCESS);
1091 }
1092
1093 /*
1094 * Routine: task_resume
1095 * Release a kernel hold on a task.
1096 *
1097 * Conditions:
1098 * The caller holds a reference to the task
1099 */
1100 kern_return_t
1101 task_resume(register task_t task)
1102 {
1103 register boolean_t release;
1104
1105 if (task == TASK_NULL)
1106 return(KERN_INVALID_ARGUMENT);
1107
1108 release = FALSE;
1109 task_lock(task);
1110 if (!task->active) {
1111 task_unlock(task);
1112 return(KERN_FAILURE);
1113 }
1114 if (task->user_stop_count > 0) {
1115 if (--(task->user_stop_count) == 0)
1116 release = TRUE;
1117 }
1118 else {
1119 task_unlock(task);
1120 return(KERN_FAILURE);
1121 }
1122
1123 /*
1124 * Release the task if necessary.
1125 */
1126 if (release)
1127 task_release_locked(task);
1128
1129 task_unlock(task);
1130 return(KERN_SUCCESS);
1131 }
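/*
 * A minimal usage sketch (the helper below is hypothetical): user-level
 * suspensions nest, so every successful task_suspend() must eventually
 * be paired with a task_resume(); the kernel hold on the threads is
 * dropped only when user_stop_count returns to zero.
 */
static kern_return_t
example_with_task_stopped(
	task_t		task)
{
	kern_return_t	kr;

	kr = task_suspend(task);
	if (kr != KERN_SUCCESS)
		return (kr);

	/* ... examine or manipulate the stopped task here ... */

	return (task_resume(task));
}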
1132
1133 kern_return_t
1134 host_security_set_task_token(
1135 host_security_t host_security,
1136 task_t task,
1137 security_token_t sec_token,
1138 audit_token_t audit_token,
1139 host_priv_t host_priv)
1140 {
1141 ipc_port_t host_port;
1142 kern_return_t kr;
1143
1144 if (task == TASK_NULL)
1145 return(KERN_INVALID_ARGUMENT);
1146
1147 if (host_security == HOST_NULL)
1148 return(KERN_INVALID_SECURITY);
1149
1150 task_lock(task);
1151 task->sec_token = sec_token;
1152 task->audit_token = audit_token;
1153 task_unlock(task);
1154
1155 if (host_priv != HOST_PRIV_NULL) {
1156 kr = host_get_host_priv_port(host_priv, &host_port);
1157 } else {
1158 kr = host_get_host_port(host_priv_self(), &host_port);
1159 }
1160 assert(kr == KERN_SUCCESS);
1161 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
1162 return(kr);
1163 }
1164
1165 /*
1166 * Utility routine to set a ledger
1167 */
1168 kern_return_t
1169 task_set_ledger(
1170 task_t task,
1171 ledger_t wired,
1172 ledger_t paged)
1173 {
1174 if (task == TASK_NULL)
1175 return(KERN_INVALID_ARGUMENT);
1176
1177 task_lock(task);
1178 if (wired) {
1179 ipc_port_release_send(task->wired_ledger_port);
1180 task->wired_ledger_port = ledger_copy(wired);
1181 }
1182 if (paged) {
1183 ipc_port_release_send(task->paged_ledger_port);
1184 task->paged_ledger_port = ledger_copy(paged);
1185 }
1186 task_unlock(task);
1187
1188 return(KERN_SUCCESS);
1189 }
1190
1191 /*
1192 * This routine was added, pretty much exclusively, for registering the
1193 * RPC glue vector for in-kernel short circuited tasks. Rather than
1194 * removing it completely, I have only disabled that feature (which was
1195 * the only feature at the time). It just appears that we are going to
1196 * want to add some user data to tasks in the future (e.g. bsd info,
1197 * task names, etc.), so I left it in the formal task interface.
1198 */
1199 kern_return_t
1200 task_set_info(
1201 task_t task,
1202 task_flavor_t flavor,
1203 task_info_t task_info_in, /* pointer to IN array */
1204 mach_msg_type_number_t task_info_count)
1205 {
1206 vm_map_t map;
1207
1208 if (task == TASK_NULL)
1209 return(KERN_INVALID_ARGUMENT);
1210
1211 switch (flavor) {
1212 default:
1213 return (KERN_INVALID_ARGUMENT);
1214 }
1215 return (KERN_SUCCESS);
1216 }
1217
1218 kern_return_t
1219 task_info(
1220 task_t task,
1221 task_flavor_t flavor,
1222 task_info_t task_info_out,
1223 mach_msg_type_number_t *task_info_count)
1224 {
1225 thread_t thread;
1226 vm_map_t map;
1227
1228 if (task == TASK_NULL)
1229 return(KERN_INVALID_ARGUMENT);
1230
1231 switch (flavor) {
1232
1233 case TASK_BASIC_INFO:
1234 {
1235 register task_basic_info_t basic_info;
1236
1237 if (*task_info_count < TASK_BASIC_INFO_COUNT) {
1238 return(KERN_INVALID_ARGUMENT);
1239 }
1240
1241 basic_info = (task_basic_info_t) task_info_out;
1242
1243 map = (task == kernel_task) ? kernel_map : task->map;
1244
1245 basic_info->virtual_size = map->size;
1246 basic_info->resident_size = pmap_resident_count(map->pmap)
1247 * PAGE_SIZE;
1248
1249 task_lock(task);
1250 basic_info->policy = ((task != kernel_task)?
1251 POLICY_TIMESHARE: POLICY_RR);
1252 basic_info->suspend_count = task->user_stop_count;
1253 basic_info->user_time.seconds
1254 = task->total_user_time.seconds;
1255 basic_info->user_time.microseconds
1256 = task->total_user_time.microseconds;
1257 basic_info->system_time.seconds
1258 = task->total_system_time.seconds;
1259 basic_info->system_time.microseconds
1260 = task->total_system_time.microseconds;
1261 task_unlock(task);
1262
1263 *task_info_count = TASK_BASIC_INFO_COUNT;
1264 break;
1265 }
1266
1267 case TASK_THREAD_TIMES_INFO:
1268 {
1269 register task_thread_times_info_t times_info;
1270 register thread_t thread;
1271 register thread_act_t thr_act;
1272
1273 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
1274 return (KERN_INVALID_ARGUMENT);
1275 }
1276
1277 times_info = (task_thread_times_info_t) task_info_out;
1278 times_info->user_time.seconds = 0;
1279 times_info->user_time.microseconds = 0;
1280 times_info->system_time.seconds = 0;
1281 times_info->system_time.microseconds = 0;
1282
1283 task_lock(task);
1284 queue_iterate(&task->threads, thr_act,
1285 thread_act_t, task_threads)
1286 {
1287 time_value_t user_time, system_time;
1288 spl_t s;
1289
1290 thread = act_lock_thread(thr_act);
1291
1292 /* JMM - add logic to skip threads that have migrated
1293 * into this task?
1294 */
1295
1296 assert(thread); /* Must have thread */
1297 s = splsched();
1298 thread_lock(thread);
1299
1300 thread_read_times(thread, &user_time, &system_time);
1301
1302 thread_unlock(thread);
1303 splx(s);
1304 act_unlock_thread(thr_act);
1305
1306 time_value_add(&times_info->user_time, &user_time);
1307 time_value_add(&times_info->system_time, &system_time);
1308 }
1309 task_unlock(task);
1310
1311 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1312 break;
1313 }
1314
1315 case TASK_SCHED_FIFO_INFO:
1316 {
1317
1318 if (*task_info_count < POLICY_FIFO_BASE_COUNT)
1319 return(KERN_INVALID_ARGUMENT);
1320
1321 return(KERN_INVALID_POLICY);
1322 }
1323
1324 case TASK_SCHED_RR_INFO:
1325 {
1326 register policy_rr_base_t rr_base;
1327
1328 if (*task_info_count < POLICY_RR_BASE_COUNT)
1329 return(KERN_INVALID_ARGUMENT);
1330
1331 rr_base = (policy_rr_base_t) task_info_out;
1332
1333 task_lock(task);
1334 if (task != kernel_task) {
1335 task_unlock(task);
1336 return(KERN_INVALID_POLICY);
1337 }
1338
1339 rr_base->base_priority = task->priority;
1340 task_unlock(task);
1341
1342 rr_base->quantum = tick / 1000;
1343
1344 *task_info_count = POLICY_RR_BASE_COUNT;
1345 break;
1346 }
1347
1348 case TASK_SCHED_TIMESHARE_INFO:
1349 {
1350 register policy_timeshare_base_t ts_base;
1351
1352 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
1353 return(KERN_INVALID_ARGUMENT);
1354
1355 ts_base = (policy_timeshare_base_t) task_info_out;
1356
1357 task_lock(task);
1358 if (task == kernel_task) {
1359 task_unlock(task);
1360 return(KERN_INVALID_POLICY);
1361 }
1362
1363 ts_base->base_priority = task->priority;
1364 task_unlock(task);
1365
1366 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1367 break;
1368 }
1369
1370 case TASK_SECURITY_TOKEN:
1371 {
1372 register security_token_t *sec_token_p;
1373
1374 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1375 return(KERN_INVALID_ARGUMENT);
1376 }
1377
1378 sec_token_p = (security_token_t *) task_info_out;
1379
1380 task_lock(task);
1381 *sec_token_p = task->sec_token;
1382 task_unlock(task);
1383
1384 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
1385 break;
1386 }
1387
1388 case TASK_AUDIT_TOKEN:
1389 {
1390 register audit_token_t *audit_token_p;
1391
1392 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
1393 return(KERN_INVALID_ARGUMENT);
1394 }
1395
1396 audit_token_p = (audit_token_t *) task_info_out;
1397
1398 task_lock(task);
1399 *audit_token_p = task->audit_token;
1400 task_unlock(task);
1401
1402 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
1403 break;
1404 }
1405
1406 case TASK_SCHED_INFO:
1407 return(KERN_INVALID_ARGUMENT);
1408
1409 case TASK_EVENTS_INFO:
1410 {
1411 register task_events_info_t events_info;
1412
1413 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
1414 return(KERN_INVALID_ARGUMENT);
1415 }
1416
1417 events_info = (task_events_info_t) task_info_out;
1418
1419 task_lock(task);
1420 events_info->faults = task->faults;
1421 events_info->pageins = task->pageins;
1422 events_info->cow_faults = task->cow_faults;
1423 events_info->messages_sent = task->messages_sent;
1424 events_info->messages_received = task->messages_received;
1425 events_info->syscalls_mach = task->syscalls_mach;
1426 events_info->syscalls_unix = task->syscalls_unix;
1427 events_info->csw = task->csw;
1428 task_unlock(task);
1429
1430 *task_info_count = TASK_EVENTS_INFO_COUNT;
1431 break;
1432 }
1433
1434 default:
1435 return (KERN_INVALID_ARGUMENT);
1436 }
1437
1438 return(KERN_SUCCESS);
1439 }
1440
1441 /*
1442 * task_assign:
1443 *
1444 * Change the assigned processor set for the task
1445 */
1446 kern_return_t
1447 task_assign(
1448 task_t task,
1449 processor_set_t new_pset,
1450 boolean_t assign_threads)
1451 {
1452 #ifdef lint
1453 task++; new_pset++; assign_threads++;
1454 #endif /* lint */
1455 return(KERN_FAILURE);
1456 }
1457
1458 /*
1459 * task_assign_default:
1460 *
1461 * Version of task_assign to assign to default processor set.
1462 */
1463 kern_return_t
1464 task_assign_default(
1465 task_t task,
1466 boolean_t assign_threads)
1467 {
1468 return (task_assign(task, &default_pset, assign_threads));
1469 }
1470
1471 /*
1472 * task_get_assignment
1473 *
1474 * Return name of processor set that task is assigned to.
1475 */
1476 kern_return_t
1477 task_get_assignment(
1478 task_t task,
1479 processor_set_t *pset)
1480 {
1481 if (!task->active)
1482 return(KERN_FAILURE);
1483
1484 *pset = task->processor_set;
1485 pset_reference(*pset);
1486 return(KERN_SUCCESS);
1487 }
1488
1489
1490 /*
1491 * task_policy
1492 *
1493 * Set scheduling policy and parameters, both base and limit, for
1494 * the given task. Policy must be a policy which is enabled for the
1495 * processor set. Change contained threads if requested.
1496 */
1497 kern_return_t
1498 task_policy(
1499 task_t task,
1500 policy_t policy_id,
1501 policy_base_t base,
1502 mach_msg_type_number_t count,
1503 boolean_t set_limit,
1504 boolean_t change)
1505 {
1506 return(KERN_FAILURE);
1507 }
1508
1509 /*
1510 * task_set_policy
1511 *
1512 * Set scheduling policy and parameters, both base and limit, for
1513 * the given task. Policy can be any policy implemented by the
1514 * processor set, whether enabled or not. Change contained threads
1515 * if requested.
1516 */
1517 kern_return_t
1518 task_set_policy(
1519 task_t task,
1520 processor_set_t pset,
1521 policy_t policy_id,
1522 policy_base_t base,
1523 mach_msg_type_number_t base_count,
1524 policy_limit_t limit,
1525 mach_msg_type_number_t limit_count,
1526 boolean_t change)
1527 {
1528 return(KERN_FAILURE);
1529 }
1530
1531 /*
1532 * task_collect_scan:
1533 *
1534 * Attempt to free resources owned by tasks.
1535 */
1536
1537 void
1538 task_collect_scan(void)
1539 {
1540 register task_t task, prev_task;
1541 processor_set_t pset = &default_pset;
1542
1543 pset_lock(pset);
1544 pset->ref_count++;
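	/*
	 * Hold an extra reference on the pset so it cannot disappear
	 * while its lock is dropped around pmap_collect() below; the
	 * matching pset_deallocate is at the end of the scan.
	 */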
1545 task = (task_t) queue_first(&pset->tasks);
1546 while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
1547 task_lock(task);
1548 if (task->ref_count > 0) {
1549
1550 task_reference_locked(task);
1551 task_unlock(task);
1552
1553 #if MACH_HOST
1554 /*
1555 * While we still have the pset locked, freeze the task in
1556 * this pset. That way, when we get back from collecting
1557 * it, we can dereference the pset_tasks chain for the task
1558 * and be assured that we are still in this chain.
1559 */
1560 task_freeze(task);
1561 #endif
1562
1563 pset_unlock(pset);
1564
1565 pmap_collect(task->map->pmap);
1566
1567 pset_lock(pset);
1568 prev_task = task;
1569 task = (task_t) queue_next(&task->pset_tasks);
1570
1571 #if MACH_HOST
1572 task_unfreeze(prev_task);
1573 #endif
1574
1575 task_deallocate(prev_task);
1576 } else {
1577 task_unlock(task);
1578 task = (task_t) queue_next(&task->pset_tasks);
1579 }
1580 }
1581
1582 pset_unlock(pset);
1583
1584 pset_deallocate(pset);
1585 }
1586
1587 /* Also disabled in vm/vm_pageout.c */
1588 boolean_t task_collect_allowed = FALSE;
1589 unsigned task_collect_last_tick = 0;
1590 unsigned task_collect_max_rate = 0; /* in ticks */
1591
1592 /*
1593 * consider_task_collect:
1594 *
1595 * Called by the pageout daemon when the system needs more free pages.
1596 */
1597
1598 void
1599 consider_task_collect(void)
1600 {
1601 /*
1602 * By default, don't attempt task collection more frequently
1603 * than once per second.
1604 */
1605
1606 if (task_collect_max_rate == 0)
1607 task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;
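	/*
	 * sched_tick is presumed to advance (1 << SCHED_TICK_SHIFT)
	 * times per second, so this limit works out to slightly more
	 * than a second between scans.
	 */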
1608
1609 if (task_collect_allowed &&
1610 (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1611 task_collect_last_tick = sched_tick;
1612 task_collect_scan();
1613 }
1614 }
1615
1616 kern_return_t
1617 task_set_ras_pc(
1618 task_t task,
1619 vm_offset_t pc,
1620 vm_offset_t endpc)
1621 {
1622 #if FAST_TAS
1623 extern int fast_tas_debug;
1624
1625 if (fast_tas_debug) {
1626 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
1627 task, pc, endpc);
1628 }
1629 task_lock(task);
1630 task->fast_tas_base = pc;
1631 task->fast_tas_end = endpc;
1632 task_unlock(task);
1633 return KERN_SUCCESS;
1634
1635 #else /* FAST_TAS */
1636 #ifdef lint
1637 task++;
1638 pc++;
1639 endpc++;
1640 #endif /* lint */
1641
1642 return KERN_FAILURE;
1643
1644 #endif /* FAST_TAS */
1645 }
1646
1647 void
1648 task_synchronizer_destroy_all(task_t task)
1649 {
1650 semaphore_t semaphore;
1651 lock_set_t lock_set;
1652
1653 /*
1654 * Destroy owned semaphores
1655 */
1656
1657 while (!queue_empty(&task->semaphore_list)) {
1658 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
1659 (void) semaphore_destroy(task, semaphore);
1660 }
1661
1662 /*
1663 * Destroy owned lock sets
1664 */
1665
1666 while (!queue_empty(&task->lock_set_list)) {
1667 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
1668 (void) lock_set_destroy(task, lock_set);
1669 }
1670 }
1671
1672 /*
1673 * task_set_port_space:
1674 *
1675 * Set port name space of task to specified size.
1676 */
1677
1678 kern_return_t
1679 task_set_port_space(
1680 task_t task,
1681 int table_entries)
1682 {
1683 kern_return_t kr;
1684
1685 is_write_lock(task->itk_space);
1686 kr = ipc_entry_grow_table(task->itk_space, table_entries);
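	/*
	 * ipc_entry_grow_table is expected to consume (unlock) the space
	 * write lock on failure, which is why the unlock here is done
	 * only on success.
	 */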
1687 if (kr == KERN_SUCCESS)
1688 is_write_unlock(task->itk_space);
1689 return kr;
1690 }
1691
1692 /*
1693 * Routine:
1694 * task_is_classic
1695 * Purpose:
1696 * Returns true if the task is a P_CLASSIC task.
1697 */
1698 boolean_t
1699 task_is_classic(
1700 task_t task)
1701 {
1702 boolean_t result = FALSE;
1703
1704 if (task) {
1705 struct proc *p = get_bsdtask_info(task);
1706 result = proc_is_classic(p) ? TRUE : FALSE;
1707 }
1708 return result;
1709 }
1710
1711 /*
1712 * We need to export some functions to other components that
1713 * are currently implemented in macros within the osfmk
1714 * component. Just export them as functions of the same name.
1715 */
1716 boolean_t is_kerneltask(task_t t)
1717 {
1718 if (t == kernel_task)
1719 return (TRUE);
1720
1721 return (FALSE);
1722 }
1723
1724 #undef current_task
1725 task_t current_task()
1726 {
1727 return (current_task_fast());
1728 }