/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:        kern/task.c
 * Author:      Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *              David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
75
76 #include <mach_kdb.h>
77 #include <mach_host.h>
78 #include <mach_prof.h>
79 #include <fast_tas.h>
80 #include <task_swapper.h>
81 #include <platforms.h>
82
83 #include <mach/boolean.h>
84 #include <mach/machine/vm_types.h>
85 #include <mach/vm_param.h>
86 #include <mach/semaphore.h>
87 #include <mach/task_info.h>
88 #include <mach/task_special_ports.h>
89 #include <mach/mach_types.h>
90 #include <ipc/ipc_space.h>
91 #include <ipc/ipc_entry.h>
92 #include <kern/mach_param.h>
93 #include <kern/misc_protos.h>
94 #include <kern/task.h>
95 #include <kern/thread.h>
96 #include <kern/zalloc.h>
97 #include <kern/kalloc.h>
98 #include <kern/processor.h>
99 #include <kern/sched_prim.h> /* for thread_wakeup */
100 #include <kern/ipc_tt.h>
101 #include <kern/ledger.h>
102 #include <kern/host.h>
103 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
104 #include <kern/profile.h>
105 #include <kern/assert.h>
106 #include <kern/sync_lock.h>
107 #if MACH_KDB
108 #include <ddb/db_sym.h>
109 #endif /* MACH_KDB */
110
111 #if TASK_SWAPPER
112 #include <kern/task_swap.h>
113 #endif /* TASK_SWAPPER */
114
115 /*
116 * Exported interfaces
117 */
118
119 #include <mach/task_server.h>
120 #include <mach/mach_host_server.h>
121 #include <mach/host_security_server.h>
122 #include <vm/task_working_set.h>
123
124 task_t kernel_task;
125 zone_t task_zone;
126
127 /* Forwards */
128
129 void task_hold_locked(
130 task_t task);
131 void task_wait_locked(
132 task_t task);
133 void task_release_locked(
134 task_t task);
135 void task_collect_scan(void);
136 void task_free(
137 task_t task );
138 void task_synchronizer_destroy_all(
139 task_t task);
140
141 kern_return_t task_set_ledger(
142 task_t task,
143 ledger_t wired,
144 ledger_t paged);
145
void
task_init(void)
{
        /*
         * The zinit() arguments are: the size of one zone element
         * (a struct task), the maximum memory the zone may consume,
         * the chunk size the zone grows by, and the zone's name.
         */
        task_zone = zinit(
                        sizeof(struct task),
                        TASK_MAX * sizeof(struct task),
                        TASK_CHUNK * sizeof(struct task),
                        "tasks");

        eml_init();

        /*
         * Create the kernel task as the first task.
         * Task_create_local must assign to kernel_task as a side effect,
         * for other initialization. (:-()
         */
        if (task_create_local(
                        TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
                panic("task_init\n");
        vm_map_deallocate(kernel_task->map);
        kernel_task->map = kernel_map;

#if     MACH_ASSERT
        if (watchacts & WA_TASK)
                printf("task_init: kernel_task = %x map=%x\n",
                       kernel_task, kernel_map);
#endif  /* MACH_ASSERT */
}

#if     MACH_HOST

#if 0
static void
task_freeze(
        task_t  task)
{
        task_lock(task);
        /*
         * If may_assign is false, task is already being assigned,
         * wait for that to finish.
         */
        while (task->may_assign == FALSE) {
                wait_result_t res;

                task->assign_active = TRUE;
                res = thread_sleep_mutex((event_t) &task->assign_active,
                                         &task->lock, THREAD_UNINT);
                assert(res == THREAD_AWAKENED);
        }
        task->may_assign = FALSE;
        task_unlock(task);
        return;
}
#else
#define task_freeze(task)       assert((task)->processor_set == &default_pset)
#endif

#if 0
static void
task_unfreeze(
        task_t  task)
{
        task_lock(task);
        assert(task->may_assign == FALSE);
        task->may_assign = TRUE;
        if (task->assign_active == TRUE) {
                task->assign_active = FALSE;
                thread_wakeup((event_t)&task->assign_active);
        }
        task_unlock(task);
        return;
}
#else
#define task_unfreeze(task)     assert((task)->processor_set == &default_pset)
#endif

#endif  /* MACH_HOST */

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size map_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
        task_t          parent_task,
        vm_offset_t     map_base,
        vm_size_t       map_size,
        task_t          *child_task)
{
        kern_return_t   result;
        task_t          new_task;
        vm_map_t        old_map;

        /*
         * Create the task.
         */
        result = task_create_local(parent_task, FALSE, TRUE, &new_task);
        if (result != KERN_SUCCESS)
                return (result);

        /*
         * Task_create_local creates the task with a user-space map.
         * We attempt to replace the map and free it afterwards; else
         * task_deallocate will free it (can NOT set map to null before
         * task_deallocate, this impersonates a norma placeholder task).
         * _Mark the memory as pageable_ -- this is what we
         * want for images (like servers) loaded into the kernel.
         */
        if (map_size == 0) {
                vm_map_deallocate(new_task->map);
                new_task->map = kernel_map;
                *child_task = new_task;
        } else {
                old_map = new_task->map;
                if ((result = kmem_suballoc(kernel_map, &map_base,
                                            map_size, TRUE, FALSE,
                                            &new_task->map)) != KERN_SUCCESS) {
                        /*
                         * New task created with ref count of 2 -- decrement by
                         * one to force task deletion.
                         */
                        printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
                               kernel_map, map_base, map_size);
                        --new_task->ref_count;
                        task_deallocate(new_task);
                        return (result);
                }
                vm_map_deallocate(old_map);
                *child_task = new_task;
        }
        return (KERN_SUCCESS);
}
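
/*
 * Illustrative sketch of the two modes above: a map_size of zero gives
 * the new task the kernel's own map, while a nonzero size carves a
 * pageable submap out of kernel_map at map_base:
 *
 *      task_t t;
 *      kernel_task_create(kernel_task, 0, 0, &t);        -- shares kernel_map
 *      kernel_task_create(kernel_task, base, size, &t);  -- private submap
 */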

kern_return_t
task_create(
        task_t                  parent_task,
        ledger_port_array_t     ledger_ports,
        mach_msg_type_number_t  num_ledger_ports,
        boolean_t               inherit_memory,
        task_t                  *child_task)    /* OUT */
{
        if (parent_task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);

        return task_create_local(
                        parent_task, inherit_memory, FALSE, child_task);
}

kern_return_t
host_security_create_task_token(
        host_security_t         host_security,
        task_t                  parent_task,
        security_token_t        sec_token,
        host_priv_t             host_priv,
        ledger_port_array_t     ledger_ports,
        mach_msg_type_number_t  num_ledger_ports,
        boolean_t               inherit_memory,
        task_t                  *child_task)    /* OUT */
{
        kern_return_t           result;

        if (parent_task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);

        if (host_security == HOST_NULL)
                return(KERN_INVALID_SECURITY);

        result = task_create_local(
                        parent_task, inherit_memory, FALSE, child_task);

        if (result != KERN_SUCCESS)
                return(result);

        result = host_security_set_task_token(host_security,
                                              *child_task,
                                              sec_token,
                                              host_priv);

        if (result != KERN_SUCCESS)
                return(result);

        return(result);
}

kern_return_t
task_create_local(
        task_t          parent_task,
        boolean_t       inherit_memory,
        boolean_t       kernel_loaded,
        task_t          *child_task)            /* OUT */
{
        task_t          new_task;
        processor_set_t pset;

        new_task = (task_t) zalloc(task_zone);

        if (new_task == TASK_NULL)
                return(KERN_RESOURCE_SHORTAGE);

        /* one ref for just being alive; one for our caller */
        new_task->ref_count = 2;

        if (inherit_memory)
                new_task->map = vm_map_fork(parent_task->map);
        else
                new_task->map = vm_map_create(pmap_create(0),
                                              round_page(VM_MIN_ADDRESS),
                                              trunc_page(VM_MAX_ADDRESS), TRUE);

        mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
        queue_init(&new_task->thr_acts);
        new_task->suspend_count = 0;
        new_task->thr_act_count = 0;
        new_task->res_act_count = 0;
        new_task->active_act_count = 0;
        new_task->user_stop_count = 0;
        new_task->role = TASK_UNSPECIFIED;
        new_task->active = TRUE;
        new_task->kernel_loaded = kernel_loaded;
        new_task->user_data = 0;
        new_task->faults = 0;
        new_task->cow_faults = 0;
        new_task->pageins = 0;
        new_task->messages_sent = 0;
        new_task->messages_received = 0;
        new_task->syscalls_mach = 0;
        new_task->syscalls_unix = 0;
        new_task->csw = 0;
        new_task->dynamic_working_set = 0;

        task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
                                0, TWS_HASH_STYLE_DEFAULT);

#ifdef  MACH_BSD
        new_task->bsd_info = 0;
#endif  /* MACH_BSD */

#if     TASK_SWAPPER
        new_task->swap_state = TASK_SW_IN;
        new_task->swap_flags = 0;
        new_task->swap_ast_waiting = 0;
        new_task->swap_stamp = sched_tick;
        new_task->swap_rss = 0;
        new_task->swap_nswap = 0;
#endif  /* TASK_SWAPPER */

        queue_init(&new_task->semaphore_list);
        queue_init(&new_task->lock_set_list);
        new_task->semaphores_owned = 0;
        new_task->lock_sets_owned = 0;

#if     MACH_HOST
        new_task->may_assign = TRUE;
        new_task->assign_active = FALSE;
#endif  /* MACH_HOST */
        eml_task_reference(new_task, parent_task);

        ipc_task_init(new_task, parent_task);

        new_task->total_user_time.seconds = 0;
        new_task->total_user_time.microseconds = 0;
        new_task->total_system_time.seconds = 0;
        new_task->total_system_time.microseconds = 0;

        task_prof_init(new_task);

        if (parent_task != TASK_NULL) {
#if     MACH_HOST
                /*
                 * Freeze the parent, so that parent_task->processor_set
                 * cannot change.
                 */
                task_freeze(parent_task);
#endif  /* MACH_HOST */
                pset = parent_task->processor_set;
                if (!pset->active)
                        pset = &default_pset;

                new_task->sec_token = parent_task->sec_token;

                shared_region_mapping_ref(parent_task->system_shared_region);
                new_task->system_shared_region = parent_task->system_shared_region;

                new_task->wired_ledger_port = ledger_copy(
                        convert_port_to_ledger(parent_task->wired_ledger_port));
                new_task->paged_ledger_port = ledger_copy(
                        convert_port_to_ledger(parent_task->paged_ledger_port));
        }
        else {
                pset = &default_pset;

                new_task->sec_token = KERNEL_SECURITY_TOKEN;
                new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
                new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
        }

        if (kernel_task == TASK_NULL) {
                new_task->priority = MINPRI_KERNEL;
                new_task->max_priority = MAXPRI_KERNEL;
        }
        else {
                new_task->priority = BASEPRI_DEFAULT;
                new_task->max_priority = MAXPRI_USER;
        }

        pset_lock(pset);
        pset_add_task(pset, new_task);
        pset_unlock(pset);
#if     MACH_HOST
        if (parent_task != TASK_NULL)
                task_unfreeze(parent_task);
#endif  /* MACH_HOST */

#if     FAST_TAS
        if (inherit_memory) {
                new_task->fast_tas_base = parent_task->fast_tas_base;
                new_task->fast_tas_end = parent_task->fast_tas_end;
        } else {
                new_task->fast_tas_base = (vm_offset_t)0;
                new_task->fast_tas_end = (vm_offset_t)0;
        }
#endif  /* FAST_TAS */

        ipc_task_enable(new_task);

#if     TASK_SWAPPER
        task_swapout_eligible(new_task);
#endif  /* TASK_SWAPPER */

#if     MACH_ASSERT
        if (watchacts & WA_TASK)
                printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
                       parent_task, inherit_memory, new_task);
#endif  /* MACH_ASSERT */

        *child_task = new_task;
        return(KERN_SUCCESS);
}
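
/*
 * Illustrative sketch of the reference convention above: a new task is
 * born with ref_count == 2 (one for being alive, one for the caller),
 * so a caller that is done with its handle drops exactly one:
 *
 *      task_t child;
 *      if (task_create_local(parent, FALSE, FALSE, &child) == KERN_SUCCESS) {
 *              ...use child...
 *              task_deallocate(child); -- drops the caller's reference; the
 *                                         "alive" reference goes away when
 *                                         the task terminates
 *      }
 */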

/*
 *      task_deallocate
 *
 *      Drop a reference on a task.
 *      The task lock is taken internally, so the caller must not hold it.
 */
void
task_deallocate(
        task_t          task)
{
        processor_set_t pset;
        int             refs;

        if (task == TASK_NULL)
                return;

        task_lock(task);
        refs = --task->ref_count;
        task_unlock(task);

        if (refs > 0)
                return;

#if     TASK_SWAPPER
        /* task_terminate guarantees that this task is off the list */
        assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
#endif  /* TASK_SWAPPER */

        if (task->dynamic_working_set)
                tws_hash_destroy((tws_hash_t)task->dynamic_working_set);

        eml_task_deallocate(task);

        ipc_task_terminate(task);

#if     MACH_HOST
        task_freeze(task);
#endif

        pset = task->processor_set;
        pset_lock(pset);
        pset_remove_task(pset, task);
        pset_unlock(pset);
        pset_deallocate(pset);

#if     MACH_HOST
        task_unfreeze(task);
#endif

        if (task->kernel_loaded)
                vm_map_remove(kernel_map, task->map->min_offset,
                              task->map->max_offset, VM_MAP_NO_FLAGS);
        vm_map_deallocate(task->map);
        is_release(task->itk_space);
        task_prof_deallocate(task);
        zfree(task_zone, (vm_offset_t) task);
}

void
task_reference(
        task_t          task)
{
        if (task != TASK_NULL) {
                task_lock(task);
                task->ref_count++;
                task_unlock(task);
        }
}

boolean_t
task_reference_try(
        task_t          task)
{
        if (task != TASK_NULL) {
                if (task_lock_try(task)) {
                        task->ref_count++;
                        task_unlock(task);
                        return TRUE;
                }
        }
        return FALSE;
}
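
/*
 * Illustrative sketch of the try-reference pattern above: a scan that
 * already holds another lock and cannot block on the task lock can
 * attempt the reference and simply skip the task on contention:
 *
 *      if (task_reference_try(task)) {
 *              ...examine task...
 *              task_deallocate(task);  -- drop the extra reference
 *      }
 */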

/*
 *      task_terminate:
 *
 *      Terminate the specified task.  See comments on thread_terminate
 *      (kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
        task_t          task)
{
        if (task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);
        if (task->bsd_info)
                return(KERN_FAILURE);
        return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
        task_t          task)
{
        thread_act_t    thr_act, cur_thr_act;
        task_t          cur_task;
        boolean_t       interrupt_save;

        assert(task != kernel_task);

        cur_thr_act = current_act();
        cur_task = cur_thr_act->task;

#if     TASK_SWAPPER
        /*
         * If task is not resident (swapped out, or being swapped
         * out), we want to bring it back in (this can block).
         * NOTE: The only way that this can happen in the current
         * system is if the task is swapped while it has a thread
         * in exit(), and the thread does not hit a clean point
         * to swap itself before getting here.
         * Terminating other tasks is another way to reach this
         * code, but it is not yet fully supported.
         * The task_swapin is unconditional.  It used to be done
         * only if the task is not resident.  Swapping in a
         * resident task will prevent it from being swapped out
         * while it terminates.
         */
        task_swapin(task, TRUE);        /* TRUE means make it unswappable */
#endif  /* TASK_SWAPPER */

        /*
         * Get the task locked and make sure that we are not racing
         * with someone else trying to terminate us.  When two distinct
         * tasks are involved, take the locks in ascending address
         * order so that concurrent terminations cannot deadlock.
         */
        if (task == cur_task) {
                task_lock(task);
        } else if (task < cur_task) {
                task_lock(task);
                task_lock(cur_task);
        } else {
                task_lock(cur_task);
                task_lock(task);
        }

        if (!task->active || !cur_thr_act->active) {
                /*
                 * Task or current act is already being terminated.
                 * Just return an error.  If we are dying, this will
                 * just get us to our AST special handler and that
                 * will get us to finalize the termination of ourselves.
                 */
                task_unlock(task);
                if (cur_task != task)
                        task_unlock(cur_task);
                return(KERN_FAILURE);
        }
        if (cur_task != task)
                task_unlock(cur_task);

        /*
         * Make sure the current thread does not get aborted out of
         * the waits inside these operations.
         */
        interrupt_save = thread_interrupt_level(THREAD_UNINT);

        /*
         * Indicate that we want all the threads to stop executing
         * at user space by holding the task (we would have held
         * each thread independently in thread_terminate_internal -
         * but this way we may be more likely to already find it
         * held there).  Mark the task inactive, and prevent
         * further task operations via the task port.
         */
        task_hold_locked(task);
        task->active = FALSE;
        ipc_task_disable(task);

        /*
         * Terminate each activation in the task.
         *
         * Each terminated activation will run its special handler
         * when its current kernel context is unwound.  That will
         * clean up most of the thread resources.  Then it will be
         * handed over to the reaper, who will finally remove the
         * thread from the task list and free the structures.
         */
        queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
                thread_terminate_internal(thr_act);
        }

        /*
         * Clean up any virtual machine state/resources associated
         * with the current activation because it may hold wiring
         * and other references on resources we will be trying to
         * release below.
         */
        if (cur_thr_act->task == task)
                act_virtual_machine_destroy(cur_thr_act);

        task_unlock(task);

        /*
         * Destroy all synchronizers owned by the task.
         */
        task_synchronizer_destroy_all(task);

        /*
         * Destroy the IPC space, leaving just a reference for it.
         */
        if (!task->kernel_loaded)
                ipc_space_destroy(task->itk_space);

        /*
         * If the current thread is a member of the task
         * being terminated, then the last reference to
         * the task will not be dropped until the thread
         * is finally reaped.  To avoid incurring the
         * expense of removing the address space regions
         * at reap time, we do it explicitly here.
         */
        (void) vm_map_remove(task->map,
                             task->map->min_offset,
                             task->map->max_offset, VM_MAP_NO_FLAGS);

        shared_region_mapping_dealloc(task->system_shared_region);

        /*
         * Flush working set here to avoid I/O in reaper thread
         */
        if (task->dynamic_working_set)
                tws_hash_ws_flush((tws_hash_t)
                                task->dynamic_working_set);

        /*
         * We no longer need to guard against being aborted, so restore
         * the previous interruptible state.
         */
        thread_interrupt_level(interrupt_save);

        /*
         * Get rid of the task active reference on itself.
         */
        task_deallocate(task);

        return(KERN_SUCCESS);
}

/*
 *      task_halt - Shut the current task down (except for the current thread) in
 *      preparation for dramatic changes to the task (probably exec).
 *      We hold the task, terminate all other threads in the task and
 *      wait for them to terminate, clean up the port space, and when
 *      all done, let the current thread go.
 */
kern_return_t
task_halt(
        task_t          task)
{
        thread_act_t    thr_act, cur_thr_act;
        task_t          cur_task;

        assert(task != kernel_task);

        cur_thr_act = current_act();
        cur_task = cur_thr_act->task;

        if (task != cur_task) {
                return(KERN_INVALID_ARGUMENT);
        }

#if     TASK_SWAPPER
        /*
         * If task is not resident (swapped out, or being swapped
         * out), we want to bring it back in and make it unswappable.
         * This can block, so do it early.
         */
        task_swapin(task, TRUE);        /* TRUE means make it unswappable */
#endif  /* TASK_SWAPPER */

        task_lock(task);

        if (!task->active || !cur_thr_act->active) {
                /*
                 * Task or current thread is already being terminated.
                 * Hurry up and return out of the current kernel context
                 * so that we run our AST special handler to terminate
                 * ourselves.
                 */
                task_unlock(task);
                return(KERN_FAILURE);
        }

        if (task->thr_act_count > 1) {
                /*
                 * Mark all the threads to keep them from starting any more
                 * user-level execution.  The thread_terminate_internal code
                 * would do this on a thread by thread basis anyway, but this
                 * gives us a better chance of not having to wait there.
                 */
                task_hold_locked(task);

                /*
                 * Terminate all the other activations in the task.
                 *
                 * Each terminated activation will run its special handler
                 * when its current kernel context is unwound.  That will
                 * clean up most of the thread resources.  Then it will be
                 * handed over to the reaper, who will finally remove the
                 * thread from the task list and free the structures.
                 */
                queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
                        if (thr_act != cur_thr_act)
                                thread_terminate_internal(thr_act);
                }
                task_release_locked(task);
        }

        /*
         * If the current thread has any virtual machine state
         * associated with it, we need to explicitly clean that
         * up now (because we did not terminate the current act)
         * before we try to clean up the task VM and port spaces.
         */
        act_virtual_machine_destroy(cur_thr_act);

        task_unlock(task);

        /*
         * Destroy all synchronizers owned by the task.
         */
        task_synchronizer_destroy_all(task);

        /*
         * Destroy the contents of the IPC space, leaving just
         * a reference for it.
         */
        if (!task->kernel_loaded)
                ipc_space_clean(task->itk_space);

        /*
         * Clean out the address space, as we are going to be
         * getting a new one.
         */
        (void) vm_map_remove(task->map,
                             task->map->min_offset,
                             task->map->max_offset, VM_MAP_NO_FLAGS);

        return KERN_SUCCESS;
}

/*
 *      task_hold_locked:
 *
 *      Suspend execution of the specified task.
 *      This is a recursive-style suspension of the task, a count of
 *      suspends is maintained.
 *
 * CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
        register task_t         task)
{
        register thread_act_t   thr_act;

        assert(task->active);

        if (task->suspend_count++ > 0)
                return;

        /*
         * Iterate through all the thread_acts and hold them.
         */
        queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
                act_lock_thread(thr_act);
                thread_hold(thr_act);
                act_unlock_thread(thr_act);
        }
}

/*
 *      task_hold:
 *
 *      Same as the internal routine above, except that it must lock
 *      and verify that the task is active.  This differs from task_suspend
 *      in that it places a kernel hold on the task rather than just a
 *      user-level hold.  This keeps users from over-resuming and setting
 *      it running out from under the kernel.
 *
 * CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(task_t task)
{
        kern_return_t kret;

        if (task == TASK_NULL)
                return (KERN_INVALID_ARGUMENT);
        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return (KERN_FAILURE);
        }
        task_hold_locked(task);
        task_unlock(task);

        return(KERN_SUCCESS);
}

/*
 *      Routine:        task_wait_locked
 *      Wait for all threads in task to stop.
 *
 * Conditions:
 *      Called with task locked, active, and held.
 */
void
task_wait_locked(
        register task_t         task)
{
        register thread_act_t   thr_act, cur_thr_act;

        assert(task->active);
        assert(task->suspend_count > 0);

        cur_thr_act = current_act();
        /*
         * Iterate through all the threads and wait for them to
         * stop.  Do not wait for the current thread if it is within
         * the task.
         */
        queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
                if (thr_act != cur_thr_act) {
                        thread_shuttle_t thr_shuttle;

                        thr_shuttle = act_lock_thread(thr_act);
                        thread_wait(thr_shuttle);
                        act_unlock_thread(thr_act);
                }
        }
}

/*
 *      task_release_locked:
 *
 *      Release a kernel hold on a task.
 *
 * CONDITIONS: the task is locked and active
 */
void
task_release_locked(
        register task_t         task)
{
        register thread_act_t   thr_act;

        assert(task->active);
        assert(task->suspend_count > 0);

        if (--task->suspend_count > 0)
                return;

        /*
         * Iterate through all the thread_acts and release them.
         */
        queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
                act_lock_thread(thr_act);
                thread_release(thr_act);
                act_unlock_thread(thr_act);
        }
}

/*
 *      task_release:
 *
 *      Same as the internal routine above, except that it must lock
 *      and verify that the task is active.
 *
 * CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(task_t task)
{
        kern_return_t kret;

        if (task == TASK_NULL)
                return (KERN_INVALID_ARGUMENT);
        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return (KERN_FAILURE);
        }
        task_release_locked(task);
        task_unlock(task);

        return(KERN_SUCCESS);
}

kern_return_t
task_threads(
        task_t                  task,
        thread_act_array_t      *thr_act_list,
        mach_msg_type_number_t  *count)
{
        unsigned int            actual; /* this many thr_acts */
        thread_act_t            thr_act;
        thread_act_t            *thr_acts;
        thread_t                thread;
        int                     i, j;

        vm_size_t size, size_needed;
        vm_offset_t addr;

        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;

        size = 0; addr = 0;

        for (;;) {
                task_lock(task);
                if (!task->active) {
                        task_unlock(task);
                        if (size != 0)
                                kfree(addr, size);
                        return KERN_FAILURE;
                }

                actual = task->thr_act_count;

                /* do we have the memory we need? */
                size_needed = actual * sizeof(mach_port_t);
                if (size_needed <= size)
                        break;

                /* unlock the task and allocate more memory */
                task_unlock(task);

                if (size != 0)
                        kfree(addr, size);

                assert(size_needed > 0);
                size = size_needed;

                addr = kalloc(size);
                if (addr == 0)
                        return KERN_RESOURCE_SHORTAGE;
        }

        /* OK, have memory and the task is locked & active */
        thr_acts = (thread_act_t *) addr;

        for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
             i < actual;
             i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
                act_lock(thr_act);
                if (thr_act->ref_count > 0) {
                        act_locked_act_reference(thr_act);
                        thr_acts[j++] = thr_act;
                }
                act_unlock(thr_act);
        }
        assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));

        actual = j;
        size_needed = actual * sizeof(mach_port_t);

        /* can unlock task now that we've got the thr_act refs */
        task_unlock(task);

        if (actual == 0) {
                /* no thr_acts, so return null pointer and deallocate memory */

                *thr_act_list = 0;
                *count = 0;

                if (size != 0)
                        kfree(addr, size);
        } else {
                /* if we allocated too much, must copy */

                if (size_needed < size) {
                        vm_offset_t newaddr;

                        newaddr = kalloc(size_needed);
                        if (newaddr == 0) {
                                for (i = 0; i < actual; i++)
                                        act_deallocate(thr_acts[i]);
                                kfree(addr, size);
                                return KERN_RESOURCE_SHORTAGE;
                        }

                        bcopy((char *) addr, (char *) newaddr, size_needed);
                        kfree(addr, size);
                        thr_acts = (thread_act_t *) newaddr;
                }

                *thr_act_list = thr_acts;
                *count = actual;

                /* do the conversion that Mig should handle */

                for (i = 0; i < actual; i++)
                        ((ipc_port_t *) thr_acts)[i] =
                                convert_act_to_port(thr_acts[i]);
        }

        return KERN_SUCCESS;
}
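
/*
 * Sketch of the matching user-space cleanup (via the MIG-generated
 * task_threads() stub): the caller receives one send right per thread
 * plus an out-of-line array, and is expected to release both:
 *
 *      thread_act_array_t acts;
 *      mach_msg_type_number_t n, i;
 *      if (task_threads(task, &acts, &n) == KERN_SUCCESS) {
 *              for (i = 0; i < n; i++)
 *                      mach_port_deallocate(mach_task_self(), acts[i]);
 *              vm_deallocate(mach_task_self(), (vm_address_t)acts,
 *                            n * sizeof(acts[0]));
 *      }
 */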

/*
 *      Routine:        task_suspend
 *      Implement a user-level suspension on a task.
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
kern_return_t
task_suspend(
        register task_t         task)
{
        if (task == TASK_NULL)
                return (KERN_INVALID_ARGUMENT);

        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return (KERN_FAILURE);
        }
        if ((task->user_stop_count)++ > 0) {
                /*
                 * If the stop count was positive, the task is
                 * already stopped and we can exit.
                 */
                task_unlock(task);
                return (KERN_SUCCESS);
        }

        /*
         * Put a kernel-level hold on the threads in the task (all
         * user-level task suspensions added together represent a
         * single kernel-level hold).  We then wait for the threads
         * to stop executing user code.
         */
        task_hold_locked(task);
        task_wait_locked(task);
        task_unlock(task);
        return (KERN_SUCCESS);
}

/*
 *      Routine:        task_resume
 *      Release a kernel hold on a task.
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
kern_return_t
task_resume(register task_t task)
{
        register boolean_t      release;

        if (task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);

        release = FALSE;
        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return(KERN_FAILURE);
        }
        if (task->user_stop_count > 0) {
                if (--(task->user_stop_count) == 0)
                        release = TRUE;
        }
        else {
                task_unlock(task);
                return(KERN_FAILURE);
        }

        /*
         * Release the task if necessary.
         */
        if (release)
                task_release_locked(task);

        task_unlock(task);
        return(KERN_SUCCESS);
}
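
/*
 * Illustrative sketch of the counting semantics above: user-level
 * suspensions nest, so every task_suspend() must be balanced by a
 * task_resume() before the task runs again:
 *
 *      task_suspend(target);   -- stop count 0 -> 1: threads held
 *      task_suspend(target);   -- stop count 1 -> 2: already stopped
 *      task_resume(target);    -- stop count 2 -> 1: still stopped
 *      task_resume(target);    -- stop count 1 -> 0: threads released
 */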

kern_return_t
host_security_set_task_token(
        host_security_t         host_security,
        task_t                  task,
        security_token_t        sec_token,
        host_priv_t             host_priv)
{
        kern_return_t           kr;

        if (task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);

        if (host_security == HOST_NULL)
                return(KERN_INVALID_SECURITY);

        task_lock(task);
        task->sec_token = sec_token;
        task_unlock(task);

        if (host_priv != HOST_PRIV_NULL) {
                kr = task_set_special_port(task,
                                TASK_HOST_PORT,
                                ipc_port_make_send(realhost.host_priv_self));
        } else {
                kr = task_set_special_port(task,
                                TASK_HOST_PORT,
                                ipc_port_make_send(realhost.host_self));
        }
        return(kr);
}

/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
        task_t          task,
        ledger_t        wired,
        ledger_t        paged)
{
        if (task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);

        task_lock(task);
        if (wired) {
                ipc_port_release_send(task->wired_ledger_port);
                task->wired_ledger_port = ledger_copy(wired);
        }
        if (paged) {
                ipc_port_release_send(task->paged_ledger_port);
                task->paged_ledger_port = ledger_copy(paged);
        }
        task_unlock(task);

        return(KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
        task_t                  task,
        task_flavor_t           flavor,
        task_info_t             task_info_in,   /* pointer to IN array */
        mach_msg_type_number_t  task_info_count)
{
        vm_map_t        map;

        if (task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);

        switch (flavor) {
        default:
                return (KERN_INVALID_ARGUMENT);
        }
        return (KERN_SUCCESS);
}

kern_return_t
task_info(
        task_t                  task,
        task_flavor_t           flavor,
        task_info_t             task_info_out,
        mach_msg_type_number_t  *task_info_count)
{
        thread_t        thread;
        vm_map_t        map;

        if (task == TASK_NULL)
                return(KERN_INVALID_ARGUMENT);

        switch (flavor) {

        case TASK_BASIC_INFO:
        {
                register task_basic_info_t      basic_info;

                if (*task_info_count < TASK_BASIC_INFO_COUNT) {
                        return(KERN_INVALID_ARGUMENT);
                }

                basic_info = (task_basic_info_t) task_info_out;

                map = (task == kernel_task) ? kernel_map : task->map;

                basic_info->virtual_size = map->size;
                basic_info->resident_size = pmap_resident_count(map->pmap)
                                                                * PAGE_SIZE;

                task_lock(task);
                basic_info->policy = ((task != kernel_task)?
                                        POLICY_TIMESHARE: POLICY_RR);
                basic_info->suspend_count = task->user_stop_count;
                basic_info->user_time.seconds
                                = task->total_user_time.seconds;
                basic_info->user_time.microseconds
                                = task->total_user_time.microseconds;
                basic_info->system_time.seconds
                                = task->total_system_time.seconds;
                basic_info->system_time.microseconds
                                = task->total_system_time.microseconds;
                task_unlock(task);

                *task_info_count = TASK_BASIC_INFO_COUNT;
                break;
        }

        case TASK_THREAD_TIMES_INFO:
        {
                register task_thread_times_info_t times_info;
                register thread_t               thread;
                register thread_act_t           thr_act;

                if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
                        return (KERN_INVALID_ARGUMENT);
                }

                times_info = (task_thread_times_info_t) task_info_out;
                times_info->user_time.seconds = 0;
                times_info->user_time.microseconds = 0;
                times_info->system_time.seconds = 0;
                times_info->system_time.microseconds = 0;

                task_lock(task);
                queue_iterate(&task->thr_acts, thr_act,
                              thread_act_t, thr_acts)
                {
                        time_value_t    user_time, system_time;
                        spl_t           s;

                        thread = act_lock_thread(thr_act);

                        /* JMM - add logic to skip threads that have migrated
                         * into this task?
                         */

                        assert(thread);         /* Must have thread */
                        s = splsched();
                        thread_lock(thread);

                        thread_read_times(thread, &user_time, &system_time);

                        thread_unlock(thread);
                        splx(s);
                        act_unlock_thread(thr_act);

                        time_value_add(&times_info->user_time, &user_time);
                        time_value_add(&times_info->system_time, &system_time);
                }
                task_unlock(task);

                *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
                break;
        }

        case TASK_SCHED_FIFO_INFO:
        {

                if (*task_info_count < POLICY_FIFO_BASE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                return(KERN_INVALID_POLICY);
        }

        case TASK_SCHED_RR_INFO:
        {
                register policy_rr_base_t       rr_base;

                if (*task_info_count < POLICY_RR_BASE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                rr_base = (policy_rr_base_t) task_info_out;

                task_lock(task);
                if (task != kernel_task) {
                        task_unlock(task);
                        return(KERN_INVALID_POLICY);
                }

                rr_base->base_priority = task->priority;
                task_unlock(task);

                rr_base->quantum = tick / 1000;

                *task_info_count = POLICY_RR_BASE_COUNT;
                break;
        }

        case TASK_SCHED_TIMESHARE_INFO:
        {
                register policy_timeshare_base_t        ts_base;

                if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
                        return(KERN_INVALID_ARGUMENT);

                ts_base = (policy_timeshare_base_t) task_info_out;

                task_lock(task);
                if (task == kernel_task) {
                        task_unlock(task);
                        return(KERN_INVALID_POLICY);
                }

                ts_base->base_priority = task->priority;
                task_unlock(task);

                *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
                break;
        }

        case TASK_SECURITY_TOKEN:
        {
                register security_token_t       *sec_token_p;

                if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
                        return(KERN_INVALID_ARGUMENT);
                }

                sec_token_p = (security_token_t *) task_info_out;

                task_lock(task);
                *sec_token_p = task->sec_token;
                task_unlock(task);

                *task_info_count = TASK_SECURITY_TOKEN_COUNT;
                break;
        }

        case TASK_SCHED_INFO:
                return(KERN_INVALID_ARGUMENT);

        case TASK_EVENTS_INFO:
        {
                register task_events_info_t     events_info;

                if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
                        return(KERN_INVALID_ARGUMENT);
                }

                events_info = (task_events_info_t) task_info_out;

                task_lock(task);
                events_info->faults = task->faults;
                events_info->pageins = task->pageins;
                events_info->cow_faults = task->cow_faults;
                events_info->messages_sent = task->messages_sent;
                events_info->messages_received = task->messages_received;
                events_info->syscalls_mach = task->syscalls_mach;
                events_info->syscalls_unix = task->syscalls_unix;
                events_info->csw = task->csw;
                task_unlock(task);

                *task_info_count = TASK_EVENTS_INFO_COUNT;
                break;
        }

        default:
                return (KERN_INVALID_ARGUMENT);
        }

        return(KERN_SUCCESS);
}
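
/*
 * Illustrative caller sketch for the TASK_BASIC_INFO flavor above (the
 * count must be initialized to the buffer size before the call, and is
 * updated to the amount actually returned):
 *
 *      task_basic_info_data_t info;
 *      mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
 *      if (task_info(task, TASK_BASIC_INFO,
 *                    (task_info_t)&info, &count) == KERN_SUCCESS) {
 *              ...info.resident_size, info.user_time, etc. are valid...
 *      }
 */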

/*
 *      task_assign:
 *
 *      Change the assigned processor set for the task.
 *      Not currently supported: always returns KERN_FAILURE.
 */
kern_return_t
task_assign(
        task_t          task,
        processor_set_t new_pset,
        boolean_t       assign_threads)
{
#ifdef  lint
        task++; new_pset++; assign_threads++;
#endif  /* lint */
        return(KERN_FAILURE);
}

/*
 *      task_assign_default:
 *
 *      Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
        task_t          task,
        boolean_t       assign_threads)
{
        return (task_assign(task, &default_pset, assign_threads));
}

/*
 *      task_get_assignment
 *
 *      Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
        task_t          task,
        processor_set_t *pset)
{
        if (!task->active)
                return(KERN_FAILURE);

        *pset = task->processor_set;
        pset_reference(*pset);
        return(KERN_SUCCESS);
}

/*
 *      task_policy
 *
 *      Set scheduling policy and parameters, both base and limit, for
 *      the given task.  Policy must be a policy which is enabled for the
 *      processor set.  Change contained threads if requested.
 *      Not currently supported: always returns KERN_FAILURE.
 */
kern_return_t
task_policy(
        task_t                  task,
        policy_t                policy_id,
        policy_base_t           base,
        mach_msg_type_number_t  count,
        boolean_t               set_limit,
        boolean_t               change)
{
        return(KERN_FAILURE);
}

/*
 *      task_set_policy
 *
 *      Set scheduling policy and parameters, both base and limit, for
 *      the given task.  Policy can be any policy implemented by the
 *      processor set, whether enabled or not.  Change contained threads
 *      if requested.
 *      Not currently supported: always returns KERN_FAILURE.
 */
kern_return_t
task_set_policy(
        task_t                  task,
        processor_set_t         pset,
        policy_t                policy_id,
        policy_base_t           base,
        mach_msg_type_number_t  base_count,
        policy_limit_t          limit,
        mach_msg_type_number_t  limit_count,
        boolean_t               change)
{
        return(KERN_FAILURE);
}

/*
 *      task_collect_scan:
 *
 *      Attempt to free resources owned by tasks.
 */
void
task_collect_scan(void)
{
        register task_t         task, prev_task;
        processor_set_t         pset = &default_pset;

        pset_lock(pset);
        pset->ref_count++;
        task = (task_t) queue_first(&pset->tasks);
        while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
                task_lock(task);
                if (task->ref_count > 0) {

                        task_reference_locked(task);
                        task_unlock(task);

#if     MACH_HOST
                        /*
                         * While we still have the pset locked, freeze the
                         * task in this pset.  That way, when we get back
                         * from collecting it, we can dereference the
                         * pset_tasks chain for the task and be assured
                         * that we are still in this chain.
                         */
                        task_freeze(task);
#endif

                        pset_unlock(pset);

                        pmap_collect(task->map->pmap);

                        pset_lock(pset);
                        prev_task = task;
                        task = (task_t) queue_next(&task->pset_tasks);

#if     MACH_HOST
                        task_unfreeze(prev_task);
#endif

                        task_deallocate(prev_task);
                } else {
                        task_unlock(task);
                        task = (task_t) queue_next(&task->pset_tasks);
                }
        }

        pset_unlock(pset);

        pset_deallocate(pset);
}

/* Also disabled in vm/vm_pageout.c */
boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;             /* in ticks */

/*
 *      consider_task_collect:
 *
 *      Called by the pageout daemon when the system needs more free pages.
 */
void
consider_task_collect(void)
{
        /*
         * By default, don't attempt task collection more frequently
         * than once per second.
         */

        if (task_collect_max_rate == 0)
                task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;

        if (task_collect_allowed &&
            (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
                task_collect_last_tick = sched_tick;
                task_collect_scan();
        }
}
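
/*
 * Worked example of the rate limit above, assuming SCHED_TICK_SHIFT is
 * 3 (so sched_tick advances (1 << 3) = 8 times per second): the default
 * task_collect_max_rate becomes (1 << 3) + 1 = 9 ticks, i.e. a scan is
 * attempted at most roughly once every 1.1 seconds.
 */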

kern_return_t
task_set_ras_pc(
        task_t          task,
        vm_offset_t     pc,
        vm_offset_t     endpc)
{
#if     FAST_TAS
        extern int fast_tas_debug;

        if (fast_tas_debug) {
                printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
                       task, pc, endpc);
        }
        task_lock(task);
        task->fast_tas_base = pc;
        task->fast_tas_end = endpc;
        task_unlock(task);
        return KERN_SUCCESS;

#else   /* FAST_TAS */
#ifdef  lint
        task++;
        pc++;
        endpc++;
#endif  /* lint */

        return KERN_FAILURE;

#endif  /* FAST_TAS */
}

void
task_synchronizer_destroy_all(task_t task)
{
        semaphore_t     semaphore;
        lock_set_t      lock_set;

        /*
         * Destroy owned semaphores
         */

        while (!queue_empty(&task->semaphore_list)) {
                semaphore = (semaphore_t) queue_first(&task->semaphore_list);
                (void) semaphore_destroy(task, semaphore);
        }

        /*
         * Destroy owned lock sets
         */

        while (!queue_empty(&task->lock_set_list)) {
                lock_set = (lock_set_t) queue_first(&task->lock_set_list);
                (void) lock_set_destroy(task, lock_set);
        }
}

/*
 *      task_set_port_space:
 *
 *      Set port name space of task to specified size.
 */
kern_return_t
task_set_port_space(
        task_t          task,
        int             table_entries)
{
        kern_return_t   kr;

        is_write_lock(task->itk_space);
        kr = ipc_entry_grow_table(task->itk_space, table_entries);
        /*
         * ipc_entry_grow_table() returns with the space unlocked on
         * failure, so only the success path needs to unlock here.
         */
        if (kr == KERN_SUCCESS)
                is_write_unlock(task->itk_space);
        return kr;
}
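
/*
 * Illustrative sketch of the routine above: a server that expects to
 * hold many port rights might grow its IPC entry table up front to
 * avoid repeated incremental expansion:
 *
 *      if (task_set_port_space(task, 4096) != KERN_SUCCESS)
 *              ...the name space keeps its previous size...
 */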

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
        if (t == kernel_task)
                return (TRUE);
        else
                return (t->kernel_loaded);
}

#undef current_task
task_t current_task(void)
{
        return (current_task_fast());
}