1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 * File: kern/task.c
52 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
53 * David Black
54 *
55 * Task management primitives implementation.
56 */
57 /*
58 * Copyright (c) 1993 The University of Utah and
59 * the Computer Systems Laboratory (CSL). All rights reserved.
60 *
61 * Permission to use, copy, modify and distribute this software and its
62 * documentation is hereby granted, provided that both the copyright
63 * notice and this permission notice appear in all copies of the
64 * software, derivative works or modified versions, and any portions
65 * thereof, and that both notices appear in supporting documentation.
66 *
67 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
68 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
69 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
70 *
71 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
72 * improvements that they make and grant CSL redistribution rights.
73 *
74 */
75
76 #include <mach_kdb.h>
77 #include <mach_host.h>
78 #include <mach_prof.h>
79 #include <fast_tas.h>
80 #include <task_swapper.h>
81 #include <platforms.h>
82
83 #include <mach/boolean.h>
84 #include <mach/machine/vm_types.h>
85 #include <mach/vm_param.h>
86 #include <mach/semaphore.h>
87 #include <mach/task_info.h>
88 #include <mach/task_special_ports.h>
89 #include <mach/mach_types.h>
90 #include <ipc/ipc_space.h>
91 #include <ipc/ipc_entry.h>
92 #include <kern/mach_param.h>
93 #include <kern/misc_protos.h>
94 #include <kern/task.h>
95 #include <kern/thread.h>
96 #include <kern/zalloc.h>
97 #include <kern/kalloc.h>
98 #include <kern/processor.h>
99 #include <kern/sched_prim.h> /* for thread_wakeup */
100 #include <kern/ipc_tt.h>
101 #include <kern/ledger.h>
102 #include <kern/host.h>
103 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
104 #include <kern/profile.h>
105 #include <kern/assert.h>
106 #include <kern/sync_lock.h>
107 #if MACH_KDB
108 #include <ddb/db_sym.h>
109 #endif /* MACH_KDB */
110
111 #if TASK_SWAPPER
112 #include <kern/task_swap.h>
113 #endif /* TASK_SWAPPER */
114
115 /*
116 * Exported interfaces
117 */
118
119 #include <mach/task_server.h>
120 #include <mach/mach_host_server.h>
121 #include <mach/host_security_server.h>
122 #include <vm/task_working_set.h>
123
124 task_t kernel_task;
125 zone_t task_zone;
126
127 /* Forwards */
128
129 void task_hold_locked(
130 task_t task);
131 void task_wait_locked(
132 task_t task);
133 void task_release_locked(
134 task_t task);
135 void task_collect_scan(void);
136 void task_free(
137 task_t task );
138 void task_synchronizer_destroy_all(
139 task_t task);
140
141 kern_return_t task_set_ledger(
142 task_t task,
143 ledger_t wired,
144 ledger_t paged);
145
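/*
 * task_init:
 *
 * Initialize the task module: create the zone from which task
 * structures are allocated, run eml_init(), and create the kernel
 * task as the first task, substituting kernel_map for its map.
 */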
146 void
147 task_init(void)
148 {
149 task_zone = zinit(
150 sizeof(struct task),
151 TASK_MAX * sizeof(struct task),
152 TASK_CHUNK * sizeof(struct task),
153 "tasks");
154
155 eml_init();
156
157 /*
158 * Create the kernel task as the first task.
159 * Task_create_local must assign to kernel_task as a side effect,
160 * for other initialization. (:-()
161 */
162 if (task_create_local(
163 TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
164 panic("task_init\n");
165 vm_map_deallocate(kernel_task->map);
166 kernel_task->map = kernel_map;
167
168 #if MACH_ASSERT
169 if (watchacts & WA_TASK)
170 printf("task_init: kernel_task = %x map=%x\n",
171 kernel_task, kernel_map);
172 #endif /* MACH_ASSERT */
173 }
174
175 #if MACH_HOST
176
177 #if 0
178 static void
179 task_freeze(
180 task_t task)
181 {
182 task_lock(task);
183 /*
184 * If may_assign is false, task is already being assigned,
185 * wait for that to finish.
186 */
187 while (task->may_assign == FALSE) {
188 wait_result_t res;
189
190 task->assign_active = TRUE;
191 res = thread_sleep_mutex((event_t) &task->assign_active,
192 &task->lock, THREAD_UNINT);
193 assert(res == THREAD_AWAKENED);
194 }
195 task->may_assign = FALSE;
196 task_unlock(task);
197 return;
198 }
199 #else
200 #define task_freeze(task) assert(task->processor_set == &default_pset)
201 #endif
202
203 #if 0
204 static void
205 task_unfreeze(
206 task_t task)
207 {
208 task_lock(task);
209 assert(task->may_assign == FALSE);
210 task->may_assign = TRUE;
211 if (task->assign_active == TRUE) {
212 task->assign_active = FALSE;
213 thread_wakeup((event_t)&task->assign_active);
214 }
215 task_unlock(task);
216 return;
217 }
218 #else
219 #define task_unfreeze(task) assert(task->processor_set == &default_pset)
220 #endif
221
222 #endif /* MACH_HOST */
223
224 /*
225 * Create a task running in the kernel address space. It may
226 * have its own map of size mem_size and may have ipc privileges.
227 */
228 kern_return_t
229 kernel_task_create(
230 task_t parent_task,
231 vm_offset_t map_base,
232 vm_size_t map_size,
233 task_t *child_task)
234 {
235 kern_return_t result;
236 task_t new_task;
237 vm_map_t old_map;
238
239 /*
240 * Create the task.
241 */
242 result = task_create_local(parent_task, FALSE, TRUE, &new_task);
243 if (result != KERN_SUCCESS)
244 return (result);
245
246 /*
247 * Task_create_local creates the task with a user-space map.
248 * We attempt to replace the map and free it afterwards; else
249 * task_deallocate will free it (can NOT set map to null before
250 * task_deallocate, this impersonates a norma placeholder task).
251 * _Mark the memory as pageable_ -- this is what we
252 * want for images (like servers) loaded into the kernel.
253 */
254 if (map_size == 0) {
255 vm_map_deallocate(new_task->map);
256 new_task->map = kernel_map;
257 *child_task = new_task;
258 } else {
259 old_map = new_task->map;
260 if ((result = kmem_suballoc(kernel_map, &map_base,
261 map_size, TRUE, FALSE,
262 &new_task->map)) != KERN_SUCCESS) {
263 /*
264 * New task created with ref count of 2 -- decrement by
265 * one to force task deletion.
266 */
267 printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
268 kernel_map, map_base, map_size);
269 --new_task->ref_count;
270 task_deallocate(new_task);
271 return (result);
272 }
273 vm_map_deallocate(old_map);
274 *child_task = new_task;
275 }
276 return (KERN_SUCCESS);
277 }
278
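/*
 * task_create:
 *
 * Exported task creation interface.  Requires a valid parent task;
 * the ledger arguments are currently unused and the child is
 * created via task_create_local with kernel_loaded FALSE.
 */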
279 kern_return_t
280 task_create(
281 task_t parent_task,
282 ledger_port_array_t ledger_ports,
283 mach_msg_type_number_t num_ledger_ports,
284 boolean_t inherit_memory,
285 task_t *child_task) /* OUT */
286 {
287 if (parent_task == TASK_NULL)
288 return(KERN_INVALID_ARGUMENT);
289
290 return task_create_local(
291 parent_task, inherit_memory, FALSE, child_task);
292 }
293
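/*
 * host_security_create_task_token:
 *
 * Create a task as task_create does, then stamp it with the
 * supplied security token via host_security_set_task_token.
 * Requires the host security port.
 */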
294 kern_return_t
295 host_security_create_task_token(
296 host_security_t host_security,
297 task_t parent_task,
298 security_token_t sec_token,
299 host_priv_t host_priv,
300 ledger_port_array_t ledger_ports,
301 mach_msg_type_number_t num_ledger_ports,
302 boolean_t inherit_memory,
303 task_t *child_task) /* OUT */
304 {
305 kern_return_t result;
306
307 if (parent_task == TASK_NULL)
308 return(KERN_INVALID_ARGUMENT);
309
310 if (host_security == HOST_NULL)
311 return(KERN_INVALID_SECURITY);
312
313 result = task_create_local(
314 parent_task, inherit_memory, FALSE, child_task);
315
316 if (result != KERN_SUCCESS)
317 return(result);
318
319 result = host_security_set_task_token(host_security,
320 *child_task,
321 sec_token,
322 host_priv);
323
324 if (result != KERN_SUCCESS)
325 return(result);
326
327 return(result);
328 }
329
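/*
 * task_create_local:
 *
 * Common task creation path.  Allocate and initialize the task
 * structure, create (or fork) the address space, inherit the
 * parent's ledgers, security token and shared region when a parent
 * is supplied, and add the task to the parent's processor set.
 */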
330 kern_return_t
331 task_create_local(
332 task_t parent_task,
333 boolean_t inherit_memory,
334 boolean_t kernel_loaded,
335 task_t *child_task) /* OUT */
336 {
337 task_t new_task;
338 processor_set_t pset;
339
340 new_task = (task_t) zalloc(task_zone);
341
342 if (new_task == TASK_NULL)
343 return(KERN_RESOURCE_SHORTAGE);
344
345 /* one ref for just being alive; one for our caller */
346 new_task->ref_count = 2;
347
348 if (inherit_memory)
349 new_task->map = vm_map_fork(parent_task->map);
350 else
351 new_task->map = vm_map_create(pmap_create(0),
352 round_page(VM_MIN_ADDRESS),
353 trunc_page(VM_MAX_ADDRESS), TRUE);
354
355 mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
356 queue_init(&new_task->thr_acts);
357 new_task->suspend_count = 0;
358 new_task->thr_act_count = 0;
359 new_task->res_act_count = 0;
360 new_task->active_act_count = 0;
361 new_task->user_stop_count = 0;
362 new_task->role = TASK_UNSPECIFIED;
363 new_task->active = TRUE;
364 new_task->kernel_loaded = kernel_loaded;
365 new_task->user_data = 0;
366 new_task->faults = 0;
367 new_task->cow_faults = 0;
368 new_task->pageins = 0;
369 new_task->messages_sent = 0;
370 new_task->messages_received = 0;
371 new_task->syscalls_mach = 0;
372 new_task->syscalls_unix=0;
373 new_task->csw=0;
374 new_task->dynamic_working_set = 0;
375
376 task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
377 0, TWS_HASH_STYLE_DEFAULT);
378
379 #ifdef MACH_BSD
380 new_task->bsd_info = 0;
381 #endif /* MACH_BSD */
382
383 #if TASK_SWAPPER
384 new_task->swap_state = TASK_SW_IN;
385 new_task->swap_flags = 0;
386 new_task->swap_ast_waiting = 0;
387 new_task->swap_stamp = sched_tick;
388 new_task->swap_rss = 0;
389 new_task->swap_nswap = 0;
390 #endif /* TASK_SWAPPER */
391
392 queue_init(&new_task->semaphore_list);
393 queue_init(&new_task->lock_set_list);
394 new_task->semaphores_owned = 0;
395 new_task->lock_sets_owned = 0;
396
397 #if MACH_HOST
398 new_task->may_assign = TRUE;
399 new_task->assign_active = FALSE;
400 #endif /* MACH_HOST */
401 eml_task_reference(new_task, parent_task);
402
403 ipc_task_init(new_task, parent_task);
404
405 new_task->total_user_time.seconds = 0;
406 new_task->total_user_time.microseconds = 0;
407 new_task->total_system_time.seconds = 0;
408 new_task->total_system_time.microseconds = 0;
409
410 task_prof_init(new_task);
411
412 if (parent_task != TASK_NULL) {
413 #if MACH_HOST
414 /*
415 * Freeze the parent, so that parent_task->processor_set
416 * cannot change.
417 */
418 task_freeze(parent_task);
419 #endif /* MACH_HOST */
420 pset = parent_task->processor_set;
421 if (!pset->active)
422 pset = &default_pset;
423
424 new_task->sec_token = parent_task->sec_token;
425
426 shared_region_mapping_ref(parent_task->system_shared_region);
427 new_task->system_shared_region = parent_task->system_shared_region;
428
429 new_task->wired_ledger_port = ledger_copy(
430 convert_port_to_ledger(parent_task->wired_ledger_port));
431 new_task->paged_ledger_port = ledger_copy(
432 convert_port_to_ledger(parent_task->paged_ledger_port));
433 }
434 else {
435 pset = &default_pset;
436
437 new_task->sec_token = KERNEL_SECURITY_TOKEN;
438 new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
439 new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
440 }
441
442 if (kernel_task == TASK_NULL) {
443 new_task->priority = MINPRI_KERNEL;
444 new_task->max_priority = MAXPRI_KERNEL;
445 }
446 else {
447 new_task->priority = BASEPRI_DEFAULT;
448 new_task->max_priority = MAXPRI_USER;
449 }
450
451 pset_lock(pset);
452 pset_add_task(pset, new_task);
453 pset_unlock(pset);
454 #if MACH_HOST
455 if (parent_task != TASK_NULL)
456 task_unfreeze(parent_task);
457 #endif /* MACH_HOST */
458
459 #if FAST_TAS
460 if (inherit_memory) {
461 new_task->fast_tas_base = parent_task->fast_tas_base;
462 new_task->fast_tas_end = parent_task->fast_tas_end;
463 } else {
464 new_task->fast_tas_base = (vm_offset_t)0;
465 new_task->fast_tas_end = (vm_offset_t)0;
466 }
467 #endif /* FAST_TAS */
468
469 ipc_task_enable(new_task);
470
471 #if TASK_SWAPPER
472 task_swapout_eligible(new_task);
473 #endif /* TASK_SWAPPER */
474
475 #if MACH_ASSERT
476 if (watchacts & WA_TASK)
477 printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
478 parent_task, inherit_memory, new_task);
479 #endif /* MACH_ASSERT */
480
481 *child_task = new_task;
482 return(KERN_SUCCESS);
483 }
484
485 /*
486 * task_deallocate
487 *
488 * Drop a reference on a task
489 * Task is locked.
490 */
491 void
492 task_deallocate(
493 task_t task)
494 {
495 processor_set_t pset;
496 int refs;
497
498 if (task == TASK_NULL)
499 return;
500
501 task_lock(task);
502 refs = --task->ref_count;
503 task_unlock(task);
504
505 if (refs > 0)
506 return;
507
508 #if TASK_SWAPPER
509 /* task_terminate guarantees that this task is off the list */
510 assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
511 #endif /* TASK_SWAPPER */
512
513 eml_task_deallocate(task);
514
515 ipc_task_terminate(task);
516
517 #if MACH_HOST
518 task_freeze(task);
519 #endif
520
521 pset = task->processor_set;
522 pset_lock(pset);
523 pset_remove_task(pset,task);
524 pset_unlock(pset);
525 pset_deallocate(pset);
526
527 #if MACH_HOST
528 task_unfreeze(task);
529 #endif
530
531 if (task->kernel_loaded)
532 vm_map_remove(kernel_map, task->map->min_offset,
533 task->map->max_offset, VM_MAP_NO_FLAGS);
534 vm_map_deallocate(task->map);
535 is_release(task->itk_space);
536 task_prof_deallocate(task);
537 zfree(task_zone, (vm_offset_t) task);
538 }
539
540
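/*
 * task_reference:
 *
 * Take an additional reference on a task; a no-op for TASK_NULL.
 */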
541 void
542 task_reference(
543 task_t task)
544 {
545 if (task != TASK_NULL) {
546 task_lock(task);
547 task->ref_count++;
548 task_unlock(task);
549 }
550 }
551
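/*
 * task_reference_try:
 *
 * As task_reference, but only if the task lock can be acquired
 * without blocking.  Returns TRUE if the reference was taken.
 */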
552 boolean_t
553 task_reference_try(
554 task_t task)
555 {
556 if (task != TASK_NULL) {
557 if (task_lock_try(task)) {
558 task->ref_count++;
559 task_unlock(task);
560 return TRUE;
561 }
562 }
563 return FALSE;
564 }
565
566 /*
567 * task_terminate:
568 *
569 * Terminate the specified task. See comments on thread_terminate
570 * (kern/thread.c) about problems with terminating the "current task."
571 */
572
573 kern_return_t
574 task_terminate(
575 task_t task)
576 {
577 if (task == TASK_NULL)
578 return(KERN_INVALID_ARGUMENT);
579 if (task->bsd_info)
580 return(KERN_FAILURE);
581 return (task_terminate_internal(task));
582 }
583
584 kern_return_t
585 task_terminate_internal(
586 task_t task)
587 {
588 thread_act_t thr_act, cur_thr_act;
589 task_t cur_task;
590 boolean_t interrupt_save;
591
592 assert(task != kernel_task);
593
594 cur_thr_act = current_act();
595 cur_task = cur_thr_act->task;
596
597 #if TASK_SWAPPER
598 /*
599 * If task is not resident (swapped out, or being swapped
600 * out), we want to bring it back in (this can block).
601 * NOTE: The only way that this can happen in the current
602 * system is if the task is swapped while it has a thread
603 * in exit(), and the thread does not hit a clean point
604 * to swap itself before getting here.
605 * Terminating other tasks is another way to reach this code, but
606 * it is not yet fully supported.
607 * The task_swapin is unconditional. It used to be done
608 * only if the task is not resident. Swapping in a
609 * resident task will prevent it from being swapped out
610 * while it terminates.
611 */
612 task_swapin(task, TRUE); /* TRUE means make it unswappable */
613 #endif /* TASK_SWAPPER */
614
615 /*
616 * Get the task locked and make sure that we are not racing
617 * with someone else trying to terminate us.
618 */
619 if (task == cur_task) {
620 task_lock(task);
621 } else if (task < cur_task) {
622 task_lock(task);
623 task_lock(cur_task);
624 } else {
625 task_lock(cur_task);
626 task_lock(task);
627 }
628
629 if (!task->active || !cur_thr_act->active) {
630 /*
631 * Task or current act is already being terminated.
632 * Just return an error. If we are dying, this will
633 * just get us to our AST special handler and that
634 * will get us to finalize the termination of ourselves.
635 */
636 task_unlock(task);
637 if (cur_task != task)
638 task_unlock(cur_task);
639 return(KERN_FAILURE);
640 }
641 if (cur_task != task)
642 task_unlock(cur_task);
643
644 /*
645 * Make sure the current thread does not get aborted out of
646 * the waits inside these operations.
647 */
648 interrupt_save = thread_interrupt_level(THREAD_UNINT);
649
650 /*
651 * Indicate that we want all the threads to stop executing
652 * at user space by holding the task (we would have held
653 * each thread independently in thread_terminate_internal -
654 * but this way we may be more likely to already find it
655 * held there). Mark the task inactive, and prevent
656 * further task operations via the task port.
657 */
658 task_hold_locked(task);
659 task->active = FALSE;
660 ipc_task_disable(task);
661
662 /*
663 * Terminate each activation in the task.
664 *
665 * Each terminated activation will run its special handler
666 * when its current kernel context is unwound. That will
667 * clean up most of the thread resources. Then it will be
668 * handed over to the reaper, who will finally remove the
669 * thread from the task list and free the structures.
670 */
671 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
672 thread_terminate_internal(thr_act);
673 }
674
675 /*
676 * Clean up any virtual machine state/resources associated
677 * with the current activation because it may hold wiring
678 * and other references on resources we will be trying to
679 * release below.
680 */
681 if (cur_thr_act->task == task)
682 act_virtual_machine_destroy(cur_thr_act);
683
684 task_unlock(task);
685
686 /*
687 * Destroy all synchronizers owned by the task.
688 */
689 task_synchronizer_destroy_all(task);
690
691 /*
692 * Destroy the IPC space, leaving just a reference for it.
693 */
694 if (!task->kernel_loaded)
695 ipc_space_destroy(task->itk_space);
696
697 /*
698 * If the current thread is a member of the task
699 * being terminated, then the last reference to
700 * the task will not be dropped until the thread
701 * is finally reaped. To avoid incurring the
702 * expense of removing the address space regions
703 * at reap time, we do it explicitly here.
704 */
705 (void) vm_map_remove(task->map,
706 task->map->min_offset,
707 task->map->max_offset, VM_MAP_NO_FLAGS);
708
709 shared_region_mapping_dealloc(task->system_shared_region);
710
711 if(task->dynamic_working_set)
712 tws_hash_destroy((tws_hash_t)task->dynamic_working_set);
713
714 /*
715 * We no longer need to guard against being aborted, so restore
716 * the previous interruptible state.
717 */
718 thread_interrupt_level(interrupt_save);
719
720 /*
721 * Get rid of the task active reference on itself.
722 */
723 task_deallocate(task);
724
725 return(KERN_SUCCESS);
726 }
727
728 /*
729 * task_halt - Shut the current task down (except for the current thread) in
730 * preparation for dramatic changes to the task (probably exec).
731 * We hold the task, terminate all other threads in the task and
732 * wait for them to terminate, clean up the portspace, and when
733 * all done, let the current thread go.
734 */
735 kern_return_t
736 task_halt(
737 task_t task)
738 {
739 thread_act_t thr_act, cur_thr_act;
740 task_t cur_task;
741
742 assert(task != kernel_task);
743
744 cur_thr_act = current_act();
745 cur_task = cur_thr_act->task;
746
747 if (task != cur_task) {
748 return(KERN_INVALID_ARGUMENT);
749 }
750
751 #if TASK_SWAPPER
752 /*
753 * If task is not resident (swapped out, or being swapped
754 * out), we want to bring it back in and make it unswappable.
755 * This can block, so do it early.
756 */
757 task_swapin(task, TRUE); /* TRUE means make it unswappable */
758 #endif /* TASK_SWAPPER */
759
760 task_lock(task);
761
762 if (!task->active || !cur_thr_act->active) {
763 /*
764 * Task or current thread is already being terminated.
765 * Hurry up and return out of the current kernel context
766 * so that we run our AST special handler to terminate
767 * ourselves.
768 */
769 task_unlock(task);
770 return(KERN_FAILURE);
771 }
772
773 if (task->thr_act_count > 1) {
774 /*
775 * Mark all the threads to keep them from starting any more
776 * user-level execution. The thread_terminate_internal code
777 * would do this on a thread by thread basis anyway, but this
778 * gives us a better chance of not having to wait there.
779 */
780 task_hold_locked(task);
781
782 /*
783 * Terminate all the other activations in the task.
784 *
785 * Each terminated activation will run its special handler
786 * when its current kernel context is unwound. That will
787 * clean up most of the thread resources. Then it will be
788 * handed over to the reaper, who will finally remove the
789 * thread from the task list and free the structures.
790 */
791 queue_iterate(&task->thr_acts, thr_act, thread_act_t,thr_acts) {
792 if (thr_act != cur_thr_act)
793 thread_terminate_internal(thr_act);
794 }
795 task_release_locked(task);
796 }
797
798 /*
799 * If the current thread has any virtual machine state
800 * associated with it, we need to explicitly clean that
801 * up now (because we did not terminate the current act)
802 * before we try to clean up the task VM and port spaces.
803 */
804 act_virtual_machine_destroy(cur_thr_act);
805
806 task_unlock(task);
807
808 /*
809 * Destroy all synchronizers owned by the task.
810 */
811 task_synchronizer_destroy_all(task);
812
813 /*
814 * Destroy the contents of the IPC space, leaving just
815 * a reference for it.
816 */
817 if (!task->kernel_loaded)
818 ipc_space_clean(task->itk_space);
819
820 /*
821 * Clean out the address space, as we are going to be
822 * getting a new one.
823 */
824 (void) vm_map_remove(task->map,
825 task->map->min_offset,
826 task->map->max_offset, VM_MAP_NO_FLAGS);
827
828 return KERN_SUCCESS;
829 }
830
831 /*
832 * task_hold_locked:
833 *
834 * Suspend execution of the specified task.
835 * This is a recursive-style suspension of the task; a count of
836 * suspends is maintained.
837 *
838 * CONDITIONS: the task is locked and active.
839 */
840 void
841 task_hold_locked(
842 register task_t task)
843 {
844 register thread_act_t thr_act;
845
846 assert(task->active);
847
848 if (task->suspend_count++ > 0)
849 return;
850
851 /*
852 * Iterate through all the thread_act's and hold them.
853 */
854 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
855 act_lock_thread(thr_act);
856 thread_hold(thr_act);
857 act_unlock_thread(thr_act);
858 }
859 }
860
861 /*
862 * task_hold:
863 *
864 * Same as the internal routine above, except that it must lock
865 * and verify that the task is active. This differs from task_suspend
866 * in that it places a kernel hold on the task rather than just a
867 * user-level hold. This keeps users from over-resuming and setting
868 * it running out from under the kernel.
869 *
870 * CONDITIONS: the caller holds a reference on the task
871 */
872 kern_return_t
873 task_hold(task_t task)
874 {
875 kern_return_t kret;
876
877 if (task == TASK_NULL)
878 return (KERN_INVALID_ARGUMENT);
879 task_lock(task);
880 if (!task->active) {
881 task_unlock(task);
882 return (KERN_FAILURE);
883 }
884 task_hold_locked(task);
885 task_unlock(task);
886
887 return(KERN_SUCCESS);
888 }
889
890 /*
891 * Routine: task_wait_locked
892 * Wait for all threads in task to stop.
893 *
894 * Conditions:
895 * Called with task locked, active, and held.
896 */
897 void
898 task_wait_locked(
899 register task_t task)
900 {
901 register thread_act_t thr_act, cur_thr_act;
902
903 assert(task->active);
904 assert(task->suspend_count > 0);
905
906 cur_thr_act = current_act();
907 /*
908 * Iterate through all the threads and wait for them to
909 * stop. Do not wait for the current thread if it is within
910 * the task.
911 */
912 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
913 if (thr_act != cur_thr_act) {
914 thread_shuttle_t thr_shuttle;
915
916 thr_shuttle = act_lock_thread(thr_act);
917 thread_wait(thr_shuttle);
918 act_unlock_thread(thr_act);
919 }
920 }
921 }
922
923 /*
924 * task_release_locked:
925 *
926 * Release a kernel hold on a task.
927 *
928 * CONDITIONS: the task is locked and active
929 */
930 void
931 task_release_locked(
932 register task_t task)
933 {
934 register thread_act_t thr_act;
935
936 assert(task->active);
937 assert(task->suspend_count > 0);
938
939 if (--task->suspend_count > 0)
940 return;
941
942 /*
943 * Iterate through all the thread_act's and release them,
944 * including the current thread_act if it is within the
945 * task.
946 */
947 queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
948 act_lock_thread(thr_act);
949 thread_release(thr_act);
950 act_unlock_thread(thr_act);
951 }
952 }
953
954 /*
955 * task_release:
956 *
957 * Same as the internal routine above, except that it must lock
958 * and verify that the task is active.
959 *
960 * CONDITIONS: The caller holds a reference to the task
961 */
962 kern_return_t
963 task_release(task_t task)
964 {
965 kern_return_t kret;
966
967 if (task == TASK_NULL)
968 return (KERN_INVALID_ARGUMENT);
969 task_lock(task);
970 if (!task->active) {
971 task_unlock(task);
972 return (KERN_FAILURE);
973 }
974 task_release_locked(task);
975 task_unlock(task);
976
977 return(KERN_SUCCESS);
978 }
979
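/*
 * task_threads:
 *
 * Return an array of references to the thread activations in the
 * task, converted to ports for the caller (the conversion MIG
 * would normally perform).
 */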
980 kern_return_t
981 task_threads(
982 task_t task,
983 thread_act_array_t *thr_act_list,
984 mach_msg_type_number_t *count)
985 {
986 unsigned int actual; /* this many thr_acts */
987 thread_act_t thr_act;
988 thread_act_t *thr_acts;
989 thread_t thread;
990 int i, j;
991
992 vm_size_t size, size_needed;
993 vm_offset_t addr;
994
995 if (task == TASK_NULL)
996 return KERN_INVALID_ARGUMENT;
997
998 size = 0; addr = 0;
999
1000 for (;;) {
1001 task_lock(task);
1002 if (!task->active) {
1003 task_unlock(task);
1004 if (size != 0)
1005 kfree(addr, size);
1006 return KERN_FAILURE;
1007 }
1008
1009 actual = task->thr_act_count;
1010
1011 /* do we have the memory we need? */
1012 size_needed = actual * sizeof(mach_port_t);
1013 if (size_needed <= size)
1014 break;
1015
1016 /* unlock the task and allocate more memory */
1017 task_unlock(task);
1018
1019 if (size != 0)
1020 kfree(addr, size);
1021
1022 assert(size_needed > 0);
1023 size = size_needed;
1024
1025 addr = kalloc(size);
1026 if (addr == 0)
1027 return KERN_RESOURCE_SHORTAGE;
1028 }
1029
1030 /* OK, have memory and the task is locked & active */
1031 thr_acts = (thread_act_t *) addr;
1032
1033 for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
1034 i < actual;
1035 i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
1036 act_lock(thr_act);
1037 if (thr_act->ref_count > 0) {
1038 act_locked_act_reference(thr_act);
1039 thr_acts[j++] = thr_act;
1040 }
1041 act_unlock(thr_act);
1042 }
1043 assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));
1044
1045 actual = j;
1046 size_needed = actual * sizeof(mach_port_t);
1047
1048 /* can unlock task now that we've got the thr_act refs */
1049 task_unlock(task);
1050
1051 if (actual == 0) {
1052 /* no thr_acts, so return null pointer and deallocate memory */
1053
1054 *thr_act_list = 0;
1055 *count = 0;
1056
1057 if (size != 0)
1058 kfree(addr, size);
1059 } else {
1060 /* if we allocated too much, must copy */
1061
1062 if (size_needed < size) {
1063 vm_offset_t newaddr;
1064
1065 newaddr = kalloc(size_needed);
1066 if (newaddr == 0) {
1067 for (i = 0; i < actual; i++)
1068 act_deallocate(thr_acts[i]);
1069 kfree(addr, size);
1070 return KERN_RESOURCE_SHORTAGE;
1071 }
1072
1073 bcopy((char *) addr, (char *) newaddr, size_needed);
1074 kfree(addr, size);
1075 thr_acts = (thread_act_t *) newaddr;
1076 }
1077
1078 *thr_act_list = thr_acts;
1079 *count = actual;
1080
1081 /* do the conversion that Mig should handle */
1082
1083 for (i = 0; i < actual; i++)
1084 ((ipc_port_t *) thr_acts)[i] =
1085 convert_act_to_port(thr_acts[i]);
1086 }
1087
1088 return KERN_SUCCESS;
1089 }
1090
1091 /*
1092 * Routine: task_suspend
1093 * Implement a user-level suspension on a task.
1094 *
1095 * Conditions:
1096 * The caller holds a reference to the task
1097 */
1098 kern_return_t
1099 task_suspend(
1100 register task_t task)
1101 {
1102 if (task == TASK_NULL)
1103 return (KERN_INVALID_ARGUMENT);
1104
1105 task_lock(task);
1106 if (!task->active) {
1107 task_unlock(task);
1108 return (KERN_FAILURE);
1109 }
1110 if ((task->user_stop_count)++ > 0) {
1111 /*
1112 * If the stop count was positive, the task is
1113 * already stopped and we can exit.
1114 */
1115 task_unlock(task);
1116 return (KERN_SUCCESS);
1117 }
1118
1119 /*
1120 * Put a kernel-level hold on the threads in the task (all
1121 * user-level task suspensions added together represent a
1122 * single kernel-level hold). We then wait for the threads
1123 * to stop executing user code.
1124 */
1125 task_hold_locked(task);
1126 task_wait_locked(task);
1127 task_unlock(task);
1128 return (KERN_SUCCESS);
1129 }
1130
1131 /*
1132 * Routine: task_resume
1133 * Release a kernel hold on a task.
1134 *
1135 * Conditions:
1136 * The caller holds a reference to the task
1137 */
1138 kern_return_t
1139 task_resume(register task_t task)
1140 {
1141 register boolean_t release;
1142
1143 if (task == TASK_NULL)
1144 return(KERN_INVALID_ARGUMENT);
1145
1146 release = FALSE;
1147 task_lock(task);
1148 if (!task->active) {
1149 task_unlock(task);
1150 return(KERN_FAILURE);
1151 }
1152 if (task->user_stop_count > 0) {
1153 if (--(task->user_stop_count) == 0)
1154 release = TRUE;
1155 }
1156 else {
1157 task_unlock(task);
1158 return(KERN_FAILURE);
1159 }
1160
1161 /*
1162 * Release the task if necessary.
1163 */
1164 if (release)
1165 task_release_locked(task);
1166
1167 task_unlock(task);
1168 return(KERN_SUCCESS);
1169 }
1170
1171 kern_return_t
1172 host_security_set_task_token(
1173 host_security_t host_security,
1174 task_t task,
1175 security_token_t sec_token,
1176 host_priv_t host_priv)
1177 {
1178 kern_return_t kr;
1179
1180 if (task == TASK_NULL)
1181 return(KERN_INVALID_ARGUMENT);
1182
1183 if (host_security == HOST_NULL)
1184 return(KERN_INVALID_SECURITY);
1185
1186 task_lock(task);
1187 task->sec_token = sec_token;
1188 task_unlock(task);
1189
1190 if (host_priv != HOST_PRIV_NULL) {
1191 kr = task_set_special_port(task,
1192 TASK_HOST_PORT,
1193 ipc_port_make_send(realhost.host_priv_self));
1194 } else {
1195 kr = task_set_special_port(task,
1196 TASK_HOST_PORT,
1197 ipc_port_make_send(realhost.host_self));
1198 }
1199 return(kr);
1200 }
1201
1202 /*
1203 * Utility routine to set a ledger
1204 */
1205 kern_return_t
1206 task_set_ledger(
1207 task_t task,
1208 ledger_t wired,
1209 ledger_t paged)
1210 {
1211 if (task == TASK_NULL)
1212 return(KERN_INVALID_ARGUMENT);
1213
1214 task_lock(task);
1215 if (wired) {
1216 ipc_port_release_send(task->wired_ledger_port);
1217 task->wired_ledger_port = ledger_copy(wired);
1218 }
1219 if (paged) {
1220 ipc_port_release_send(task->paged_ledger_port);
1221 task->paged_ledger_port = ledger_copy(paged);
1222 }
1223 task_unlock(task);
1224
1225 return(KERN_SUCCESS);
1226 }
1227
1228 /*
1229 * This routine was added, pretty much exclusively, for registering the
1230 * RPC glue vector for in-kernel short-circuited tasks. Rather than
1231 * removing it completely, I have only disabled that feature (which was
1232 * the only feature at the time). It just appears that we are going to
1233 * want to add some user data to tasks in the future (e.g. bsd info,
1234 * task names, etc.), so I left it in the formal task interface.
1235 */
1236 kern_return_t
1237 task_set_info(
1238 task_t task,
1239 task_flavor_t flavor,
1240 task_info_t task_info_in, /* pointer to IN array */
1241 mach_msg_type_number_t task_info_count)
1242 {
1243 vm_map_t map;
1244
1245 if (task == TASK_NULL)
1246 return(KERN_INVALID_ARGUMENT);
1247
1248 switch (flavor) {
1249 default:
1250 return (KERN_INVALID_ARGUMENT);
1251 }
1252 return (KERN_SUCCESS);
1253 }
1254
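/*
 * task_info:
 *
 * Return information about the task for the requested flavor
 * (basic info, thread times, scheduling policy, security token,
 * or event counters).
 */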
1255 kern_return_t
1256 task_info(
1257 task_t task,
1258 task_flavor_t flavor,
1259 task_info_t task_info_out,
1260 mach_msg_type_number_t *task_info_count)
1261 {
1262 thread_t thread;
1263 vm_map_t map;
1264
1265 if (task == TASK_NULL)
1266 return(KERN_INVALID_ARGUMENT);
1267
1268 switch (flavor) {
1269
1270 case TASK_BASIC_INFO:
1271 {
1272 register task_basic_info_t basic_info;
1273
1274 if (*task_info_count < TASK_BASIC_INFO_COUNT) {
1275 return(KERN_INVALID_ARGUMENT);
1276 }
1277
1278 basic_info = (task_basic_info_t) task_info_out;
1279
1280 map = (task == kernel_task) ? kernel_map : task->map;
1281
1282 basic_info->virtual_size = map->size;
1283 basic_info->resident_size = pmap_resident_count(map->pmap)
1284 * PAGE_SIZE;
1285
1286 task_lock(task);
1287 basic_info->policy = ((task != kernel_task)?
1288 POLICY_TIMESHARE: POLICY_RR);
1289 basic_info->suspend_count = task->user_stop_count;
1290 basic_info->user_time.seconds
1291 = task->total_user_time.seconds;
1292 basic_info->user_time.microseconds
1293 = task->total_user_time.microseconds;
1294 basic_info->system_time.seconds
1295 = task->total_system_time.seconds;
1296 basic_info->system_time.microseconds
1297 = task->total_system_time.microseconds;
1298 task_unlock(task);
1299
1300 *task_info_count = TASK_BASIC_INFO_COUNT;
1301 break;
1302 }
1303
1304 case TASK_THREAD_TIMES_INFO:
1305 {
1306 register task_thread_times_info_t times_info;
1307 register thread_t thread;
1308 register thread_act_t thr_act;
1309
1310 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
1311 return (KERN_INVALID_ARGUMENT);
1312 }
1313
1314 times_info = (task_thread_times_info_t) task_info_out;
1315 times_info->user_time.seconds = 0;
1316 times_info->user_time.microseconds = 0;
1317 times_info->system_time.seconds = 0;
1318 times_info->system_time.microseconds = 0;
1319
1320 task_lock(task);
1321 queue_iterate(&task->thr_acts, thr_act,
1322 thread_act_t, thr_acts)
1323 {
1324 time_value_t user_time, system_time;
1325 spl_t s;
1326
1327 thread = act_lock_thread(thr_act);
1328
1329 /* JMM - add logic to skip threads that have migrated
1330 * into this task?
1331 */
1332
1333 assert(thread); /* Must have thread */
1334 s = splsched();
1335 thread_lock(thread);
1336
1337 thread_read_times(thread, &user_time, &system_time);
1338
1339 thread_unlock(thread);
1340 splx(s);
1341 act_unlock_thread(thr_act);
1342
1343 time_value_add(&times_info->user_time, &user_time);
1344 time_value_add(&times_info->system_time, &system_time);
1345 }
1346 task_unlock(task);
1347
1348 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1349 break;
1350 }
1351
1352 case TASK_SCHED_FIFO_INFO:
1353 {
1354
1355 if (*task_info_count < POLICY_FIFO_BASE_COUNT)
1356 return(KERN_INVALID_ARGUMENT);
1357
1358 return(KERN_INVALID_POLICY);
1359 }
1360
1361 case TASK_SCHED_RR_INFO:
1362 {
1363 register policy_rr_base_t rr_base;
1364
1365 if (*task_info_count < POLICY_RR_BASE_COUNT)
1366 return(KERN_INVALID_ARGUMENT);
1367
1368 rr_base = (policy_rr_base_t) task_info_out;
1369
1370 task_lock(task);
1371 if (task != kernel_task) {
1372 task_unlock(task);
1373 return(KERN_INVALID_POLICY);
1374 }
1375
1376 rr_base->base_priority = task->priority;
1377 task_unlock(task);
1378
1379 rr_base->quantum = tick / 1000;
1380
1381 *task_info_count = POLICY_RR_BASE_COUNT;
1382 break;
1383 }
1384
1385 case TASK_SCHED_TIMESHARE_INFO:
1386 {
1387 register policy_timeshare_base_t ts_base;
1388
1389 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
1390 return(KERN_INVALID_ARGUMENT);
1391
1392 ts_base = (policy_timeshare_base_t) task_info_out;
1393
1394 task_lock(task);
1395 if (task == kernel_task) {
1396 task_unlock(task);
1397 return(KERN_INVALID_POLICY);
1398 }
1399
1400 ts_base->base_priority = task->priority;
1401 task_unlock(task);
1402
1403 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1404 break;
1405 }
1406
1407 case TASK_SECURITY_TOKEN:
1408 {
1409 register security_token_t *sec_token_p;
1410
1411 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1412 return(KERN_INVALID_ARGUMENT);
1413 }
1414
1415 sec_token_p = (security_token_t *) task_info_out;
1416
1417 task_lock(task);
1418 *sec_token_p = task->sec_token;
1419 task_unlock(task);
1420
1421 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
1422 break;
1423 }
1424
1425 case TASK_SCHED_INFO:
1426 return(KERN_INVALID_ARGUMENT);
1427
1428 case TASK_EVENTS_INFO:
1429 {
1430 register task_events_info_t events_info;
1431
1432 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
1433 return(KERN_INVALID_ARGUMENT);
1434 }
1435
1436 events_info = (task_events_info_t) task_info_out;
1437
1438 task_lock(task);
1439 events_info->faults = task->faults;
1440 events_info->pageins = task->pageins;
1441 events_info->cow_faults = task->cow_faults;
1442 events_info->messages_sent = task->messages_sent;
1443 events_info->messages_received = task->messages_received;
1444 events_info->syscalls_mach = task->syscalls_mach;
1445 events_info->syscalls_unix = task->syscalls_unix;
1446 events_info->csw = task->csw;
1447 task_unlock(task);
1448
1449 *task_info_count = TASK_EVENTS_INFO_COUNT;
1450 break;
1451 }
1452
1453 default:
1454 return (KERN_INVALID_ARGUMENT);
1455 }
1456
1457 return(KERN_SUCCESS);
1458 }
1459
1460 /*
1461 * task_assign:
1462 *
1463 * Change the assigned processor set for the task
1464 */
1465 kern_return_t
1466 task_assign(
1467 task_t task,
1468 processor_set_t new_pset,
1469 boolean_t assign_threads)
1470 {
1471 #ifdef lint
1472 task++; new_pset++; assign_threads++;
1473 #endif /* lint */
1474 return(KERN_FAILURE);
1475 }
1476
1477 /*
1478 * task_assign_default:
1479 *
1480 * Version of task_assign to assign to default processor set.
1481 */
1482 kern_return_t
1483 task_assign_default(
1484 task_t task,
1485 boolean_t assign_threads)
1486 {
1487 return (task_assign(task, &default_pset, assign_threads));
1488 }
1489
1490 /*
1491 * task_get_assignment
1492 *
1493 * Return name of processor set that task is assigned to.
1494 */
1495 kern_return_t
1496 task_get_assignment(
1497 task_t task,
1498 processor_set_t *pset)
1499 {
1500 if (!task->active)
1501 return(KERN_FAILURE);
1502
1503 *pset = task->processor_set;
1504 pset_reference(*pset);
1505 return(KERN_SUCCESS);
1506 }
1507
1508
1509 /*
1510 * task_policy
1511 *
1512 * Set scheduling policy and parameters, both base and limit, for
1513 * the given task. Policy must be a policy which is enabled for the
1514 * processor set. Change contained threads if requested.
1515 */
1516 kern_return_t
1517 task_policy(
1518 task_t task,
1519 policy_t policy_id,
1520 policy_base_t base,
1521 mach_msg_type_number_t count,
1522 boolean_t set_limit,
1523 boolean_t change)
1524 {
1525 return(KERN_FAILURE);
1526 }
1527
1528 /*
1529 * task_set_policy
1530 *
1531 * Set scheduling policy and parameters, both base and limit, for
1532 * the given task. Policy can be any policy implemented by the
1533 * processor set, whether enabled or not. Change contained threads
1534 * if requested.
1535 */
1536 kern_return_t
1537 task_set_policy(
1538 task_t task,
1539 processor_set_t pset,
1540 policy_t policy_id,
1541 policy_base_t base,
1542 mach_msg_type_number_t base_count,
1543 policy_limit_t limit,
1544 mach_msg_type_number_t limit_count,
1545 boolean_t change)
1546 {
1547 return(KERN_FAILURE);
1548 }
1549
1550 /*
1551 * task_collect_scan:
1552 *
1553 * Attempt to free resources owned by tasks.
1554 */
1555
1556 void
1557 task_collect_scan(void)
1558 {
1559 register task_t task, prev_task;
1560 processor_set_t pset = &default_pset;
1561
1562 pset_lock(pset);
1563 pset->ref_count++;
1564 task = (task_t) queue_first(&pset->tasks);
1565 while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
1566 task_lock(task);
1567 if (task->ref_count > 0) {
1568
1569 task_reference_locked(task);
1570 task_unlock(task);
1571
1572 #if MACH_HOST
1573 /*
1574 * While we still have the pset locked, freeze the task in
1575 * this pset. That way, when we get back from collecting
1576 * it, we can dereference the pset_tasks chain for the task
1577 * and be assured that we are still in this chain.
1578 */
1579 task_freeze(task);
1580 #endif
1581
1582 pset_unlock(pset);
1583
1584 pmap_collect(task->map->pmap);
1585
1586 pset_lock(pset);
1587 prev_task = task;
1588 task = (task_t) queue_next(&task->pset_tasks);
1589
1590 #if MACH_HOST
1591 task_unfreeze(prev_task);
1592 #endif
1593
1594 task_deallocate(prev_task);
1595 } else {
1596 task_unlock(task);
1597 task = (task_t) queue_next(&task->pset_tasks);
1598 }
1599 }
1600
1601 pset_unlock(pset);
1602
1603 pset_deallocate(pset);
1604 }
1605
1606 /* Also disabled in vm/vm_pageout.c */
1607 boolean_t task_collect_allowed = FALSE;
1608 unsigned task_collect_last_tick = 0;
1609 unsigned task_collect_max_rate = 0; /* in ticks */
1610
1611 /*
1612 * consider_task_collect:
1613 *
1614 * Called by the pageout daemon when the system needs more free pages.
1615 */
1616
1617 void
1618 consider_task_collect(void)
1619 {
1620 /*
1621 * By default, don't attempt task collection more frequently
1622 * than once per second.
1623 */
1624
1625 if (task_collect_max_rate == 0)
1626 task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;
1627
1628 if (task_collect_allowed &&
1629 (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
1630 task_collect_last_tick = sched_tick;
1631 task_collect_scan();
1632 }
1633 }
1634
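/*
 * task_set_ras_pc:
 *
 * Register the address range from pc to endpc as the task's
 * restartable atomic sequence (fast test-and-set emulation).
 * Fails unless FAST_TAS is configured.
 */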
1635 kern_return_t
1636 task_set_ras_pc(
1637 task_t task,
1638 vm_offset_t pc,
1639 vm_offset_t endpc)
1640 {
1641 #if FAST_TAS
1642 extern int fast_tas_debug;
1643
1644 if (fast_tas_debug) {
1645 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
1646 task, pc, endpc);
1647 }
1648 task_lock(task);
1649 task->fast_tas_base = pc;
1650 task->fast_tas_end = endpc;
1651 task_unlock(task);
1652 return KERN_SUCCESS;
1653
1654 #else /* FAST_TAS */
1655 #ifdef lint
1656 task++;
1657 pc++;
1658 endpc++;
1659 #endif /* lint */
1660
1661 return KERN_FAILURE;
1662
1663 #endif /* FAST_TAS */
1664 }
1665
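/*
 * task_synchronizer_destroy_all:
 *
 * Destroy all semaphores and lock sets owned by the task; called
 * when the task is terminated or halted.
 */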
1666 void
1667 task_synchronizer_destroy_all(task_t task)
1668 {
1669 semaphore_t semaphore;
1670 lock_set_t lock_set;
1671
1672 /*
1673 * Destroy owned semaphores
1674 */
1675
1676 while (!queue_empty(&task->semaphore_list)) {
1677 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
1678 (void) semaphore_destroy(task, semaphore);
1679 }
1680
1681 /*
1682 * Destroy owned lock sets
1683 */
1684
1685 while (!queue_empty(&task->lock_set_list)) {
1686 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
1687 (void) lock_set_destroy(task, lock_set);
1688 }
1689 }
1690
1691 /*
1692 * task_set_port_space:
1693 *
1694 * Set port name space of task to specified size.
1695 */
1696
1697 kern_return_t
1698 task_set_port_space(
1699 task_t task,
1700 int table_entries)
1701 {
1702 kern_return_t kr;
1703
1704 is_write_lock(task->itk_space);
1705 kr = ipc_entry_grow_table(task->itk_space, table_entries);
1706 if (kr == KERN_SUCCESS)
1707 is_write_unlock(task->itk_space);
1708 return kr;
1709 }
1710
1711 /*
1712 * We need to export some functions to other components that
1713 * are currently implemented in macros within the osfmk
1714 * component. Just export them as functions of the same name.
1715 */
1716 boolean_t is_kerneltask(task_t t)
1717 {
1718 if (t == kernel_task)
1719 return(TRUE);
1720 else
1721 return((t->kernel_loaded));
1722 }
1723
1724 #undef current_task
1725 task_t current_task()
1726 {
1727 return (current_task_fast());
1728 }