/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_kdb.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <fast_tas.h>
#include <task_swapper.h>
#include <platforms.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach/mach_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#if	TASK_SWAPPER
#include <kern/task_swap.h>
#endif	/* TASK_SWAPPER */

#ifdef __ppc__
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#endif

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <vm/task_working_set.h>

task_t	kernel_task;
zone_t	task_zone;

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_collect_scan(void);
void		task_free(
			task_t		task );
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

void
task_backing_store_privileged(
	task_t	task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}

void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	eml_init();

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

#if	MACH_HOST

#if 0
static void
task_freeze(
	task_t	task)
{
	task_lock(task);
	/*
	 *	If may_assign is false, task is already being assigned,
	 *	wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		wait_result_t res;

		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					 &task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
	task_unlock(task);
	return;
}
#else
#define task_freeze(task)	assert(task->processor_set == &default_pset)
#endif

#if 0
static void
task_unfreeze(
	task_t	task)
{
	task_lock(task);
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
	task_unlock(task);
	return;
}
#else
#define task_unfreeze(task)	assert(task->processor_set == &default_pset)
#endif

#endif	/* MACH_HOST */

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 * (This path is not supported here: the routine now always returns
 * KERN_INVALID_ARGUMENT.)
 */
kern_return_t
kernel_task_create(
	task_t		parent_task,
	vm_offset_t	map_base,
	vm_size_t	map_size,
	task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

kern_return_t
task_create(
	task_t				parent_task,
	ledger_port_array_t		ledger_ports,
	mach_msg_type_number_t		num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	return task_create_internal(
			parent_task, inherit_memory, child_task);
}

kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	security_token_t		sec_token,
	audit_token_t			audit_token,
	host_priv_t			host_priv,
	ledger_port_array_t		ledger_ports,
	mach_msg_type_number_t		num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_internal(
			parent_task, inherit_memory, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      audit_token,
					      host_priv);

	return(result);
}

kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	task_t		*child_task)		/* OUT */
{
	task_t		new_task;
	processor_set_t	pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
					round_page_32(VM_MIN_ADDRESS),
					trunc_page_32(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->res_thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->csw = 0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef MACH_BSD
	new_task->bsd_info = 0;
#endif /* MACH_BSD */

#ifdef __ppc__
	/* If this is a 64-bit machine, note that the task has
	   at least 64-bit registers available. */
	if (per_proc_info[0].pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;
#endif

#if	TASK_SWAPPER
	new_task->swap_state = TASK_SW_IN;
	new_task->swap_flags = 0;
	new_task->swap_ast_waiting = 0;
	new_task->swap_stamp = sched_tick;
	new_task->swap_rss = 0;
	new_task->swap_nswap = 0;
#endif	/* TASK_SWAPPER */

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */
	eml_task_reference(new_task, parent_task);

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time.seconds = 0;
	new_task->total_user_time.microseconds = 0;
	new_task->total_system_time.seconds = 0;
	new_task->total_system_time.microseconds = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region = parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
	}
	else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}

/*
 *	task_deallocate
 *
 *	Drop a reference on a task.  The caller must not hold the task
 *	lock; it is taken and released internally.
 */
void
task_deallocate(
	task_t		task)
{
	processor_set_t	pset;
	int		refs;

	if (task == TASK_NULL)
		return;

	task_lock(task);
	refs = --task->ref_count;
	task_unlock(task);

	if (refs > 0)
		return;

#if	TASK_SWAPPER
	/* task_terminate guarantees that this task is off the list */
	assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
#endif	/* TASK_SWAPPER */

	if (task->dynamic_working_set)
		tws_hash_destroy((tws_hash_t)task->dynamic_working_set);

	eml_task_deallocate(task);

	ipc_task_terminate(task);

#if	MACH_HOST
	task_freeze(task);
#endif

	pset = task->processor_set;
	pset_lock(pset);
	pset_remove_task(pset, task);
	pset_unlock(pset);
	pset_deallocate(pset);

#if	MACH_HOST
	task_unfreeze(task);
#endif

	vm_map_deallocate(task->map);
	is_release(task->itk_space);
	task_prof_deallocate(task);
	zfree(task_zone, (vm_offset_t) task);
}

void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL) {
		task_lock(task);
		task->ref_count++;
		task_unlock(task);
	}
}

boolean_t
task_reference_try(
	task_t		task)
{
	if (task != TASK_NULL) {
		if (task_lock_try(task)) {
			task->ref_count++;
			task_unlock(task);
			return TRUE;
		}
	}
	return FALSE;
}
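
/*
 * Example (illustrative sketch, not a prescribed interface): a caller
 * that must not block can combine task_reference_try() with
 * task_deallocate() to pin a task found on an unlocked list:
 *
 *	if (task_reference_try(task)) {
 *		... use the task; it cannot be freed underneath us ...
 *		task_deallocate(task);
 *	}
 *
 * If the try fails (the task lock is contended), the caller must retry
 * or give up rather than touch the task.
 */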

/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (task->bsd_info)
		return(KERN_FAILURE);
	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;
	boolean_t	interrupt_save;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in (this can block).
	 *	NOTE: The only way that this can happen in the current
	 *	system is if the task is swapped while it has a thread
	 *	in exit(), and the thread does not hit a clean point
	 *	to swap itself before getting here.
	 *	Terminating other tasks is another way into this code, but
	 *	it is not yet fully supported.
	 *	The task_swapin is unconditional.  It used to be done
	 *	only if the task is not resident.  Swapping in a
	 *	resident task will prevent it from being swapped out
	 *	while it terminates.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.  When both tasks
	 *	must be locked, they are locked in ascending address order
	 *	to avoid deadlocking against a concurrent termination.
	 */
	if (task == cur_task) {
		task_lock(task);
	} else if (task < cur_task) {
		task_lock(task);
		task_lock(cur_task);
	} else {
		task_lock(cur_task);
		task_lock(task);
	}

	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current act is already being terminated.
		 *	Just return an error.  If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (cur_task != task)
			task_unlock(cur_task);
		return(KERN_FAILURE);
	}
	if (cur_task != task)
		task_unlock(cur_task);

	/*
	 *	Make sure the current thread does not get aborted out of
	 *	the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each activation in the task.
	 *
	 *	Each terminated activation will run its special handler
	 *	when its current kernel context is unwound.  That will
	 *	clean up most of the thread resources.  Then it will be
	 *	handed over to the reaper, who will finally remove the
	 *	thread from the task list and free the structures.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		thread_terminate_internal(thr_act);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	if (cur_thr_act->task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

	/*
	 *	If the current thread is a member of the task
	 *	being terminated, then the last reference to
	 *	the task will not be dropped until the thread
	 *	is finally reaped.  To avoid incurring the
	 *	expense of removing the address space regions
	 *	at reap time, we do it explicitly here.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);

	/*
	 *	Flush working set here to avoid I/O in reaper thread
	 */
	if (task->dynamic_working_set)
		tws_hash_ws_flush((tws_hash_t)task->dynamic_working_set);

	/*
	 *	We no longer need to guard against being aborted, so restore
	 *	the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);		/* notify the perfmon facility */
#endif

	/*
	 *	Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return(KERN_SUCCESS);
}

/*
 *	task_halt:
 *
 *	Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task, terminate all other threads in the task and
 *	wait for them to terminate, clean up the portspace, and when
 *	all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

	if (task != cur_task) {
		return(KERN_INVALID_ARGUMENT);
	}

#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in and make it unswappable.
	 *	This can block, so do it early.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	task_lock(task);

	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);
		return(KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other activations in the task.
		 *
		 *	Each terminated activation will run its special handler
		 *	when its current kernel context is unwound.  That will
		 *	clean up most of the thread resources.  Then it will be
		 *	handed over to the reaper, who will finally remove the
		 *	thread from the task list and free the structures.
		 */
		queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
			if (thr_act != cur_thr_act)
				thread_terminate_internal(thr_act);
		}
		task_release_locked(task);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 *	Clean out the address space, as we are going to be
	 *	getting a new one.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	return KERN_SUCCESS;
}

/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task; a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_act_t	thr_act;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the thread_acts and hold them.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		act_lock_thread(thr_act);
		thread_hold(thr_act);
		act_unlock_thread(thr_act);
	}
}

/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over-resuming and setting
 *	it running out from under the kernel.
 *
 *	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(task_t task)
{
	kern_return_t kret;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_hold_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}
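
/*
 * Example (illustrative sketch, assuming the task remains active
 * across the calls): a kernel caller that needs a task quiescent can
 * pair task_hold() with task_wait_locked() below and task_release():
 *
 *	if (task_hold(task) == KERN_SUCCESS) {
 *		task_lock(task);
 *		task_wait_locked(task);	  -- returns once all threads stop
 *		task_unlock(task);
 *		... inspect or modify the stopped task ...
 *		task_release(task);
 *	}
 */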

/*
 *	Routine:	task_wait_locked
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_act_t	thr_act, cur_thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	cur_thr_act = current_act();
	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		if (thr_act != cur_thr_act) {
			thread_t thread;

			thread = act_lock_thread(thr_act);
			thread_wait(thread);
			act_unlock_thread(thr_act);
		}
	}
}

/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 *	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_act_t	thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	/*
	 *	Iterate through all the thread_acts and release them.
	 */
	queue_iterate(&task->threads, thr_act, thread_act_t, task_threads) {
		act_lock_thread(thr_act);
		thread_release(thr_act);
		act_unlock_thread(thr_act);
	}
}

/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 *	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(task_t task)
{
	kern_return_t kret;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_release_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}

kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*thr_act_list,
	mach_msg_type_number_t	*count)
{
	unsigned int		actual;	/* this many thr_acts */
	thread_act_t		thr_act;
	thread_act_t		*thr_acts;
	thread_t		thread;
	int			i, j;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);
			if (size != 0)
				kfree(addr, size);
			return KERN_FAILURE;
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the task is locked & active */
	thr_acts = (thread_act_t *) addr;

	for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->threads);
	     i < actual;
	     i++, thr_act = (thread_act_t) queue_next(&thr_act->task_threads)) {
		act_lock(thr_act);
		if (thr_act->act_ref_count > 0) {
			act_reference_locked(thr_act);
			thr_acts[j++] = thr_act;
		}
		act_unlock(thr_act);
	}
	assert(queue_end(&task->threads, (queue_entry_t) thr_act));

	actual = j;
	size_needed = actual * sizeof(mach_port_t);

	/* can unlock task now that we've got the thr_act refs */
	task_unlock(task);

	if (actual == 0) {
		/* no thr_acts, so return null pointer and deallocate memory */

		*thr_act_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; i++)
					act_deallocate(thr_acts[i]);
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			thr_acts = (thread_act_t *) newaddr;
		}

		*thr_act_list = thr_acts;
		*count = actual;

		/* do the conversion that MIG should handle */

		for (i = 0; i < actual; i++)
			((ipc_port_t *) thr_acts)[i] =
				convert_act_to_port(thr_acts[i]);
	}

	return KERN_SUCCESS;
}
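
/*
 * Example (illustrative sketch): task_threads() hands back a kalloc'd
 * array whose entries have already been converted to send rights for
 * the MIG reply path.  A direct in-kernel caller would therefore own
 * both the rights and the buffer:
 *
 *	thread_act_array_t acts;
 *	mach_msg_type_number_t n, i;
 *
 *	if (task_threads(task, &acts, &n) == KERN_SUCCESS && n > 0) {
 *		for (i = 0; i < n; i++)
 *			ipc_port_release_send(((ipc_port_t *) acts)[i]);
 *		kfree((vm_offset_t) acts, n * sizeof(mach_port_t));
 *	}
 */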

/*
 *	Routine:	task_suspend
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	if ((task->user_stop_count)++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);
		return (KERN_SUCCESS);
	}

	/*
	 *	Put a kernel-level hold on the threads in the task (all
	 *	user-level task suspensions added together represent a
	 *	single kernel-level hold).  We then wait for the threads
	 *	to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);
	task_unlock(task);
	return (KERN_SUCCESS);
}

/*
 *	Routine:	task_resume
 *	Release a user-level suspension on a task; the kernel hold is
 *	dropped only when the suspension count reaches zero.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(register task_t task)
{
	register boolean_t	release;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	release = FALSE;
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}
	if (task->user_stop_count > 0) {
		if (--(task->user_stop_count) == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);
	return(KERN_SUCCESS);
}
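
/*
 * Example (illustrative): user-level suspensions nest, so each
 * successful task_suspend() must be balanced by one task_resume()
 * before the task runs again:
 *
 *	task_suspend(task);	user_stop_count 0 -> 1, threads stopped
 *	task_suspend(task);	user_stop_count 1 -> 2
 *	task_resume(task);	user_stop_count 2 -> 1, still stopped
 *	task_resume(task);	user_stop_count 1 -> 0, hold released
 */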

kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	host_port;
	kern_return_t	kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}

/*
 *	Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (e.g. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	task_info_t	task_info_in,		/* pointer to IN array */
	mach_msg_type_number_t	task_info_count)
{
	vm_map_t	map;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	thread_t	thread;
	vm_map_t	map;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	    case TASK_BASIC_INFO:
	    {
		register task_basic_info_t	basic_info;

		if (*task_info_count < TASK_BASIC_INFO_COUNT) {
		    return(KERN_INVALID_ARGUMENT);
		}

		basic_info = (task_basic_info_t) task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;

		basic_info->virtual_size = map->size;
		basic_info->resident_size = pmap_resident_count(map->pmap)
						   * PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;
		basic_info->user_time.seconds
				= task->total_user_time.seconds;
		basic_info->user_time.microseconds
				= task->total_user_time.microseconds;
		basic_info->system_time.seconds
				= task->total_system_time.seconds;
		basic_info->system_time.microseconds
				= task->total_system_time.microseconds;
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_COUNT;
		break;
	    }

	    case TASK_THREAD_TIMES_INFO:
	    {
		register task_thread_times_info_t times_info;
		register thread_t	thread;
		register thread_act_t	thr_act;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
		    return (KERN_INVALID_ARGUMENT);
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);
		queue_iterate(&task->threads, thr_act,
			      thread_act_t, task_threads)
		{
		    time_value_t user_time, system_time;
		    spl_t	 s;

		    thread = act_lock_thread(thr_act);

		    /* JMM - add logic to skip threads that have migrated
		     * into this task?
		     */

		    assert(thread);	/* Must have thread */
		    s = splsched();
		    thread_lock(thread);

		    thread_read_times(thread, &user_time, &system_time);

		    thread_unlock(thread);
		    splx(s);
		    act_unlock_thread(thr_act);

		    time_value_add(&times_info->user_time, &user_time);
		    time_value_add(&times_info->system_time, &system_time);
		}
		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	    }

	    case TASK_SCHED_FIFO_INFO:
	    {

		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		return(KERN_INVALID_POLICY);
	    }

	    case TASK_SCHED_RR_INFO:
	    {
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = tick / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	    }

	    case TASK_SCHED_TIMESHARE_INFO:
	    {
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	    }

	    case TASK_SECURITY_TOKEN:
	    {
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
		    return(KERN_INVALID_ARGUMENT);
		}

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	    }

	    case TASK_AUDIT_TOKEN:
	    {
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
		    return(KERN_INVALID_ARGUMENT);
		}

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	    }

	    case TASK_SCHED_INFO:
		return(KERN_INVALID_ARGUMENT);

	    case TASK_EVENTS_INFO:
	    {
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
		    return(KERN_INVALID_ARGUMENT);
		}

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	    }

	    default:
		return (KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
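
/*
 * Example (illustrative): querying the basic-info flavor from kernel
 * code.  The count argument is in/out and must start at the size of
 * the caller's buffer:
 *
 *	task_basic_info_data_t	info;
 *	mach_msg_type_number_t	count = TASK_BASIC_INFO_COUNT;
 *
 *	if (task_info(task, TASK_BASIC_INFO,
 *		      (task_info_t) &info, &count) == KERN_SUCCESS)
 *		... use info.virtual_size, info.user_time, ...
 */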

/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	task_t		task,
	processor_set_t	new_pset,
	boolean_t	assign_threads)
{
#ifdef	lint
	task++; new_pset++; assign_threads++;
#endif	/* lint */
	return(KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}

/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}

/*
 *	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
task_policy(
	task_t			task,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	count,
	boolean_t		set_limit,
	boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.  Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	task_t			task,
	processor_set_t		pset,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	base_count,
	policy_limit_t		limit,
	mach_msg_type_number_t	limit_count,
	boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 *	task_collect_scan:
 *
 *	Attempt to free resources owned by tasks.
 */
void
task_collect_scan(void)
{
	register task_t		task, prev_task;
	processor_set_t		pset = &default_pset;

	pset_lock(pset);
	pset->ref_count++;
	task = (task_t) queue_first(&pset->tasks);
	while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
		task_lock(task);
		if (task->ref_count > 0) {

			task_reference_locked(task);
			task_unlock(task);

#if	MACH_HOST
			/*
			 *	While we still have the pset locked, freeze the task in
			 *	this pset.  That way, when we get back from collecting
			 *	it, we can dereference the pset_tasks chain for the task
			 *	and be assured that we are still in this chain.
			 */
			task_freeze(task);
#endif

			pset_unlock(pset);

			pmap_collect(task->map->pmap);

			pset_lock(pset);
			prev_task = task;
			task = (task_t) queue_next(&task->pset_tasks);

#if	MACH_HOST
			task_unfreeze(prev_task);
#endif

			task_deallocate(prev_task);
		} else {
			task_unlock(task);
			task = (task_t) queue_next(&task->pset_tasks);
		}
	}

	pset_unlock(pset);

	pset_deallocate(pset);
}

/* Also disabled in vm/vm_pageout.c */
boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;		/* in ticks */

/*
 *	consider_task_collect:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_task_collect(void)
{
	/*
	 *	By default, don't attempt task collection more frequently
	 *	than once per second.
	 */

	if (task_collect_max_rate == 0)
		task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;

	if (task_collect_allowed &&
	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
		task_collect_last_tick = sched_tick;
		task_collect_scan();
	}
}
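
/*
 * Note (illustrative, assuming the scheduler's usual configuration):
 * sched_tick advances (1 << SCHED_TICK_SHIFT) times per second, so the
 * default rate computed above works out to roughly one collection pass
 * per second, matching the comment in consider_task_collect().
 */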

kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
#if	FAST_TAS
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;

#else	/* FAST_TAS */
#ifdef	lint
	task++;
	pc++;
	endpc++;
#endif	/* lint */
	return KERN_FAILURE;

#endif	/* FAST_TAS */
}
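
/*
 * Note (illustrative, describing the usual FAST_TAS scheme): the range
 * registered above is a restartable atomic sequence.  If a thread is
 * preempted while its PC lies inside [fast_tas_base, fast_tas_end), it
 * is resumed at fast_tas_base, so a short user-level load-modify-store
 * sequence in that window behaves atomically with respect to
 * preemption.
 */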

void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 * task_set_port_space:
 *
 * Set port name space of task to specified size.
 */
kern_return_t
task_set_port_space(
	task_t		task,
	int		table_entries)
{
	kern_return_t	kr;

	is_write_lock(task->itk_space);
	kr = ipc_entry_grow_table(task->itk_space, table_entries);
	/*
	 * ipc_entry_grow_table() returns with the space unlocked on
	 * failure, so the lock is dropped here only on success.
	 */
	if (kr == KERN_SUCCESS)
		is_write_unlock(task->itk_space);
	return kr;
}
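
/*
 * Example (illustrative): a server expecting a burst of port
 * allocations can pre-grow its IPC entry table rather than letting it
 * grow lazily one allocation at a time:
 *
 *	kern_return_t kr = task_set_port_space(current_task(), 4096);
 *	if (kr != KERN_SUCCESS)
 *		... fall back; the space still grows on demand ...
 */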

/*
 * Routine:
 *	task_is_classic
 * Purpose:
 *	Returns true if the task is a P_CLASSIC task.
 */
boolean_t
task_is_classic(
	task_t task)
{
	boolean_t result = FALSE;

	if (task) {
		struct proc *p = get_bsdtask_info(task);
		result = proc_is_classic(p) ? TRUE : FALSE;
	}
	return result;
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

#undef current_task
task_t current_task()
{
	return (current_task_fast());
}