/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_kdb.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <fast_tas.h>
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>	/* for vm_map_remove_commpage64 */

#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#ifdef __ppc__
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#endif

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>

#include <vm/task_working_set.h>
#include <vm/vm_shared_memory_server.h>

task_t	kernel_task;
zone_t	task_zone;

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_free(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

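/*
 *	task_backing_store_privileged:
 *
 *	Mark the task as privileged with respect to the VM backing
 *	store, so the pageout machinery will not throttle it when
 *	backing store runs low.
 */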
void
task_backing_store_privileged(
	task_t	task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}

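/*
 *	task_working_set_disable:
 *
 *	Detach the task's dynamic working set under the task lock,
 *	then flush and destroy it outside the lock.
 */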
void
task_working_set_disable(task_t task)
{
	struct tws_hash	*ws;

	task_lock(task);
	ws = task->dynamic_working_set;
	task->dynamic_working_set = NULL;
	task_unlock(task);
	if (ws) {
		tws_hash_ws_flush(ws);
		tws_hash_destroy(ws);
	}
}

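/*
 *	task_set_64bit:
 *
 *	Switch the task between 64-bit and 32-bit address spaces,
 *	adjusting the map's maximum offset to match.  Dropping back
 *	to 32-bit removes any mappings above the 32-bit range, since
 *	they would no longer be reachable.
 */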
void
task_set_64bit(
	task_t		task,
	boolean_t	is64bit)
{
	if (is64bit) {
		/* LP64todo - no task working set for 64-bit */
		task_set_64BitAddr(task);
		task_working_set_disable(task);
		task->map->max_offset = MACH_VM_MAX_ADDRESS;
	} else {
		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* LP64todo - make this clean */
#ifdef __ppc__
		vm_map_remove_commpage64(task->map);
		pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif
		(void) vm_map_remove(task->map,
				     (vm_map_offset_t) VM_MAX_ADDRESS,
				     MACH_VM_MAX_ADDRESS,
				     VM_MAP_NO_FLAGS);
		task_clear_64BitAddr(task);
		task->map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS;
	}
}

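/*
 *	task_init:
 *
 *	Initialize the task subsystem: create the zone that backs
 *	task structures, then create the kernel task and hand it the
 *	kernel map in place of the map it was created with.
 */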
void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}

#if	MACH_HOST

#if 0
static void
task_freeze(
	task_t	task)
{
	task_lock(task);
	/*
	 * If may_assign is false, task is already being assigned,
	 * wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		wait_result_t res;

		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					 &task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
	task_unlock(task);
	return;
}
#else
#define task_freeze(task)	assert((task)->processor_set == &default_pset)
#endif

#if 0
static void
task_unfreeze(
	task_t	task)
{
	task_lock(task);
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
	task_unlock(task);
	return;
}
#else
#define task_unfreeze(task)	assert((task)->processor_set == &default_pset)
#endif

#endif	/* MACH_HOST */

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t	map_base,
	__unused vm_size_t	map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

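/*
 *	task_create:
 *
 *	Create a child of parent_task, optionally inheriting its
 *	memory.  The ledger arguments are accepted for interface
 *	compatibility but are currently unused.
 */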
kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	return task_create_internal(
			parent_task, inherit_memory, child_task);
}

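/*
 *	host_security_create_task_token:
 *
 *	Privileged variant of task_create: additionally stamps the
 *	new task with the supplied security and audit tokens.
 *	Requires a valid host security port.
 */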
kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	security_token_t		sec_token,
	audit_token_t			audit_token,
	host_priv_t			host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_internal(
			parent_task, inherit_memory, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      audit_token,
					      host_priv);

	return(result);
}

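/*
 *	task_create_internal:
 *
 *	Common task creation path.  Allocates and initializes the task
 *	structure, creates (or forks) its address space, inherits the
 *	parent's tokens, ledgers, and shared region when there is a
 *	parent (kernel defaults otherwise), and adds the new task to
 *	a processor set.
 */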
kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	task_t		*child_task)	/* OUT */
{
	task_t			new_task;
	processor_set_t		pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
					(vm_map_offset_t)(VM_MIN_ADDRESS),
					(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, 0);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->csw = 0;
	new_task->taskFeatures[0] = 0;	/* Init task features */
	new_task->taskFeatures[1] = 0;	/* Init task features */
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef MACH_BSD
	new_task->bsd_info = 0;
#endif /* MACH_BSD */

#ifdef __ppc__
	/* If 64-bit machine, show we have 64-bit registers at least */
	if (BootProcInfo.pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;
#endif

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region = parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		if (task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);
	}
	else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

	if (vm_backing_store_low && parent_task != TASK_NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}

/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t	task)
{
	processor_set_t		pset;

	if (task == TASK_NULL)
		return;

	if (task_deallocate_internal(task) > 0)
		return;

	pset = task->processor_set;
	pset_deallocate(pset);

	if (task->dynamic_working_set)
		tws_hash_destroy(task->dynamic_working_set);

	ipc_task_terminate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	task_prof_deallocate(task);
	zfree(task_zone, task);
}

/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
	task_t	task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (task->bsd_info)
		return (KERN_FAILURE);

	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t	task)
{
	processor_set_t		pset;
	thread_t		thread, self;
	task_t			self_task;
	boolean_t		interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active || !self->active) {
		/*
		 *	Task or current act is already being terminated.
		 *	Just return an error. If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 *	Make sure the current thread does not get aborted out of
	 *	the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

	/* LP64todo - make this clean */
#ifdef __ppc__
	vm_map_remove_commpage64(task->map);
	pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif

	/*
	 *	If the current thread is a member of the task
	 *	being terminated, then the last reference to
	 *	the task will not be dropped until the thread
	 *	is finally reaped.  To avoid incurring the
	 *	expense of removing the address space regions
	 *	at reap time, we do it explicitly here.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);

	/*
	 * Flush working set here to avoid I/O in reaper thread
	 */
	if (task->dynamic_working_set)
		tws_hash_ws_flush(task->dynamic_working_set);

	pset = task->processor_set;
	pset_lock(pset);
	pset_remove_task(pset, task);
	pset_unlock(pset);

	/*
	 *	We no longer need to guard against being aborted, so restore
	 *	the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);	/* notify the perfmon facility */
#endif

	/*
	 *	Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}

/*
 *	task_halt:
 *
 *	Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task, terminate all other threads in the task and
 *	wait for them to terminate, clean up the portspace, and when
 *	all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t	task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active || !self->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	return (KERN_SUCCESS);
}
/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task; a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the threads and hold them.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_hold(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over-resuming and setting
 *	it running out from under the kernel.
 *
 *	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_wait_locked:
 *
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_t	thread, self;

	assert(task->active);
	assert(task->suspend_count > 0);

	self = current_thread();

	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread != self)
			thread_wait(thread);
	}
}

/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 *	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_release(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 *	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t	task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

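/*
 *	task_threads:
 *
 *	Return an array of send rights to all threads in the task.
 *	The buffer is allocated with the task unlocked and the
 *	allocation retried if the thread count has grown, then
 *	trimmed to fit before the thread references are converted
 *	to ports for the caller.
 */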
kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*threads;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	threads = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		threads[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(threads[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			threads = (thread_t *)newaddr;
		}

		*threads_out = threads;
		*count = actual;

		/* do the conversion that MIG should handle */

		for (i = 0; i < actual; ++i)
			((ipc_port_t *) threads)[i] = convert_thread_to_port(threads[i]);
	}

	return (KERN_SUCCESS);
}

/*
 *	task_suspend:
 *
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 *	Put a kernel-level hold on the threads in the task (all
	 *	user-level task suspensions added together represent a
	 *	single kernel-level hold).  We then wait for the threads
	 *	to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 *	task_resume:
 *		Release a kernel hold on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(
	register task_t		task)
{
	register boolean_t	release = FALSE;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count > 0) {
		if (--task->user_stop_count == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}

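/*
 *	host_security_set_task_token:
 *
 *	Install new security and audit tokens in the task and reset
 *	its host special port: to the privileged host port when
 *	host_priv is supplied, otherwise to the unprivileged one.
 */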
kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	host_port;
	kern_return_t	kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}

/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short-circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (e.g. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t				task,
	task_flavor_t			flavor,
	__unused task_info_t		task_info_in,	/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

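/*
 *	task_info:
 *
 *	Return information about the task, selected by flavor.  The
 *	caller supplies the buffer and its capacity in task_info_count,
 *	which is updated to the amount of data actually returned.
 */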
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = CAST_DOWN(vm_offset_t, map->size);
		basic_info->resident_size = pmap_resident_count(map->pmap)
								* PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(
				task->total_user_time,
				&basic_info->user_time.seconds,
				&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
				task->total_system_time,
				&basic_info->system_time.seconds,
				&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap)
								* PAGE_SIZE);

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(
				task->total_user_time,
				&basic_info->user_time.seconds,
				&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
				task->total_system_time,
				&basic_info->system_time.seconds,
				&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t			thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t		thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		task_lock(task);

		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		task_unlock(task);

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{
		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = std_quantum_us / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return (KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}

/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	__unused task_t			task,
	__unused processor_set_t	new_pset,
	__unused boolean_t		assign_threads)
{
	return(KERN_FAILURE);
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}

/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t			task,
	processor_set_t		*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}

/*
 *	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy must be a policy which is enabled for the
 *	processor set.  Change contained threads if requested.
 */
kern_return_t
task_policy(
	__unused task_t				task,
	__unused policy_t			policy_id,
	__unused policy_base_t			base,
	__unused mach_msg_type_number_t		count,
	__unused boolean_t			set_limit,
	__unused boolean_t			change)
{
	return(KERN_FAILURE);
}

/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task.  Policy can be any policy implemented by the
 *	processor set, whether enabled or not.  Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	__unused task_t				task,
	__unused processor_set_t		pset,
	__unused policy_t			policy_id,
	__unused policy_base_t			base,
	__unused mach_msg_type_number_t		base_count,
	__unused policy_limit_t			limit,
	__unused mach_msg_type_number_t		limit_count,
	__unused boolean_t			change)
{
	return(KERN_FAILURE);
}

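/*
 *	task_set_ras_pc:
 *
 *	Record the bounds [pc, endpc) of the task's restartable atomic
 *	sequence, used by FAST_TAS to back a preempted thread up to the
 *	start of the sequence.  Fails when FAST_TAS is not configured.
 */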
#if	FAST_TAS
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
kern_return_t
task_set_ras_pc(
	__unused task_t		task,
	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */

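/*
 *	task_synchronizer_destroy_all:
 *
 *	Destroy every semaphore and lock set still owned by the task;
 *	called during task termination.
 */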
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t	task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}