/*
 * osfmk/kern/task.c — from the xnu-792.17.14 source distribution
 * (retrieved via git.saurik.com mirror of apple/xnu.git).
 */
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63 /*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
81
82 #include <mach_kdb.h>
83 #include <mach_host.h>
84 #include <mach_prof.h>
85 #include <fast_tas.h>
86 #include <platforms.h>
87
88 #include <mach/mach_types.h>
89 #include <mach/boolean.h>
90 #include <mach/host_priv.h>
91 #include <mach/machine/vm_types.h>
92 #include <mach/vm_param.h>
93 #include <mach/semaphore.h>
94 #include <mach/task_info.h>
95 #include <mach/task_special_ports.h>
96
97 #include <ipc/ipc_types.h>
98 #include <ipc/ipc_space.h>
99 #include <ipc/ipc_entry.h>
100
101 #include <kern/kern_types.h>
102 #include <kern/mach_param.h>
103 #include <kern/misc_protos.h>
104 #include <kern/task.h>
105 #include <kern/thread.h>
106 #include <kern/zalloc.h>
107 #include <kern/kalloc.h>
108 #include <kern/processor.h>
109 #include <kern/sched_prim.h> /* for thread_wakeup */
110 #include <kern/ipc_tt.h>
111 #include <kern/ledger.h>
112 #include <kern/host.h>
113 #include <kern/clock.h>
114 #include <kern/timer.h>
115 #include <kern/profile.h>
116 #include <kern/assert.h>
117 #include <kern/sync_lock.h>
118
119 #include <vm/pmap.h>
120 #include <vm/vm_map.h>
121 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
122 #include <vm/vm_pageout.h>
123 #include <vm/vm_protos.h> /* for vm_map_remove_commpage64 */
124
125 #if MACH_KDB
126 #include <ddb/db_sym.h>
127 #endif /* MACH_KDB */
128
129 #ifdef __ppc__
130 #include <ppc/exception.h>
131 #include <ppc/hw_perfmon.h>
132 #endif
133
134 /*
135 * Exported interfaces
136 */
137
138 #include <mach/task_server.h>
139 #include <mach/mach_host_server.h>
140 #include <mach/host_security_server.h>
141 #include <mach/mach_port_server.h>
142
143 #include <vm/task_working_set.h>
144 #include <vm/vm_shared_memory_server.h>
145
task_t	kernel_task;	/* the first task created; its map aliases kernel_map (see task_init) */
zone_t	task_zone;	/* zone backing every struct task allocation */

/* Forwards */

/* Place a kernel-level hold on all threads of a locked, active task. */
void		task_hold_locked(
			task_t		task);
/* Wait for all threads of a locked, held task to stop running user code. */
void		task_wait_locked(
			task_t		task);
/* Drop one kernel-level hold; releases threads when the count hits zero. */
void		task_release_locked(
			task_t		task);
void		task_free(
			task_t		task );
/* Destroy every semaphore and lock set still owned by a dying task. */
void		task_synchronizer_destroy_all(
			task_t		task);

/* Replace the task's wired and/or paged ledger ports. */
kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);
166
167 void
168 task_backing_store_privileged(
169 task_t task)
170 {
171 task_lock(task);
172 task->priv_flags |= VM_BACKING_STORE_PRIV;
173 task_unlock(task);
174 return;
175 }
176
177 void
178 task_working_set_disable(task_t task)
179 {
180 struct tws_hash *ws;
181
182 task_lock(task);
183 ws = task->dynamic_working_set;
184 task->dynamic_working_set = NULL;
185 task_unlock(task);
186 if (ws) {
187 tws_hash_ws_flush(ws);
188 tws_hash_destroy(ws);
189 }
190 }
191
/*
 * task_set_64bit:
 *
 * Switch the task's user address space between 32-bit and 64-bit.
 * Going to 64-bit widens map->max_offset; going to 32-bit first
 * tears down everything mapped above the 32-bit limit, since it
 * would no longer be reachable.
 *
 * NOTE(review): no locking is taken here; presumably callers
 * serialize against other users of task->map — confirm.
 */
void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
	if(is64bit) {
		/* LP64todo - no task working set for 64-bit */
		task_set_64BitAddr(task);
		task_working_set_disable(task);
		task->map->max_offset = MACH_VM_MAX_ADDRESS;
	} else {
		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* LP64todo - make this clean */
#ifdef __ppc__
		vm_map_remove_commpage64(task->map);
		pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif
		/* Remove [VM_MAX_ADDRESS, MACH_VM_MAX_ADDRESS) from the map. */
		(void) vm_map_remove(task->map,
				(vm_map_offset_t) VM_MAX_ADDRESS,
				MACH_VM_MAX_ADDRESS,
				VM_MAP_NO_FLAGS);
		task_clear_64BitAddr(task);
		task->map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS;
	}
}
221
/*
 * task_init:
 *
 * Bootstrap the task subsystem: create the zone that backs all
 * struct task allocations, then create the kernel task itself.
 */
void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),		/* zone maximum */
			TASK_CHUNK * sizeof(struct task),	/* allocation chunk */
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	/*
	 * task_create_internal gave the kernel task a freshly created
	 * map; discard it and substitute the pre-existing kernel_map.
	 */
	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}
240
#if MACH_HOST

#if 0
/*
 * task_freeze:
 *
 * Prevent the task's processor set assignment from changing:
 * wait for any in-progress assignment to finish, then clear
 * may_assign so no new assignment can begin until task_unfreeze().
 */
static void
task_freeze(
	task_t	task)
{
	task_lock(task);
	/*
	 * If may_assign is false, task is already being assigned,
	 * wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		wait_result_t res;

		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					 &task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
	task_unlock(task);
	return;
}
#else
/*
 * BUG FIX: this stub was previously named thread_freeze(thread),
 * but the callers (task_create_internal) invoke task_freeze(task);
 * with the real implementation compiled out, MACH_HOST builds
 * would fail on an undefined task_freeze.  Name and parameter
 * corrected, and the argument parenthesized in the expansion.
 */
#define task_freeze(task)	assert((task)->processor_set == &default_pset)
#endif

#if 0
/*
 * task_unfreeze:
 *
 * Allow processor set assignment to change again, waking anyone
 * blocked in task_freeze().
 */
static void
task_unfreeze(
	task_t	task)
{
	task_lock(task);
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
	task_unlock(task);
	return;
}
#else
/* BUG FIX: was misnamed thread_unfreeze — see task_freeze above. */
#define task_unfreeze(task)	assert((task)->processor_set == &default_pset)
#endif

#endif	/* MACH_HOST */
289
/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 *
 * This capability is no longer supported: the routine is retained
 * only for interface compatibility and always fails.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t		map_base,
	__unused vm_size_t		map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}
303
304 kern_return_t
305 task_create(
306 task_t parent_task,
307 __unused ledger_port_array_t ledger_ports,
308 __unused mach_msg_type_number_t num_ledger_ports,
309 boolean_t inherit_memory,
310 task_t *child_task) /* OUT */
311 {
312 if (parent_task == TASK_NULL)
313 return(KERN_INVALID_ARGUMENT);
314
315 return task_create_internal(
316 parent_task, inherit_memory, child_task);
317 }
318
/*
 * host_security_create_task_token:
 *
 * Create a child task of parent_task and stamp it with the caller's
 * supplied security and audit tokens via host_security_set_task_token.
 * Requires a valid host security port.  Ledger ports are unused.
 */
kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	security_token_t		sec_token,
	audit_token_t			audit_token,
	host_priv_t			host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_internal(
			parent_task, inherit_memory, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      audit_token,
					      host_priv);

	/*
	 * NOTE(review): if setting the token fails, the freshly created
	 * task (and the caller's reference to it in *child_task) is not
	 * deallocated here — confirm callers clean it up.  Also, the two
	 * returns below are redundant; both return 'result'.
	 */
	if (result != KERN_SUCCESS)
		return(result);

	return(result);
}
356
/*
 * task_create_internal:
 *
 * Common task-creation path used by task_create(),
 * host_security_create_task_token(), and task_init() (for the
 * kernel task, with parent_task == TASK_NULL).
 *
 * Allocates and fully initializes a new task: address map (forked
 * from the parent or freshly created), IPC state, working set,
 * ledgers, security/audit tokens, and processor-set membership.
 * Returns the new task in *child_task with two references: one for
 * being alive and one for the caller.
 */
kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	processor_set_t		pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	/* Either copy-on-write fork the parent's map or build a fresh one. */
	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
				(vm_map_offset_t)(VM_MIN_ADDRESS),
				(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	/* Baseline initialization of locks, queues, counters and stats. */
	mutex_init(&new_task->lock, 0);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix=0;
	new_task->csw=0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef MACH_BSD
	new_task->bsd_info = 0;
#endif /* MACH_BSD */

#ifdef __ppc__
	if(BootProcInfo.pf.Available & pf64Bit) new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#endif

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		/* Child inherits the parent's credentials and shared region. */
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region = parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		/* A 64-bit parent produces a 64-bit child. */
		if(task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);
	}
	else {
		/* No parent: kernel credentials and root ledgers. */
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	/* The very first task created becomes the kernel task. */
	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

	/* NOTE(review): compares against NULL rather than TASK_NULL
	 * as elsewhere in this file — same value, inconsistent style. */
	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}
490
/*
 * task_deallocate:
 *
 * Drop a reference on a task.  When the last reference goes away,
 * the task's remaining state (pset reference, working set, IPC
 * state, address map, space, profiling data) is torn down and the
 * struct is returned to task_zone.
 */
void
task_deallocate(
	task_t		task)
{
	processor_set_t pset;

	if (task == TASK_NULL)
		return;

	/* Not the final reference: nothing more to do. */
	if (task_deallocate_internal(task) > 0)
		return;

	/* Final reference: reclaim everything the task still holds. */
	pset = task->processor_set;
	pset_deallocate(pset);

	if(task->dynamic_working_set)
		tws_hash_destroy(task->dynamic_working_set);

	ipc_task_terminate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	task_prof_deallocate(task);
	zfree(task_zone, task);
}
522
523 /*
524 * task_terminate:
525 *
526 * Terminate the specified task. See comments on thread_terminate
527 * (kern/thread.c) about problems with terminating the "current task."
528 */
529
530 kern_return_t
531 task_terminate(
532 task_t task)
533 {
534 if (task == TASK_NULL)
535 return (KERN_INVALID_ARGUMENT);
536
537 if (task->bsd_info)
538 return (KERN_FAILURE);
539
540 return (task_terminate_internal(task));
541 }
542
/*
 * task_terminate_internal:
 *
 * Tear down a (non-kernel) task: stop and terminate all of its
 * threads, disable its IPC, destroy its synchronizers and IPC
 * space, unmap its address space, remove it from its processor
 * set, and drop the task's self-reference.
 */
kern_return_t
task_terminate_internal(
	task_t			task)
{
	processor_set_t		pset;
	thread_t		thread, self;
	task_t			self_task;
	boolean_t		interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 * Get the task locked and make sure that we are not racing
	 * with someone else trying to terminate us.
	 *
	 * When two distinct tasks must both be locked, they are
	 * locked in ascending address order to avoid deadlock.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active || !self->active) {
		/*
		 * Task or current act is already being terminated.
		 * Just return an error. If we are dying, this will
		 * just get us to our AST special handler and that
		 * will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 * Indicate that we want all the threads to stop executing
	 * at user space by holding the task (we would have held
	 * each thread independently in thread_terminate_internal -
	 * but this way we may be more likely to already find it
	 * held there). Mark the task inactive, and prevent
	 * further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 * Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

	/* LP64todo - make this clean */
#ifdef __ppc__
	vm_map_remove_commpage64(task->map);
	pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explictly here.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);

	/*
	 * Flush working set here to avoid I/O in reaper thread
	 */
	if (task->dynamic_working_set)
		tws_hash_ws_flush(task->dynamic_working_set);

	pset = task->processor_set;
	pset_lock(pset);
	pset_remove_task(pset,task);
	pset_unlock(pset);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);	// notify the perfmon facility
#endif

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}
682
/*
 * task_halt:
 *
 * Shut the current task down (except for the current thread) in
 * preparation for dramatic changes to the task (probably exec).
 * We hold the task, terminate all other threads in the task and
 * wait for them to terminate, clean up the portspace, and when
 * all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	/* Only the task's own thread may halt it. */
	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active || !self->active) {
		/*
		 * Task or current thread is already being terminated.
		 * Hurry up and return out of the current kernel context
		 * so that we run our AST special handler to terminate
		 * ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 * Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		/* Drop the hold taken above; the task itself lives on. */
		task_release_locked(task);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the contents of the IPC space, leaving just
	 * a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	return (KERN_SUCCESS);
}
768
769 /*
770 * task_hold_locked:
771 *
772 * Suspend execution of the specified task.
773 * This is a recursive-style suspension of the task, a count of
774 * suspends is maintained.
775 *
776 * CONDITIONS: the task is locked and active.
777 */
778 void
779 task_hold_locked(
780 register task_t task)
781 {
782 register thread_t thread;
783
784 assert(task->active);
785
786 if (task->suspend_count++ > 0)
787 return;
788
789 /*
790 * Iterate through all the threads and hold them.
791 */
792 queue_iterate(&task->threads, thread, thread_t, task_threads) {
793 thread_mtx_lock(thread);
794 thread_hold(thread);
795 thread_mtx_unlock(thread);
796 }
797 }
798
799 /*
800 * task_hold:
801 *
802 * Same as the internal routine above, except that is must lock
803 * and verify that the task is active. This differs from task_suspend
804 * in that it places a kernel hold on the task rather than just a
805 * user-level hold. This keeps users from over resuming and setting
806 * it running out from under the kernel.
807 *
808 * CONDITIONS: the caller holds a reference on the task
809 */
810 kern_return_t
811 task_hold(
812 register task_t task)
813 {
814 if (task == TASK_NULL)
815 return (KERN_INVALID_ARGUMENT);
816
817 task_lock(task);
818
819 if (!task->active) {
820 task_unlock(task);
821
822 return (KERN_FAILURE);
823 }
824
825 task_hold_locked(task);
826 task_unlock(task);
827
828 return (KERN_SUCCESS);
829 }
830
831 /*
832 * task_wait_locked:
833 *
834 * Wait for all threads in task to stop.
835 *
836 * Conditions:
837 * Called with task locked, active, and held.
838 */
839 void
840 task_wait_locked(
841 register task_t task)
842 {
843 register thread_t thread, self;
844
845 assert(task->active);
846 assert(task->suspend_count > 0);
847
848 self = current_thread();
849
850 /*
851 * Iterate through all the threads and wait for them to
852 * stop. Do not wait for the current thread if it is within
853 * the task.
854 */
855 queue_iterate(&task->threads, thread, thread_t, task_threads) {
856 if (thread != self)
857 thread_wait(thread);
858 }
859 }
860
861 /*
862 * task_release_locked:
863 *
864 * Release a kernel hold on a task.
865 *
866 * CONDITIONS: the task is locked and active
867 */
868 void
869 task_release_locked(
870 register task_t task)
871 {
872 register thread_t thread;
873
874 assert(task->active);
875 assert(task->suspend_count > 0);
876
877 if (--task->suspend_count > 0)
878 return;
879
880 queue_iterate(&task->threads, thread, thread_t, task_threads) {
881 thread_mtx_lock(thread);
882 thread_release(thread);
883 thread_mtx_unlock(thread);
884 }
885 }
886
887 /*
888 * task_release:
889 *
890 * Same as the internal routine above, except that it must lock
891 * and verify that the task is active.
892 *
893 * CONDITIONS: The caller holds a reference to the task
894 */
895 kern_return_t
896 task_release(
897 task_t task)
898 {
899 if (task == TASK_NULL)
900 return (KERN_INVALID_ARGUMENT);
901
902 task_lock(task);
903
904 if (!task->active) {
905 task_unlock(task);
906
907 return (KERN_FAILURE);
908 }
909
910 task_release_locked(task);
911 task_unlock(task);
912
913 return (KERN_SUCCESS);
914 }
915
/*
 * task_threads:
 *
 * Return an array of send rights to all threads in the task.
 * Memory for the array is sized optimistically, then the task is
 * re-checked under its lock; if threads were added in the interim
 * the buffer is grown and the snapshot retried.  On success the
 * caller owns the returned kalloc'd array of ports.
 */
kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*threads;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	/*
	 * Allocate-and-retry loop: memory cannot be allocated while
	 * holding the task lock, so grow the buffer outside the lock
	 * and re-validate the thread count once relocked.
	 */
	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	threads = (thread_t *)addr;

	i = j = 0;

	/* Take a reference on each thread while still under the lock. */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
		    ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		threads[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* Drop the refs taken above before failing. */
				for (i = 0; i < actual; ++i)
					thread_deallocate(threads[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			threads = (thread_t *)newaddr;
		}

		*threads_out = threads;
		*count = actual;

		/* do the conversion that Mig should handle */

		/* Convert each thread ref to a port right, in place. */
		for (i = 0; i < actual; ++i)
			((ipc_port_t *) threads)[i] = convert_thread_to_port(threads[i]);
	}

	return (KERN_SUCCESS);
}
1024
/*
 * task_suspend:
 *
 * Implement a user-level suspension on a task.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 * If the stop count was positive, the task is
		 * already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
1071
1072 /*
1073 * task_resume:
1074 * Release a kernel hold on a task.
1075 *
1076 * Conditions:
1077 * The caller holds a reference to the task
1078 */
1079 kern_return_t
1080 task_resume(
1081 register task_t task)
1082 {
1083 register boolean_t release = FALSE;
1084
1085 if (task == TASK_NULL || task == kernel_task)
1086 return (KERN_INVALID_ARGUMENT);
1087
1088 task_lock(task);
1089
1090 if (!task->active) {
1091 task_unlock(task);
1092
1093 return (KERN_FAILURE);
1094 }
1095
1096 if (task->user_stop_count > 0) {
1097 if (--task->user_stop_count == 0)
1098 release = TRUE;
1099 }
1100 else {
1101 task_unlock(task);
1102
1103 return (KERN_FAILURE);
1104 }
1105
1106 /*
1107 * Release the task if necessary.
1108 */
1109 if (release)
1110 task_release_locked(task);
1111
1112 task_unlock(task);
1113
1114 return (KERN_SUCCESS);
1115 }
1116
/*
 * host_security_set_task_token:
 *
 * Stamp a task with new security and audit tokens (requires the
 * host security port), then point the task's special host port at
 * either the privileged host port (if host_priv was supplied) or
 * the unprivileged one.
 */
kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t		host_port;
	kern_return_t		kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	/* Install the new tokens under the task lock. */
	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	/* Pick the privileged or unprivileged host port accordingly. */
	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}
1148
/*
 * Utility routine to set a ledger
 *
 * Replaces the task's wired and/or paged ledger port; a NULL
 * argument leaves the corresponding ledger unchanged.  The send
 * right on each replaced port is released before the new copy
 * is installed.
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}
1174
/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 *
 * No flavors are currently accepted: every flavor falls through to
 * the default case and fails with KERN_INVALID_ARGUMENT.  The switch
 * is kept as scaffolding for future flavors.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	__unused task_info_t	task_info_in,		/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	/* Unreachable today; meaningful once a flavor is handled above. */
	return (KERN_SUCCESS);
}
1199
/*
 *	task_info:
 *
 *	Return, in the caller-supplied buffer 'task_info_out', the
 *	information variant selected by 'flavor' for the given task.
 *	On entry *task_info_count is the buffer capacity (in natural_t
 *	units); on success it is updated to the count actually written.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null task, an unknown or
 *	unsupported flavor, or a too-small buffer; KERN_INVALID_POLICY
 *	for the obsolete scheduling flavors when the task's policy does
 *	not match; KERN_SUCCESS otherwise.
 *
 *	Locking: fields subject to concurrent update (times, counters,
 *	tokens) are read under the task lock; the VM sizes near the top
 *	of the basic-info cases are deliberately read without it.
 */
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_32_t)task_info_out;

		/* The kernel task reports the kernel map, not task->map. */
		map = (task == kernel_task)? kernel_map: task->map;
		/*
		 * 32-bit flavor: virtual_size is truncated to vm_offset_t
		 * by CAST_DOWN, so it can lose high bits for large maps.
		 * resident_size is the resident page count scaled to bytes.
		 */
		basic_info->virtual_size = CAST_DOWN(vm_offset_t,map->size);
		basic_info->resident_size = pmap_resident_count(map->pmap)
						   * PAGE_SIZE;

		task_lock(task);
		/* Only the kernel task is reported as round-robin. */
		basic_info->policy = ((task != kernel_task)?
						  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		/* Convert accumulated absolute time to sec/usec pairs. */
		absolutetime_to_microtime(
				task->total_user_time,
					&basic_info->user_time.seconds,
						&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
				task->total_system_time,
					&basic_info->system_time.seconds,
						&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		/* 64-bit flavor: no truncation of the map size here. */
		basic_info->virtual_size = map->size;
		basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)
						   * PAGE_SIZE);

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
						  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(
				task->total_user_time,
					&basic_info->user_time.seconds,
						&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
				task->total_system_time,
					&basic_info->system_time.seconds,
						&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t			thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);

		/*
		 * Sum per-thread user/system times over the live threads;
		 * the task lock keeps the thread list stable while we walk it.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t		thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		task_lock(task);

		/* Start from the task's accumulated (terminated-thread) totals. */
		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		/*
		 * Add the still-running threads' timers: each thread's time
		 * goes into both the per-thread aggregate and the task total.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		task_unlock(task);

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{
		/* FIFO scheduling is no longer supported at task level. */
		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		/* Only the kernel task is treated as round-robin (see above). */
		if (task != kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		/*
		 * Quantum read after dropping the lock: std_quantum_us is a
		 * global; /1000 presumably converts usec to msec — TODO confirm
		 * against the policy_rr_base definition.
		 */
		rr_base->quantum = std_quantum_us / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		/* Inverse of the RR case: the kernel task is not timeshare. */
		if (task == kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		sec_token_p = (security_token_t *) task_info_out;

		/* Struct copy under the lock for a consistent snapshot. */
		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		/* Never supported for tasks. */
		return (KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		events_info = (task_events_info_t) task_info_out;

		/* Snapshot the event counters together under the lock. */
		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
1470
/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task.
 *
 *	Processor set reassignment is not supported: all arguments are
 *	ignored and KERN_FAILURE is returned unconditionally.
 */
kern_return_t
task_assign(
	__unused task_t		task,
	__unused processor_set_t	new_pset,
	__unused boolean_t	assign_threads)
{
	return(KERN_FAILURE);
}
1484
/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 *
 *	Simply forwards to task_assign() with the default pset; since
 *	task_assign() is stubbed out above, this always fails with
 *	KERN_FAILURE.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}
1497
1498 /*
1499 * task_get_assignment
1500 *
1501 * Return name of processor set that task is assigned to.
1502 */
1503 kern_return_t
1504 task_get_assignment(
1505 task_t task,
1506 processor_set_t *pset)
1507 {
1508 if (!task->active)
1509 return(KERN_FAILURE);
1510
1511 *pset = task->processor_set;
1512 pset_reference(*pset);
1513 return(KERN_SUCCESS);
1514 }
1515
1516
/*
 *	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 *
 *	Not supported: all arguments are ignored and KERN_FAILURE is
 *	returned unconditionally.
 */
kern_return_t
task_policy(
	__unused task_t			task,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		set_limit,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}
1535
/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy can be any policy implemented by the
 *	processor set, whether enabled or not. Change contained threads
 *	if requested.
 *
 *	Not supported: all arguments are ignored and KERN_FAILURE is
 *	returned unconditionally.
 */
kern_return_t
task_set_policy(
	__unused task_t			task,
	__unused processor_set_t	pset,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	base_count,
	__unused policy_limit_t		limit,
	__unused mach_msg_type_number_t	limit_count,
	__unused boolean_t		change)
{
	return(KERN_FAILURE);
}
1557
1558 #if FAST_TAS
1559 kern_return_t
1560 task_set_ras_pc(
1561 task_t task,
1562 vm_offset_t pc,
1563 vm_offset_t endpc)
1564 {
1565 extern int fast_tas_debug;
1566
1567 if (fast_tas_debug) {
1568 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
1569 task, pc, endpc);
1570 }
1571 task_lock(task);
1572 task->fast_tas_base = pc;
1573 task->fast_tas_end = endpc;
1574 task_unlock(task);
1575 return KERN_SUCCESS;
1576 }
1577 #else /* FAST_TAS */
1578 kern_return_t
1579 task_set_ras_pc(
1580 __unused task_t task,
1581 __unused vm_offset_t pc,
1582 __unused vm_offset_t endpc)
1583 {
1584 return KERN_FAILURE;
1585 }
1586 #endif /* FAST_TAS */
1587
/*
 *	task_synchronizer_destroy_all:
 *
 *	Destroy every semaphore and lock set still owned by the task.
 *	Called during task teardown.
 *
 *	Each loop repeatedly destroys the head of the corresponding
 *	list; termination relies on semaphore_destroy()/lock_set_destroy()
 *	removing the object from the task's list — presumably guaranteed
 *	by those routines (TODO confirm), otherwise these loops would
 *	never exit.  Destroy failures are deliberately ignored (void casts).
 */
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}
1612
1613 /*
1614 * We need to export some functions to other components that
1615 * are currently implemented in macros within the osfmk
1616 * component. Just export them as functions of the same name.
1617 */
1618 boolean_t is_kerneltask(task_t t)
1619 {
1620 if (t == kernel_task)
1621 return (TRUE);
1622
1623 return (FALSE);
1624 }
1625
/*
 * current_task is normally a macro; #undef it here so we can export a
 * real function of the same name for components that need external
 * linkage.  The out-of-line version just calls the fast inline form.
 */
#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}
1632
1633 #undef task_reference
1634 void task_reference(task_t task);
1635 void
1636 task_reference(
1637 task_t task)
1638 {
1639 if (task != TASK_NULL)
1640 task_reference_internal(task);
1641 }