/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_kdb.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <fast_tas.h>
#include <platforms.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>	/* for vm_map_remove_commpage64 */

#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#ifdef __ppc__
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#endif

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>

#include <vm/task_working_set.h>
#include <vm/vm_shared_memory_server.h>

task_t		kernel_task;
zone_t		task_zone;

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_free(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

void
task_backing_store_privileged(
	task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}

void
task_working_set_disable(task_t task)
{
	struct tws_hash *ws;

	task_lock(task);
	ws = task->dynamic_working_set;
	task->dynamic_working_set = NULL;
	task_unlock(task);
	if (ws) {
		tws_hash_ws_flush(ws);
		tws_hash_destroy(ws);
	}
}
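
/*
 * A note on the pattern in task_working_set_disable() above: the
 * working-set pointer is detached from the task while the task lock
 * is held, but the potentially expensive flush and destroy run only
 * after the lock is dropped.  Code following this pattern must touch
 * only its private copy of the pointer ("ws" here), since another
 * thread may observe task->dynamic_working_set as NULL as soon as
 * the lock is released.
 */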

void
task_set_64bit(
	task_t		task,
	boolean_t	is64bit)
{
	if (is64bit) {
		/* LP64todo - no task working set for 64-bit */
		task_set_64BitAddr(task);
		task_working_set_disable(task);
		task->map->max_offset = MACH_VM_MAX_ADDRESS;
	} else {
		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* LP64todo - make this clean */
#ifdef __ppc__
		vm_map_remove_commpage64(task->map);
		pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif
		(void) vm_map_remove(task->map,
				(vm_map_offset_t) VM_MAX_ADDRESS,
				MACH_VM_MAX_ADDRESS,
				VM_MAP_NO_FLAGS);
		task_clear_64BitAddr(task);
		task->map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS;
	}
}

void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 */
	if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
}
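
/*
 * For reference, the zinit() arguments above follow the conventional
 * Mach zone-allocator parameters: the size of each zone element (one
 * struct task), the maximum amount of memory the zone may consume,
 * the amount by which the zone grows when it fills, and a name used
 * by debugging tools.
 */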

#if	MACH_HOST

#if	0
static void
task_freeze(
	task_t task)
{
	task_lock(task);
	/*
	 * If may_assign is false, task is already being assigned,
	 * wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		wait_result_t res;

		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					&task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
	task_unlock(task);
	return;
}
#else
#define task_freeze(task)	assert(task->processor_set == &default_pset)
#endif

#if	0
static void
task_unfreeze(
	task_t task)
{
	task_lock(task);
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
	task_unlock(task);
	return;
}
#else
#define task_unfreeze(task)	assert(task->processor_set == &default_pset)
#endif

#endif	/* MACH_HOST */

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t	map_base,
	__unused vm_size_t	map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	return task_create_internal(
			parent_task, inherit_memory, child_task);
}

kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	security_token_t		sec_token,
	audit_token_t			audit_token,
	host_priv_t			host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	boolean_t			inherit_memory,
	task_t				*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return (KERN_INVALID_SECURITY);

	result = task_create_internal(
			parent_task, inherit_memory, child_task);

	if (result != KERN_SUCCESS)
		return (result);

	result = host_security_set_task_token(host_security,
					*child_task,
					sec_token,
					audit_token,
					host_priv);

	if (result != KERN_SUCCESS)
		return (result);

	return (result);
}

kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	task_t		*child_task)	/* OUT */
{
	task_t			new_task;
	processor_set_t		pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
				(vm_map_offset_t)(VM_MIN_ADDRESS),
				(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, 0);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix = 0;
	new_task->csw = 0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef MACH_BSD
	new_task->bsd_info = 0;
#endif /* MACH_BSD */

#ifdef __ppc__
	if (BootProcInfo.pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#endif

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region = parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		if (task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);
	}
	else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

	if (vm_backing_store_low && parent_task != TASK_NULL)
		new_task->priv_flags |= (parent_task->priv_flags & VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return (KERN_SUCCESS);
}
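
/*
 * Note that a freshly created task carries two references: the
 * "alive" reference, dropped later by task_terminate_internal() via
 * its final task_deallocate(), and the reference handed back to our
 * caller.  Every other holder must pair task_reference() with
 * task_deallocate().
 */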

/*
 * task_deallocate:
 *
 * Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	processor_set_t	pset;

	if (task == TASK_NULL)
		return;

	if (task_deallocate_internal(task) > 0)
		return;

	pset = task->processor_set;
	pset_deallocate(pset);

	if (task->dynamic_working_set)
		tws_hash_destroy(task->dynamic_working_set);

	ipc_task_terminate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	task_prof_deallocate(task);
	zfree(task_zone, task);
}

/*
 * task_terminate:
 *
 * Terminate the specified task.  See comments on thread_terminate
 * (kern/thread.c) about problems with terminating the "current task."
 */

kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (task->bsd_info)
		return (KERN_FAILURE);

	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t		task)
{
	processor_set_t	pset;
	thread_t	thread, self;
	task_t		self_task;
	boolean_t	interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 * Get the task locked and make sure that we are not racing
	 * with someone else trying to terminate us.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}
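
	/*
	 * Note that the two task locks above are always taken in
	 * ascending address order when both must be held.  Two threads
	 * concurrently terminating each other's tasks could otherwise
	 * acquire the locks in opposite orders and deadlock; imposing
	 * a global order on the lock addresses rules that out.
	 */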

	if (!task->active || !self->active) {
		/*
		 * Task or the current thread is already being
		 * terminated.  Just return an error.  If we are dying,
		 * this will just get us to our AST special handler and
		 * that will finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 * Indicate that we want all the threads to stop executing
	 * at user space by holding the task (we would have held
	 * each thread independently in thread_terminate_internal -
	 * but this way we may be more likely to already find it
	 * held there).  Mark the task inactive, and prevent
	 * further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 * Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_terminate_internal(thread);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

	/* LP64todo - make this clean */
#ifdef __ppc__
	vm_map_remove_commpage64(task->map);
	pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);

	/*
	 * Flush working set here to avoid I/O in reaper thread
	 */
	if (task->dynamic_working_set)
		tws_hash_ws_flush(task->dynamic_working_set);

	pset = task->processor_set;
	pset_lock(pset);
	pset_remove_task(pset, task);
	pset_unlock(pset);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);		/* notify the perfmon facility */
#endif

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}

/*
 * task_halt:
 *
 * Shut the current task down (except for the current thread) in
 * preparation for dramatic changes to the task (probably exec).
 * We hold the task, terminate all other threads in the task and
 * wait for them to terminate, clean up the portspace, and when
 * all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active || !self->active) {
		/*
		 * Task or current thread is already being terminated.
		 * Hurry up and return out of the current kernel context
		 * so that we run our AST special handler to terminate
		 * ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 * Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the contents of the IPC space, leaving just
	 * a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	return (KERN_SUCCESS);
}

/*
 * task_hold_locked:
 *
 * Suspend execution of the specified task.
 * This is a recursive-style suspension of the task, a count of
 * suspends is maintained.
 *
 * CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 * Iterate through all the threads and hold them.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_hold(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 * task_hold:
 *
 * Same as the internal routine above, except that it must lock
 * and verify that the task is active.  This differs from task_suspend
 * in that it places a kernel hold on the task rather than just a
 * user-level hold.  This keeps user-level code from over-resuming
 * the task and setting it running out from under the kernel.
 *
 * CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 * task_wait_locked:
 *
 * Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_t	thread, self;

	assert(task->active);
	assert(task->suspend_count > 0);

	self = current_thread();

	/*
	 * Iterate through all the threads and wait for them to
	 * stop.  Do not wait for the current thread if it is within
	 * the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread != self)
			thread_wait(thread);
	}
}

/*
 * task_release_locked:
 *
 * Release a kernel hold on a task.
 *
 * CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t		task)
{
	register thread_t	thread;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		thread_release(thread);
		thread_mtx_unlock(thread);
	}
}

/*
 * task_release:
 *
 * Same as the internal routine above, except that it must lock
 * and verify that the task is active.
 *
 * CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}

kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*threads;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	threads = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		threads[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(threads[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			threads = (thread_t *)newaddr;
		}

		*threads_out = threads;
		*count = actual;

		/* do the conversion that MIG should handle */

		for (i = 0; i < actual; ++i)
			((ipc_port_t *) threads)[i] = convert_thread_to_port(threads[i]);
	}

	return (KERN_SUCCESS);
}
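
/*
 * A minimal user-space sketch of calling the task_threads() interface
 * (illustrative only, not part of this file).  The thread ports and
 * the out-of-line array holding them become the caller's
 * responsibility to release:
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	void
 *	list_my_threads(void)
 *	{
 *		thread_act_array_t	threads;
 *		mach_msg_type_number_t	count, i;
 *
 *		if (task_threads(mach_task_self(), &threads, &count) != KERN_SUCCESS)
 *			return;
 *		printf("%u threads\n", count);
 *		for (i = 0; i < count; i++)
 *			mach_port_deallocate(mach_task_self(), threads[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *			      count * sizeof(threads[0]));
 *	}
 */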

/*
 * task_suspend:
 *
 * Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 * If the stop count was positive, the task is
		 * already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 * task_resume:
 *	Release a kernel hold on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(
	register task_t		task)
{
	register boolean_t	release = FALSE;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count > 0) {
		if (--task->user_stop_count == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 * Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
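
/*
 * A minimal user-space sketch of the suspend/resume pairing
 * (illustrative only, not part of this file).  The task_for_pid()
 * lookup is an assumption here: it lives in the BSD layer, not in
 * this file, and requires appropriate privileges.
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t
 *	pause_and_resume(int pid)
 *	{
 *		task_t		target;
 *		kern_return_t	kr;
 *
 *		kr = task_for_pid(mach_task_self(), pid, &target);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		kr = task_suspend(target);		// user_stop_count 0 -> 1
 *		if (kr == KERN_SUCCESS)
 *			kr = task_resume(target);	// user_stop_count 1 -> 0
 *		mach_port_deallocate(mach_task_self(), target);
 *		return kr;
 *	}
 *
 * Suspends and resumes must balance: as the code above shows, an
 * unmatched task_resume() returns KERN_FAILURE rather than driving
 * the stop count negative.
 */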

kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	audit_token_t		audit_token,
	host_priv_t		host_priv)
{
	ipc_port_t	host_port;
	kern_return_t	kr;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return (KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return (kr);
}

/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return (KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (e.g., BSD info,
 * task names, etc.), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	__unused task_info_t		task_info_in,	/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;
		basic_info->virtual_size = CAST_DOWN(vm_offset_t, map->size);
		basic_info->resident_size = pmap_resident_count(map->pmap)
						* PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task) ?
						POLICY_TIMESHARE : POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(
			task->total_user_time,
			&basic_info->user_time.seconds,
			&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
			task->total_system_time,
			&basic_info->system_time.seconds,
			&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t		map;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;
		basic_info->virtual_size = map->size;
		basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)
						* PAGE_SIZE);

		task_lock(task);
		basic_info->policy = ((task != kernel_task) ?
						POLICY_TIMESHARE : POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(
			task->total_user_time,
			&basic_info->user_time.seconds,
			&basic_info->user_time.microseconds);
		absolutetime_to_microtime(
			task->total_system_time,
			&basic_info->system_time.seconds,
			&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t			thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t		thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		task_lock(task);

		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		task_unlock(task);

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{

		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = std_quantum_us / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return (KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
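
/*
 * A minimal user-space sketch of querying one of these flavors
 * (illustrative only, not part of this file):
 *
 *	#include <mach/mach.h>
 *	#include <stdio.h>
 *
 *	void
 *	print_my_footprint(void)
 *	{
 *		task_basic_info_data_t	info;
 *		mach_msg_type_number_t	count = TASK_BASIC_INFO_COUNT;
 *
 *		if (task_info(mach_task_self(), TASK_BASIC_INFO,
 *			      (task_info_t)&info, &count) == KERN_SUCCESS)
 *			printf("resident size: %lu bytes\n",
 *			       (unsigned long)info.resident_size);
 *	}
 *
 * The count argument is in/out: callers pass the capacity of their
 * buffer (in natural_t units) and receive the number of units that
 * were actually filled in.
 */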

/*
 * task_assign:
 *
 * Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	__unused task_t			task,
	__unused processor_set_t	new_pset,
	__unused boolean_t		assign_threads)
{
	return (KERN_FAILURE);
}

/*
 * task_assign_default:
 *
 * Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}

/*
 * task_get_assignment
 *
 * Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return (KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return (KERN_SUCCESS);
}


/*
 * task_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task.  Policy must be a policy which is enabled for the
 * processor set.  Change contained threads if requested.
 */
kern_return_t
task_policy(
	__unused task_t			task,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	count,
	__unused boolean_t		set_limit,
	__unused boolean_t		change)
{
	return (KERN_FAILURE);
}

/*
 * task_set_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task.  Policy can be any policy implemented by the
 * processor set, whether enabled or not.  Change contained threads
 * if requested.
 */
kern_return_t
task_set_policy(
	__unused task_t			task,
	__unused processor_set_t	pset,
	__unused policy_t		policy_id,
	__unused policy_base_t		base,
	__unused mach_msg_type_number_t	base_count,
	__unused policy_limit_t		limit,
	__unused mach_msg_type_number_t	limit_count,
	__unused boolean_t		change)
{
	return (KERN_FAILURE);
}

#if	FAST_TAS
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
kern_return_t
task_set_ras_pc(
	__unused task_t		task,
	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */

void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 * Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 * Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return (FALSE);
}

#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}