/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *
 *	Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
#include <mach_host.h>
#include <mach_prof.h>
#include <task_swapper.h>
#include <platforms.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach/mach_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */
#if	TASK_SWAPPER
#include <kern/task_swap.h>
#endif	/* TASK_SWAPPER */
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>

/*
 * Exported interfaces
 */
#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <vm/task_working_set.h>
/* Forward declarations */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_collect_scan(void);
void		task_synchronizer_destroy_all(
			task_t		task);
kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);
void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/*
	 * Create the kernel task as the first task.
	 * Task_create_local must assign to kernel_task as a side effect,
	 * for other initialization. (:-()
	 */
	if (task_create_local(
			TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");
	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;

#if	MACH_ASSERT
	if (watchacts & WA_TASK)
	    printf("task_init: kernel_task = %x map=%x\n",
				kernel_task, kernel_map);
#endif	/* MACH_ASSERT */
}
#if	MACH_HOST
/*
 *	task_freeze:  called with the task locked.
 */
void
task_freeze(
	task_t task)
{
	/*
	 *	If may_assign is false, task is already being assigned,
	 *	wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		wait_result_t res;

		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					 &task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
}
#else
#define thread_freeze(thread)	assert(task->processor_set == &default_pset)
#endif

#if	MACH_HOST
/*
 *	task_unfreeze:  release a frozen task and wake any waiter.
 */
void
task_unfreeze(
	task_t task)
{
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
}
#else
#define thread_unfreeze(thread)	assert(task->processor_set == &default_pset)
#endif	/* MACH_HOST */
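
/*
 * The may_assign/assign_active pair above is the classic Mach
 * sleep/wakeup handshake: the sleeper sets a flag and blocks on the
 * flag's address with thread_sleep_mutex() (which drops the mutex while
 * asleep and retakes it on wakeup); the waker clears the flag and calls
 * thread_wakeup() on the same address.  A minimal sketch of the pattern,
 * with hypothetical example_* names standing in for real objects:
 */
#if 0	/* illustrative sketch */
struct example_obj {
	mutex_t		lock;		/* guards the fields below */
	boolean_t	busy;		/* resource currently held? */
	boolean_t	waiting;	/* anyone asleep on the resource? */
};

/* Called and returns with obj->lock held; claims the resource. */
static void
example_claim(struct example_obj *obj)
{
	while (obj->busy) {
		obj->waiting = TRUE;
		(void) thread_sleep_mutex((event_t) &obj->waiting,
					  &obj->lock, THREAD_UNINT);
	}
	obj->busy = TRUE;
}

/* Called and returns with obj->lock held; releases the resource. */
static void
example_release(struct example_obj *obj)
{
	obj->busy = FALSE;
	if (obj->waiting) {
		obj->waiting = FALSE;
		thread_wakeup((event_t) &obj->waiting);
	}
}
#endif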
/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	task_t			parent_task,
	vm_offset_t		map_base,
	vm_size_t		map_size,
	task_t			*child_task)
{
	kern_return_t		result;
	task_t			new_task;
	vm_map_t		old_map;

	/*
	 * Create the task.
	 */
	result = task_create_local(parent_task, FALSE, TRUE, &new_task);
	if (result != KERN_SUCCESS)
		return (result);

	/*
	 * Task_create_local creates the task with a user-space map.
	 * We attempt to replace the map and free it afterwards; else
	 * task_deallocate will free it (can NOT set map to null before
	 * task_deallocate, this impersonates a norma placeholder task).
	 * _Mark the memory as pageable_ -- this is what we
	 * want for images (like servers) loaded into the kernel.
	 */
	if (map_size == 0) {
		vm_map_deallocate(new_task->map);
		new_task->map = kernel_map;
		*child_task = new_task;
	} else {
		old_map = new_task->map;
		if ((result = kmem_suballoc(kernel_map, &map_base,
					    map_size, TRUE, FALSE,
					    &new_task->map)) != KERN_SUCCESS) {
			/*
			 * New task created with ref count of 2 -- decrement by
			 * one to force task deletion.
			 */
			printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
			       kernel_map, map_base, map_size);
			--new_task->ref_count;
			task_deallocate(new_task);
			return (result);
		}
		vm_map_deallocate(old_map);
		*child_task = new_task;
	}
	return (KERN_SUCCESS);
}
kern_return_t
task_create(
	task_t			parent_task,
	ledger_port_array_t	ledger_ports,
	mach_msg_type_number_t	num_ledger_ports,
	boolean_t		inherit_memory,
	task_t			*child_task)		/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	return task_create_local(
			parent_task, inherit_memory, FALSE, child_task);
}
kern_return_t
host_security_create_task_token(
	host_security_t		host_security,
	task_t			parent_task,
	security_token_t	sec_token,
	host_priv_t		host_priv,
	ledger_port_array_t	ledger_ports,
	mach_msg_type_number_t	num_ledger_ports,
	boolean_t		inherit_memory,
	task_t			*child_task)		/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_local(
			parent_task, inherit_memory, FALSE, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      host_priv);

	if (result != KERN_SUCCESS)
		return(result);

	return(result);
}
kern_return_t
task_create_local(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	kernel_loaded,
	task_t		*child_task)		/* OUT */
{
	task_t		new_task;
	processor_set_t	pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;
	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
					round_page_32(VM_MIN_ADDRESS),
					trunc_page_32(VM_MAX_ADDRESS), TRUE);
	mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
	queue_init(&new_task->thr_acts);
	new_task->suspend_count = 0;
	new_task->thr_act_count = 0;
	new_task->res_act_count = 0;
	new_task->active_act_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->kernel_loaded = kernel_loaded;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->syscalls_unix = 0;
	new_task->taskFeatures[0] = 0;		/* Init task features */
	new_task->taskFeatures[1] = 0;		/* Init task features */
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef	MACH_BSD
	new_task->bsd_info = 0;
#endif	/* MACH_BSD */

	if(per_proc_info[0].pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */

#if	TASK_SWAPPER
	new_task->swap_state = TASK_SW_IN;
	new_task->swap_flags = 0;
	new_task->swap_ast_waiting = 0;
	new_task->swap_stamp = sched_tick;
	new_task->swap_rss = 0;
	new_task->swap_nswap = 0;
#endif	/* TASK_SWAPPER */
	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */
	eml_task_reference(new_task, parent_task);

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time.seconds = 0;
	new_task->total_user_time.microseconds = 0;
	new_task->total_system_time.seconds = 0;
	new_task->total_system_time.microseconds = 0;

	task_prof_init(new_task);
	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->sec_token = parent_task->sec_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region =
			parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
	}
	else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}
	if (kernel_task == TASK_NULL) {
		new_task->priority = MINPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

#if	FAST_TAS
	if (inherit_memory) {
		new_task->fast_tas_base = parent_task->fast_tas_base;
		new_task->fast_tas_end  = parent_task->fast_tas_end;
	} else {
		new_task->fast_tas_base = (vm_offset_t)0;
		new_task->fast_tas_end  = (vm_offset_t)0;
	}
#endif	/* FAST_TAS */

	ipc_task_enable(new_task);

#if	TASK_SWAPPER
	task_swapout_eligible(new_task);
#endif	/* TASK_SWAPPER */

#if	MACH_ASSERT
	if (watchacts & WA_TASK)
	    printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
			parent_task, inherit_memory, new_task);
#endif	/* MACH_ASSERT */

	*child_task = new_task;
	return(KERN_SUCCESS);
}
/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	processor_set_t	pset;
	int		refs;

	if (task == TASK_NULL)
		return;

	task_lock(task);
	refs = --task->ref_count;
	task_unlock(task);

	if (refs > 0)
		return;

#if	TASK_SWAPPER
	/* task_terminate guarantees that this task is off the list */
	assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
#endif	/* TASK_SWAPPER */

	if(task->dynamic_working_set)
		tws_hash_destroy((tws_hash_t)task->dynamic_working_set);

	eml_task_deallocate(task);

	ipc_task_terminate(task);

	pset = task->processor_set;
	pset_lock(pset);
	pset_remove_task(pset,task);
	pset_unlock(pset);
	pset_deallocate(pset);

	if (task->kernel_loaded)
	    vm_map_remove(kernel_map, task->map->min_offset,
			  task->map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_deallocate(task->map);
	is_release(task->itk_space);
	task_prof_deallocate(task);
	zfree(task_zone, (vm_offset_t) task);
}
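
/*
 * Note the reference-count convention at work here: task_create_local()
 * returns a task with ref_count == 2 (one reference for "alive", one for
 * the caller), and the object is only freed when task_deallocate() drops
 * the count to zero.  A minimal sketch of the same convention, with
 * hypothetical example_* names (locking omitted for brevity):
 */
#if 0	/* illustrative sketch */
typedef struct example_obj {
	int	ref_count;
} *example_obj_t;

static example_obj_t
example_create(void)
{
	example_obj_t obj = (example_obj_t) kalloc(sizeof(*obj));

	if (obj == 0)
		return 0;
	/* one ref for just being alive; one for our caller */
	obj->ref_count = 2;
	return obj;
}

static void
example_deallocate(example_obj_t obj)
{
	if (--obj->ref_count > 0)
		return;
	kfree((vm_offset_t) obj, sizeof(*obj));
}
#endif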
void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL) {
		task_lock(task);
		task->ref_count++;
		task_unlock(task);
	}
}

boolean_t
task_reference_try(
	task_t		task)
{
	if (task != TASK_NULL) {
		if (task_lock_try(task)) {
			task->ref_count++;
			task_unlock(task);
			return TRUE;
		}
	}
	return FALSE;
}
/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (task->bsd_info)
		return(KERN_FAILURE);
	return (task_terminate_internal(task));
}
kern_return_t
task_terminate_internal(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;
	boolean_t	interrupt_save;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in (this can block).
	 *	NOTE: The only way that this can happen in the current
	 *	system is if the task is swapped while it has a thread
	 *	in exit(), and the thread does not hit a clean point
	 *	to swap itself before getting here.
	 *	Terminating other tasks is another way to this code, but
	 *	it is not yet fully supported.
	 *	The task_swapin is unconditional.  It used to be done
	 *	only if the task is not resident.  Swapping in a
	 *	resident task will prevent it from being swapped out
	 *	while it terminates.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	/*
	 *	Get the task locked and make sure that we are not racing
	 *	with someone else trying to terminate us.
	 */
	if (task == cur_task) {
		task_lock(task);
	} else if (task < cur_task) {
		task_lock(task);
		task_lock(cur_task);
	} else {
		task_lock(cur_task);
		task_lock(task);
	}

	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current act is already being terminated.
		 *	Just return an error. If we are dying, this will
		 *	just get us to our AST special handler and that
		 *	will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (cur_task != task)
			task_unlock(cur_task);
		return(KERN_FAILURE);
	}
	if (cur_task != task)
		task_unlock(cur_task);
	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 *	Indicate that we want all the threads to stop executing
	 *	at user space by holding the task (we would have held
	 *	each thread independently in thread_terminate_internal -
	 *	but this way we may be more likely to already find it
	 *	held there).  Mark the task inactive, and prevent
	 *	further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 *	Terminate each activation in the task.
	 *
	 *	Each terminated activation will run its special handler
	 *	when its current kernel context is unwound.  That will
	 *	clean up most of the thread resources.  Then it will be
	 *	handed over to the reaper, who will finally remove the
	 *	thread from the task list and free the structures.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
			thread_terminate_internal(thr_act);
	}

	/*
	 *	Clean up any virtual machine state/resources associated
	 *	with the current activation because it may hold wiring
	 *	and other references on resources we will be trying to
	 *	release below.
	 */
	if (cur_thr_act->task == task)
		act_virtual_machine_destroy(cur_thr_act);

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the IPC space, leaving just a reference for it.
	 */
	if (!task->kernel_loaded)
		ipc_space_destroy(task->itk_space);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);

	/*
	 * Flush working set here to avoid I/O in reaper thread
	 */
	if(task->dynamic_working_set)
		tws_hash_ws_flush((tws_hash_t)
				task->dynamic_working_set);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

	perfmon_release_facility(task); // notify the perfmon facility

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return(KERN_SUCCESS);
}
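
/*
 * The locking dance at the top of task_terminate_internal() is worth
 * calling out: when two task locks must be held at once, they are always
 * taken in ascending address order, so two threads terminating each
 * other cannot deadlock.  The discipline in isolation (hypothetical
 * example_ helper, not part of this file):
 */
#if 0	/* illustrative sketch */
static void
example_lock_both(task_t a, task_t b)
{
	if (a == b) {
		task_lock(a);
	} else if (a < b) {		/* global order: kernel address */
		task_lock(a);
		task_lock(b);
	} else {
		task_lock(b);
		task_lock(a);
	}
}
#endif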
/*
 * task_halt - Shut the current task down (except for the current thread) in
 *	       preparation for dramatic changes to the task (probably exec).
 *	       We hold the task, terminate all other threads in the task and
 *	       wait for them to terminate, clean up the portspace, and when
 *	       all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

	if (task != cur_task) {
		return(KERN_INVALID_ARGUMENT);
	}

#if	TASK_SWAPPER
	/*
	 *	If task is not resident (swapped out, or being swapped
	 *	out), we want to bring it back in and make it unswappable.
	 *	This can block, so do it early.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	task_lock(task);

	if (!task->active || !cur_thr_act->active) {
		/*
		 *	Task or current thread is already being terminated.
		 *	Hurry up and return out of the current kernel context
		 *	so that we run our AST special handler to terminate
		 *	ourselves.
		 */
		task_unlock(task);
		return(KERN_FAILURE);
	}

	if (task->thr_act_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 *	Terminate all the other activations in the task.
		 *
		 *	Each terminated activation will run its special handler
		 *	when its current kernel context is unwound.  That will
		 *	clean up most of the thread resources.  Then it will be
		 *	handed over to the reaper, who will finally remove the
		 *	thread from the task list and free the structures.
		 */
		queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
			if (thr_act != cur_thr_act)
				thread_terminate_internal(thr_act);
		}
		task_release_locked(task);
	}

	/*
	 *	If the current thread has any virtual machine state
	 *	associated with it, we need to explicitly clean that
	 *	up now (because we did not terminate the current act)
	 *	before we try to clean up the task VM and port spaces.
	 */
	act_virtual_machine_destroy(cur_thr_act);

	task_unlock(task);

	/*
	 *	Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 *	Destroy the contents of the IPC space, leaving just
	 *	a reference for it.
	 */
	if (!task->kernel_loaded)
		ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	return KERN_SUCCESS;
}
/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t	task)
{
	register thread_act_t	thr_act;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 *	Iterate through all the thread_act's and hold them.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		act_lock_thread(thr_act);
		thread_hold(thr_act);
		act_unlock_thread(thr_act);
	}
}
/*
 *	task_hold:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.  This differs from task_suspend
 *	in that it places a kernel hold on the task rather than just a
 *	user-level hold.  This keeps users from over resuming and setting
 *	it running out from under the kernel.
 *
 *	CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	task_hold_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}
/*
 * Routine:	task_wait_locked
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t		task)
{
	register thread_act_t	thr_act, cur_thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	cur_thr_act = current_act();
	/*
	 *	Iterate through all the threads and wait for them to
	 *	stop.  Do not wait for the current thread if it is within
	 *	the task.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		if (thr_act != cur_thr_act) {
			thread_shuttle_t thr_shuttle;

			thr_shuttle = act_lock_thread(thr_act);
			thread_wait(thr_shuttle);
			act_unlock_thread(thr_act);
		}
	}
}
/*
 *	task_release_locked:
 *
 *	Release a kernel hold on a task.
 *
 *	CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t	task)
{
	register thread_act_t	thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	/*
	 *	Iterate through all the thread_act's and release them.
	 *	Do not release the current thread_act if it is within the
	 *	task.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		act_lock_thread(thr_act);
		thread_release(thr_act);
		act_unlock_thread(thr_act);
	}
}
/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 *	CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}
kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*thr_act_list,
	mach_msg_type_number_t	*count)
{
	unsigned int		actual;	/* this many thr_acts */
	thread_act_t		thr_act;
	thread_act_t		*thr_acts;
	int			i, j;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);
			if (size != 0)
				kfree(addr, size);
			return KERN_FAILURE;
		}

		actual = task->thr_act_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}
	/* OK, have memory and the task is locked & active */
	thr_acts = (thread_act_t *) addr;

	for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
	     i < actual;
	     i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
		act_lock(thr_act);
		if (thr_act->ref_count > 0) {
			act_locked_act_reference(thr_act);
			thr_acts[j++] = thr_act;
		}
		act_unlock(thr_act);
	}
	assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));

	actual = j;
	size_needed = actual * sizeof(mach_port_t);

	/* can unlock task now that we've got the thr_act refs */
	task_unlock(task);

	if (actual == 0) {
		/* no thr_acts, so return null pointer and deallocate memory */

		*thr_act_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; i++)
					act_deallocate(thr_acts[i]);
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			thr_acts = (thread_act_t *) newaddr;
		}

		*thr_act_list = thr_acts;
		*count = actual;

		/* do the conversion that Mig should handle */

		for (i = 0; i < actual; i++)
			((ipc_port_t *) thr_acts)[i] =
				convert_act_to_port(thr_acts[i]);
	}

	return KERN_SUCCESS;
}
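
/*
 * task_threads() above uses the standard unlock-allocate-retry loop for
 * returning a variable-sized snapshot: guess a buffer size, take the
 * lock, recompute the needed size, and if the guess was short, drop the
 * lock, grow the buffer, and try again (the thread count may change
 * while unlocked).  The skeleton of that loop, reduced to its essentials
 * with a hypothetical example_ name:
 */
#if 0	/* illustrative sketch */
static kern_return_t
example_snapshot(task_t task, vm_offset_t *out_addr, vm_size_t *out_size)
{
	vm_size_t	size = 0, size_needed;
	vm_offset_t	addr = 0;

	for (;;) {
		task_lock(task);
		size_needed = task->thr_act_count * sizeof(mach_port_t);
		if (size_needed <= size)
			break;		/* big enough; still locked */

		task_unlock(task);	/* too small: grow and retry */
		if (size != 0)
			kfree(addr, size);
		size = size_needed;
		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}
	/* ...copy data out under the lock... */
	task_unlock(task);
	*out_addr = addr;
	*out_size = size;
	return KERN_SUCCESS;
}
#endif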
/*
 * Routine:	task_suspend
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	if ((task->user_stop_count)++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);
		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}
/*
 * Routine:	task_resume
 *	Release a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(register task_t task)
{
	register boolean_t	release;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	release = FALSE;
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}
	if (task->user_stop_count > 0) {
		if (--(task->user_stop_count) == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);
	return(KERN_SUCCESS);
}
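
/*
 * From user space the two routines above nest: each task_suspend() must
 * be balanced by a task_resume() before the target task runs again.  A
 * minimal user-space sketch against the public Mach API (assumes
 * <mach/mach.h> and a task port obtained elsewhere, e.g. task_for_pid()):
 */
#if 0	/* illustrative user-space sketch */
#include <mach/mach.h>

static kern_return_t
freeze_briefly(task_t target)
{
	kern_return_t kr;

	kr = task_suspend(target);	/* stop count 0 -> 1: threads halt */
	if (kr != KERN_SUCCESS)
		return kr;
	(void) task_suspend(target);	/* 1 -> 2: already stopped, cheap */

	/* ... inspect the stopped task here ... */

	(void) task_resume(target);	/* 2 -> 1: still stopped */
	return task_resume(target);	/* 1 -> 0: threads run again */
}
#endif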
kern_return_t
host_security_set_task_token(
	host_security_t	 host_security,
	task_t		 task,
	security_token_t sec_token,
	host_priv_t	 host_priv)
{
	kern_return_t	 kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = task_set_special_port(task,
				TASK_HOST_PORT,
				ipc_port_make_send(realhost.host_priv_self));
	}
	else {
		kr = task_set_special_port(task,
				TASK_HOST_PORT,
				ipc_port_make_send(realhost.host_self));
	}
	return(kr);
}
/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}
/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	task_info_t	task_info_in,		/* pointer to IN array */
	mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	    default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
: 
1298                 register task_basic_info_t      basic_info
; 
1300                 if (*task_info_count 
< TASK_BASIC_INFO_COUNT
) { 
1301                     return(KERN_INVALID_ARGUMENT
); 
1304                 basic_info 
= (task_basic_info_t
) task_info_out
; 
1306                 map 
= (task 
== kernel_task
) ? kernel_map 
: task
->map
; 
1308                 basic_info
->virtual_size  
= map
->size
; 
1309                 basic_info
->resident_size 
= pmap_resident_count(map
->pmap
) 
1313                 basic_info
->policy 
= ((task 
!= kernel_task
)? 
1314                                                                                   POLICY_TIMESHARE
: POLICY_RR
); 
1315                 basic_info
->suspend_count 
= task
->user_stop_count
; 
1316                 basic_info
->user_time
.seconds
 
1317                                 = task
->total_user_time
.seconds
; 
1318                 basic_info
->user_time
.microseconds
 
1319                                 = task
->total_user_time
.microseconds
; 
1320                 basic_info
->system_time
.seconds
 
1321                                 = task
->total_system_time
.seconds
; 
1322                 basic_info
->system_time
.microseconds 
 
1323                                 = task
->total_system_time
.microseconds
; 
1326                 *task_info_count 
= TASK_BASIC_INFO_COUNT
; 
	    case TASK_THREAD_TIMES_INFO:
	    {
		register task_thread_times_info_t times_info;
		register thread_t	thread;
		register thread_act_t	thr_act;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
		    return (KERN_INVALID_ARGUMENT);
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);
		queue_iterate(&task->thr_acts, thr_act,
			      thread_act_t, thr_acts)
		{
		    time_value_t user_time, system_time;

		    thread = act_lock_thread(thr_act);

		    /* JMM - add logic to skip threads that have migrated
		     * into this task?
		     */

		    assert(thread);	/* Must have thread */

		    thread_lock(thread);

		    thread_read_times(thread, &user_time, &system_time);

		    thread_unlock(thread);

		    act_unlock_thread(thr_act);

		    time_value_add(&times_info->user_time, &user_time);
		    time_value_add(&times_info->system_time, &system_time);
		}
		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	    }
	    case TASK_SCHED_FIFO_INFO:
	    {
		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		return(KERN_INVALID_POLICY);
	    }
	    case TASK_SCHED_RR_INFO:
	    {
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = tick / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	    }
	    case TASK_SCHED_TIMESHARE_INFO:
	    {
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	    }
	    case TASK_SECURITY_TOKEN:
	    {
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
		    return(KERN_INVALID_ARGUMENT);
		}

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	    }

	    case TASK_SCHED_INFO:
			return(KERN_INVALID_ARGUMENT);
	    case TASK_EVENTS_INFO:
	    {
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
		    return(KERN_INVALID_ARGUMENT);
		}

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	    }

	    default:
		return (KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
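
/*
 * A user-space view of the TASK_BASIC_INFO flavor implemented above.
 * Note that the count argument is in/out: callers must initialize it to
 * the capacity of the buffer they pass (assumes <mach/mach.h>):
 */
#if 0	/* illustrative user-space sketch */
#include <mach/mach.h>
#include <stdio.h>

static void
print_basic_info(void)
{
	struct task_basic_info	info;
	mach_msg_type_number_t	count = TASK_BASIC_INFO_COUNT;
	kern_return_t		kr;

	kr = task_info(mach_task_self(), TASK_BASIC_INFO,
		       (task_info_t) &info, &count);
	if (kr != KERN_SUCCESS)
		return;
	printf("virtual %lu resident %lu suspended %d\n",
	       (unsigned long) info.virtual_size,
	       (unsigned long) info.resident_size,
	       info.suspend_count);
}
#endif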
/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	task_t		task,
	processor_set_t	new_pset,
	boolean_t	assign_threads)
{
#ifdef	lint
	task++; new_pset++; assign_threads++;
#endif	/* lint */
	return(KERN_FAILURE);
}
/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
    return (task_assign(task, &default_pset, assign_threads));
}
/*
 *	task_get_assignment
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}
/*
 *	task_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy must be a policy which is enabled for the
 *	processor set. Change contained threads if requested.
 */
kern_return_t
task_policy(
	task_t			task,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	count,
	boolean_t		set_limit,
	boolean_t		change)
{
	return(KERN_FAILURE);
}
/*
 *	task_set_policy
 *
 *	Set scheduling policy and parameters, both base and limit, for
 *	the given task. Policy can be any policy implemented by the
 *	processor set, whether enabled or not. Change contained threads
 *	if requested.
 */
kern_return_t
task_set_policy(
	task_t			task,
	processor_set_t		pset,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	base_count,
	policy_limit_t		limit,
	mach_msg_type_number_t	limit_count,
	boolean_t		change)
{
	return(KERN_FAILURE);
}
/*
 *	task_collect_scan:
 *
 *	Attempt to free resources owned by tasks.
 */
void
task_collect_scan(void)
{
	register task_t		task, prev_task;
	processor_set_t		pset = &default_pset;

	pset_lock(pset);
	pset->ref_count++;
	task = (task_t) queue_first(&pset->tasks);
	while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
		task_lock(task);
		if (task->ref_count > 0) {

			task_reference_locked(task);
			task_unlock(task);

#if	MACH_HOST
			/*
			 *	While we still have the pset locked, freeze the task in
			 *	this pset.  That way, when we get back from collecting
			 *	it, we can dereference the pset_tasks chain for the task
			 *	and be assured that we are still in this chain.
			 */
			task_freeze(task);
#endif	/* MACH_HOST */

			pset_unlock(pset);

			pmap_collect(task->map->pmap);

			pset_lock(pset);
			prev_task = task;
			task = (task_t) queue_next(&task->pset_tasks);

#if	MACH_HOST
			task_unfreeze(prev_task);
#endif	/* MACH_HOST */

			task_deallocate(prev_task);
		} else {
			task_unlock(task);
			task = (task_t) queue_next(&task->pset_tasks);
		}
	}
	pset_unlock(pset);

	pset_deallocate(pset);
}
/* Also disabled in vm/vm_pageout.c */
boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;		/* in ticks */

/*
 *	consider_task_collect:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */
void
consider_task_collect(void)
{
	/*
	 *	By default, don't attempt task collection more frequently
	 *	than once per second.
	 */
	if (task_collect_max_rate == 0)
		task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;

	if (task_collect_allowed &&
	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
		task_collect_last_tick = sched_tick;
		task_collect_scan();
	}
}
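
/*
 * Worked example of the rate limit above, assuming the usual
 * SCHED_TICK_SHIFT of 3 (the shift is configuration-dependent): the
 * scheduler tick advances 1 << 3 = 8 times per second, so
 *
 *	task_collect_max_rate = (1 << 3) + 1 = 9 ticks  ~=  1.125 s
 *
 * and task_collect_scan() therefore runs at most once per ~1.125
 * seconds, and only while task_collect_allowed is set.
 */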
kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
#if	FAST_TAS
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end =  endpc;
	task_unlock(task);
	return KERN_SUCCESS;
#else	/* FAST_TAS */
	return KERN_FAILURE;
#endif	/* FAST_TAS */
}
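
/*
 * task_set_ras_pc() registers a restartable atomic sequence: a [pc,
 * endpc) code range that the kernel restarts from the beginning if the
 * thread is preempted inside it, which lets a uniprocessor implement an
 * atomic test-and-set with plain loads and stores.  A sketch of the
 * client's side (pseudo-C: real RAS bodies are written in assembly so
 * the registered range is exact; label names are hypothetical):
 */
#if 0	/* illustrative sketch */
int
example_tas(volatile int *lock_word)
{
	int old;

	/* ras_begin: start of the registered [pc, endpc) range */
	old = *lock_word;	/* preemption here restarts at ras_begin */
	*lock_word = 1;
	/* ras_end */
	return old;
}
/* registered once per task:
 *	task_set_ras_pc(task, (vm_offset_t) ras_begin,
 *			(vm_offset_t) ras_end);
 */
#endif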
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */
	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */
	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}
/*
 *	task_set_port_space:
 *
 *	Set port name space of task to specified size.
 */
kern_return_t
task_set_port_space(
	task_t		task,
	int		table_entries)
{
	kern_return_t	kr;

	is_write_lock(task->itk_space);
	kr = ipc_entry_grow_table(task->itk_space, table_entries);
	if (kr == KERN_SUCCESS)
		is_write_unlock(task->itk_space);
	return kr;
}
/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t
is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return (TRUE);

	return((t->kernel_loaded));
}

#undef current_task
task_t
current_task()
{
	return (current_task_fast());
}