/*
 * Copyright (c) 2000-2007 Apple, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */
#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */
task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);
void  *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}
void set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}
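/*
 * Illustrative sketch (example only, not part of this file): callers
 * must treat a THREAD_NULL result from get_firstthread() as "task has
 * no threads or is no longer active".  The helper name is hypothetical.
 */
#if 0	/* example only */
static boolean_t
example_task_has_first_thread(task_t task)
{
	return (get_firstthread(task) != THREAD_NULL);
}
#endif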
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
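/*
 * Illustrative sketch (hypothetical caller, example only): BSD-side
 * signal delivery would typically pick a signal-eligible thread and
 * request the BSD AST in a single get_signalact() call.
 */
#if 0	/* example only */
static void
example_post_signal(task_t task)
{
	thread_t thread;

	/* a non-zero third argument requests act_set_astbsd() */
	if (get_signalact(task, &thread, 1) != KERN_SUCCESS)
		return;	/* no active, non-aborted thread available */
	/* AST_BSD is now pending on thread; its mutex was dropped */
}
#endif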
kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}
/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we get to return it here.
 */
vm_map_t  get_task_map(task_t t)
{
	return(t->map);
}
vm_map_t  get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
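/*
 * Illustrative sketch (example only): callers outside the task's own
 * context should prefer get_task_map_reference(), which validates the
 * task under the task lock and takes a map reference, over the raw
 * get_task_map() accessor above.  The helper name is hypothetical.
 */
#if 0	/* example only */
static uint64_t
example_task_map_size(task_t t)
{
	vm_map_t map = get_task_map_reference(t);
	uint64_t size;

	if (map == VM_MAP_NULL)
		return (0);
	size = map->size;
	vm_map_deallocate(map);	/* drop the reference taken above */
	return (size);
}
#endif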
ipc_space_t  get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}
int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int num_active_thr = 0;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); inc = (thread_t)queue_next(&inc->task_threads))
	{
		if (inc->active)
			num_active_thr++;
	}
	task_unlock(task);
	return num_active_thr;
}
int  get_task_numacts(task_t t)
{
	return(t->thread_count);
}
/* does this machine need 64bit register set for signal handler */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}
/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}
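/*
 * Illustrative sketch (example only): the caller owns the old map
 * reference handed back by swap_task_map() and must release it once
 * it is no longer needed.  The helper name is hypothetical.
 */
#if 0	/* example only */
static void
example_replace_task_map(task_t task, thread_t thread, vm_map_t new_map)
{
	vm_map_t old_map = swap_task_map(task, thread, new_map);

	vm_map_deallocate(old_map);	/* release the returned reference */
}
#endif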
pmap_t  get_task_pmap(task_t t)
{
	return(t->map->pmap);
}
uint64_t get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map: task->map;
	return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}
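/*
 * Illustrative sketch (example only): pmap_resident_count() is in
 * pages, so get_task_resident_size() reports bytes
 * (pages * PAGE_SIZE_64).  The helper name is hypothetical.
 */
#if 0	/* example only */
static uint64_t
example_task_resident_pages(task_t task)
{
	return (get_task_resident_size(task) / PAGE_SIZE_64);
}
#endif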
pmap_t  get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}
task_t  get_threadtask(thread_t th)
{
	return(th->task);
}
vm_map_offset_t
get_map_min(vm_map_t map)
{
	return(vm_map_min(map));
}

vm_map_offset_t
get_map_max(vm_map_t map)
{
	return(vm_map_max(map));
}
int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
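/*
 * Note (added commentary): the routine above counts one entry per plain
 * mapping and recurses into nested submaps, restricting the recursion to
 * the [start, end) window of the parent entry; get_vmmap_entries() below
 * applies the same walk to an entire map.
 */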
int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
int
get_task_userstop(task_t task)
{
	return(task->user_stop_count);
}
int
get_thread_userstop(thread_t th)
{
	return(th->user_stop_count);
}
boolean_t
thread_should_abort(thread_t th)
{
	return ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT);
}
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * unconditionally clears it).
 */
boolean_t
current_thread_aborted(void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_mode & TH_MODE_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_mode & TH_MODE_ABORTSAFELY)
			th->sched_mode &= ~TH_MODE_ISABORTED;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
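/*
 * Illustrative sketch (example only): a long-running operation can poll
 * current_thread_aborted() to honor safe aborts; the poll itself clears
 * a safe-abort condition, so act on the result immediately.  The helper
 * name is hypothetical.
 */
#if 0	/* example only */
static int
example_interruptible_loop(int iterations)
{
	int i;

	for (i = 0; i < iterations; i++) {
		if (current_thread_aborted())
			return (1);	/* caller maps this to an EINTR-style error */
		/* ... one unit of restartable work ... */
	}
	return (0);
}
#endif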
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
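/*
 * Illustrative sketch (hypothetical callback, example only): the
 * iterator above invokes func_callback on every thread with the task
 * lock held, so callbacks must not block or retake the task lock.
 */
#if 0	/* example only */
static void
example_count_one(__unused thread_t thread, void *arg)
{
	(*(int *)arg)++;	/* tally each thread visited */
}

static int
example_count_threads(task_t task)
{
	int count = 0;

	task_act_iterate_wth_args(task, example_count_one, &count);
	return (count);
}
#endif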
void
ipc_port_release(ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}
#include <sys/bsdtask_info.h>
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t	tinfo;
	thread_t thread;
	int cswitch = 0, numrunning = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size  = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
	                                  POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t	tval;

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);
		tinfo.threads_system += tval;
		tinfo.total_system += tval;
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
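/*
 * Note (added commentary): the totals above seed tinfo with the task's
 * accumulated times, which already include threads that terminated
 * before this call, and then fold in the live threads' timers sampled
 * under the task lock.
 */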
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__) || defined (__x86_64__)
		if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
		{
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
#if 0
			ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
			ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));
#endif
			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return (err);
}
int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t  thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		thaddr = thact->machine.cthread_self;
#elif defined (__i386__) || defined (__x86_64__)
		thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			break;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

	task_unlock(task);

	return (int)(numthr * sizeof(uint64_t));
}
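/*
 * Illustrative sketch (example only): the caller passes a buffer of
 * thcount uint64_t slots and gets back the number of bytes filled, so
 * dividing by sizeof(uint64_t) recovers how many thread handles were
 * returned.  The helper name is hypothetical.
 */
#if 0	/* example only */
static int
example_thread_handle_count(task_t task, uint64_t *buf, int nslots)
{
	int bytes = fill_taskthreadlist(task, buf, nslots);

	return (bytes / (int)sizeof(uint64_t));
}
#endif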
int
get_numthreads(task_t task)
{
	return(task->thread_count);
}
void
syscall_exit_funnelcheck(void)
{
	thread_t thread;

	thread = current_thread();

	if (thread->funnel_lock)
		panic("syscall exit with funnel held\n");
}