/*
 * Copyright (c) 2000-2007 Apple, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/lock.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release
/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);
void *get_bsdtask_info(task_t t)
{
	return (t->bsd_info);
}

void *get_bsdthreadtask_info(thread_t th)
{
	return (th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

void *get_bsdthread_info(thread_t th)
{
	return (th->uthread);
}
/*
 * XXX: wait for BSD to fix the signal code.
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}
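/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * hypothetical caller of get_firstthread().  Because the routine neither
 * blocks nor takes the task lock, the caller is assumed to already hold
 * something that keeps the task from going away, and must tolerate a
 * THREAD_NULL result when the task has no threads or is no longer active.
 */
#if 0
static boolean_t
example_task_has_threads(task_t task)
{
	return (get_firstthread(task) != THREAD_NULL);
}
#endif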
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}
/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return (t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
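/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * caller that wants another task's map takes a reference with
 * get_task_map_reference() so the map cannot be freed underneath it, and
 * drops that reference with vm_map_deallocate() when it is done.
 */
#if 0
static vm_map_size_t
example_task_map_size(task_t t)
{
	vm_map_t	map = get_task_map_reference(t);
	vm_map_size_t	size;

	if (map == VM_MAP_NULL)
		return 0;

	size = map->size;		/* safe: we hold a map reference */
	vm_map_deallocate(map);		/* drop the reference taken above */

	return size;
}
#endif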
ipc_space_t get_task_ipcspace(task_t t)
{
	return (t->itk_space);
}

int get_task_numacts(task_t t)
{
	return (t->thread_count);
}
/* does this machine need a 64-bit register set for its signal handler? */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return (1);
	else
		return (0);
}
/*
 * Swap in a new map for the current task/thread pair.
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);

	inval_copy_windows(thread);

	return old_map;
}
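/*
 * Illustrative sketch (an assumption, not part of the original file): the
 * caller owns the reference on the map it swaps out and is expected to
 * release it, e.g. with vm_map_deallocate(), once nothing else uses it.
 */
#if 0
static void
example_replace_task_map(task_t task, vm_map_t new_map)
{
	vm_map_t old_map = swap_task_map(task, new_map);

	/* ... tear down any state tied to old_map ... */
	vm_map_deallocate(old_map);	/* drop the reference returned above */
}
#endif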
pmap_t get_task_pmap(task_t t)
{
	return (t->map->pmap);
}

pmap_t get_map_pmap(vm_map_t map)
{
	return (map->pmap);
}

task_t get_threadtask(thread_t th)
{
	return (th->task);
}

vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return (vm_map_min(map));
}

vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return (vm_map_max(map));
}
int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return (total_entries);
}
int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return (total_entries);
}
int
get_task_userstop(
	task_t	task)
{
	return (task->user_stop_count);
}

int
get_thread_userstop(
	thread_t th)
{
	return (th->user_stop_count);
}

boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT);
}
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread was safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_mode & TH_MODE_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_mode & TH_MODE_ABORTSAFELY)
			th->sched_mode &= ~TH_MODE_ISABORTED;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
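/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * BSD-side wait loop might poll current_thread_aborted() after waking up
 * and bail out early; a safe abort is consumed by that single poll.
 */
#if 0
static int
example_interruptible_wait(int (*condition_fn)(void))
{
	while (!(*condition_fn)()) {
		/* one poll both reports and, for a safe abort, clears it */
		if (current_thread_aborted())
			return 1;	/* e.g. translated to EINTR at the BSD layer */
		/* ... block and wait to be woken ... */
	}
	return 0;
}
#endif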
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
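/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * hypothetical callback that counts a task's threads via
 * task_act_iterate_wth_args(); the callback runs with the task locked.
 */
#if 0
static void
example_count_thread(__unused thread_t thread, void *arg)
{
	(*(int *)arg)++;	/* bump the caller-supplied counter */
}

static int
example_count_threads(task_t task)
{
	int count = 0;

	task_act_iterate_wth_args(task, example_count_thread, &count);
	return count;
}
#endif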
void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}
#include <sys/bsdtask_info.h>
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	thread_t thread;
	task_absolutetime_info_data_t	tinfo;
	int cswitch = 0, numrunning = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size  = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
		POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t	tval;

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);
		tinfo.threads_system += tval;
		tinfo.total_system += tval;
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__)
		if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
		{
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
#if 0
			/* disabled: report the raw timer values */
			ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
			ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));
#endif
			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return (err);
}
int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t  thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		thaddr = thact->machine.cthread_self;
#elif defined (__i386__)
		thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			break;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

	task_unlock(task);
	return (numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return (task->thread_count);
}
void
syscall_exit_funnelcheck(void)
{
	thread_t thread;

	thread = current_thread();

	if (thread->funnel_lock)
		panic("syscall exit with funnel held\n");
}