/*
 * Copyright (c) 2000-2007 Apple, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <kern/lock.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);

void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

void set_bsdtask_info(task_t t, void * v)
{
	t->bsd_info = v;
}

void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

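/*
 * Illustrative sketch (not from the original source; the locking
 * discipline shown is an assumption): a caller that has pinned the
 * task might probe for a live thread like this:
 *
 *	task_lock(task);
 *	thread_t first = get_firstthread(task);
 *	if (first != THREAD_NULL) {
 *		... inspect first while the task lock is held ...
 *	}
 *	task_unlock(task);
 */
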
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}

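/*
 * Illustrative sketch (assumption, not from the original source):
 * signal-delivery code might use get_signalact() to pick a thread
 * and request an AST_BSD on it in one step:
 *
 *	thread_t sig_thread;
 *
 *	if (get_signalact(task, &sig_thread, 1) == KERN_SUCCESS) {
 *		... sig_thread will take the BSD AST on its way
 *		... back to user space
 *	}
 */
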
kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context, or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}

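/*
 * Illustrative sketch (assumption): a caller that must use the map
 * after dropping all task-related locks takes a reference here and
 * releases it with vm_map_deallocate() when finished:
 *
 *	vm_map_t map = get_task_map_reference(task);
 *	if (map != VM_MAP_NULL) {
 *		... operate on map ...
 *		vm_map_deallocate(map);
 *	}
 */
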
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int num_active_thr = 0;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc);
			inc = (thread_t)queue_next(&inc->task_threads)) {
		if (inc->active)
			num_active_thr++;
	}

	task_unlock(task);
	return num_active_thr;
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* does this machine need a 64-bit register set for its signal handler? */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}

/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);

	inval_copy_windows(thread);

	return old_map;
}

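/*
 * Illustrative sketch (assumption): an exec-style caller installs a
 * freshly built map and then drops the reference returned for the
 * old one:
 *
 *	vm_map_t old_map = swap_task_map(task, new_map);
 *	vm_map_deallocate(old_map);
 */
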
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

task_t get_threadtask(thread_t th)
{
	return(th->task);
}

vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

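/*
 * Illustrative note (assumption): a submap entry contributes the
 * count of its contained entries rather than counting as one.  For
 * a range covering one submap holding 3 entries plus 2 plain
 * entries, the result is 3 + 2 = 5.
 */
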
int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_mode & TH_MODE_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_mode & TH_MODE_ABORTSAFELY)
			th->sched_mode &= ~TH_MODE_ISABORTED;
		thread_unlock(th);
		splx(s);
	}
	return (FALSE);
}

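/*
 * Illustrative sketch (assumption): a long-running operation might
 * poll for abortion, relying on the clear-on-poll behavior of safe
 * aborts described above:
 *
 *	while (more_work) {
 *		if (current_thread_aborted())
 *			return (EINTR);
 *		... perform one bounded unit of work ...
 *	}
 */
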
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}

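/*
 * Illustrative sketch (assumption): a hypothetical callback that
 * counts a task's threads through the iterator:
 *
 *	static void
 *	count_one(thread_t thread __unused, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nthreads = 0;
 *	task_act_iterate_wth_args(task, count_one, &nthreads);
 */
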
void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t	tinfo;
	thread_t thread;
	int cswitch = 0, numrunning = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
		POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t	tval;

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);
		tinfo.threads_system += tval;
		tinfo.total_system += tval;
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__)
		if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
		{
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
#if 0
			ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
			ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));
#endif
			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);

			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		thaddr = thact->machine.cthread_self;
#elif defined (__i386__)
		thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			break;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

	task_unlock(task);
	return(numthr * sizeof(uint64_t));
}

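/*
 * Illustrative note (assumption): the return value is a byte count,
 * so a caller recovers the number of identifiers written with:
 *
 *	int nbytes = fill_taskthreadlist(task, buf, maxthreads);
 *	int nthreads = nbytes / sizeof(uint64_t);
 */
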
int
get_numthreads(task_t task)
{
	return(task->thread_count);
}

void
syscall_exit_funnelcheck(void)
{
	thread_t thread;

	thread = current_thread();

	if (thread->funnel_lock)
		panic("syscall exit with funnel held\n");
}