/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif
// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
#include <i386/mp.h>
#else
// fall back on declaring it extern. The linker will sort us out.
extern unsigned int real_ncpus;
#endif
// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)

#if 0
#pragma mark **** thread binding ****
#endif
/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum. If the thread is the current thread, this method will
 * force a thread_block(). The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racey, but mainly to prevent bind to shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() &&
			!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if(thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}
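
/*
 * Illustrative sketch, not part of the original file: the intended
 * bind/unbind pairing. A hypothetical in-kernel caller pins itself to a
 * CPU for a measurement and then releases the binding. Kept under
 * "#if 0", since it is an example only.
 */
#if 0
static void
example_measure_on_cpu(int cpu)
{
	/* only the current thread may be bound (see restriction above) */
	if (chudxnu_bind_thread(current_thread(), cpu, 0) == KERN_SUCCESS) {
		/* now guaranteed to be running on the requested CPU */
		/* ... take per-CPU measurements here ... */
		(void) chudxnu_unbind_thread(current_thread(), 0);
	}
}
#endif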
__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread) {
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}
__private_extern__ int
chudxnu_thread_get_scheduler_state(thread_t thread) {
	/*
	 * Instantaneous snapshot of the scheduler state of
	 * a given thread.
	 *
	 * MUST ONLY be called on an interrupted or
	 * locked thread, to avoid a race.
	 */

	int state = 0;
	int schedulerState = (volatile int)(thread->state);
	processor_t lastProcessor = (volatile processor_t)(thread->last_processor);

	if ((PROCESSOR_NULL != lastProcessor) && (thread == lastProcessor->active_thread)) {
		state |= CHUDXNU_TS_RUNNING;
	}

	if (schedulerState & TH_RUN) {
		state |= CHUDXNU_TS_RUNNABLE;
	}

	if (schedulerState & TH_WAIT) {
		state |= CHUDXNU_TS_WAIT;
	}

	if (schedulerState & TH_UNINT) {
		state |= CHUDXNU_TS_UNINT;
	}

	if (schedulerState & TH_SUSP) {
		state |= CHUDXNU_TS_SUSP;
	}

	if (schedulerState & TH_TERMINATE) {
		state |= CHUDXNU_TS_TERMINATE;
	}

	if (schedulerState & TH_IDLE) {
		state |= CHUDXNU_TS_IDLE;
	}

	return state;
}
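
/*
 * Illustrative sketch, not part of the original file: the returned value
 * is a bitmask, and several CHUDXNU_TS_* bits can be set at once, so a
 * caller tests individual bits rather than comparing for equality.
 */
#if 0
static void
example_classify_thread(thread_t thread)
{
	int state = chudxnu_thread_get_scheduler_state(thread);

	if (state & CHUDXNU_TS_RUNNING) {
		/* on a CPU right now */
	} else if ((state & CHUDXNU_TS_WAIT) && (state & CHUDXNU_TS_UNINT)) {
		/* blocked in an uninterruptible wait */
	}
}
#endif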
#if 0
#pragma mark **** task and thread info ****
#endif
__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}
// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*thread_list;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */
		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */
		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}
__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return processor_set_things(&pset0, (void **)task_list, count, PSET_THING_TASK);
}
__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return processor_set_things(&pset0, (void **)thread_list, count, PSET_THING_THREAD);
}
__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t			task,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}
__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
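
/*
 * Illustrative sketch, not part of the original file: the intended
 * enumerate/free pairing. Each thread in the returned array carries a
 * reference taken by thread_reference_internal(), so the list must be
 * released with the matching free routine.
 */
#if 0
static void
example_walk_task_threads(task_t task)
{
	thread_array_t threads = NULL;
	mach_msg_type_number_t count = 0, i;

	if (chudxnu_task_threads(task, &threads, &count) == KERN_SUCCESS) {
		for (i = 0; i < count; i++) {
			/* ... inspect threads[i] here ... */
		}
		(void) chudxnu_free_thread_list(&threads, &count);
	}
}
#endif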
__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}
__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}
/* thread marking stuff */

__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
	if(thread)
		return ((thread->t_chud & T_CHUD_MARKED) != 0);
	return FALSE;
}

__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
	boolean_t old_val;

	if(thread) {
		if(new_value) {
			// set the marked bit
			old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
		} else {
			// clear the marked bit
			old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
		}
		return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
	}
	return FALSE;
}
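
/*
 * Illustrative sketch, not part of the original file: because
 * chudxnu_thread_set_marked() atomically returns the previous value of
 * the mark, it doubles as a test-and-set for claiming a thread once.
 */
#if 0
static boolean_t
example_claim_thread(thread_t thread)
{
	/* TRUE only for the first caller to mark this thread */
	return !chudxnu_thread_set_marked(thread, TRUE);
}
#endif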
/* XXX: good thing this code is experimental... */

/* external handler */
extern void (*chudxnu_thread_ast_handler)(thread_t);
void (*chudxnu_thread_ast_handler)(thread_t) = NULL;

/* AST callback to dispatch to AppleProfile */
extern void chudxnu_thread_ast(thread_t);
void
chudxnu_thread_ast(thread_t thread)
{
#if KPC
	/* check for PMC work */
	kpc_thread_ast_handler(thread);
#endif

#if KPERF
	/* check for kperf work */
	kperf_thread_ast_handler(thread);
#endif

	/* atomicness for kdebug events */
	void (*handler)(thread_t) = chudxnu_thread_ast_handler;
	if( handler )
		handler( thread );

	thread->t_chud = 0;
}
/* Get and set bits on the thread and trigger an AST handler */
void chudxnu_set_thread_ast( thread_t thread );
void
chudxnu_set_thread_ast( thread_t thread )
{
	/* FIXME: only call this on current thread from an interrupt handler for now... */
	if( thread != current_thread() )
		panic( "unsafe AST set" );

	act_set_kperf(thread);
}
/* get and set the thread bits */
extern uint32_t chudxnu_get_thread_bits( thread_t thread );
extern void chudxnu_set_thread_bits( thread_t thread, uint32_t bits );

uint32_t
chudxnu_get_thread_bits( thread_t thread )
{
	return thread->t_chud;
}

void
chudxnu_set_thread_bits( thread_t thread, uint32_t bits )
{
	thread->t_chud = bits;
}
/* get and set thread dirty bits. so CHUD can track whether the thread
 * has been dispatched since it last looked. caller must hold the
 * thread lock
 */
boolean_t
chudxnu_thread_get_dirty(thread_t thread)
{
	if( thread->c_switch != thread->chud_c_switch )
		return TRUE;
	else
		return FALSE;
}

void
chudxnu_thread_set_dirty(thread_t thread, boolean_t makedirty)
{
	if( makedirty )
		thread->chud_c_switch = thread->c_switch - 1;
	else
		thread->chud_c_switch = thread->c_switch;
}