/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>
// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
#elif defined(__ppc__) || defined(__ppc64__)
# include <ppc/cpu_internal.h>
#elif defined(__arm__)
# include <arm/cpu_internal.h>
#else
// fall back on declaring it extern.  The linker will sort us out.
extern unsigned int real_ncpus;
#endif
// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)
#pragma mark **** thread binding ****
/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum.  If the thread is the current thread, this method will
 * force a thread_block().  The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
    processor_t proc = NULL;

    if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
        return KERN_FAILURE;

    // temporary restriction until after phase 2 of the scheduler
    if(thread != current_thread())
        return KERN_FAILURE;

    proc = cpu_to_processor(cpu);

    /*
     * Potentially racey, but mainly to prevent bind to shutdown
     * processor.
     */
    if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
            !(proc->state == PROCESSOR_SHUTDOWN)) {

        thread_bind(proc);

        /*
         * If we're trying to bind the current thread, and
         * we're not on the target cpu, and not at interrupt
         * context, block the current thread to force a
         * reschedule on the target CPU.
         */
        if(thread == current_thread() &&
            !(ml_at_interrupt_context() && cpu_number() == cpu)) {
            (void)thread_block(THREAD_CONTINUE_NULL);
        }
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}
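/*
 * Usage sketch (illustrative, not part of the original file): a caller that
 * wants per-CPU measurements can pin itself to a CPU, sample, then unbind.
 * The wrapper chud_sample_on_cpu() is a hypothetical name; note that in this
 * version only the current thread may be bound (see the restriction above).
 */
#if 0 /* example only -- not compiled */
static kern_return_t
chud_sample_on_cpu(int cpu)
{
    // on success we return already running on 'cpu'
    kern_return_t kr = chudxnu_bind_thread(current_thread(), cpu, 0);
    if(kr != KERN_SUCCESS)
        return kr;

    /* ... take per-CPU measurements here ... */

    // drop the binding so the scheduler may migrate us again
    return chudxnu_unbind_thread(current_thread(), 0);
}
#endif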
__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
    if(thread == current_thread())
        thread_bind(PROCESSOR_NULL);
    return KERN_SUCCESS;
}
__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread) {
    /*
     * Instantaneous snapshot of the idle state of
     * a given thread.
     *
     * Should be called only on an interrupted or
     * suspended thread to avoid a race.
     */
    return ((thread->state & TH_IDLE) == TH_IDLE);
}
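/*
 * Usage sketch (illustrative): per the comment above, query the idle bit only
 * for a thread that is already interrupted or suspended, e.g. from a sampling
 * callback that is handed the interrupted thread.  chud_sample_callback() is
 * a hypothetical name for such a callback.
 */
#if 0 /* example only -- not compiled */
static void
chud_sample_callback(thread_t interrupted_thread)
{
    // safe: the thread is interrupted, so its state word cannot race us here
    if(chudxnu_thread_get_idle(interrupted_thread)) {
        /* ... attribute this sample to the idle loop ... */
    }
}
#endif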
#pragma mark **** task and thread info ****
__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
    return (task_has_64BitAddr(task));
}
#define THING_TASK      0
#define THING_THREAD    1
// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t
chudxnu_private_processor_set_things(
    processor_set_t         pset,
    mach_port_t             **thing_list,
    mach_msg_type_number_t  *count,
    int                     type)
{
    unsigned int actual;    /* this many things */
    unsigned int maxthings;
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL || pset != &pset0)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = NULL;

    /*
     * Allocate a buffer large enough for a consistent snapshot;
     * retry if the task/thread population grew while the lock
     * was dropped for the allocation.
     */
    for (;;) {
        mutex_lock(&tasks_threads_lock);

        if (type == THING_TASK)
            maxthings = tasks_count;
        else
            maxthings = threads_count;

        /* do we have the memory we need? */

        size_needed = maxthings * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        mutex_unlock(&tasks_threads_lock);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the processor_set is locked & active */

    actual = 0;
    switch (type) {

    case THING_TASK:
    {
        task_t task, *task_list = (task_t *)addr;

        for (task = (task_t)queue_first(&tasks);
                !queue_end(&tasks, (queue_entry_t)task);
                    task = (task_t)queue_next(&task->tasks)) {
            task_reference_internal(task);
            task_list[actual++] = task;
        }

        break;
    }

    case THING_THREAD:
    {
        thread_t thread, *thread_list = (thread_t *)addr;

        for (i = 0, thread = (thread_t)queue_first(&threads);
                !queue_end(&threads, (queue_entry_t)thread);
                    thread = (thread_t)queue_next(&thread->threads)) {
            thread_reference_internal(thread);
            thread_list[actual++] = thread;
        }

        break;
    }
    }

    mutex_unlock(&tasks_threads_lock);

    if (actual < maxthings)
        size_needed = actual * sizeof (mach_port_t);

    if (actual == 0) {
        /* no things, so return null pointer and deallocate memory */
        *thing_list = NULL;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                /* drop the references we just took, then bail */
                switch (type) {

                case THING_TASK:
                {
                    task_t *task_list = (task_t *)addr;

                    for (i = 0; i < actual; i++)
                        task_deallocate(task_list[i]);
                    break;
                }

                case THING_THREAD:
                {
                    thread_t *thread_list = (thread_t *)addr;

                    for (i = 0; i < actual; i++)
                        thread_deallocate(thread_list[i]);
                    break;
                }
                }

                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy((void *) addr, (void *) newaddr, size_needed);
            kfree(addr, size);
            addr = newaddr;
        }

        *thing_list = (mach_port_t *)addr;
        *count = actual;
    }

    return (KERN_SUCCESS);
}
// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
    task_t                  task,
    thread_act_array_t      *threads_out,
    mach_msg_type_number_t  *count)
{
    mach_msg_type_number_t actual;
    thread_t *thread_list;
    thread_t thread;
    vm_size_t size, size_needed;
    void *addr;
    unsigned int i, j;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = NULL;

    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);

            if (size != 0)
                kfree(addr, size);

            return (KERN_FAILURE);
        }

        actual = task->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the task is locked & active */
    thread_list = (thread_t *)addr;

    i = j = 0;

    for (thread = (thread_t)queue_first(&task->threads); i < actual;
                ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
        thread_reference_internal(thread);
        thread_list[j++] = thread;
    }

    assert(queue_end(&task->threads, (queue_entry_t)thread));

    actual = j;
    size_needed = actual * sizeof (mach_port_t);

    /* can unlock task now that we've got the thread refs */
    task_unlock(task);

    if (actual == 0) {
        /* no threads, so return null pointer and deallocate memory */

        *threads_out = NULL;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; ++i)
                    thread_deallocate(thread_list[i]);
                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
            thread_list = (thread_t *)newaddr;
        }

        *threads_out = thread_list;
        *count = actual;
    }

    return (KERN_SUCCESS);
}
__private_extern__ kern_return_t
chudxnu_all_tasks(
    task_array_t            *task_list,
    mach_msg_type_number_t  *count)
{
    return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}
__private_extern__ kern_return_t
chudxnu_free_task_list(
    task_array_t            *task_list,
    mach_msg_type_number_t  *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *task_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            task_deallocate((*task_list)[i]);
        }
        kfree(addr, size);
        *task_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
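/*
 * Usage sketch (illustrative): chudxnu_all_tasks() hands back an array of
 * referenced task pointers, so every successful call must be paired with
 * chudxnu_free_task_list(), which drops the references and frees the array.
 * chud_count_64bit_tasks() is a hypothetical caller.
 */
#if 0 /* example only -- not compiled */
static void
chud_count_64bit_tasks(void)
{
    task_array_t list = NULL;
    mach_msg_type_number_t count = 0;

    if(chudxnu_all_tasks(&list, &count) == KERN_SUCCESS) {
        mach_msg_type_number_t i, n64 = 0;
        for(i = 0; i < count; i++) {
            if(chudxnu_is_64bit_task(list[i]))
                n64++;
        }
        /* ... report n64 somewhere ... */
        (void)chudxnu_free_task_list(&list, &count); // drops refs, frees array
    }
}
#endif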
__private_extern__ kern_return_t
chudxnu_all_threads(
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}
__private_extern__ kern_return_t
chudxnu_task_threads(
    task_t                  task,
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    return chudxnu_private_task_threads(task, thread_list, count);
}
__private_extern__ kern_return_t
chudxnu_free_thread_list(
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *thread_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            thread_deallocate((*thread_list)[i]);
        }
        kfree(addr, size);
        *thread_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}
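/*
 * Usage sketch (illustrative): the same reference discipline applies to
 * thread lists -- pair chudxnu_task_threads() (or chudxnu_all_threads())
 * with chudxnu_free_thread_list().  chud_visit_task_threads() is a
 * hypothetical caller.
 */
#if 0 /* example only -- not compiled */
static void
chud_visit_task_threads(task_t task)
{
    thread_array_t list = NULL;
    mach_msg_type_number_t count = 0;

    if(chudxnu_task_threads(task, &list, &count) == KERN_SUCCESS) {
        mach_msg_type_number_t i;
        for(i = 0; i < count; i++) {
            /* ... inspect list[i]; each entry holds a reference ... */
        }
        (void)chudxnu_free_thread_list(&list, &count);
    }
}
#endif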
__private_extern__ task_t
chudxnu_current_task(void)
{
    return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
    return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
    return get_threadtask(thread);
}
__private_extern__ kern_return_t
chudxnu_thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    return thread_info(thread, flavor, thread_info_out, thread_info_count);
}
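/*
 * Usage sketch (illustrative): chudxnu_thread_info() is a direct wrapper of
 * thread_info(), so the standard Mach flavors apply; e.g. THREAD_BASIC_INFO
 * yields run state and CPU usage.  chud_thread_is_running() is hypothetical.
 */
#if 0 /* example only -- not compiled */
static boolean_t
chud_thread_is_running(thread_t thread)
{
    thread_basic_info_data_t info;
    mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;

    if(chudxnu_thread_info(thread, THREAD_BASIC_INFO,
            (thread_info_t)&info, &info_count) == KERN_SUCCESS)
        return (info.run_state == TH_STATE_RUNNING);
    return FALSE;
}
#endif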
__private_extern__ kern_return_t
chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
{
    *timestamp = thread->last_switch;
    return KERN_SUCCESS;
}
/* thread marking stuff */
__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
    if(thread)
        return ((thread->t_chud & T_CHUD_MARKED) != 0);
    return FALSE;
}
__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
    boolean_t old_val;

    if(thread) {
        if(new_value) {
            // set the marked bit
            old_val = OSBitOrAtomic(T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
        } else {
            // clear the marked bit
            old_val = OSBitAndAtomic(~T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
        }
        // report the previous state of the marked bit
        return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
    }
    return FALSE;
}
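/*
 * Usage sketch (illustrative): the marked bit lets a sampler tag threads it
 * has already visited.  Because chudxnu_thread_set_marked() returns the
 * previous state, it doubles as an atomic test-and-set.  chud_visit_once()
 * is a hypothetical caller.
 */
#if 0 /* example only -- not compiled */
static void
chud_visit_once(thread_t thread)
{
    // returns FALSE if the thread was not previously marked
    if(!chudxnu_thread_set_marked(thread, TRUE)) {
        /* ... first visit: record this thread ... */
    }
}
#endif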