/*
 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>

#include <machine/machine_routines.h>

// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
#include <i386/mp.h>
#endif // i386 or x86_64

#if defined(__ppc__) || defined(__ppc64__)
#include <ppc/cpu_internal.h>
#endif // ppc or ppc64

#pragma mark **** thread binding ****

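/*
 * Bind a thread to a specific processor.  The bind is refused if the cpu
 * number is out of range or the target processor is off line or shutting
 * down.  When the caller binds the current thread, it blocks once so the
 * binding takes effect immediately.
 */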
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu)
{
    processor_t proc = NULL;

    if(cpu >= real_ncpus) // sanity check
        return KERN_FAILURE;

    proc = cpu_to_processor(cpu);

    if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
            !(proc->state == PROCESSOR_SHUTDOWN)) {
        /* disallow bind to shutdown processor */
        thread_bind(thread, proc);
        if(thread == current_thread()) {
            (void)thread_block(THREAD_CONTINUE_NULL);
        }
        return KERN_SUCCESS;
    }
    return KERN_FAILURE;
}

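/*
 * Remove any processor binding so the scheduler is free to run the
 * thread on any available CPU again.
 */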
__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread)
{
    thread_bind(thread, PROCESSOR_NULL);
    return KERN_SUCCESS;
}

#pragma mark **** task and thread info ****

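/* Report whether a task has a 64-bit address space. */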
__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
    return (task_has_64BitAddr(task));
}

#define THING_TASK      0
#define THING_THREAD    1

// an exact copy of processor_set_things() except no mig conversion at the end!
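/*
 * Snapshot every task or thread (selected by 'type') in a processor set.
 * Each returned object carries a reference that the caller must drop;
 * the chudxnu_free_*_list() routines below do that and release the array.
 */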
static kern_return_t
chudxnu_private_processor_set_things(
    processor_set_t         pset,
    mach_port_t             **thing_list,
    mach_msg_type_number_t  *count,
    int                     type)
{
    unsigned int actual;    /* this many things */
    unsigned int maxthings;
    unsigned int i;

    vm_size_t size, size_needed;
    void *addr;

    if (pset == PROCESSOR_SET_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = 0;

    for (;;) {
        pset_lock(pset);
        if (!pset->active) {
            pset_unlock(pset);

            return (KERN_FAILURE);
        }

        if (type == THING_TASK)
            maxthings = pset->task_count;
        else
            maxthings = pset->thread_count;

        /* do we have the memory we need? */

        size_needed = maxthings * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the pset and allocate more memory */
        pset_unlock(pset);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the processor_set is locked & active */

    actual = 0;
    switch (type) {

    case THING_TASK:
    {
        task_t task, *tasks = (task_t *)addr;

        for (task = (task_t)queue_first(&pset->tasks);
                !queue_end(&pset->tasks, (queue_entry_t)task);
                    task = (task_t)queue_next(&task->pset_tasks)) {
            task_reference_internal(task);
            tasks[actual++] = task;
        }

        break;
    }

    case THING_THREAD:
    {
        thread_t thread, *threads = (thread_t *)addr;

        for (i = 0, thread = (thread_t)queue_first(&pset->threads);
                !queue_end(&pset->threads, (queue_entry_t)thread);
                    thread = (thread_t)queue_next(&thread->pset_threads)) {
            thread_reference_internal(thread);
            threads[actual++] = thread;
        }

        break;
    }
    }

    pset_unlock(pset);

    if (actual < maxthings)
        size_needed = actual * sizeof (mach_port_t);

    if (actual == 0) {
        /* no things, so return null pointer and deallocate memory */
        *thing_list = NULL;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                switch (type) {

                case THING_TASK:
                {
                    task_t *tasks = (task_t *)addr;

                    for (i = 0; i < actual; i++)
                        task_deallocate(tasks[i]);
                    break;
                }

                case THING_THREAD:
                {
                    thread_t *threads = (thread_t *)addr;

                    for (i = 0; i < actual; i++)
                        thread_deallocate(threads[i]);
                    break;
                }
                }

                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy((void *) addr, (void *) newaddr, size_needed);
            kfree(addr, size);
            addr = newaddr;
        }

        *thing_list = (mach_port_t *)addr;
        *count = actual;
    }

    return (KERN_SUCCESS);
}

// an exact copy of task_threads() except no mig conversion at the end!
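/*
 * Collect a referenced list of every thread in a task.  The same
 * allocate/lock/retry pattern as above is used: the buffer is sized
 * outside the task lock and the walk is retried until it fits.
 */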
static kern_return_t
chudxnu_private_task_threads(
    task_t                  task,
    thread_act_array_t      *threads_out,
    mach_msg_type_number_t  *count)
{
    mach_msg_type_number_t actual;
    thread_t *threads;
    thread_t thread;
    vm_size_t size, size_needed;
    void *addr;
    unsigned int i, j;

    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    size = 0; addr = 0;

    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);

            if (size != 0)
                kfree(addr, size);

            return (KERN_FAILURE);
        }

        actual = task->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof (mach_port_t);
        if (size_needed <= size)
            break;

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return (KERN_RESOURCE_SHORTAGE);
    }

    /* OK, have memory and the task is locked & active */
    threads = (thread_t *)addr;

    i = j = 0;

    for (thread = (thread_t)queue_first(&task->threads); i < actual;
            ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
        thread_reference_internal(thread);
        threads[j++] = thread;
    }

    assert(queue_end(&task->threads, (queue_entry_t)thread));

    actual = j;
    size_needed = actual * sizeof (mach_port_t);

    /* can unlock task now that we've got the thread refs */
    task_unlock(task);

    if (actual == 0) {
        /* no threads, so return null pointer and deallocate memory */
        *threads_out = NULL;
        *count = 0;

        if (size != 0)
            kfree(addr, size);
    }
    else {
        /* if we allocated too much, must copy */
        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; ++i)
                    thread_deallocate(threads[i]);
                kfree(addr, size);
                return (KERN_RESOURCE_SHORTAGE);
            }

            bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
            threads = (thread_t *)newaddr;
        }

        *threads_out = threads;
        *count = actual;
    }

    return (KERN_SUCCESS);
}

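/*
 * Return a referenced list of every task in the default processor set.
 * Free the result with chudxnu_free_task_list().
 */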
__private_extern__ kern_return_t
chudxnu_all_tasks(
    task_array_t            *task_list,
    mach_msg_type_number_t  *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)task_list, count, THING_TASK);
}

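/*
 * Drop the reference on each task in a list returned by chudxnu_all_tasks()
 * and free the list storage itself.
 */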
__private_extern__ kern_return_t
chudxnu_free_task_list(
    task_array_t            *task_list,
    mach_msg_type_number_t  *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *task_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            task_deallocate((*task_list)[i]);
        }
        kfree(addr, size);
        *task_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

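/*
 * Return a referenced list of every thread in the default processor set.
 * Free the result with chudxnu_free_thread_list().
 */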
__private_extern__ kern_return_t
chudxnu_all_threads(
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)thread_list, count, THING_THREAD);
}

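/*
 * Return a referenced list of the threads belonging to a single task,
 * also freed with chudxnu_free_thread_list().
 */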
__private_extern__ kern_return_t
chudxnu_task_threads(
    task_t                  task,
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    return chudxnu_private_task_threads(task, thread_list, count);
}

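/*
 * Drop the reference on each thread in a list returned by
 * chudxnu_all_threads() or chudxnu_task_threads() and free the storage.
 */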
__private_extern__ kern_return_t
chudxnu_free_thread_list(
    thread_array_t          *thread_list,
    mach_msg_type_number_t  *count)
{
    vm_size_t size = (*count)*sizeof(mach_port_t);
    void *addr = *thread_list;

    if(addr) {
        int i, maxCount = *count;
        for(i=0; i<maxCount; i++) {
            thread_deallocate((*thread_list)[i]);
        }
        kfree(addr, size);
        *thread_list = NULL;
        *count = 0;
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

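/*
 * Convenience accessors: the calling context's task and thread, and the
 * task that owns a given thread.
 */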
__private_extern__ task_t
chudxnu_current_task(void)
{
    return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
    return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
    return get_threadtask(thread);
}

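/*
 * Thin wrapper around thread_info(); the flavor selects which
 * thread_info structure is filled in.
 */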
__private_extern__ kern_return_t
chudxnu_thread_info(
    thread_t                thread,
    thread_flavor_t         flavor,
    thread_info_t           thread_info_out,
    mach_msg_type_number_t  *thread_info_count)
{
    return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

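/*
 * Report when the thread last context-switched, as recorded by the
 * scheduler in thread->last_switch.
 */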
__private_extern__ kern_return_t
chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
{
    *timestamp = thread->last_switch;
    return KERN_SUCCESS;
}