]> git.saurik.com Git - apple/xnu.git/blame - osfmk/chud/chud_thread.c
xnu-792.25.20.tar.gz
[apple/xnu.git] / osfmk / chud / chud_thread.c
CommitLineData
0c530ab8
A
1/*
2 * Copyright (c) 2003-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23#include <mach/mach_types.h>
24#include <mach/task.h>
25#include <mach/thread_act.h>
26
27#include <kern/kern_types.h>
28#include <kern/processor.h>
29#include <kern/thread.h>
30#include <kern/kalloc.h>
31
32#include <chud/chud_xnu.h>
33#include <chud/chud_xnu_private.h>
34
35#include <machine/machine_routines.h>
36
37// include the correct file to find real_ncpus
38#if defined(__i386__) || defined(__x86_64__)
39# include <i386/mp.h>
40#endif // i386 or x86_64
41
42#if defined(__ppc__) || defined(__ppc64__)
43# include <ppc/cpu_internal.h>
44#endif // ppc or ppc64
45
46#pragma mark **** thread binding ****
47
48__private_extern__ kern_return_t
49chudxnu_bind_thread(thread_t thread, int cpu)
50{
51 processor_t proc = NULL;
52
53 if(cpu >= real_ncpus) // sanity check
54 return KERN_FAILURE;
55
56 proc = cpu_to_processor(cpu);
57
58 if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
59 !(proc->state == PROCESSOR_SHUTDOWN)) {
60 /* disallow bind to shutdown processor */
61 thread_bind(thread, proc);
62 if(thread==current_thread()) {
63 (void)thread_block(THREAD_CONTINUE_NULL);
64 }
65 return KERN_SUCCESS;
66 }
67 return KERN_FAILURE;
68}
69
70__private_extern__ kern_return_t
71chudxnu_unbind_thread(thread_t thread)
72{
73 thread_bind(thread, PROCESSOR_NULL);
74 return KERN_SUCCESS;
75}
76
77#pragma mark **** task and thread info ****
78
79__private_extern__
80boolean_t chudxnu_is_64bit_task(task_t task)
81{
82 return (task_has_64BitAddr(task));
83}
84
85#define THING_TASK 0
86#define THING_THREAD 1
87
88// an exact copy of processor_set_things() except no mig conversion at the end!
89static kern_return_t
90chudxnu_private_processor_set_things(
91 processor_set_t pset,
92 mach_port_t **thing_list,
93 mach_msg_type_number_t *count,
94 int type)
95{
96 unsigned int actual; /* this many things */
97 unsigned int maxthings;
98 unsigned int i;
99
100 vm_size_t size, size_needed;
101 void *addr;
102
103 if (pset == PROCESSOR_SET_NULL)
104 return (KERN_INVALID_ARGUMENT);
105
106 size = 0; addr = 0;
107
108 for (;;) {
109 pset_lock(pset);
110 if (!pset->active) {
111 pset_unlock(pset);
112
113 return (KERN_FAILURE);
114 }
115
116 if (type == THING_TASK)
117 maxthings = pset->task_count;
118 else
119 maxthings = pset->thread_count;
120
121 /* do we have the memory we need? */
122
123 size_needed = maxthings * sizeof (mach_port_t);
124 if (size_needed <= size)
125 break;
126
127 /* unlock the pset and allocate more memory */
128 pset_unlock(pset);
129
130 if (size != 0)
131 kfree(addr, size);
132
133 assert(size_needed > 0);
134 size = size_needed;
135
136 addr = kalloc(size);
137 if (addr == 0)
138 return (KERN_RESOURCE_SHORTAGE);
139 }
140
141 /* OK, have memory and the processor_set is locked & active */
142
143 actual = 0;
144 switch (type) {
145
146 case THING_TASK:
147 {
148 task_t task, *tasks = (task_t *)addr;
149
150 for (task = (task_t)queue_first(&pset->tasks);
151 !queue_end(&pset->tasks, (queue_entry_t)task);
152 task = (task_t)queue_next(&task->pset_tasks)) {
153 task_reference_internal(task);
154 tasks[actual++] = task;
155 }
156
157 break;
158 }
159
160 case THING_THREAD:
161 {
162 thread_t thread, *threads = (thread_t *)addr;
163
164 for (i = 0, thread = (thread_t)queue_first(&pset->threads);
165 !queue_end(&pset->threads, (queue_entry_t)thread);
166 thread = (thread_t)queue_next(&thread->pset_threads)) {
167 thread_reference_internal(thread);
168 threads[actual++] = thread;
169 }
170
171 break;
172 }
173 }
174
175 pset_unlock(pset);
176
177 if (actual < maxthings)
178 size_needed = actual * sizeof (mach_port_t);
179
180 if (actual == 0) {
181 /* no things, so return null pointer and deallocate memory */
182 *thing_list = 0;
183 *count = 0;
184
185 if (size != 0)
186 kfree(addr, size);
187 }
188 else {
189 /* if we allocated too much, must copy */
190
191 if (size_needed < size) {
192 void *newaddr;
193
194 newaddr = kalloc(size_needed);
195 if (newaddr == 0) {
196 switch (type) {
197
198 case THING_TASK:
199 {
200 task_t *tasks = (task_t *)addr;
201
202 for (i = 0; i < actual; i++)
203 task_deallocate(tasks[i]);
204 break;
205 }
206
207 case THING_THREAD:
208 {
209 thread_t *threads = (thread_t *)addr;
210
211 for (i = 0; i < actual; i++)
212 thread_deallocate(threads[i]);
213 break;
214 }
215 }
216
217 kfree(addr, size);
218 return (KERN_RESOURCE_SHORTAGE);
219 }
220
221 bcopy((void *) addr, (void *) newaddr, size_needed);
222 kfree(addr, size);
223 addr = newaddr;
224 }
225
226 *thing_list = (mach_port_t *)addr;
227 *count = actual;
228 }
229
230 return (KERN_SUCCESS);
231}
232
233// an exact copy of task_threads() except no mig conversion at the end!
/*
 * An exact copy of task_threads() except no mig conversion at the end!
 * Returns a kalloc'd array of thread_t, each holding a reference taken
 * via thread_reference_internal() -- NOT converted send rights.  The
 * caller releases everything with chudxnu_free_thread_list().
 *
 * task:        task to enumerate (TASK_NULL rejected)
 * threads_out: out -- kalloc'd array of referenced threads
 * count:       out -- number of valid entries in *threads_out
 */
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*threads;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = 0;

	/*
	 * Allocate-then-recheck loop: the thread count can change while
	 * the task is unlocked for kalloc(), so keep retrying until the
	 * buffer is large enough while the task lock is held.
	 */
	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			/* free any buffer from a previous pass */
			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	threads = (thread_t *)addr;

	i = j = 0;

	/* take a reference on each thread while the list is stable */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
	     ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		threads[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the references taken above */
				for (i = 0; i < actual; ++i)
					thread_deallocate(threads[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			threads = (thread_t *)newaddr;
		}

		*threads_out = threads;
		*count = actual;
	}

	return (KERN_SUCCESS);
}
337
338
339__private_extern__ kern_return_t
340chudxnu_all_tasks(
341 task_array_t *task_list,
342 mach_msg_type_number_t *count)
343{
344 return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)task_list, count, THING_TASK);
345}
346
347__private_extern__ kern_return_t
348chudxnu_free_task_list(
349 task_array_t *task_list,
350 mach_msg_type_number_t *count)
351{
352 vm_size_t size = (*count)*sizeof(mach_port_t);
353 void *addr = *task_list;
354
355 if(addr) {
356 int i, maxCount = *count;
357 for(i=0; i<maxCount; i++) {
358 task_deallocate((*task_list)[i]);
359 }
360 kfree(addr, size);
361 *task_list = NULL;
362 *count = 0;
363 return KERN_SUCCESS;
364 } else {
365 return KERN_FAILURE;
366 }
367}
368
369__private_extern__ kern_return_t
370chudxnu_all_threads(
371 thread_array_t *thread_list,
372 mach_msg_type_number_t *count)
373{
374 return chudxnu_private_processor_set_things(&default_pset, (mach_port_t **)thread_list, count, THING_THREAD);
375}
376
377__private_extern__ kern_return_t
378chudxnu_task_threads(
379 task_t task,
380 thread_array_t *thread_list,
381 mach_msg_type_number_t *count)
382{
383 return chudxnu_private_task_threads(task, thread_list, count);
384}
385
386__private_extern__ kern_return_t
387chudxnu_free_thread_list(
388 thread_array_t *thread_list,
389 mach_msg_type_number_t *count)
390{
391 vm_size_t size = (*count)*sizeof(mach_port_t);
392 void *addr = *thread_list;
393
394 if(addr) {
395 int i, maxCount = *count;
396 for(i=0; i<maxCount; i++) {
397 thread_deallocate((*thread_list)[i]);
398 }
399 kfree(addr, size);
400 *thread_list = NULL;
401 *count = 0;
402 return KERN_SUCCESS;
403 } else {
404 return KERN_FAILURE;
405 }
406}
407
408__private_extern__ task_t
409chudxnu_current_task(void)
410{
411 return current_task();
412}
413
414__private_extern__ thread_t
415chudxnu_current_thread(void)
416{
417 return current_thread();
418}
419
420__private_extern__ task_t
421chudxnu_task_for_thread(thread_t thread)
422{
423 return get_threadtask(thread);
424}
425
426__private_extern__ kern_return_t
427chudxnu_thread_info(
428 thread_t thread,
429 thread_flavor_t flavor,
430 thread_info_t thread_info_out,
431 mach_msg_type_number_t *thread_info_count)
432{
433 return thread_info(thread, flavor, thread_info_out, thread_info_count);
434}
435
436__private_extern__ kern_return_t
437chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
438{
439 *timestamp = thread->last_switch;
440 return KERN_SUCCESS;
441}
442