/*
 * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

// Include the correct header to find real_ncpus.
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
#elif defined(__ppc__) || defined(__ppc64__)
# include <ppc/cpu_internal.h>
#else
// Fall back on declaring it extern; the linker will sort us out.
extern unsigned int real_ncpus;
#endif

// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)

#pragma mark **** thread binding ****

/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum. If the thread is the current thread, this method will
 * force a thread_block(). The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent binding to a shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're binding the current thread, block it to force a
		 * reschedule on the target CPU, unless we're already running
		 * on the target CPU at interrupt context (in which case no
		 * reschedule is needed).
		 */
		if(thread == current_thread() &&
				!(ml_at_interrupt_context() && cpu_number() == cpu)) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if(thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}
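
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * migrate the calling thread onto a chosen CPU, do some per-CPU work,
 * then drop the binding. The helper name chud_example_run_on_cpu is
 * hypothetical.
 */
#if 0
static kern_return_t
chud_example_run_on_cpu(int cpu)
{
	kern_return_t kr;

	// Only the current thread may be bound (see restriction above).
	kr = chudxnu_bind_thread(current_thread(), cpu, 0);
	if (kr != KERN_SUCCESS)
		return kr;

	// We are now executing on the requested CPU until we unbind.
	// ... per-CPU work goes here ...

	return chudxnu_unbind_thread(current_thread(), 0);
}
#endif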

__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread)
{
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}

#pragma mark **** task and thread info ****

__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}

#define THING_TASK	0
#define THING_THREAD	1

// An exact copy of processor_set_things(), except no MIG conversion at the end.
static kern_return_t
chudxnu_private_processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;
	for (;;) {
		mutex_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */
		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		mutex_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and tasks_threads_lock is held */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
				task = (task_t)queue_next(&task->tasks)) {
			task_reference_internal(task);
			task_list[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t thread, *thread_list = (thread_t *)addr;

		for (thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
				thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	mutex_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */
		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the references we already took, then bail */
				switch (type) {

				case THING_TASK:
				{
					task_t *task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t *thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

// An exact copy of task_threads(), except no MIG conversion at the end.
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t actual;
	thread_t *thread_list;
	thread_t thread;
	vm_size_t size, size_needed;
	void *addr;
	unsigned int i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */
		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */
		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
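
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * snapshot every task in the system, inspect each one, then release the
 * references and the array with the matching free routine. The helper
 * name chud_example_count_64bit_tasks is hypothetical.
 */
#if 0
static unsigned int
chud_example_count_64bit_tasks(void)
{
	task_array_t task_list = NULL;
	mach_msg_type_number_t count = 0, i;
	unsigned int n64 = 0;

	if (chudxnu_all_tasks(&task_list, &count) != KERN_SUCCESS)
		return 0;

	for (i = 0; i < count; i++) {
		if (chudxnu_is_64bit_task(task_list[i]))
			n64++;
	}

	// Drops one task reference per entry, frees the array, zeroes both.
	chudxnu_free_task_list(&task_list, &count);
	return n64;
}
#endif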

__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t			task,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
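
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * take references on every thread in a task, walk the list, then release
 * them. The helper name chud_example_count_task_threads is hypothetical.
 */
#if 0
static mach_msg_type_number_t
chud_example_count_task_threads(task_t task)
{
	thread_array_t thread_list = NULL;
	mach_msg_type_number_t count = 0, n;

	if (chudxnu_task_threads(task, &thread_list, &count) != KERN_SUCCESS)
		return 0;

	// Each thread_list[i] holds a reference and may be inspected here.
	n = count;

	// Drops one thread reference per entry, frees the array, zeroes both.
	chudxnu_free_thread_list(&thread_list, &count);
	return n;
}
#endif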

__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}

__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

__private_extern__ kern_return_t
chudxnu_thread_last_context_switch(thread_t thread, uint64_t *timestamp)
{
	*timestamp = thread->last_switch;
	return KERN_SUCCESS;
}

/* thread marking stuff */

__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
	if(thread)
		return ((thread->t_chud & T_CHUD_MARKED) != 0);
	return FALSE;
}

// Atomically updates the mark bit and returns its previous state.
__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
	boolean_t old_val;

	if(thread) {
		if(new_value) {
			// set the marked bit
			old_val = OSBitOrAtomic(T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
		} else {
			// clear the marked bit
			old_val = OSBitAndAtomic(~T_CHUD_MARKED, (UInt32 *) &(thread->t_chud));
		}
		return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
	}
	return FALSE;
}
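
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * because the setter returns the previous state of the mark bit, a caller
 * can set the mark and learn whether it was already set in one atomic
 * step, e.g. to process each thread at most once per sampling pass. The
 * helper names chud_example_visit_once and chud_example_reset are
 * hypothetical.
 */
#if 0
static void
chud_example_visit_once(thread_t thread)
{
	// Set the mark; a FALSE return means we are the first visitor.
	if (!chudxnu_thread_set_marked(thread, TRUE)) {
		// one-time work for this thread goes here
	}
}

static void
chud_example_reset(thread_t thread)
{
	// Clear the mark so the next pass visits the thread again.
	(void)chudxnu_thread_set_marked(thread, FALSE);
}
#endif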