/* osfmk/chud/chud_thread.c (apple/xnu, xnu-3247.1.106) */

/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
#else
// fall back on declaring it extern. The linker will sort us out.
extern unsigned int real_ncpus;
#endif

// Mask for supported options
#define T_CHUD_BIND_OPT_MASK	(-1UL)

#if 0
#pragma mark **** thread binding ****
#endif

/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum. If the thread is the current thread, this method will
 * force a thread_block(). The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if (cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if (thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent binding to a shutdown
	 * processor.
	 */
	if (proc && !(proc->state == PROCESSOR_OFF_LINE) &&
	    !(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if (thread == current_thread() &&
		    !ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

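#if 0
/*
 * Usage sketch (illustrative only; compiled out): pin the calling thread to
 * cpu 0, do some per-cpu work, then unbind. chud_do_percpu_work() is a
 * hypothetical stand-in for the caller's own routine. Because the bind may
 * force a thread_block() when we are not already on the target cpu, this
 * must not be called from interrupt context.
 */
static void
chud_bind_example(void)
{
	if (chudxnu_bind_thread(current_thread(), 0, 0) == KERN_SUCCESS) {
		/* now running on cpu 0 until we unbind */
		chud_do_percpu_work();
		(void)chudxnu_unbind_thread(current_thread(), 0);
	}
}
#endif
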
__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if (thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}

__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread)
{
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}

__private_extern__ int
chudxnu_thread_get_scheduler_state(thread_t thread)
{
	/*
	 * Instantaneous snapshot of the scheduler state of
	 * a given thread.
	 *
	 * MUST ONLY be called on an interrupted or
	 * locked thread, to avoid a race.
	 */

	int state = 0;
	int schedulerState = (volatile int)(thread->state);
	processor_t lastProcessor = (volatile processor_t)(thread->last_processor);

	if ((PROCESSOR_NULL != lastProcessor) && (thread == lastProcessor->active_thread)) {
		state |= CHUDXNU_TS_RUNNING;
	}

	if (schedulerState & TH_RUN) {
		state |= CHUDXNU_TS_RUNNABLE;
	}

	if (schedulerState & TH_WAIT) {
		state |= CHUDXNU_TS_WAIT;
	}

	if (schedulerState & TH_UNINT) {
		state |= CHUDXNU_TS_UNINT;
	}

	if (schedulerState & TH_SUSP) {
		state |= CHUDXNU_TS_SUSP;
	}

	if (schedulerState & TH_TERMINATE) {
		state |= CHUDXNU_TS_TERMINATE;
	}

	if (schedulerState & TH_IDLE) {
		state |= CHUDXNU_TS_IDLE;
	}

	return state;
}

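#if 0
/*
 * Usage sketch (illustrative only; compiled out): classify an interrupted
 * thread from the flag word returned above. The CHUDXNU_TS_* bits are not
 * mutually exclusive; a runnable thread that is also the active thread on
 * its last processor reports both RUNNABLE and RUNNING, so we check the
 * most specific bit first. Same caveat as the function itself: the thread
 * must be interrupted or locked.
 */
static const char *
chud_sched_state_name(thread_t thread)
{
	int state = chudxnu_thread_get_scheduler_state(thread);

	if (state & CHUDXNU_TS_RUNNING)
		return "running";
	if (state & CHUDXNU_TS_RUNNABLE)
		return "runnable";
	if (state & CHUDXNU_TS_WAIT)
		return "waiting";
	return "other";
}
#endif
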
#if 0
#pragma mark **** task and thread info ****
#endif

__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}

// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*thread_list;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
	    ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return processor_set_things(&pset0, (void **)task_list, count, PSET_THING_TASK);
}

__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if (addr) {
		int i, maxCount = *count;
		for (i = 0; i < maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return processor_set_things(&pset0, (void **)thread_list, count, PSET_THING_THREAD);
}

__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t			task,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if (addr) {
		int i, maxCount = *count;
		for (i = 0; i < maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}

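#if 0
/*
 * Usage sketch (illustrative only; compiled out): walk the threads of a
 * task. Each thread in the returned array holds a reference taken under the
 * task lock, so the list must be released with chudxnu_free_thread_list(),
 * which drops the references and frees the backing storage in one call.
 */
static void
chud_walk_task_threads(task_t task)
{
	thread_array_t threads = NULL;
	mach_msg_type_number_t count = 0;
	mach_msg_type_number_t i;

	if (chudxnu_task_threads(task, &threads, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++) {
		/* inspect threads[i] here, e.g. with chudxnu_thread_get_idle() */
	}

	(void)chudxnu_free_thread_list(&threads, &count);
}
#endif
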
__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}

__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

/* thread marking stuff */

__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
	if (thread)
		return ((thread->t_chud & T_CHUD_MARKED) != 0);
	return FALSE;
}

__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
	boolean_t old_val;

	if (thread) {
		if (new_value) {
			// set the marked bit
			old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
		} else {
			// clear the marked bit
			old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
		}
		return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
	}
	return FALSE;
}

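#if 0
/*
 * Usage sketch (illustrative only; compiled out): because the set is atomic
 * and returns the previous value, multiple observers can race to claim a
 * thread and exactly one wins. Only the caller that flips the bit from
 * clear to set (old value FALSE) performs the one-time work.
 */
static void
chud_mark_once(thread_t thread)
{
	if (!chudxnu_thread_set_marked(thread, TRUE)) {
		/* we were first: do the one-time setup for this thread */
	}
}
#endif
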
/* XXX: good thing this code is experimental... */

/* external handler */
extern void (*chudxnu_thread_ast_handler)(thread_t);
void (*chudxnu_thread_ast_handler)(thread_t) = NULL;

/* AST callback to dispatch to AppleProfile */
extern void chudxnu_thread_ast(thread_t);
void
chudxnu_thread_ast(thread_t thread)
{
#if KPC
	/* check for PMC work */
	kpc_thread_ast_handler(thread);
#endif

#if KPERF
	/* check for kperf work */
	kperf_thread_ast_handler(thread);
#endif

	/* load the handler once so the NULL check and the call see the same
	 * value (keeps kdebug events atomic w.r.t. handler unregistration) */
	void (*handler)(thread_t) = chudxnu_thread_ast_handler;
	if (handler)
		handler(thread);

	thread->t_chud = 0;
}


/* Get and set bits on the thread and trigger an AST handler */
void chudxnu_set_thread_ast(thread_t thread);
void
chudxnu_set_thread_ast(thread_t thread)
{
	/* FIXME: only call this on current thread from an interrupt handler for now... */
	if (thread != current_thread())
		panic("unsafe AST set");

	act_set_kperf(thread);
}

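#if 0
/*
 * Usage sketch (illustrative only; compiled out): from a timer or PMI
 * interrupt, request deferred work on the interrupted thread. The handler
 * registered in chudxnu_thread_ast_handler then runs via chudxnu_thread_ast()
 * in thread context when the thread next processes its ASTs, outside
 * interrupt context (this description of AST delivery timing is an
 * assumption based on the FIXME and panic() check above).
 */
static void
chud_interrupt_hook(void)
{
	/* must be the current thread; see the panic() check above */
	chudxnu_set_thread_ast(current_thread());
}
#endif
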
/* get and set the thread bits */
extern uint32_t chudxnu_get_thread_bits(thread_t thread);
extern void chudxnu_set_thread_bits(thread_t thread, uint32_t bits);

uint32_t
chudxnu_get_thread_bits(thread_t thread)
{
	return thread->t_chud;
}

void
chudxnu_set_thread_bits(thread_t thread, uint32_t bits)
{
	thread->t_chud = bits;
}

/*
 * Get and set the thread dirty bit, so CHUD can track whether a thread
 * has been dispatched since it last looked. The caller must hold the
 * thread lock.
 */
boolean_t
chudxnu_thread_get_dirty(thread_t thread)
{
	if (thread->c_switch != thread->chud_c_switch)
		return TRUE;
	else
		return FALSE;
}

void
chudxnu_thread_set_dirty(thread_t thread, boolean_t makedirty)
{
	if (makedirty)
		thread->chud_c_switch = thread->c_switch - 1;
	else
		thread->chud_c_switch = thread->c_switch;
}
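
#if 0
/*
 * Usage sketch (illustrative only; compiled out): a sampler that only wants
 * threads that have run since its last visit. The flag is implemented by
 * desynchronizing the cached context-switch count (chud_c_switch) from the
 * live one (c_switch): setting dirty forces them unequal, any dispatch keeps
 * them unequal, and clearing resynchronizes them. The thread lock must be
 * held, per the comment above.
 */
static boolean_t
chud_sample_if_dispatched(thread_t thread)
{
	if (!chudxnu_thread_get_dirty(thread))
		return FALSE;	/* not dispatched since last look */

	chudxnu_thread_set_dirty(thread, FALSE);	/* resynchronize */
	/* take the sample here */
	return TRUE;
}
#endif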