/* osfmk/chud/chud_thread.c - apple/xnu (xnu-2422.115.4) */
/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

#if KPC
#include <kern/kpc.h>
#endif

#if KPERF
#include <kperf/kperf.h>
#endif

// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
#else
// fall back on declaring it extern. The linker will sort us out.
extern unsigned int real_ncpus;
#endif

// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)

#if 0
#pragma mark **** thread binding ****
#endif

/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum. If the thread is the current thread, this method will
 * force a thread_block(). The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent bind to shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() &&
			!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if(thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}

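/*
 * Illustrative sketch (not part of this file's build): one way a profiling
 * client could use the binding calls above to run a sampling routine on a
 * specific CPU. "example_sample_on_cpu" and "target_cpu" are hypothetical;
 * only the chudxnu_* calls and current_thread()/KERN_SUCCESS are real.
 */
#if 0
static void
example_sample_on_cpu(int target_cpu)
{
	thread_t self = current_thread();

	/*
	 * On success (and outside interrupt context) the calling thread is
	 * now running on target_cpu, per the comment on chudxnu_bind_thread().
	 */
	if (chudxnu_bind_thread(self, target_cpu, 0) != KERN_SUCCESS)
		return;

	/* ... sample per-CPU state here ... */

	/* Remove the binding so the scheduler may migrate the thread again. */
	(void)chudxnu_unbind_thread(self, 0);
}
#endif
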
__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread) {
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}

__private_extern__ int
chudxnu_thread_get_scheduler_state(thread_t thread) {
	/*
	 * Instantaneous snapshot of the scheduler state of
	 * a given thread.
	 *
	 * MUST ONLY be called on an interrupted or
	 * locked thread, to avoid a race.
	 */

	int state = 0;
	int schedulerState = (volatile int)(thread->state);
	processor_t lastProcessor = (volatile processor_t)(thread->last_processor);

	if ((PROCESSOR_NULL != lastProcessor) && (thread == lastProcessor->active_thread)) {
		state |= CHUDXNU_TS_RUNNING;
	}

	if (schedulerState & TH_RUN) {
		state |= CHUDXNU_TS_RUNNABLE;
	}

	if (schedulerState & TH_WAIT) {
		state |= CHUDXNU_TS_WAIT;
	}

	if (schedulerState & TH_UNINT) {
		state |= CHUDXNU_TS_UNINT;
	}

	if (schedulerState & TH_SUSP) {
		state |= CHUDXNU_TS_SUSP;
	}

	if (schedulerState & TH_TERMINATE) {
		state |= CHUDXNU_TS_TERMINATE;
	}

	if (schedulerState & TH_IDLE) {
		state |= CHUDXNU_TS_IDLE;
	}

	return state;
}

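/*
 * Illustrative sketch (not part of this file's build): decoding the bit mask
 * returned by chudxnu_thread_get_scheduler_state(). "example_thread_is_blocked"
 * is hypothetical; the CHUDXNU_TS_* flags are the ones set above. The caller is
 * assumed to have interrupted or locked the thread, per the comment above.
 */
#if 0
static boolean_t
example_thread_is_blocked(thread_t thread)
{
	int state = chudxnu_thread_get_scheduler_state(thread);

	/* Waiting and not currently on a processor. */
	return (state & CHUDXNU_TS_WAIT) && !(state & CHUDXNU_TS_RUNNING);
}
#endif
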
#if 0
#pragma mark **** task and thread info ****
#endif

__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}

#define THING_TASK	0
#define THING_THREAD	1

// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t
chudxnu_private_processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void  *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
		     !queue_end(&tasks, (queue_entry_t)task);
		     task = (task_t)queue_next(&task->tasks)) {
			task_reference_internal(task);
			task_list[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t thread, *thread_list = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&threads);
		     !queue_end(&threads, (queue_entry_t)thread);
		     thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t *task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t *thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*thread_list;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
	     ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
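
/*
 * Illustrative sketch (not part of this file's build): enumerating every task
 * with chudxnu_all_tasks() and releasing the references afterwards with
 * chudxnu_free_task_list(). "example_visit_all_tasks" is hypothetical.
 */
#if 0
static void
example_visit_all_tasks(void)
{
	task_array_t task_list = NULL;
	mach_msg_type_number_t task_count = 0;
	unsigned int i;

	if (chudxnu_all_tasks(&task_list, &task_count) != KERN_SUCCESS)
		return;

	for (i = 0; i < task_count; i++) {
		/* ... inspect task_list[i]; each entry holds a task reference ... */
	}

	/* Drops the per-task references, frees the array, and zeroes the count. */
	(void)chudxnu_free_task_list(&task_list, &task_count);
}
#endif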

__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t			task,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
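
/*
 * Illustrative sketch (not part of this file's build): walking one task's
 * threads via chudxnu_task_threads() and releasing them with
 * chudxnu_free_thread_list(). "example_visit_task_threads" is hypothetical.
 */
#if 0
static void
example_visit_task_threads(task_t task)
{
	thread_array_t thread_list = NULL;
	mach_msg_type_number_t thread_count = 0;
	unsigned int i;

	if (chudxnu_task_threads(task, &thread_list, &thread_count) != KERN_SUCCESS)
		return;

	for (i = 0; i < thread_count; i++) {
		/* ... inspect thread_list[i]; each entry holds a thread reference ... */
	}

	(void)chudxnu_free_thread_list(&thread_list, &thread_count);
}
#endif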

__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}

__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

/* thread marking stuff */

__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
	if(thread)
		return ((thread->t_chud & T_CHUD_MARKED) != 0);
	return FALSE;
}

__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
	boolean_t old_val;

	if(thread) {
		if(new_value) {
			// set the marked bit
			old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
		} else {
			// clear the marked bit
			old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
		}
		return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
	}
	return FALSE;
}

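/*
 * Illustrative sketch (not part of this file's build): using the marked bit
 * for one-time per-thread setup. chudxnu_thread_set_marked() returns the
 * previous value of the bit, so with the atomic OR above the first caller to
 * set it wins. "example_mark_thread_once" is hypothetical.
 */
#if 0
static void
example_mark_thread_once(thread_t thread)
{
	if (!chudxnu_thread_set_marked(thread, TRUE)) {
		/* Previous value was FALSE: we are the first to mark this thread. */
		/* ... perform one-time per-thread setup here ... */
	}
}
#endif
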
/* XXX: good thing this code is experimental... */

/* external handler */
extern void (*chudxnu_thread_ast_handler)(thread_t);
void (*chudxnu_thread_ast_handler)(thread_t) = NULL;

/* AST callback to dispatch to AppleProfile */
extern void chudxnu_thread_ast(thread_t);
void
chudxnu_thread_ast(thread_t thread)
{
#if KPC
	/* check for PMC work */
	kpc_thread_ast_handler(thread);
#endif

#if KPERF
	/* check for kperf work */
	kperf_thread_ast_handler(thread);
#endif

	/* atomicness for kdebug events */
	void (*handler)(thread_t) = chudxnu_thread_ast_handler;
	if( handler )
		handler( thread );

	thread->t_chud = 0;
}


/* Get and set bits on the thread and trigger an AST handler */
void chudxnu_set_thread_ast( thread_t thread );
void
chudxnu_set_thread_ast( thread_t thread )
{
	/* FIXME: only call this on current thread from an interrupt handler for now... */
	if( thread != current_thread() )
		panic( "unsafe AST set" );

	act_set_kperf(thread);
}

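/*
 * Illustrative sketch (not part of this file's build): how a profiling agent
 * might hook the AST path. "example_ast_callback", "example_install_ast_hook"
 * and "example_request_ast_from_interrupt" are hypothetical; the global
 * chudxnu_thread_ast_handler and chudxnu_set_thread_ast() are the real hooks
 * defined above.
 */
#if 0
static void
example_ast_callback(thread_t thread)
{
	/* Runs from chudxnu_thread_ast() when the thread's AST is processed. */
}

static void
example_install_ast_hook(void)
{
	chudxnu_thread_ast_handler = example_ast_callback;
}

static void
example_request_ast_from_interrupt(void)
{
	/* Per the FIXME above: only safe for the current thread, from interrupt context. */
	chudxnu_set_thread_ast(current_thread());
}
#endif
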
/* get and set the thread bits */
extern uint32_t chudxnu_get_thread_bits( thread_t thread );
extern void chudxnu_set_thread_bits( thread_t thread, uint32_t bits );

uint32_t
chudxnu_get_thread_bits( thread_t thread )
{
	return thread->t_chud;
}

void
chudxnu_set_thread_bits( thread_t thread, uint32_t bits )
{
	thread->t_chud = bits;
}

/*
 * Get and set thread dirty bits, so CHUD can track whether the thread
 * has been dispatched since it last looked. The caller must hold the
 * thread lock.
 */
boolean_t
chudxnu_thread_get_dirty(thread_t thread)
{
	if( thread->c_switch != thread->chud_c_switch )
		return TRUE;
	else
		return FALSE;
}

void
chudxnu_thread_set_dirty(thread_t thread, boolean_t makedirty)
{
	if( makedirty )
		thread->chud_c_switch = thread->c_switch - 1;
	else
		thread->chud_c_switch = thread->c_switch;
}
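
/*
 * Illustrative sketch (not part of this file's build): polling a thread's
 * dirty bit to see whether it has run since the last look, then re-arming it.
 * "example_thread_ran_since_last_look" is hypothetical; the caller is assumed
 * to hold the thread lock, as required by the comment above.
 */
#if 0
static boolean_t
example_thread_ran_since_last_look(thread_t thread)
{
	boolean_t dirty = chudxnu_thread_get_dirty(thread);

	/* Re-arm: record the current context-switch count as "seen". */
	chudxnu_thread_set_dirty(thread, FALSE);

	return dirty;
}
#endif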