/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
#	include <i386/mp.h>
#else
// fall back on declaring it extern.  The linker will sort us out.
extern unsigned int real_ncpus;
#endif

// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)

#if 0
#pragma mark **** thread binding ****
#endif

/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum.  If the thread is the current thread, this method will
 * force a thread_block().  The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racey, but mainly to prevent bind to shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() &&
				!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if(thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}

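/*
 * Usage sketch (illustrative, not part of the original file): pin the
 * calling thread to a CPU, do some work there, then release the binding.
 * The options argument is currently unused, so 0 is passed here.
 *
 *	if (chudxnu_bind_thread(current_thread(), 0, 0) == KERN_SUCCESS) {
 *		// ... now executing on CPU 0 ...
 *		chudxnu_unbind_thread(current_thread(), 0);
 *	}
 */
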
__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread) {
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}

__private_extern__ int
chudxnu_thread_get_scheduler_state(thread_t thread) {
	/*
	 * Instantaneous snapshot of the scheduler state of
	 * a given thread.
	 *
	 * MUST ONLY be called on an interrupted or
	 * locked thread, to avoid a race.
	 */

	int state = 0;
	int schedulerState = (volatile int)(thread->state);
	processor_t lastProcessor = (volatile processor_t)(thread->last_processor);

	if ((PROCESSOR_NULL != lastProcessor) && (thread == lastProcessor->active_thread)) {
		state |= CHUDXNU_TS_RUNNING;
	}

	if (schedulerState & TH_RUN) {
		state |= CHUDXNU_TS_RUNNABLE;
	}

	if (schedulerState & TH_WAIT) {
		state |= CHUDXNU_TS_WAIT;
	}

	if (schedulerState & TH_UNINT) {
		state |= CHUDXNU_TS_UNINT;
	}

	if (schedulerState & TH_SUSP) {
		state |= CHUDXNU_TS_SUSP;
	}

	if (schedulerState & TH_TERMINATE) {
		state |= CHUDXNU_TS_TERMINATE;
	}

	if (schedulerState & TH_IDLE) {
		state |= CHUDXNU_TS_IDLE;
	}

	return state;
}

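/*
 * Decoding sketch (illustrative, not part of the original file): the
 * returned value is a bitmask, so callers test individual CHUDXNU_TS_*
 * flags rather than comparing the whole value for equality.
 *
 *	int ts = chudxnu_thread_get_scheduler_state(thread);
 *	if ((ts & CHUDXNU_TS_RUNNABLE) && !(ts & CHUDXNU_TS_WAIT)) {
 *		// thread is runnable (or running) and not blocked
 *	}
 */
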
#if 0
#pragma mark **** task and thread info ****
#endif

__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}

#define THING_TASK	0
#define THING_THREAD	1

// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t
chudxnu_private_processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
				task = (task_t)queue_next(&task->tasks)) {
			task_reference_internal(task);
			task_list[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t thread, *thread_list = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
				thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t *task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t *thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*thread_list;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}


__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
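
/*
 * Usage sketch (illustrative, not part of the original file): every task
 * returned by chudxnu_all_tasks() carries a reference, so the list must be
 * handed back to chudxnu_free_task_list() to drop the refs and the buffer.
 *
 *	task_array_t tasks;
 *	mach_msg_type_number_t task_count;
 *	if (chudxnu_all_tasks(&tasks, &task_count) == KERN_SUCCESS) {
 *		// ... inspect tasks[0 .. task_count-1] ...
 *		chudxnu_free_task_list(&tasks, &task_count);
 *	}
 */
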
__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t			task,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
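
/*
 * Usage sketch (illustrative, not part of the original file): the thread
 * lists follow the same reference discipline as the task lists above.
 *
 *	thread_array_t threads;
 *	mach_msg_type_number_t thread_count;
 *	if (chudxnu_task_threads(task, &threads, &thread_count) == KERN_SUCCESS) {
 *		// ... inspect threads[0 .. thread_count-1] ...
 *		chudxnu_free_thread_list(&threads, &thread_count);
 *	}
 */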

__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}

__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

2d21ac55 | 524 | |
2d21ac55 A |
525 | /* thread marking stuff */ |
526 | ||
527 | __private_extern__ boolean_t | |
528 | chudxnu_thread_get_marked(thread_t thread) | |
529 | { | |
530 | if(thread) | |
531 | return ((thread->t_chud & T_CHUD_MARKED) != 0); | |
532 | return FALSE; | |
533 | } | |
534 | ||
535 | __private_extern__ boolean_t | |
536 | chudxnu_thread_set_marked(thread_t thread, boolean_t new_value) | |
537 | { | |
538 | boolean_t old_val; | |
539 | ||
540 | if(thread) { | |
541 | if(new_value) { | |
542 | // set the marked bit | |
b0d623f7 | 543 | old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud)); |
2d21ac55 A |
544 | } else { |
545 | // clear the marked bit | |
b0d623f7 | 546 | old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud)); |
2d21ac55 A |
547 | } |
548 | return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED; | |
549 | } | |
550 | return FALSE; | |
551 | } | |
552 | ||
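/*
 * Usage sketch (illustrative, not part of the original file): the atomic
 * read-modify-write in chudxnu_thread_set_marked() returns the *previous*
 * marked state, so a profiler can mark a thread and learn whether it had
 * already been seen in a single step.
 *
 *	if (!chudxnu_thread_set_marked(thread, TRUE)) {
 *		// first time we've seen this thread; record it
 *	}
 */
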
/* XXX: good thing this code is experimental... */

/* external handler */
extern void (*chudxnu_thread_ast_handler)(thread_t);
void (*chudxnu_thread_ast_handler)(thread_t) = NULL;

/* AST callback to dispatch to AppleProfile */
extern void chudxnu_thread_ast(thread_t);
void
chudxnu_thread_ast(thread_t thread)
{
	/* read the handler pointer once, for atomicity w.r.t. kdebug events */
	void (*handler)(thread_t) = chudxnu_thread_ast_handler;
	if( handler )
		handler( thread );

	thread->t_chud = 0;
}



/* Get and set bits on the thread and trigger an AST handler */
void chudxnu_set_thread_ast( thread_t thread );
void
chudxnu_set_thread_ast( thread_t thread )
{
	/* FIXME: only call this on current thread from an interrupt handler for now... */
	if( thread != current_thread() )
		panic( "unsafe AST set" );

	act_set_kperf(thread);
}

/* get and set the thread bits */
extern uint32_t chudxnu_get_thread_bits( thread_t thread );
extern void chudxnu_set_thread_bits( thread_t thread, uint32_t bits );

uint32_t
chudxnu_get_thread_bits( thread_t thread )
{
	return thread->t_chud;
}

void
chudxnu_set_thread_bits( thread_t thread, uint32_t bits )
{
	thread->t_chud = bits;
}

/*
 * Get and set the thread dirty bits, so CHUD can track whether a thread
 * has been dispatched since it last looked.  The caller must hold the
 * thread lock.
 */
boolean_t
chudxnu_thread_get_dirty(thread_t thread)
{
	if( thread->c_switch != thread->chud_c_switch )
		return TRUE;
	else
		return FALSE;
}

void
chudxnu_thread_set_dirty(thread_t thread, boolean_t makedirty)
{
	if( makedirty )
		thread->chud_c_switch = thread->c_switch - 1;
	else
		thread->chud_c_switch = thread->c_switch;
}