/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
#elif defined(__ppc__) || defined(__ppc64__)
# include <ppc/cpu_internal.h>
#else
// fall back on declaring it extern. The linker will sort us out.
extern unsigned int real_ncpus;
#endif

// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)

#if 0
#pragma mark **** thread binding ****
#endif

/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum. If the thread is the current thread, this method will
 * force a thread_block(). The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent binding to a shutdown
	 * processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() &&
			!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if(thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}
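
/*
 * Illustrative usage sketch (not part of the original file): pin the
 * calling thread to a CPU, do some per-CPU work, then unbind. The
 * helper name and the choice of CPU 0 are hypothetical.
 */
#if 0
static void
example_run_on_cpu0(void)
{
	// On success we are running on CPU 0 when the call returns,
	// because chudxnu_bind_thread() blocks the current thread to
	// force a reschedule onto the target processor.
	if (chudxnu_bind_thread(current_thread(), 0, 0) == KERN_SUCCESS) {
		// ... per-CPU work here ...

		// Allow the scheduler to migrate this thread again.
		(void)chudxnu_unbind_thread(current_thread(), 0);
	}
}
#endif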

__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread) {
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}

#if 0
#pragma mark **** task and thread info ****
#endif

__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}

#define THING_TASK	0
#define THING_THREAD	1

// an exact copy of processor_set_things() except no mig conversion at the end!
static kern_return_t
chudxnu_private_processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

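	/*
	 * Speculative sizing loop (comment added for clarity): take the
	 * global tasks/threads lock, compute how much buffer space the
	 * current population needs, and if the buffer on hand is too
	 * small, drop the lock, grow the buffer, and retry.
	 */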
	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
				task = (task_t)queue_next(&task->tasks)) {
			task_reference_internal(task);
			task_list[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t thread, *thread_list = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
				thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t *task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t *thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

// an exact copy of task_threads() except no mig conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t actual;
	thread_t *thread_list;
	thread_t thread;
	vm_size_t size, size_needed;
	void *addr;
	unsigned int i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
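
/*
 * Illustrative usage sketch (not part of the original file): snapshot
 * every task in the system, inspect each one, then release the list.
 * The helper name and the 64-bit tally are hypothetical.
 */
#if 0
static void
example_count_64bit_tasks(void)
{
	task_array_t task_list = NULL;
	mach_msg_type_number_t count = 0;
	unsigned int n64 = 0;

	if (chudxnu_all_tasks(&task_list, &count) == KERN_SUCCESS) {
		mach_msg_type_number_t i;
		for (i = 0; i < count; i++) {
			if (chudxnu_is_64bit_task(task_list[i]))
				n64++;
		}
		// Drops the reference taken on each task and frees the array.
		(void)chudxnu_free_task_list(&task_list, &count);
	}
}
#endif
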
__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t			task,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
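
/*
 * Illustrative usage sketch (not part of the original file): walk the
 * threads of one task, check which are idle, then release the list.
 * The helper name and idle tally are hypothetical.
 */
#if 0
static unsigned int
example_count_idle_threads(task_t task)
{
	thread_array_t thread_list = NULL;
	mach_msg_type_number_t count = 0;
	unsigned int idle = 0;

	if (chudxnu_task_threads(task, &thread_list, &count) == KERN_SUCCESS) {
		mach_msg_type_number_t i;
		for (i = 0; i < count; i++) {
			// Only meaningful on interrupted or suspended threads;
			// see the caveat on chudxnu_thread_get_idle() above.
			if (chudxnu_thread_get_idle(thread_list[i]))
				idle++;
		}
		// Drops the reference taken on each thread and frees the array.
		(void)chudxnu_free_thread_list(&thread_list, &count);
	}
	return idle;
}
#endif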

__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}

__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}

/* thread marking stuff */

__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
	if(thread)
		return ((thread->t_chud & T_CHUD_MARKED) != 0);
	return FALSE;
}

__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
	boolean_t old_val;

	if(thread) {
		if(new_value) {
			// set the marked bit
			old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
		} else {
			// clear the marked bit
			old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
		}
		return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
	}
	return FALSE;
}
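/*
 * Illustrative usage sketch (not part of the original file): because
 * chudxnu_thread_set_marked() returns the previous state of the mark
 * bit atomically, a caller can use it as a test-and-set to claim a
 * thread exactly once. The helper name is hypothetical.
 */
#if 0
static void
example_mark_once(thread_t thread)
{
	// FALSE means the bit was clear before, i.e. we were first to mark it.
	if (!chudxnu_thread_set_marked(thread, TRUE)) {
		// ... first-time-seen processing here ...
	}
}
#endif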