/*
 * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <chud/chud_xnu.h>
#include <chud/chud_xnu_private.h>
#include <chud/chud_thread.h>

#include <machine/machine_routines.h>

#include <libkern/OSAtomic.h>

// include the correct file to find real_ncpus
#if defined(__i386__) || defined(__x86_64__)
# include <i386/mp.h>
#elif defined(__ppc__) || defined(__ppc64__)
# include <ppc/cpu_internal.h>
#else
// fall back on declaring it extern. The linker will sort us out.
extern unsigned int real_ncpus;
#endif

// Mask for supported options
#define T_CHUD_BIND_OPT_MASK (-1UL)

#if 0
#pragma mark **** thread binding ****
#endif

/*
 * This method will bind a given thread to the requested CPU starting at the
 * next time quantum. If the thread is the current thread, this method will
 * force a thread_block(). The result is that if you call this method on the
 * current thread, you will be on the requested CPU when this method returns.
 */
__private_extern__ kern_return_t
chudxnu_bind_thread(thread_t thread, int cpu, __unused int options)
{
	processor_t proc = NULL;

	if(cpu < 0 || (unsigned int)cpu >= real_ncpus) // sanity check
		return KERN_FAILURE;

	// temporary restriction until after phase 2 of the scheduler
	if(thread != current_thread())
		return KERN_FAILURE;

	proc = cpu_to_processor(cpu);

	/*
	 * Potentially racy, but mainly to prevent binding to a
	 * shutdown processor.
	 */
	if(proc && !(proc->state == PROCESSOR_OFF_LINE) &&
			!(proc->state == PROCESSOR_SHUTDOWN)) {

		thread_bind(proc);

		/*
		 * If we're trying to bind the current thread, and
		 * we're not on the target cpu, and not at interrupt
		 * context, block the current thread to force a
		 * reschedule on the target CPU.
		 */
		if(thread == current_thread() &&
				!ml_at_interrupt_context() && cpu_number() != cpu) {
			(void)thread_block(THREAD_CONTINUE_NULL);
		}
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}

__private_extern__ kern_return_t
chudxnu_unbind_thread(thread_t thread, __unused int options)
{
	if(thread == current_thread())
		thread_bind(PROCESSOR_NULL);
	return KERN_SUCCESS;
}
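
/*
 * Usage sketch (illustrative only, not part of the CHUD API): migrate the
 * current thread to a chosen CPU, take a CPU-local sample, then drop the
 * binding. example_read_percpu_counter() is a hypothetical helper.
 */
#if 0
static uint64_t
example_sample_on_cpu(int cpu)
{
	uint64_t sample = 0;

	// Only the current thread may be bound (see the restriction above).
	if(chudxnu_bind_thread(current_thread(), cpu, 0) == KERN_SUCCESS) {
		// We are now executing on the requested CPU.
		sample = example_read_percpu_counter();	// hypothetical
		(void)chudxnu_unbind_thread(current_thread(), 0);
	}
	return sample;
}
#endif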

__private_extern__ boolean_t
chudxnu_thread_get_idle(thread_t thread) {
	/*
	 * Instantaneous snapshot of the idle state of
	 * a given thread.
	 *
	 * Should be called only on an interrupted or
	 * suspended thread to avoid a race.
	 */
	return ((thread->state & TH_IDLE) == TH_IDLE);
}
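
/*
 * Usage sketch (illustrative only): because the idle bit is an instantaneous
 * snapshot, it should be read where the target thread cannot be rescheduled
 * underneath us -- e.g. for the thread interrupted on the current CPU. The
 * callback shape below is hypothetical.
 */
#if 0
static void
example_interrupt_hook(thread_t interrupted_thread)
{
	// Safe: the interrupted thread is not running anywhere else right now.
	if(chudxnu_thread_get_idle(interrupted_thread)) {
		// attribute this sample to idle time
	}
}
#endif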

#if 0
#pragma mark **** task and thread info ****
#endif

__private_extern__ boolean_t
chudxnu_is_64bit_task(task_t task)
{
	return (task_has_64BitAddr(task));
}

#define THING_TASK	0
#define THING_THREAD	1

// an exact copy of processor_set_things() except no MIG conversion at the end!
static kern_return_t
chudxnu_private_processor_set_things(
	processor_set_t		pset,
	mach_port_t		**thing_list,
	mach_msg_type_number_t	*count,
	int			type)
{
	unsigned int actual;	/* this many things */
	unsigned int maxthings;
	unsigned int i;

	vm_size_t size, size_needed;
	void *addr;

	if (pset == PROCESSOR_SET_NULL || pset != &pset0)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		lck_mtx_lock(&tasks_threads_lock);

		if (type == THING_TASK)
			maxthings = tasks_count;
		else
			maxthings = threads_count;

		/* do we have the memory we need? */

		size_needed = maxthings * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		lck_mtx_unlock(&tasks_threads_lock);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the processor_set is locked & active */

	actual = 0;
	switch (type) {

	case THING_TASK:
	{
		task_t task, *task_list = (task_t *)addr;

		for (task = (task_t)queue_first(&tasks);
				!queue_end(&tasks, (queue_entry_t)task);
				task = (task_t)queue_next(&task->tasks)) {
			task_reference_internal(task);
			task_list[actual++] = task;
		}

		break;
	}

	case THING_THREAD:
	{
		thread_t thread, *thread_list = (thread_t *)addr;

		for (i = 0, thread = (thread_t)queue_first(&threads);
				!queue_end(&threads, (queue_entry_t)thread);
				thread = (thread_t)queue_next(&thread->threads)) {
			thread_reference_internal(thread);
			thread_list[actual++] = thread;
		}

		break;
	}
	}

	lck_mtx_unlock(&tasks_threads_lock);

	if (actual < maxthings)
		size_needed = actual * sizeof (mach_port_t);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				switch (type) {

				case THING_TASK:
				{
					task_t *task_list = (task_t *)addr;

					for (i = 0; i < actual; i++)
						task_deallocate(task_list[i]);
					break;
				}

				case THING_THREAD:
				{
					thread_t *thread_list = (thread_t *)addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(thread_list[i]);
					break;
				}
				}

				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy((void *) addr, (void *) newaddr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *)addr;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

// an exact copy of task_threads() except no MIG conversion at the end!
static kern_return_t
chudxnu_private_task_threads(
	task_t			task,
	thread_act_array_t	*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t		*thread_list;
	thread_t		thread;
	vm_size_t		size, size_needed;
	void			*addr;
	unsigned int		i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	for (thread = (thread_t)queue_first(&task->threads); i < actual;
			++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;
	}

	return (KERN_SUCCESS);
}

__private_extern__ kern_return_t
chudxnu_all_tasks(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)task_list, count, THING_TASK);
}

__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
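
/*
 * Usage sketch (illustrative only): every task returned by chudxnu_all_tasks()
 * carries a reference, so the list must be handed back to
 * chudxnu_free_task_list(), which drops the references and the backing
 * allocation in one step.
 */
#if 0
static void
example_walk_tasks(void)
{
	task_array_t task_list = NULL;
	mach_msg_type_number_t count = 0;

	if(chudxnu_all_tasks(&task_list, &count) == KERN_SUCCESS) {
		mach_msg_type_number_t i;
		for(i = 0; i < count; i++) {
			// inspect task_list[i] here
		}
		(void)chudxnu_free_task_list(&task_list, &count);
	}
}
#endif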

__private_extern__ kern_return_t
chudxnu_all_threads(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_processor_set_things(&pset0, (mach_port_t **)thread_list, count, THING_THREAD);
}

__private_extern__ kern_return_t
chudxnu_task_threads(
	task_t			task,
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	return chudxnu_private_task_threads(task, thread_list, count);
}

__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t		*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
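
/*
 * Usage sketch (illustrative only): the same reference-counting contract holds
 * for thread lists -- chudxnu_task_threads() and chudxnu_all_threads() take a
 * reference on every thread returned, and chudxnu_free_thread_list() releases
 * them along with the list itself.
 */
#if 0
static void
example_walk_task_threads(task_t task)
{
	thread_array_t thread_list = NULL;
	mach_msg_type_number_t count = 0;

	if(chudxnu_task_threads(task, &thread_list, &count) == KERN_SUCCESS) {
		mach_msg_type_number_t i;
		for(i = 0; i < count; i++) {
			// inspect thread_list[i] here
		}
		(void)chudxnu_free_thread_list(&thread_list, &count);
	}
}
#endif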

__private_extern__ task_t
chudxnu_current_task(void)
{
	return current_task();
}

__private_extern__ thread_t
chudxnu_current_thread(void)
{
	return current_thread();
}

__private_extern__ task_t
chudxnu_task_for_thread(thread_t thread)
{
	return get_threadtask(thread);
}

__private_extern__ kern_return_t
chudxnu_thread_info(
	thread_t		thread,
	thread_flavor_t		flavor,
	thread_info_t		thread_info_out,
	mach_msg_type_number_t	*thread_info_count)
{
	return thread_info(thread, flavor, thread_info_out, thread_info_count);
}
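
/*
 * Usage sketch (illustrative only): the wrapper forwards straight to
 * thread_info(), so the usual Mach flavor/count protocol applies.
 */
#if 0
static void
example_thread_basic_info(thread_t thread)
{
	thread_basic_info_data_t info;
	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;

	if(chudxnu_thread_info(thread, THREAD_BASIC_INFO,
			(thread_info_t)&info, &count) == KERN_SUCCESS) {
		// info.cpu_usage, info.run_state, etc. are now valid
	}
}
#endif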

/* thread marking stuff */

__private_extern__ boolean_t
chudxnu_thread_get_marked(thread_t thread)
{
	if(thread)
		return ((thread->t_chud & T_CHUD_MARKED) != 0);
	return FALSE;
}

__private_extern__ boolean_t
chudxnu_thread_set_marked(thread_t thread, boolean_t new_value)
{
	boolean_t old_val;

	if(thread) {
		if(new_value) {
			// set the marked bit
			old_val = OSBitOrAtomic(T_CHUD_MARKED, &(thread->t_chud));
		} else {
			// clear the marked bit
			old_val = OSBitAndAtomic(~T_CHUD_MARKED, &(thread->t_chud));
		}
		return (old_val & T_CHUD_MARKED) == T_CHUD_MARKED;
	}
	return FALSE;
}
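
/*
 * Usage sketch (illustrative only): because the setter returns the previous
 * value of the mark, it doubles as an atomic test-and-set, which lets
 * concurrent samplers claim a thread exactly once.
 */
#if 0
static boolean_t
example_claim_thread(thread_t thread)
{
	// TRUE here means another sampler got to the thread first.
	boolean_t was_marked = chudxnu_thread_set_marked(thread, TRUE);
	return !was_marked;	// we claimed it iff it was previously unmarked
}
#endif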