/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <ppc/chud/chud_xnu.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/ipc_tt.h>
#include <ppc/proc_reg.h>
#include <ppc/machine_routines.h>
__private_extern__
kern_return_t chudxnu_bind_current_thread(int cpu)
{
    if(cpu >= 0 && cpu < chudxnu_avail_cpu_count()) { /* make sure the cpu # is sane */
        thread_bind(current_thread(), processor_ptr[cpu]);
        thread_block(THREAD_CONTINUE_NULL); /* reschedule so the binding takes effect */
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE;
    }
}

__private_extern__
kern_return_t chudxnu_unbind_current_thread(void)
{
    thread_bind(current_thread(), PROCESSOR_NULL);
    return KERN_SUCCESS;
}
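
/*
 * A minimal sketch of how the two calls above pair up: pin a measurement
 * thread to one processor, do the work, then unpin (the cpu number 0 is an
 * arbitrary example):
 *
 *    if(chudxnu_bind_current_thread(0) == KERN_SUCCESS) {
 *        // ... read performance counters knowing we stay on cpu 0 ...
 *        chudxnu_unbind_current_thread();
 *    }
 */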

static savearea *chudxnu_private_get_regs(void)
{
    return current_act()->mact.pcb; // take the top savearea (user or kernel)
}

static savearea *chudxnu_private_get_user_regs(void)
{
    return find_user_regs(current_act()); // take the top user savearea (skip any kernel saveareas)
}

static savearea_fpu *chudxnu_private_get_fp_regs(void)
{
    fpu_save(current_act()->mact.curctx); // just in case it's live, save it
    return current_act()->mact.curctx->FPUsave; // take the top savearea (user or kernel)
}

static savearea_fpu *chudxnu_private_get_user_fp_regs(void)
{
    return find_user_fpu(current_act()); // take the top user savearea (skip any kernel saveareas)
}

static savearea_vec *chudxnu_private_get_vec_regs(void)
{
    vec_save(current_act()->mact.curctx); // just in case it's live, save it
    return current_act()->mact.curctx->VMXsave; // take the top savearea (user or kernel)
}

static savearea_vec *chudxnu_private_get_user_vec_regs(void)
{
    return find_user_vec(current_act()); // take the top user savearea (skip any kernel saveareas)
}

__private_extern__
kern_return_t chudxnu_copy_savearea_to_threadstate(thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count, struct savearea *sv)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            *count = 0;
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            ts->r0 = (unsigned int)sv->save_r0;
            ts->r1 = (unsigned int)sv->save_r1;
            ts->r2 = (unsigned int)sv->save_r2;
            ts->r3 = (unsigned int)sv->save_r3;
            ts->r4 = (unsigned int)sv->save_r4;
            ts->r5 = (unsigned int)sv->save_r5;
            ts->r6 = (unsigned int)sv->save_r6;
            ts->r7 = (unsigned int)sv->save_r7;
            ts->r8 = (unsigned int)sv->save_r8;
            ts->r9 = (unsigned int)sv->save_r9;
            ts->r10 = (unsigned int)sv->save_r10;
            ts->r11 = (unsigned int)sv->save_r11;
            ts->r12 = (unsigned int)sv->save_r12;
            ts->r13 = (unsigned int)sv->save_r13;
            ts->r14 = (unsigned int)sv->save_r14;
            ts->r15 = (unsigned int)sv->save_r15;
            ts->r16 = (unsigned int)sv->save_r16;
            ts->r17 = (unsigned int)sv->save_r17;
            ts->r18 = (unsigned int)sv->save_r18;
            ts->r19 = (unsigned int)sv->save_r19;
            ts->r20 = (unsigned int)sv->save_r20;
            ts->r21 = (unsigned int)sv->save_r21;
            ts->r22 = (unsigned int)sv->save_r22;
            ts->r23 = (unsigned int)sv->save_r23;
            ts->r24 = (unsigned int)sv->save_r24;
            ts->r25 = (unsigned int)sv->save_r25;
            ts->r26 = (unsigned int)sv->save_r26;
            ts->r27 = (unsigned int)sv->save_r27;
            ts->r28 = (unsigned int)sv->save_r28;
            ts->r29 = (unsigned int)sv->save_r29;
            ts->r30 = (unsigned int)sv->save_r30;
            ts->r31 = (unsigned int)sv->save_r31;
            ts->cr = (unsigned int)sv->save_cr;
            ts->xer = (unsigned int)sv->save_xer;
            ts->lr = (unsigned int)sv->save_lr;
            ts->ctr = (unsigned int)sv->save_ctr;
            ts->srr0 = (unsigned int)sv->save_srr0;
            ts->srr1 = (unsigned int)sv->save_srr1;
            ts->mq = 0;
            ts->vrsave = (unsigned int)sv->save_vrsave;
        } else {
            bzero((void *)ts, sizeof(struct ppc_thread_state));
        }
        *count = PPC_THREAD_STATE_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            *count = 0;
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            xts->r0 = sv->save_r0;
            xts->r1 = sv->save_r1;
            xts->r2 = sv->save_r2;
            xts->r3 = sv->save_r3;
            xts->r4 = sv->save_r4;
            xts->r5 = sv->save_r5;
            xts->r6 = sv->save_r6;
            xts->r7 = sv->save_r7;
            xts->r8 = sv->save_r8;
            xts->r9 = sv->save_r9;
            xts->r10 = sv->save_r10;
            xts->r11 = sv->save_r11;
            xts->r12 = sv->save_r12;
            xts->r13 = sv->save_r13;
            xts->r14 = sv->save_r14;
            xts->r15 = sv->save_r15;
            xts->r16 = sv->save_r16;
            xts->r17 = sv->save_r17;
            xts->r18 = sv->save_r18;
            xts->r19 = sv->save_r19;
            xts->r20 = sv->save_r20;
            xts->r21 = sv->save_r21;
            xts->r22 = sv->save_r22;
            xts->r23 = sv->save_r23;
            xts->r24 = sv->save_r24;
            xts->r25 = sv->save_r25;
            xts->r26 = sv->save_r26;
            xts->r27 = sv->save_r27;
            xts->r28 = sv->save_r28;
            xts->r29 = sv->save_r29;
            xts->r30 = sv->save_r30;
            xts->r31 = sv->save_r31;
            xts->cr = sv->save_cr;
            xts->xer = sv->save_xer;
            xts->lr = sv->save_lr;
            xts->ctr = sv->save_ctr;
            xts->srr0 = sv->save_srr0;
            xts->srr1 = sv->save_srr1;
            xts->vrsave = sv->save_vrsave;
        } else {
            bzero((void *)xts, sizeof(struct ppc_thread_state64));
        }
        *count = PPC_THREAD_STATE64_COUNT; /* Pass back the amount we actually copied */
        return KERN_SUCCESS;
    default:
        *count = 0;
        return KERN_INVALID_ARGUMENT;
    }
}
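
/*
 * A minimal sketch of exporting a savearea as a 32-bit thread state with the
 * routine above (sv is assumed to come from one of the savearea getters):
 *
 *    struct ppc_thread_state ts;
 *    mach_msg_type_number_t count = PPC_THREAD_STATE_COUNT;
 *    if(chudxnu_copy_savearea_to_threadstate(PPC_THREAD_STATE, (thread_state_t)&ts,
 *                                            &count, sv) == KERN_SUCCESS) {
 *        // ts.srr0 is the pc, ts.r1 the stack pointer, ts.lr the link register
 *    }
 */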

__private_extern__
kern_return_t chudxnu_copy_threadstate_to_savearea(struct savearea *sv, thread_flavor_t flavor, thread_state_t tstate, mach_msg_type_number_t *count)
{
    struct ppc_thread_state *ts;
    struct ppc_thread_state64 *xts;

    switch(flavor) {
    case PPC_THREAD_STATE:
        if(*count < PPC_THREAD_STATE_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        ts = (struct ppc_thread_state *) tstate;
        if(sv) {
            sv->save_r0 = (uint64_t)ts->r0;
            sv->save_r1 = (uint64_t)ts->r1;
            sv->save_r2 = (uint64_t)ts->r2;
            sv->save_r3 = (uint64_t)ts->r3;
            sv->save_r4 = (uint64_t)ts->r4;
            sv->save_r5 = (uint64_t)ts->r5;
            sv->save_r6 = (uint64_t)ts->r6;
            sv->save_r7 = (uint64_t)ts->r7;
            sv->save_r8 = (uint64_t)ts->r8;
            sv->save_r9 = (uint64_t)ts->r9;
            sv->save_r10 = (uint64_t)ts->r10;
            sv->save_r11 = (uint64_t)ts->r11;
            sv->save_r12 = (uint64_t)ts->r12;
            sv->save_r13 = (uint64_t)ts->r13;
            sv->save_r14 = (uint64_t)ts->r14;
            sv->save_r15 = (uint64_t)ts->r15;
            sv->save_r16 = (uint64_t)ts->r16;
            sv->save_r17 = (uint64_t)ts->r17;
            sv->save_r18 = (uint64_t)ts->r18;
            sv->save_r19 = (uint64_t)ts->r19;
            sv->save_r20 = (uint64_t)ts->r20;
            sv->save_r21 = (uint64_t)ts->r21;
            sv->save_r22 = (uint64_t)ts->r22;
            sv->save_r23 = (uint64_t)ts->r23;
            sv->save_r24 = (uint64_t)ts->r24;
            sv->save_r25 = (uint64_t)ts->r25;
            sv->save_r26 = (uint64_t)ts->r26;
            sv->save_r27 = (uint64_t)ts->r27;
            sv->save_r28 = (uint64_t)ts->r28;
            sv->save_r29 = (uint64_t)ts->r29;
            sv->save_r30 = (uint64_t)ts->r30;
            sv->save_r31 = (uint64_t)ts->r31;
            sv->save_cr = ts->cr;
            sv->save_xer = (uint64_t)ts->xer;
            sv->save_lr = (uint64_t)ts->lr;
            sv->save_ctr = (uint64_t)ts->ctr;
            sv->save_srr0 = (uint64_t)ts->srr0;
            sv->save_srr1 = (uint64_t)ts->srr1;
            sv->save_vrsave = ts->vrsave;
            return KERN_SUCCESS;
        } else {
            return KERN_FAILURE;
        }
    case PPC_THREAD_STATE64:
        if(*count < PPC_THREAD_STATE64_COUNT) { /* Is the count ok? */
            return KERN_INVALID_ARGUMENT;
        }
        xts = (struct ppc_thread_state64 *) tstate;
        if(sv) {
            sv->save_r0 = xts->r0;
            sv->save_r1 = xts->r1;
            sv->save_r2 = xts->r2;
            sv->save_r3 = xts->r3;
            sv->save_r4 = xts->r4;
            sv->save_r5 = xts->r5;
            sv->save_r6 = xts->r6;
            sv->save_r7 = xts->r7;
            sv->save_r8 = xts->r8;
            sv->save_r9 = xts->r9;
            sv->save_r10 = xts->r10;
            sv->save_r11 = xts->r11;
            sv->save_r12 = xts->r12;
            sv->save_r13 = xts->r13;
            sv->save_r14 = xts->r14;
            sv->save_r15 = xts->r15;
            sv->save_r16 = xts->r16;
            sv->save_r17 = xts->r17;
            sv->save_r18 = xts->r18;
            sv->save_r19 = xts->r19;
            sv->save_r20 = xts->r20;
            sv->save_r21 = xts->r21;
            sv->save_r22 = xts->r22;
            sv->save_r23 = xts->r23;
            sv->save_r24 = xts->r24;
            sv->save_r25 = xts->r25;
            sv->save_r26 = xts->r26;
            sv->save_r27 = xts->r27;
            sv->save_r28 = xts->r28;
            sv->save_r29 = xts->r29;
            sv->save_r30 = xts->r30;
            sv->save_r31 = xts->r31;
            sv->save_cr = xts->cr;
            sv->save_xer = xts->xer;
            sv->save_lr = xts->lr;
            sv->save_ctr = xts->ctr;
            sv->save_srr0 = xts->srr0;
            sv->save_srr1 = xts->srr1;
            sv->save_vrsave = xts->vrsave;
            return KERN_SUCCESS;
        } else {
            return KERN_FAILURE;
        }
    default: /* unknown flavor */
        return KERN_INVALID_ARGUMENT;
    }
}

__private_extern__
kern_return_t chudxnu_thread_get_state(thread_act_t thr_act,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t *count,
                                       boolean_t user_only)
{
    if(thr_act == current_act()) {
        if(flavor == PPC_THREAD_STATE || flavor == PPC_THREAD_STATE64) {
            struct savearea *sv;
            if(user_only) {
                sv = chudxnu_private_get_user_regs();
            } else {
                sv = chudxnu_private_get_regs();
            }
            return chudxnu_copy_savearea_to_threadstate(flavor, tstate, count, sv);
        } else if(flavor == PPC_FLOAT_STATE && user_only) {
#warning chudxnu_thread_get_state() does not yet support supervisor FP
            return machine_thread_get_state(current_act(), flavor, tstate, count);
        } else if(flavor == PPC_VECTOR_STATE && user_only) {
#warning chudxnu_thread_get_state() does not yet support supervisor VMX
            return machine_thread_get_state(current_act(), flavor, tstate, count);
        } else {
            *count = 0;
            return KERN_INVALID_ARGUMENT;
        }
    } else {
        return machine_thread_get_state(thr_act, flavor, tstate, count);
    }
}
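
/*
 * A minimal sketch of reading the current thread's user-mode GPRs with the
 * call above (the local variables are hypothetical):
 *
 *    struct ppc_thread_state ts;
 *    mach_msg_type_number_t count = PPC_THREAD_STATE_COUNT;
 *    kern_return_t kr = chudxnu_thread_get_state(current_act(), PPC_THREAD_STATE,
 *                                                (thread_state_t)&ts, &count, TRUE);
 *    // on KERN_SUCCESS, ts.srr0 holds the user pc and ts.r1 the user stack pointer
 */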

__private_extern__
kern_return_t chudxnu_thread_set_state(thread_act_t thr_act,
                                       thread_flavor_t flavor,
                                       thread_state_t tstate,
                                       mach_msg_type_number_t count,
                                       boolean_t user_only)
{
    if(thr_act == current_act()) {
        if(flavor == PPC_THREAD_STATE || flavor == PPC_THREAD_STATE64) {
            struct savearea *sv;
            if(user_only) {
                sv = chudxnu_private_get_user_regs();
            } else {
                sv = chudxnu_private_get_regs();
            }
            return chudxnu_copy_threadstate_to_savearea(sv, flavor, tstate, &count);
        } else if(flavor == PPC_FLOAT_STATE && user_only) {
#warning chudxnu_thread_set_state() does not yet support supervisor FP
            return machine_thread_set_state(current_act(), flavor, tstate, count);
        } else if(flavor == PPC_VECTOR_STATE && user_only) {
#warning chudxnu_thread_set_state() does not yet support supervisor VMX
            return machine_thread_set_state(current_act(), flavor, tstate, count);
        } else {
            return KERN_INVALID_ARGUMENT;
        }
    } else {
        return machine_thread_set_state(thr_act, flavor, tstate, count);
    }
}

static inline kern_return_t chudxnu_private_task_read_bytes(task_t task, vm_offset_t addr, int size, void *data)
{
    kern_return_t ret;

    if(task == kernel_task) {
        if(size == sizeof(unsigned int)) {
            addr64_t phys_addr;
            ppnum_t pp;

            pp = pmap_find_phys(kernel_pmap, addr); /* Get the page number */
            if(!pp) return KERN_FAILURE; /* Not mapped... */

            phys_addr = ((addr64_t)pp << 12) | (addr & 0x0000000000000FFFULL); /* Shove in the page offset */

            if(phys_addr < mem_actual) { /* Sanity check: is it in memory? */
                *((uint32_t *)data) = ml_phys_read_64(phys_addr);
                return KERN_SUCCESS;
            }
            return KERN_FAILURE; /* Past the end of physical memory */
        } else {
            return KERN_FAILURE; /* Only 4-byte kernel reads are supported */
        }
    } else {
        ret = KERN_SUCCESS; /* Assume everything worked */
        if(copyin((void *)addr, data, size)) ret = KERN_FAILURE; /* Get memory, if non-zero rc, it didn't work */
        return ret;
    }
}
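
/*
 * The physical address above is built from a 4KB page number and the low
 * 12 bits of the virtual address. A worked example (values hypothetical):
 * if pmap_find_phys() returns page 0x1234 for a virtual address ending in
 * 0xA40, then
 *
 *    phys_addr = (0x1234 << 12) | 0xA40 = 0x1234A40
 */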

// chudxnu_current_thread_get_callstack gathers a raw callstack along with any information needed to
// fix it up later (in case we stopped the program as it was saving values into the previous stack
// frame, etc.) after sampling has finished.
//
// For an N-entry callstack:
//
// [0]      current pc
// [1..N-3] stack frames (including the current one)
// [N-2]    current LR (the return address if we're in a leaf function)
// [N-1]    current r0 (in case we've saved the LR in r0)
//

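// As an illustration, a 5-entry callstack from a leaf function might look
// like this (addresses hypothetical):
//
// [0] 0x00002f10   pc inside the leaf function
// [1] 0x00002e80   caller's frame
// [2] 0x00002c04   caller's caller
// [3] 0x00002e7c   LR: the return address, since the leaf pushed no frame
// [4] 0x00002e7c   r0, which may duplicate the LR if it was staged there
//
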
#define FP_LINK_OFFSET       2
#define STACK_ALIGNMENT_MASK 0xF // PPC stack frames are supposed to be 16-byte aligned
#define INST_ALIGNMENT_MASK  0x3 // Instructions are always 4 bytes wide

#ifndef USER_MODE
#define USER_MODE(msr) ((msr) & MASK(MSR_PR) ? TRUE : FALSE)
#endif

#ifndef SUPERVISOR_MODE
#define SUPERVISOR_MODE(msr) ((msr) & MASK(MSR_PR) ? FALSE : TRUE)
#endif

#define VALID_STACK_ADDRESS(addr) ((addr) >= 0x1000 && ((addr) & STACK_ALIGNMENT_MASK) == 0x0 && (supervisor ? ((addr) >= kernStackMin && (addr) <= kernStackMax) : TRUE))

__private_extern__
kern_return_t chudxnu_current_thread_get_callstack(uint32_t *callStack,
                                                   mach_msg_type_number_t *count,
                                                   boolean_t user_only)
{
    kern_return_t kr;
    vm_address_t nextFramePointer = 0;
    vm_address_t currPC, currLR, currR0;
    vm_address_t framePointer;
    vm_address_t prevPC = 0;
    vm_address_t kernStackMin = min_valid_stack_address();
    vm_address_t kernStackMax = max_valid_stack_address();
    unsigned int *buffer = callStack;
    int bufferIndex = 0;
    int bufferMaxIndex = *count;
    boolean_t supervisor;
    struct savearea *sv;

    if(user_only) {
        sv = chudxnu_private_get_user_regs();
    } else {
        sv = chudxnu_private_get_regs();
    }

    if(!sv) {
        *count = 0;
        return KERN_FAILURE;
    }

    supervisor = SUPERVISOR_MODE(sv->save_srr1);

    if(!supervisor && ml_at_interrupt_context()) { // can't do copyin() if on interrupt stack
        *count = 0;
        return KERN_FAILURE;
    }

    bufferMaxIndex = bufferMaxIndex - 2; // allot space for saving the LR and R0 on the stack at the end.
    if(bufferMaxIndex < 2) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    currPC = sv->save_srr0;
    framePointer = sv->save_r1; /* r1 is the stack pointer (no FP on PPC) */
    currLR = sv->save_lr;
    currR0 = sv->save_r0;

    bufferIndex = 0;                // start with a stack of size zero
    buffer[bufferIndex++] = currPC; // save PC in position 0.

    // Now, fill buffer with stack backtraces.
    while(bufferIndex < bufferMaxIndex && VALID_STACK_ADDRESS(framePointer)) {
        vm_address_t pc = 0;
        // Above the stack pointer, the following values are saved:
        //   saved LR
        //   saved CR
        //   saved SP
        //-> SP
        // Here, we'll get the lr from the stack.
        volatile vm_address_t fp_link = (vm_address_t)(((unsigned *)framePointer) + FP_LINK_OFFSET);

        // Note that we read the pc even for the first stack frame (which, in theory,
        // is always empty because the callee fills it in just before it lowers the
        // stack). However, if we catch the program between filling in the return
        // address and lowering the stack, we still want a valid backtrace.
        // FixupStack correctly disregards this value if necessary.

        if(supervisor) {
            kr = chudxnu_private_task_read_bytes(kernel_task, fp_link, sizeof(unsigned int), &pc);
        } else {
            kr = chudxnu_private_task_read_bytes(current_task(), fp_link, sizeof(unsigned int), &pc);
        }
        if(kr != KERN_SUCCESS) {
            // IOLog("task_read_callstack: unable to read framePointer: %08x\n", framePointer);
            pc = 0;
            break;
        }

        // Retrieve the contents of the frame pointer and advance to the next stack frame if it's valid.

        if(supervisor) {
            kr = chudxnu_private_task_read_bytes(kernel_task, framePointer, sizeof(unsigned int), &nextFramePointer);
        } else {
            kr = chudxnu_private_task_read_bytes(current_task(), framePointer, sizeof(unsigned int), &nextFramePointer);
        }
        if(kr != KERN_SUCCESS) {
            nextFramePointer = 0;
        }

        if(nextFramePointer) {
            buffer[bufferIndex++] = pc;
            prevPC = pc;
        }

        // The stack grows down, so the back chain should point to successively
        // higher addresses; a link that goes backward (or is 0) ends the walk.
        if(nextFramePointer < framePointer) {
            break;
        } else {
            framePointer = nextFramePointer;
        }
    }

    if(bufferIndex >= bufferMaxIndex) {
        *count = 0;
        return KERN_RESOURCE_SHORTAGE;
    }

    // Save link register and R0 at bottom of stack. This means that we won't worry
    // about these values messing up stack compression. These end up being used
    // by FixupStack.
    buffer[bufferIndex++] = currLR;
    buffer[bufferIndex++] = currR0;

    *count = bufferIndex;
    return KERN_SUCCESS;
}
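
/*
 * A minimal sketch of gathering a user-mode callstack for the current thread
 * (the buffer size of 32 entries is an arbitrary choice):
 *
 *    uint32_t frames[32];
 *    mach_msg_type_number_t count = 32;
 *    if(chudxnu_current_thread_get_callstack(frames, &count, TRUE) == KERN_SUCCESS) {
 *        // frames[0] is the pc, frames[count-2] the LR, frames[count-1] r0
 *    }
 */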

__private_extern__
int chudxnu_task_threads(task_t task,
                         thread_act_array_t *thr_act_list,
                         mach_msg_type_number_t *count)
{
    kern_return_t kr;

    kr = task_threads(task, thr_act_list, count);
    if(kr == KERN_SUCCESS) {
        thread_act_t thr_act;
        int i;
        for(i = 0; i < (*count); i++) {
            thr_act = convert_port_to_act(((ipc_port_t *)(*thr_act_list))[i]);
            /* undo the mig conversion task_threads does */
            (*thr_act_list)[i] = thr_act;
        }
    }
    return kr;
}
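
/*
 * A minimal sketch of walking the threads of the current task with the call
 * above (error handling and the per-thread work are elided; the caller is
 * responsible for releasing the thread references and the array afterward):
 *
 *    thread_act_array_t act_list;
 *    mach_msg_type_number_t act_count;
 *    if(chudxnu_task_threads(current_task(), &act_list, &act_count) == KERN_SUCCESS) {
 *        mach_msg_type_number_t i;
 *        for(i = 0; i < act_count; i++) {
 *            // ... inspect act_list[i] ...
 *        }
 *    }
 */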

__private_extern__
thread_act_t chudxnu_current_act(void)
{
    return current_act();
}

__private_extern__
task_t chudxnu_current_task(void)
{
    return current_task();
}

__private_extern__
kern_return_t chudxnu_thread_info(thread_act_t thr_act,
                                  thread_flavor_t flavor,
                                  thread_info_t thread_info_out,
                                  mach_msg_type_number_t *thread_info_count)
{
    return thread_info(thr_act, flavor, thread_info_out, thread_info_count);
}