/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);

/*
 * Return the task's BSD-layer data (the BSD proc pointer).
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 * Return the BSD-layer data of the task that owns this thread,
 * or NULL if the thread has no task.
 */
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 * Set the task's BSD-layer data.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the thread's BSD-layer data (the uthread).
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}
94 | ||
95 | /* | |
96 | * XXX | |
97 | */ | |
98 | int get_thread_lock_count(thread_t th); /* forced forward */ | |
99 | int get_thread_lock_count(thread_t th) | |
100 | { | |
101 | return(th->mutex_count); | |
102 | } | |
103 | ||
104 | /* | |
105 | * XXX: wait for BSD to fix signal code | |
106 | * Until then, we cannot block here. We know the task | |
107 | * can't go away, so we make sure it is still active after | |
108 | * retrieving the first thread for extra safety. | |
109 | */ | |
110 | thread_t get_firstthread(task_t task) | |
111 | { | |
112 | thread_t thread = (thread_t)queue_first(&task->threads); | |
113 | ||
114 | if (queue_end(&task->threads, (queue_entry_t)thread)) | |
115 | thread = THREAD_NULL; | |
116 | ||
117 | if (!task->active) | |
118 | return (THREAD_NULL); | |
119 | ||
120 | return (thread); | |
121 | } | |
122 | ||
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}

kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}
217 | ||
218 | ledger_t get_task_ledger(task_t t) | |
219 | { | |
220 | return(t->ledger); | |
221 | } | |
222 | ||
/*
 * This is only safe to call from a thread executing in
 * the task's context, or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
251 | ||
252 | /* | |
253 | * | |
254 | */ | |
255 | ipc_space_t get_task_ipcspace(task_t t) | |
256 | { | |
257 | return(t->itk_space); | |
258 | } | |
259 | ||
260 | int get_task_numactivethreads(task_t task) | |
261 | { | |
262 | thread_t inc; | |
263 | int num_active_thr=0; | |
264 | task_lock(task); | |
265 | ||
266 | for (inc = (thread_t)queue_first(&task->threads); | |
267 | !queue_end(&task->threads, (queue_entry_t)inc); inc = (thread_t)queue_next(&inc->task_threads)) | |
268 | { | |
269 | if(inc->active) | |
270 | num_active_thr++; | |
271 | } | |
272 | task_unlock(task); | |
273 | return num_active_thr; | |
274 | } | |
275 | ||
276 | int get_task_numacts(task_t t) | |
277 | { | |
278 | return(t->thread_count); | |
279 | } | |
280 | ||
281 | /* does this machine need 64bit register set for signal handler */ | |
282 | int is_64signalregset(void) | |
283 | { | |
284 | task_t t = current_task(); | |
285 | if(t->taskFeatures[0] & tf64BitData) | |
286 | return(1); | |
287 | else | |
288 | return(0); | |
289 | } | |
290 | ||
291 | /* | |
292 | * Swap in a new map for the task/thread pair; the old map reference is | |
293 | * returned. | |
294 | */ | |
295 | vm_map_t | |
296 | swap_task_map(task_t task, thread_t thread, vm_map_t map, boolean_t doswitch) | |
297 | { | |
298 | vm_map_t old_map; | |
299 | ||
300 | if (task != thread->task) | |
301 | panic("swap_task_map"); | |
302 | ||
303 | task_lock(task); | |
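	/*
	 * Preemption is disabled so that the thread cannot migrate to
	 * another CPU between publishing the new map and switching the
	 * hardware pmap; the two updates appear atomic to this CPU.
	 */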
	mp_disable_preemption();
	old_map = task->map;
	thread->map = task->map = map;
	if (doswitch) {
		pmap_switch(map->pmap);
	}
	mp_enable_preemption();
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}
319 | ||
320 | /* | |
321 | * | |
322 | */ | |
323 | pmap_t get_task_pmap(task_t t) | |
324 | { | |
325 | return(t->map->pmap); | |
326 | } | |
327 | ||
328 | /* | |
329 | * | |
330 | */ | |
331 | uint64_t get_task_resident_size(task_t task) | |
332 | { | |
333 | vm_map_t map; | |
334 | ||
335 | map = (task == kernel_task) ? kernel_map: task->map; | |
336 | return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64); | |
337 | } | |
338 | ||
339 | /* | |
340 | * | |
341 | */ | |
342 | pmap_t get_map_pmap(vm_map_t map) | |
343 | { | |
344 | return(map->pmap); | |
345 | } | |
346 | /* | |
347 | * | |
348 | */ | |
349 | task_t get_threadtask(thread_t th) | |
350 | { | |
351 | return(th->task); | |
352 | } | |
353 | ||
354 | /* | |
355 | * | |
356 | */ | |
357 | vm_map_offset_t | |
358 | get_map_min( | |
359 | vm_map_t map) | |
360 | { | |
361 | return(vm_map_min(map)); | |
362 | } | |
363 | ||
364 | /* | |
365 | * | |
366 | */ | |
367 | vm_map_offset_t | |
368 | get_map_max( | |
369 | vm_map_t map) | |
370 | { | |
371 | return(vm_map_max(map)); | |
372 | } | |
373 | vm_map_size_t | |
374 | get_vmmap_size( | |
375 | vm_map_t map) | |
376 | { | |
377 | return(map->size); | |
378 | } | |
379 | ||
380 | int | |
381 | get_vmsubmap_entries( | |
382 | vm_map_t map, | |
383 | vm_object_offset_t start, | |
384 | vm_object_offset_t end) | |
385 | { | |
386 | int total_entries = 0; | |
387 | vm_map_entry_t entry; | |
388 | ||
389 | if (not_in_kdp) | |
390 | vm_map_lock(map); | |
391 | entry = vm_map_first_entry(map); | |
392 | while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) { | |
393 | entry = entry->vme_next; | |
394 | } | |
395 | ||
396 | while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
397 | if(entry->is_sub_map) { | |
398 | total_entries += | |
399 | get_vmsubmap_entries(entry->object.sub_map, | |
400 | entry->offset, | |
401 | entry->offset + | |
402 | (entry->vme_end - entry->vme_start)); | |
403 | } else { | |
404 | total_entries += 1; | |
405 | } | |
406 | entry = entry->vme_next; | |
407 | } | |
408 | if (not_in_kdp) | |
409 | vm_map_unlock(map); | |
410 | return(total_entries); | |
411 | } | |
412 | ||
int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 * Return the number of outstanding user stops (suspensions) on the task.
 */
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}
453 | ||
454 | /* | |
455 | * | |
456 | */ | |
457 | int | |
458 | get_thread_userstop( | |
459 | thread_t th) | |
460 | { | |
461 | return(th->user_stop_count); | |
462 | } | |
463 | ||
464 | /* | |
465 | * | |
466 | */ | |
467 | boolean_t | |
468 | get_task_pidsuspended( | |
469 | task_t task) | |
470 | { | |
471 | return (task->pidsuspended); | |
472 | } | |
473 | ||
474 | /* | |
475 | * | |
476 | */ | |
477 | boolean_t | |
478 | get_task_frozen( | |
479 | task_t task) | |
480 | { | |
481 | return (task->frozen); | |
482 | } | |
483 | ||
484 | /* | |
485 | * | |
486 | */ | |
487 | boolean_t | |
488 | thread_should_abort( | |
489 | thread_t th) | |
490 | { | |
491 | return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT); | |
492 | } | |
493 | ||
494 | /* | |
495 | * This routine is like thread_should_abort() above. It checks to | |
496 | * see if the current thread is aborted. But unlike above, it also | |
497 | * checks to see if thread is safely aborted. If so, it returns | |
498 | * that fact, and clears the condition (safe aborts only should | |
499 | * have a single effect, and a poll of the abort status | |
500 | * qualifies. | |
501 | */ | |
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
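		/*
		 * Re-check under the thread lock: the safe-abort flag may
		 * have been cleared between the unlocked test above and
		 * acquiring the lock.
		 */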
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
522 | ||
523 | /* | |
524 | * | |
525 | */ | |
526 | void | |
527 | task_act_iterate_wth_args( | |
528 | task_t task, | |
529 | void (*func_callback)(thread_t, void *), | |
530 | void *func_arg) | |
531 | { | |
532 | thread_t inc; | |
533 | ||
534 | task_lock(task); | |
535 | ||
536 | for (inc = (thread_t)queue_first(&task->threads); | |
537 | !queue_end(&task->threads, (queue_entry_t)inc); ) { | |
538 | (void) (*func_callback)(inc, func_arg); | |
539 | inc = (thread_t)queue_next(&inc->task_threads); | |
540 | } | |
541 | ||
542 | task_unlock(task); | |
543 | } | |
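
/*
 * A minimal usage sketch (the callback below is hypothetical, not
 * part of this file):
 *
 *	static void
 *	thread_counter(__unused thread_t thread, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int count = 0;
 *	task_act_iterate_wth_args(task, thread_counter, &count);
 */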
544 | ||
545 | ||
546 | void | |
547 | astbsd_on(void) | |
548 | { | |
549 | boolean_t reenable; | |
550 | ||
551 | reenable = ml_set_interrupts_enabled(FALSE); | |
552 | ast_on_fast(AST_BSD); | |
553 | (void)ml_set_interrupts_enabled(reenable); | |
554 | } | |
555 | ||
556 | ||
557 | #include <sys/bsdtask_info.h> | |
558 | ||
void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal *ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	map = (task == kernel_task) ? kernel_map : task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task) ?
		POLICY_TIMESHARE : POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		x = splsched();
		thread_lock(thread);

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);

		if (thread->precise_user_kernel_time) {
			tinfo.threads_system += tval;
			tinfo.total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			tinfo.threads_user += tval;
			tinfo.total_user += tval;
		}

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;

		thread_unlock(thread);
		splx(x);
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, int thuniqueid, struct proc_threadinfo_internal *ptinfo, void *vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;
	uint64_t addr = 0;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		addr = (thuniqueid == 0) ? thact->machine.cthread_self : thact->thread_id;
		if (addr == thaddr) {
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));

			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void *buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t *uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		thaddr = thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}

void
syscall_exit_funnelcheck(void)
{
	thread_t thread;

	thread = current_thread();

	if (thread->funnel_lock)
		panic("syscall exit with funnel held\n");
}