/*
 * Copyright (c) 2000-2007 Apple, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h>	/* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp;	/* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);

/*
 *
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 *
 */
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 *
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 *
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
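
/*
 * Illustrative sketch (not part of the original file): one way a BSD-side
 * caller could use get_signalact() to pick an eligible thread for signal
 * delivery and post the BSD AST in the same pass.  The caller name and
 * its (minimal) error handling are hypothetical.
 */
#if 0	/* example only -- compiled out */
static void
example_wake_task_for_signal(task_t task)
{
	thread_t target;

	/* Find an active, non-aborted thread; a non-zero setast asks
	 * get_signalact() to call act_set_astbsd() on it for us. */
	if (get_signalact(task, &target, 1) != KERN_SUCCESS)
		return;		/* task inactive or no eligible thread */

	/* target is returned unlocked; the AST has already been set. */
}
#endif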

kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
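
/*
 * Illustrative sketch (not part of the original file): per the comment above
 * get_task_map(), a caller outside the task's own context should take a
 * reference with get_task_map_reference() rather than using the raw map
 * pointer.  The vm_map_deallocate() call is assumed here to be the matching
 * drop for the reference taken via vm_map_reference_swap().
 */
#if 0	/* example only -- compiled out */
static void
example_inspect_task_map(task_t t)
{
	vm_map_t map = get_task_map_reference(t);

	if (map == VM_MAP_NULL)
		return;			/* task gone or inactive */

	/* ... safe to examine "map" here, even if the task exits ... */

	vm_map_deallocate(map);		/* drop the reference we took */
}
#endif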
239 | ||
1c79356b A |
240 | /* |
241 | * | |
242 | */ | |
243 | ipc_space_t get_task_ipcspace(task_t t) | |
244 | { | |
245 | return(t->itk_space); | |
246 | } | |
247 | ||
248 | int get_task_numacts(task_t t) | |
249 | { | |
55e303ae A |
250 | return(t->thread_count); |
251 | } | |
252 | ||
253 | /* does this machine need 64bit register set for signal handler */ | |
254 | int is_64signalregset(void) | |
255 | { | |
256 | task_t t = current_task(); | |
257 | if(t->taskFeatures[0] & tf64BitData) | |
258 | return(1); | |
259 | else | |
260 | return(0); | |
1c79356b A |
261 | } |

/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);

	inval_copy_windows(thread);

	return old_map;
}

/*
 *
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 *
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 *
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 *
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 *
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 *
 */
int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

/*
 *
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 *
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_mode & TH_MODE_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_mode & TH_MODE_ABORTSAFELY)
			th->sched_mode &= ~TH_MODE_ISABORTED;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
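
/*
 * Illustrative sketch (not part of the original file): because a *safe*
 * abort is cleared by the poll itself, a long-running loop that calls
 * current_thread_aborted() sees it at most once and should act on it
 * right away.  The loop body and names below are hypothetical.
 */
#if 0	/* example only -- compiled out */
static int
example_copy_loop(void)
{
	for (;;) {
		/* ... do one bounded chunk of work ... */

		if (current_thread_aborted())
			return 1;	/* abort observed (and, if safe, consumed) */
	}
}
#endif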

/*
 *
 */
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
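
/*
 * Illustrative sketch (not part of the original file):
 * task_act_iterate_wth_args() invokes the callback for every thread of the
 * task while the task lock is held, so the callback should be short and
 * must not try to re-take the task lock.  The counting callback below is a
 * hypothetical example.
 */
#if 0	/* example only -- compiled out */
static void
example_count_thread(__unused thread_t thread, void *arg)
{
	int *countp = (int *)arg;

	(*countp)++;	/* runs with the task lock held -- keep it short */
}

static int
example_count_threads(task_t task)
{
	int count = 0;

	task_act_iterate_wth_args(task, example_count_thread, &count);
	return count;
}
#endif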

void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	int cswitch = 0, numrunning = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
	                      POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);
		tinfo.threads_system += tval;
		tinfo.total_system += tval;
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__)
		if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
		{
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
#if 0
			ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
			ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));
#endif
			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		thaddr = thact->machine.cthread_self;
#elif defined (__i386__)
		thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}
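
/*
 * Illustrative sketch (not part of the original file): fill_taskthreadlist()
 * expects "buffer" to hold at least "thcount" uint64_t thread handles and
 * returns the number of bytes it wrote.  The kalloc()/kfree() pairing and
 * sizing below are an assumed calling pattern, not code from this file, and
 * the thread count can change between the two calls.
 */
#if 0	/* example only -- compiled out */
static void
example_list_thread_handles(task_t task)
{
	int count = get_numthreads(task);
	uint64_t *buf = (uint64_t *)kalloc(count * sizeof(uint64_t));
	int nbytes;

	if (buf == NULL)
		return;

	nbytes = fill_taskthreadlist(task, buf, count);

	/* the first nbytes / sizeof(uint64_t) entries of buf are valid here */

	kfree(buf, count * sizeof(uint64_t));
}
#endif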

void
syscall_exit_funnelcheck(void)
{
	thread_t thread;

	thread = current_thread();

	if (thread->funnel_lock)
		panic("syscall exit with funnel held\n");
}