/* osfmk/kern/bsd_kern.c (xnu-792.25.20) */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);

/*
 * Return the BSD (proc) pointer attached to a Mach task.
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 * Attach a BSD (proc) pointer to a Mach task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the BSD (uthread) pointer attached to a Mach thread.
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

/*
 * Return (via result_out) a thread in the task that is eligible to
 * take a signal: still active and not unconditionally aborted.  If
 * setast is non-zero, post the BSD AST on that thread.
 */
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
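
/*
 * Illustrative sketch only: how the BSD signal path might pick a thread
 * to interrupt.  deliver_signal_to_task() and the signal bookkeeping it
 * would do are hypothetical; only the get_signalact() call is real.
 */
#if 0
static kern_return_t
deliver_signal_to_task(task_t task)
{
	thread_t thread;

	/* find a signalable thread and post the BSD AST on it */
	if (get_signalact(task, &thread, 1) != KERN_SUCCESS)
		return (KERN_FAILURE);

	/* ... record the pending signal for `thread' in BSD state ... */
	return (KERN_SUCCESS);
}
#endif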

/*
 * Check whether the given thread in the task can currently take a
 * signal (active and not unconditionally aborted).  If setast is
 * non-zero and it can, post the BSD AST on it.
 */
kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context, or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
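
/*
 * Illustrative sketch only (not part of this interface): a caller that
 * is not running in the task's context takes a map reference with
 * get_task_map_reference() and drops it with vm_map_deallocate() when
 * done.  The helper name examine_task_map() is hypothetical.
 */
#if 0
static void
examine_task_map(task_t task)
{
	vm_map_t map = get_task_map_reference(task);

	if (map == VM_MAP_NULL)
		return;			/* task was NULL or no longer active */

	/* ... inspect the map while the reference is held ... */

	vm_map_deallocate(map);		/* drop the reference taken above */
}
#endif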

/*
 * Return the task's IPC space.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

/*
 * Return the number of threads in the task.
 */
int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/*
 * Does the current task use the 64-bit register set for its signal
 * handlers?
 */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}

/*
 * Install a new address map on the task (and the current thread);
 * the old map reference is returned to the caller.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);

	inval_copy_windows(thread);

	return old_map;
}

/*
 * Return the physical map (pmap) backing the task's address map.
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the physical map (pmap) backing a VM map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 * Return the task a thread belongs to.
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 * Is the thread an idle thread?
 */
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 * Is the thread runnable or running?
 */
boolean_t is_thread_running(thread_t th)
{
	return((th->state & TH_RUN) == TH_RUN);
}

/*
 * Shuttles and activations are now unified in thread_t, so this is an
 * identity function kept for compatibility.
 */
thread_t
getshuttle_thread(
	thread_t	th)
{
	return(th);
}

/*
 * Shuttles and activations are now unified in thread_t, so this is an
 * identity function kept for compatibility.
 */
thread_t
getact_thread(
	thread_t	th)
{
	return(th);
}

/*
 * Return the lower bound of a VM map's address range.
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 * Return the upper bound of a VM map's address range.
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

/*
 * Return the virtual size, in bytes, of the mappings in a VM map.
 */
vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

/*
 * Count the VM map entries between start and end, descending into
 * submaps.
 */
int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 * Count all VM map entries in a map, descending into submaps.
 */
int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 * Return the task's user-stop count.
 */
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

/*
 * Return the thread's user-stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 * Return TRUE if the thread has an unconditional (unsafe) abort
 * pending.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact and clears the condition (safe aborts should only have
 * a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
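
/*
 * Illustrative sketch only: how the poll-and-clear semantics above
 * might be used from a cancellable loop.  work_pending() and do_work()
 * are hypothetical helpers standing in for real work.
 */
#if 0
static void
drain_work_interruptibly(void)
{
	while (work_pending()) {
		if (current_thread_aborted())
			return;		/* aborted; a safe abort has now been cleared */
		do_work();
	}
}
#endif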

/*
 * Call func_callback on every thread in the task, with the task
 * locked across the iteration.
 */
void
task_act_iterate_wth_args(
	task_t			task,
	void			(*func_callback)(thread_t, void *),
	void			*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
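
/*
 * Illustrative sketch only: using task_act_iterate_wth_args() to count
 * the runnable threads in a task.  count_if_running() and
 * runnable_threads() are hypothetical.
 */
#if 0
static void
count_if_running(thread_t thread, void *arg)
{
	int *count = (int *)arg;

	if ((thread->state & TH_RUN) == TH_RUN)
		(*count)++;
}

static int
runnable_threads(task_t task)
{
	int count = 0;

	task_act_iterate_wth_args(task, count_if_running, &count);
	return (count);
}
#endif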

/*
 * Release one reference on an IPC port.
 */
void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

/*
 * Is the thread still active (not terminated)?
 */
boolean_t
is_thread_active(
	thread_t th)
{
	return(th->active);
}

/*
 * Post the BSD AST on the current processor, with interrupts
 * disabled around the update.
 */
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t tinfo;
	thread_t thread;
	int numrunning = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)
	                            * PAGE_SIZE);

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
	                       POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);
		tinfo.threads_system += tval;
		tinfo.total_system += tval;
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix;
	ptinfo->pti_csw = task->csw;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}
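
/*
 * Illustrative sketch only: a hypothetical caller fills a
 * proc_taskinfo_internal structure for the current task.  The real
 * consumers of fill_taskprocinfo() live on the BSD side.
 */
#if 0
static void
report_current_task(void)
{
	struct proc_taskinfo_internal pti;

	fill_taskprocinfo(current_task(), &pti);
	/* ... pti now holds VM, timing, and counter statistics ... */
}
#endif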

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal * ptinfo)
{
	thread_t thact;
	int err = 0, count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__)
		if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__)
		if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
		{

			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, &basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
#if 0
			ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
			ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));

#endif
			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__)
		thaddr = thact->machine.cthread_self;
#elif defined (__i386__)
		thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}
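
/*
 * Illustrative sketch only: size a buffer from get_numthreads() and
 * collect per-thread identifiers with fill_taskthreadlist(), which
 * returns the number of bytes it filled in.  kalloc()/kfree() are the
 * kernel allocator entry points; list_task_threads() is hypothetical.
 */
#if 0
static void
list_task_threads(task_t task)
{
	int		count = get_numthreads(task);
	vm_size_t	bufsize;
	uint64_t	*buf;
	int		used;

	if (count == 0)
		return;

	bufsize = count * sizeof(uint64_t);
	buf = (uint64_t *)kalloc(bufsize);
	if (buf == NULL)
		return;

	used = fill_taskthreadlist(task, buf, count);

	/* ... consume buf[0 .. used / sizeof(uint64_t) - 1] ... */

	kfree(buf, bufsize);
}
#endif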