/* osfmk/kern/bsd_kern.c (xnu-1699.22.73) */
/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);


/*
 *
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 *
 */
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 *
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 *
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX
 */
int get_thread_lock_count(thread_t th);		/* forced forward */
int get_thread_lock_count(thread_t th)
{
	return(th->mutex_count);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
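
/*
 * Illustrative usage sketch, not part of the original file: a BSD
 * signal-delivery path could use get_signalact() to pick a thread in the
 * task that is able to take the signal and, by passing a non-zero setast,
 * have AST_BSD posted on it in the same step.  The function name below is
 * hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_wake_thread_for_signal(task_t task)
{
	thread_t target;

	/* Pick an eligible (active, not aborted) thread and post AST_BSD. */
	return get_signalact(task, &target, 1);
}
#endif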

kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_flags & TH_SFLAG_ABORTED_MASK) != TH_SFLAG_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
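
/*
 * Illustrative usage sketch, not part of the original file: per the
 * comment above get_task_map(), the bare map pointer is only safe while
 * the task cannot switch maps.  A caller that may block would instead
 * take a reference with get_task_map_reference() and is assumed to drop
 * it with vm_map_deallocate() when done.  The function name below is
 * hypothetical.
 */
#if 0	/* example only */
static void
example_inspect_task_map(task_t task)
{
	vm_map_t map = get_task_map_reference(task);

	if (map == VM_MAP_NULL)
		return;
	/* ... examine the map, possibly blocking ... */
	vm_map_deallocate(map);	/* assumed matching release */
}
#endif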

/*
 *
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int		num_active_thr = 0;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc);
			inc = (thread_t)queue_next(&inc->task_threads)) {
		if (inc->active)
			num_active_thr++;
	}
	task_unlock(task);
	return num_active_thr;
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* does this machine need a 64-bit register set for the signal handler? */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map, boolean_t doswitch)
{
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	mp_disable_preemption();
	old_map = task->map;
	thread->map = task->map = map;
	if (doswitch)
		pmap_switch(map->pmap);
	mp_enable_preemption();
	task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	inval_copy_windows(thread);
#endif

	return old_map;
}

/*
 *
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 *
 */
uint64_t get_task_resident_size(task_t task)
{
	vm_map_t map;

	map = (task == kernel_task) ? kernel_map : task->map;
	return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}

/*
 *
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 *
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 *
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 *
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 *
 */
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

/*
 *
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 *
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_flags & TH_SFLAG_ABORTED_MASK) == TH_SFLAG_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->sched_flags & TH_SFLAG_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_flags & TH_SFLAG_ABORTSAFELY)
			th->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
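
/*
 * Illustrative usage sketch, not part of the original file: a
 * long-running loop in BSD code might poll current_thread_aborted() to
 * bail out early.  Because the poll consumes a safe abort, the condition
 * is observed at most once.  The function name and EINTR return
 * convention below are hypothetical.
 */
#if 0	/* example only */
static int
example_interruptible_loop(int nchunks)
{
	int i;

	for (i = 0; i < nchunks; i++) {
		if (current_thread_aborted())
			return (EINTR);		/* stop early on abort */
		/* ... process one chunk ... */
	}
	return (0);
}
#endif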

/*
 *
 */
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}

void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}


#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t	tinfo;
	thread_t thread;
	uint32_t cswitch = 0, numrunning = 0;
	uint32_t syscalls_unix = 0;
	uint32_t syscalls_mach = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
				POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t	tval;

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);
		tinfo.threads_system += tval;
		tinfo.total_system += tval;

		syscalls_unix += thread->syscalls_unix;
		syscalls_mach += thread->syscalls_mach;
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach + syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix + syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		if (thact->machine.cthread_self == thaddr)
		{

			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
#if 0
			ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
			ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));

#endif
			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			bsd_getthreadname(thact->uthread, ptinfo->pth_name);
			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
		thaddr = thact->machine.cthread_self;
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return (int)(numthr * sizeof(uint64_t));
}
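
/*
 * Illustrative usage sketch, not part of the original file:
 * fill_taskthreadlist() returns a byte count, so a caller sizes the
 * buffer as a thread count times sizeof(uint64_t) and divides the result
 * back down to get the number of entries actually written.  The function
 * name below is hypothetical.
 */
#if 0	/* example only */
static int
example_count_thread_handles(task_t task, uint64_t *buf, int maxthreads)
{
	int bytes = fill_taskthreadlist(task, buf, maxthreads);

	return (bytes / (int)sizeof(uint64_t));
}
#endif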

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}

void
syscall_exit_funnelcheck(void)
{
	thread_t thread;

	thread = current_thread();

	if (thread->funnel_lock)
		panic("syscall exit with funnel held\n");
}