osfmk/kern/bsd_kern.c

/*
 * Copyright (c) 2000-2007 Apple, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine/vm_param.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);

/*
 * Return the BSD proc pointer attached to a task.
 */
void *get_bsdtask_info(task_t t)
{
        return(t->bsd_info);
}

/*
 * Return the BSD proc pointer of a thread's task, or NULL if the thread
 * has no task.
 */
void *get_bsdthreadtask_info(thread_t th)
{
        return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 * Attach a BSD proc pointer to a task.
 */
void set_bsdtask_info(task_t t, void *v)
{
        t->bsd_info = v;
}

/*
 * Return the BSD uthread structure attached to a thread.
 */
void *get_bsdthread_info(thread_t th)
{
        return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
        thread_t thread = (thread_t)queue_first(&task->threads);

        if (queue_end(&task->threads, (queue_entry_t)thread))
                thread = THREAD_NULL;

        if (!task->active)
                return (THREAD_NULL);

        return (thread);
}

kern_return_t
get_signalact(
        task_t          task,
        thread_t        *result_out,
        int             setast)
{
        kern_return_t   result = KERN_SUCCESS;
        thread_t        inc, thread = THREAD_NULL;

        task_lock(task);

        if (!task->active) {
                task_unlock(task);

                return (KERN_FAILURE);
        }

        for (inc = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)inc); ) {
                thread_mtx_lock(inc);
                if (inc->active &&
                                (inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
                        thread = inc;
                        break;
                }
                thread_mtx_unlock(inc);

                inc = (thread_t)queue_next(&inc->task_threads);
        }

        if (result_out)
                *result_out = thread;

        if (thread) {
                if (setast)
                        act_set_astbsd(thread);

                thread_mtx_unlock(thread);
        }
        else
                result = KERN_FAILURE;

        task_unlock(task);

        return (result);
}

kern_return_t
check_actforsig(
        task_t          task,
        thread_t        thread,
        int             setast)
{
        kern_return_t   result = KERN_FAILURE;
        thread_t        inc;

        task_lock(task);

        if (!task->active) {
                task_unlock(task);

                return (KERN_FAILURE);
        }

        for (inc = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)inc); ) {
                if (inc == thread) {
                        thread_mtx_lock(inc);

                        if (inc->active &&
                                        (inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
                                result = KERN_SUCCESS;
                                break;
                        }

                        thread_mtx_unlock(inc);
                        break;
                }

                inc = (thread_t)queue_next(&inc->task_threads);
        }

        if (result == KERN_SUCCESS) {
                if (setast)
                        act_set_astbsd(thread);

                thread_mtx_unlock(thread);
        }

        task_unlock(task);

        return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
        return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
        vm_map_t m;

        if (t == NULL)
                return VM_MAP_NULL;

        task_lock(t);
        if (!t->active) {
                task_unlock(t);
                return VM_MAP_NULL;
        }
        m = t->map;
        vm_map_reference_swap(m);
        task_unlock(t);
        return m;
}

/*
 * Return the IPC space of a task.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
        return(t->itk_space);
}

/*
 * Count the currently active threads in a task.
 */
int get_task_numactivethreads(task_t task)
{
        thread_t inc;
        int num_active_thr = 0;

        task_lock(task);

        for (inc = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)inc);
                        inc = (thread_t)queue_next(&inc->task_threads)) {
                if (inc->active)
                        num_active_thr++;
        }
        task_unlock(task);
        return num_active_thr;
}

/*
 * Return the total number of threads in a task.
 */
int get_task_numacts(task_t t)
{
        return(t->thread_count);
}

/* Does the current task need the 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
        task_t t = current_task();

        if (t->taskFeatures[0] & tf64BitData)
                return(1);
        else
                return(0);
}

/*
 * Swap in a new map for the task/thread pair; the old map reference is
 * returned.
 */
vm_map_t
swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
        vm_map_t old_map;

        if (task != thread->task)
                panic("swap_task_map");

        task_lock(task);
        old_map = task->map;
        thread->map = task->map = map;
        task_unlock(task);

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
        inval_copy_windows(thread);
#endif

        return old_map;
}
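
/*
 * Illustrative sketch (not part of the original file): swap_task_map() hands
 * back the reference previously held on the old map, so a caller would be
 * expected to release that reference once it is finished with it, roughly as
 * below.  The helper name is hypothetical and the block is compiled out.
 */
#if 0   /* example only */
static void
example_replace_task_map(task_t task, thread_t thread, vm_map_t new_map)
{
        vm_map_t old_map;

        /* Install the new map for the task/thread pair. */
        old_map = swap_task_map(task, thread, new_map);

        /* Release the reference now held on the previous map. */
        vm_map_deallocate(old_map);
}
#endif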

/*
 * Return the physical map (pmap) backing a task's VM map.
 */
pmap_t get_task_pmap(task_t t)
{
        return(t->map->pmap);
}

/*
 * Return the task's resident memory size, in bytes.
 */
uint64_t get_task_resident_size(task_t task)
{
        vm_map_t map;

        map = (task == kernel_task) ? kernel_map : task->map;
        return((uint64_t)pmap_resident_count(map->pmap) * PAGE_SIZE_64);
}

/*
 * Return the pmap backing a VM map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
        return(map->pmap);
}

/*
 * Return the task a thread belongs to.
 */
task_t get_threadtask(thread_t th)
{
        return(th->task);
}

/*
 * Return the lowest valid address of a VM map.
 */
vm_map_offset_t
get_map_min(
        vm_map_t        map)
{
        return(vm_map_min(map));
}

/*
 * Return the highest valid address of a VM map.
 */
vm_map_offset_t
get_map_max(
        vm_map_t        map)
{
        return(vm_map_max(map));
}

/*
 * Return the total virtual size of a VM map, in bytes.
 */
vm_map_size_t
get_vmmap_size(
        vm_map_t        map)
{
        return(map->size);
}

int
get_vmsubmap_entries(
        vm_map_t                map,
        vm_object_offset_t      start,
        vm_object_offset_t      end)
{
        int total_entries = 0;
        vm_map_entry_t entry;

        if (not_in_kdp)
                vm_map_lock(map);
        entry = vm_map_first_entry(map);
        while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
                entry = entry->vme_next;
        }

        while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
                if (entry->is_sub_map) {
                        total_entries +=
                                get_vmsubmap_entries(entry->object.sub_map,
                                        entry->offset,
                                        entry->offset +
                                        (entry->vme_end - entry->vme_start));
                } else {
                        total_entries += 1;
                }
                entry = entry->vme_next;
        }
        if (not_in_kdp)
                vm_map_unlock(map);
        return(total_entries);
}

int
get_vmmap_entries(
        vm_map_t        map)
{
        int total_entries = 0;
        vm_map_entry_t entry;

        if (not_in_kdp)
                vm_map_lock(map);
        entry = vm_map_first_entry(map);

        while (entry != vm_map_to_entry(map)) {
                if (entry->is_sub_map) {
                        total_entries +=
                                get_vmsubmap_entries(entry->object.sub_map,
                                        entry->offset,
                                        entry->offset +
                                        (entry->vme_end - entry->vme_start));
                } else {
                        total_entries += 1;
                }
                entry = entry->vme_next;
        }
        if (not_in_kdp)
                vm_map_unlock(map);
        return(total_entries);
}

/*
 * Return the task's user stop count.
 */
int
get_task_userstop(
        task_t  task)
{
        return(task->user_stop_count);
}

/*
 * Return the thread's user stop count.
 */
int
get_thread_userstop(
        thread_t        th)
{
        return(th->user_stop_count);
}

/*
 * Return TRUE if the thread has been aborted (not merely safely aborted).
 */
boolean_t
thread_should_abort(
        thread_t        th)
{
        return ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted(void)
{
        thread_t th = current_thread();
        spl_t s;

        if ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT &&
                        (th->options & TH_OPT_INTMASK) != THREAD_UNINT)
                return (TRUE);
        if (th->sched_mode & TH_MODE_ABORTSAFELY) {
                s = splsched();
                thread_lock(th);
                if (th->sched_mode & TH_MODE_ABORTSAFELY)
                        th->sched_mode &= ~TH_MODE_ISABORTED;
                thread_unlock(th);
                splx(s);
        }
        return FALSE;
}
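
/*
 * Illustrative sketch (not part of the original file): as the comment above
 * notes, current_thread_aborted() both reports and consumes a safe abort, so
 * long-running kernel work would typically poll it once per iteration and
 * bail out when it returns TRUE, roughly as below.  The helper name and the
 * work callback are hypothetical; the block is compiled out.
 */
#if 0   /* example only */
static kern_return_t
example_interruptible_work(boolean_t (*do_one_unit)(void))
{
        for (;;) {
                /* Stop if this thread has been aborted (or safely aborted). */
                if (current_thread_aborted())
                        return (KERN_ABORTED);

                /* do_one_unit() returns FALSE when there is nothing left to do. */
                if (!do_one_unit())
                        return (KERN_SUCCESS);
        }
}
#endif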

/*
 * Invoke func_callback(thread, func_arg) on every thread in the task.
 */
void
task_act_iterate_wth_args(
        task_t          task,
        void            (*func_callback)(thread_t, void *),
        void            *func_arg)
{
        thread_t        inc;

        task_lock(task);

        for (inc = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)inc); ) {
                (void) (*func_callback)(inc, func_arg);
                inc = (thread_t)queue_next(&inc->task_threads);
        }

        task_unlock(task);
}

void
ipc_port_release(
        ipc_port_t port)
{
        ipc_object_release(&(port)->ip_object);
}

void
astbsd_on(void)
{
        boolean_t       reenable;

        reenable = ml_set_interrupts_enabled(FALSE);
        ast_on_fast(AST_BSD);
        (void)ml_set_interrupts_enabled(reenable);
}

#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal *ptinfo)
{
        vm_map_t map;
        task_absolutetime_info_data_t tinfo;
        thread_t thread;
        int cswitch = 0, numrunning = 0;

        map = (task == kernel_task) ? kernel_map : task->map;

        ptinfo->pti_virtual_size = map->size;
        ptinfo->pti_resident_size =
                (mach_vm_size_t)(pmap_resident_count(map->pmap))
                * PAGE_SIZE_64;

        task_lock(task);

        ptinfo->pti_policy = ((task != kernel_task) ?
                POLICY_TIMESHARE : POLICY_RR);

        tinfo.threads_user = tinfo.threads_system = 0;
        tinfo.total_user = task->total_user_time;
        tinfo.total_system = task->total_system_time;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
                uint64_t tval;

                if ((thread->state & TH_RUN) == TH_RUN)
                        numrunning++;
                cswitch += thread->c_switch;
                tval = timer_grab(&thread->user_timer);
                tinfo.threads_user += tval;
                tinfo.total_user += tval;

                tval = timer_grab(&thread->system_timer);
                tinfo.threads_system += tval;
                tinfo.total_system += tval;
        }

        ptinfo->pti_total_system = tinfo.total_system;
        ptinfo->pti_total_user = tinfo.total_user;
        ptinfo->pti_threads_system = tinfo.threads_system;
        ptinfo->pti_threads_user = tinfo.threads_user;

        ptinfo->pti_faults = task->faults;
        ptinfo->pti_pageins = task->pageins;
        ptinfo->pti_cow_faults = task->cow_faults;
        ptinfo->pti_messages_sent = task->messages_sent;
        ptinfo->pti_messages_received = task->messages_received;
        ptinfo->pti_syscalls_mach = task->syscalls_mach;
        ptinfo->pti_syscalls_unix = task->syscalls_unix;
        ptinfo->pti_csw = task->c_switch + cswitch;
        ptinfo->pti_threadnum = task->thread_count;
        ptinfo->pti_numrunning = numrunning;
        ptinfo->pti_priority = task->priority;

        task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal *ptinfo, void *vpp, int *vidp)
{
        thread_t thact;
        int err = 0;
        mach_msg_type_number_t count;
        thread_basic_info_data_t basic_info;
        kern_return_t kret;

        task_lock(task);

        for (thact = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
                if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__) || defined (__x86_64__)
                if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
                {
                        count = THREAD_BASIC_INFO_COUNT;
                        if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
                                err = 1;
                                goto out;
                        }
#if 0
                        ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
                        ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
                        ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
                        ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));
#endif
                        ptinfo->pth_cpu_usage = basic_info.cpu_usage;
                        ptinfo->pth_policy = basic_info.policy;
                        ptinfo->pth_run_state = basic_info.run_state;
                        ptinfo->pth_flags = basic_info.flags;
                        ptinfo->pth_sleep_time = basic_info.sleep_time;
                        ptinfo->pth_curpri = thact->sched_pri;
                        ptinfo->pth_priority = thact->priority;
                        ptinfo->pth_maxpriority = thact->max_priority;

                        if ((vpp != NULL) && (thact->uthread != NULL))
                                bsd_threadcdir(thact->uthread, vpp, vidp);
                        bsd_getthreadname(thact->uthread, ptinfo->pth_name);
                        err = 0;
                        goto out;
                }
                thact = (thread_t)queue_next(&thact->task_threads);
        }
        err = 1;

out:
        task_unlock(task);
        return(err);
}

int
fill_taskthreadlist(task_t task, void *buffer, int thcount)
{
        int numthr = 0;
        thread_t thact;
        uint64_t *uptr;
        uint64_t thaddr;

        uptr = (uint64_t *)buffer;

        task_lock(task);

        for (thact = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
                thaddr = thact->machine.cthread_self;
#elif defined (__i386__) || defined (__x86_64__)
                thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
                *uptr++ = thaddr;
                numthr++;
                if (numthr >= thcount)
                        goto out;
                thact = (thread_t)queue_next(&thact->task_threads);
        }

out:
        task_unlock(task);
        return (int)(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
        return(task->thread_count);
}

void
syscall_exit_funnelcheck(void)
{
        thread_t thread;

        thread = current_thread();

        if (thread->funnel_lock)
                panic("syscall exit with funnel held\n");
}