/*
 * Copyright (c) 2000-2007 Apple, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
void syscall_exit_funnelcheck(void);

/*
 * Return the BSD proc structure (bsd_info) associated with a task.
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 * Return the BSD proc structure for the task owning a thread,
 * or NULL if the thread has no task.
 */
void *get_bsdthreadtask_info(thread_t th)
{
	return(th->task != TASK_NULL ? th->task->bsd_info : NULL);
}

/*
 * Attach a BSD proc structure to a task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the uthread associated with a thread.
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

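	/*
	 * Note: on an empty list, queue_first() returns the queue head
	 * itself, so the queue_end() check below is what detects a
	 * task with no threads.
	 */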
	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

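	/*
	 * Scan for the first thread that is still active and not
	 * already aborted; its mutex is held while the BSD AST is
	 * posted so the thread cannot terminate underneath us.
	 */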
	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}

kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

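	/*
	 * Confirm the nominated thread is still on this task's list
	 * and is neither inactive nor already aborted before posting
	 * the AST to it.
	 */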
	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->sched_mode & TH_MODE_ISABORTED) != TH_MODE_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

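	/*
	 * Take the map reference under the task lock so the map
	 * cannot be swapped out from under us (and destroyed) between
	 * reading t->map and referencing it.
	 */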
	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}

/*
 * Return the IPC space for a task.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numactivethreads(task_t task)
{
	thread_t	inc;
	int		num_active_thr = 0;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc);
			inc = (thread_t)queue_next(&inc->task_threads)) {
		if (inc->active)
			num_active_thr++;
	}
	task_unlock(task);
	return num_active_thr;
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* Does this task require the 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}

/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

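	/*
	 * Update both the task's map and the calling thread's cached
	 * map pointer under the task lock, then invalidate the
	 * thread's copy windows, which may still reference the old
	 * map's translations on some architectures.
	 */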
	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);

	inval_copy_windows(thread);

	return old_map;
}

/*
 * Return the physical map (pmap) for a task.
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the physical map (pmap) backing a VM map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 * Return the task to which a thread belongs.
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 * Return the lowest user-visible address of a VM map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 * Return the highest user-visible address of a VM map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

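	/*
	 * Count each entry that starts within [start, end), recursing
	 * into submaps so nested entries are included in the total.
	 */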
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 * Return the task's user stop count (outstanding stops requested by BSD).
 */
int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

/*
 * Return the thread's user stop count.
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 * Return TRUE if the thread has a full (unsafe) abort pending,
 * as opposed to a safe abort only.
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->sched_mode & TH_MODE_ISABORTED) == TH_MODE_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
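	/*
	 * Double-checked clearing: re-test the safe-abort bit under
	 * the thread lock so we never clear an abort that was upgraded
	 * to a full (unsafe) abort after the unlocked test above.
	 */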
	if (th->sched_mode & TH_MODE_ABORTSAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->sched_mode & TH_MODE_ABORTSAFELY)
			th->sched_mode &= ~TH_MODE_ISABORTED;
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}

/*
 * Apply a callback function to every thread of a task, holding the
 * task lock across the iteration.
 */
void
task_act_iterate_wth_args(
	task_t			task,
	void			(*func_callback)(thread_t, void *),
	void			*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}

void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

void
astbsd_on(void)
{
	boolean_t	reenable;

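	/*
	 * Disable interrupts so the AST bit is set atomically with
	 * respect to preemption on the current processor.
	 */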
	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}


#include <sys/bsdtask_info.h>

void
fill_taskprocinfo(task_t task, struct proc_taskinfo_internal * ptinfo)
{
	vm_map_t map;
	task_absolutetime_info_data_t	tinfo;
	thread_t thread;
	int cswitch = 0, numrunning = 0;

	map = (task == kernel_task)? kernel_map: task->map;

	ptinfo->pti_virtual_size  = map->size;
	ptinfo->pti_resident_size =
		(mach_vm_size_t)(pmap_resident_count(map->pmap))
		* PAGE_SIZE_64;

	task_lock(task);

	ptinfo->pti_policy = ((task != kernel_task)?
		POLICY_TIMESHARE: POLICY_RR);

	tinfo.threads_user = tinfo.threads_system = 0;
	tinfo.total_user = task->total_user_time;
	tinfo.total_system = task->total_system_time;

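	/*
	 * The task-level timers appear to cover only threads that have
	 * already terminated; fold in the live per-thread timers and
	 * context switch counts to get up-to-date totals.
	 */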
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t	tval;

		if ((thread->state & TH_RUN) == TH_RUN)
			numrunning++;
		cswitch += thread->c_switch;
		tval = timer_grab(&thread->user_timer);
		tinfo.threads_user += tval;
		tinfo.total_user += tval;

		tval = timer_grab(&thread->system_timer);
		tinfo.threads_system += tval;
		tinfo.total_system += tval;
	}

	ptinfo->pti_total_system = tinfo.total_system;
	ptinfo->pti_total_user = tinfo.total_user;
	ptinfo->pti_threads_system = tinfo.threads_system;
	ptinfo->pti_threads_user = tinfo.threads_user;

	ptinfo->pti_faults = task->faults;
	ptinfo->pti_pageins = task->pageins;
	ptinfo->pti_cow_faults = task->cow_faults;
	ptinfo->pti_messages_sent = task->messages_sent;
	ptinfo->pti_messages_received = task->messages_received;
	ptinfo->pti_syscalls_mach = task->syscalls_mach;
	ptinfo->pti_syscalls_unix = task->syscalls_unix;
	ptinfo->pti_csw = task->c_switch + cswitch;
	ptinfo->pti_threadnum = task->thread_count;
	ptinfo->pti_numrunning = numrunning;
	ptinfo->pti_priority = task->priority;

	task_unlock(task);
}

int
fill_taskthreadinfo(task_t task, uint64_t thaddr, struct proc_threadinfo_internal * ptinfo, void * vpp, int *vidp)
{
	thread_t thact;
	int err = 0;
	mach_msg_type_number_t count;
	thread_basic_info_data_t basic_info;
	kern_return_t kret;

	task_lock(task);

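	/*
	 * thaddr is the user-visible thread handle (i.e., the cthread
	 * "self" value), so match it against each thread's
	 * machine-dependent state rather than kernel thread pointers.
	 */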
	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		if (thact->machine.cthread_self == thaddr)
#elif defined (__i386__)
		if (thact->machine.pcb->cthread_self == thaddr)
#else
#error architecture not supported
#endif
		{
			count = THREAD_BASIC_INFO_COUNT;
			if ((kret = thread_info_internal(thact, THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count)) != KERN_SUCCESS) {
				err = 1;
				goto out;
			}
#if 0
			ptinfo->pth_user_time = timer_grab(&basic_info.user_time);
			ptinfo->pth_system_time = timer_grab(&basic_info.system_time);
#else
			ptinfo->pth_user_time = ((basic_info.user_time.seconds * NSEC_PER_SEC) + (basic_info.user_time.microseconds * NSEC_PER_USEC));
			ptinfo->pth_system_time = ((basic_info.system_time.seconds * NSEC_PER_SEC) + (basic_info.system_time.microseconds * NSEC_PER_USEC));
#endif
			ptinfo->pth_cpu_usage = basic_info.cpu_usage;
			ptinfo->pth_policy = basic_info.policy;
			ptinfo->pth_run_state = basic_info.run_state;
			ptinfo->pth_flags = basic_info.flags;
			ptinfo->pth_sleep_time = basic_info.sleep_time;
			ptinfo->pth_curpri = thact->sched_pri;
			ptinfo->pth_priority = thact->priority;
			ptinfo->pth_maxpriority = thact->max_priority;

			if ((vpp != NULL) && (thact->uthread != NULL))
				bsd_threadcdir(thact->uthread, vpp, vidp);
			err = 0;
			goto out;
		}
		thact = (thread_t)queue_next(&thact->task_threads);
	}
	err = 1;

out:
	task_unlock(task);
	return(err);
}

int
fill_taskthreadlist(task_t task, void * buffer, int thcount)
{
	int numthr = 0;
	thread_t thact;
	uint64_t * uptr;
	uint64_t thaddr;

	uptr = (uint64_t *)buffer;

	task_lock(task);

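	/*
	 * Emit one user-visible thread handle (cthread self value) per
	 * thread; the caller's buffer is assumed to hold at least
	 * thcount 64-bit entries.
	 */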
	for (thact = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)thact); ) {
#if defined(__ppc__) || defined(__arm__)
		thaddr = thact->machine.cthread_self;
#elif defined (__i386__)
		thaddr = thact->machine.pcb->cthread_self;
#else
#error architecture not supported
#endif
		*uptr++ = thaddr;
		numthr++;
		if (numthr >= thcount)
			goto out;
		thact = (thread_t)queue_next(&thact->task_threads);
	}

out:
	task_unlock(task);
	return(numthr * sizeof(uint64_t));
}

int
get_numthreads(task_t task)
{
	return(task->thread_count);
}

/*
 * Catch a thread attempting to return to user space while still
 * holding the funnel.
 */
void
syscall_exit_funnelcheck(void)
{
	thread_t thread;

	thread = current_thread();

	if (thread->funnel_lock)
		panic("syscall exit with funnel held\n");
}