/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);

/*
 * Return the BSD proc info hung off the Mach task.
 */
void *get_bsdtask_info(task_t t)
{
        return(t->bsd_info);
}

/*
 * Attach BSD proc info to the Mach task.
 */
void set_bsdtask_info(task_t t, void *v)
{
        t->bsd_info = v;
}

/*
 * Return the BSD uthread hung off the Mach thread.
 */
void *get_bsdthread_info(thread_t th)
{
        return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
        thread_t thread = (thread_t)queue_first(&task->threads);

        if (queue_end(&task->threads, (queue_entry_t)thread))
                thread = THREAD_NULL;

        if (!task->active)
                return (THREAD_NULL);

        return (thread);
}

kern_return_t
get_signalact(
        task_t          task,
        thread_t        *result_out,
        int             setast)
{
        kern_return_t   result = KERN_SUCCESS;
        thread_t        inc, thread = THREAD_NULL;

        task_lock(task);

        if (!task->active) {
                task_unlock(task);

                return (KERN_FAILURE);
        }

        for (inc = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)inc); ) {
                thread_mtx_lock(inc);
                if (inc->active &&
                                (inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
                        thread = inc;
                        break;
                }
                thread_mtx_unlock(inc);

                inc = (thread_t)queue_next(&inc->task_threads);
        }

        if (result_out)
                *result_out = thread;

        if (thread) {
                if (setast)
                        act_set_astbsd(thread);

                thread_mtx_unlock(thread);
        }
        else
                result = KERN_FAILURE;

        task_unlock(task);

        return (result);
}
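
/*
 * Usage sketch (hypothetical caller, not part of this file): a BSD
 * signal-delivery path could use get_signalact() to find a thread
 * able to take a signal and post the BSD AST to it. "p" is an
 * assumed struct proc whose Mach task is cached in p->task.
 *
 *        thread_t target;
 *
 *        if (get_signalact(p->task, &target, 1) == KERN_SUCCESS) {
 *                // "target" now has AST_BSD set and will call back
 *                // into the BSD layer on its way out of the kernel.
 *        }
 */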

kern_return_t
check_actforsig(
        task_t          task,
        thread_t        thread,
        int             setast)
{
        kern_return_t   result = KERN_FAILURE;
        thread_t        inc;

        task_lock(task);

        if (!task->active) {
                task_unlock(task);

                return (KERN_FAILURE);
        }

        for (inc = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)inc); ) {
                if (inc == thread) {
                        thread_mtx_lock(inc);

                        if (inc->active &&
                                        (inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
                                result = KERN_SUCCESS;
                                break;
                        }

                        thread_mtx_unlock(inc);
                        break;
                }

                inc = (thread_t)queue_next(&inc->task_threads);
        }

        if (result == KERN_SUCCESS) {
                if (setast)
                        act_set_astbsd(thread);

                thread_mtx_unlock(thread);
        }

        task_unlock(task);

        return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context, or if the task is locked. Otherwise,
 * the map could be switched for the task (and freed) before
 * we get to return it here.
 */
vm_map_t get_task_map(task_t t)
{
        return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
        vm_map_t m;

        if (t == NULL)
                return VM_MAP_NULL;

        task_lock(t);
        if (!t->active) {
                task_unlock(t);
                return VM_MAP_NULL;
        }
        m = t->map;
        vm_map_reference_swap(m);
        task_unlock(t);
        return m;
}
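
/*
 * Usage sketch (hypothetical caller, not part of this file): unlike
 * get_task_map(), the map returned here carries its own reference,
 * so it stays valid after the task swaps or drops its map; the
 * caller must release it with vm_map_deallocate() when done.
 *
 *        vm_map_t map = get_task_map_reference(task);
 *
 *        if (map != VM_MAP_NULL) {
 *                // ... inspect the map ...
 *                vm_map_deallocate(map);  // drop the reference
 *        }
 */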

/*
 * Return the task's IPC space.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
        return(t->itk_space);
}

/*
 * Return the number of threads in the task.
 */
int get_task_numacts(task_t t)
{
        return(t->thread_count);
}

/* Does the current task need the 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
        task_t t = current_task();

        if (t->taskFeatures[0] & tf64BitData)
                return(1);
        else
                return(0);
}

/*
 * Swap in a new address space map for the task (and for the
 * current thread, which must belong to it). The old map
 * reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
        thread_t thread = current_thread();
        vm_map_t old_map;

        if (task != thread->task)
                panic("swap_task_map");

        task_lock(task);
        old_map = task->map;
        thread->map = task->map = map;
        task_unlock(task);
        return old_map;
}
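
/*
 * Usage sketch (hypothetical caller, not part of this file): an
 * exec-style path installing a fresh map would release the returned
 * reference once nothing still points at the old address space.
 *
 *        vm_map_t old_map = swap_task_map(current_task(), new_map);
 *
 *        // ... tear down state tied to the old map ...
 *        vm_map_deallocate(old_map);  // drop the returned reference
 */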

/*
 * Return the physical map (pmap) for the task's address space.
 */
pmap_t get_task_pmap(task_t t)
{
        return(t->map->pmap);
}

/*
 * Return the physical map (pmap) backing a VM map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
        return(map->pmap);
}

/*
 * Return the task a thread belongs to.
 */
task_t get_threadtask(thread_t th)
{
        return(th->task);
}

/*
 * Is the thread marked idle (TH_IDLE)?
 */
boolean_t is_thread_idle(thread_t th)
{
        return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 * Is the thread runnable or running (TH_RUN)?
 */
boolean_t is_thread_running(thread_t th)
{
        return((th->state & TH_RUN) == TH_RUN);
}

/*
 * Compatibility shim: shuttles and threads are now the same object.
 */
thread_t
getshuttle_thread(
        thread_t th)
{
        return(th);
}

/*
 * Compatibility shim: activations and threads are now the same object.
 */
thread_t
getact_thread(
        thread_t th)
{
        return(th);
}

/*
 * Return the lowest valid address in a VM map.
 */
vm_map_offset_t
get_map_min(
        vm_map_t map)
{
        return(vm_map_min(map));
}

/*
 * Return the highest valid address in a VM map.
 */
vm_map_offset_t
get_map_max(
        vm_map_t map)
{
        return(vm_map_max(map));
}

/*
 * Return the total size of a VM map, in bytes.
 */
vm_map_size_t
get_vmmap_size(
        vm_map_t map)
{
        return(map->size);
}

/*
 * Count the map entries that overlap [start, end), descending
 * recursively into submaps.
 */
int
get_vmsubmap_entries(
        vm_map_t map,
        vm_object_offset_t start,
        vm_object_offset_t end)
{
        int total_entries = 0;
        vm_map_entry_t entry;

        if (not_in_kdp)
                vm_map_lock(map);
        entry = vm_map_first_entry(map);
        while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
                entry = entry->vme_next;
        }

        while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
                if (entry->is_sub_map) {
                        total_entries +=
                                get_vmsubmap_entries(entry->object.sub_map,
                                        entry->offset,
                                        entry->offset +
                                        (entry->vme_end - entry->vme_start));
                } else {
                        total_entries += 1;
                }
                entry = entry->vme_next;
        }
        if (not_in_kdp)
                vm_map_unlock(map);
        return(total_entries);
}

/*
 * Count all the entries in a VM map, including those inside
 * submaps.
 */
int
get_vmmap_entries(
        vm_map_t map)
{
        int total_entries = 0;
        vm_map_entry_t entry;

        if (not_in_kdp)
                vm_map_lock(map);
        entry = vm_map_first_entry(map);

        while (entry != vm_map_to_entry(map)) {
                if (entry->is_sub_map) {
                        total_entries +=
                                get_vmsubmap_entries(entry->object.sub_map,
                                        entry->offset,
                                        entry->offset +
                                        (entry->vme_end - entry->vme_start));
                } else {
                        total_entries += 1;
                }
                entry = entry->vme_next;
        }
        if (not_in_kdp)
                vm_map_unlock(map);
        return(total_entries);
}
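
/*
 * Usage sketch (hypothetical caller, not part of this file): BSD code
 * sizing a buffer for a per-process region listing might count the
 * entries first. Safe only under the get_task_map() rules above.
 *
 *        int nentries = get_vmmap_entries(get_task_map(task));
 */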

/*
 * Return the task's count of outstanding user-level stop requests.
 */
int
get_task_userstop(
        task_t task)
{
        return(task->user_stop_count);
}

/*
 * Return the thread's count of outstanding user-level stop requests.
 */
int
get_thread_userstop(
        thread_t th)
{
        return(th->user_stop_count);
}

/*
 * A thread should abort when TH_ABORT is set without
 * TH_ABORT_SAFELY (an unconditional abort is pending).
 */
boolean_t
thread_should_abort(
        thread_t th)
{
        return ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike above, it also
 * checks to see if the thread is safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(void)
{
        thread_t th = current_thread();
        spl_t s;

        if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
                        (th->options & TH_OPT_INTMASK) != THREAD_UNINT)
                return (TRUE);
        if (th->state & TH_ABORT_SAFELY) {
                s = splsched();
                thread_lock(th);
                if (th->state & TH_ABORT_SAFELY)
                        th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
                thread_unlock(th);
                splx(s);
        }
        return FALSE;
}
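
/*
 * Usage sketch (hypothetical caller, not part of this file): a long
 * kernel loop that wants to honor aborts could poll between work
 * units; note that the poll itself consumes a pending safe abort.
 * more_work() and do_work() are assumed helpers, and EINTR is an
 * assumed error convention.
 *
 *        while (more_work()) {
 *                if (current_thread_aborted())
 *                        return (EINTR);
 *                do_work();
 *        }
 */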

/*
 * Apply func_callback to every thread in the task. The task
 * remains locked for the duration, so the callback must not block.
 */
void
task_act_iterate_wth_args(
        task_t task,
        void (*func_callback)(thread_t, void *),
        void *func_arg)
{
        thread_t inc;

        task_lock(task);

        for (inc = (thread_t)queue_first(&task->threads);
                        !queue_end(&task->threads, (queue_entry_t)inc); ) {
                (void) (*func_callback)(inc, func_arg);
                inc = (thread_t)queue_next(&inc->task_threads);
        }

        task_unlock(task);
}
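
/*
 * Usage sketch (hypothetical caller, not part of this file): counting
 * the threads in a task with a callback. The callback runs with the
 * task lock held, so it only touches its argument.
 *
 *        static void
 *        count_thread(__unused thread_t thread, void *arg)
 *        {
 *                (*(int *)arg)++;
 *        }
 *
 *        int nthreads = 0;
 *        task_act_iterate_wth_args(task, count_thread, &nthreads);
 */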

/*
 * Out-of-line port release for BSD callers (the macro version is
 * #undef'd above).
 */
void
ipc_port_release(
        ipc_port_t port)
{
        ipc_object_release(&(port)->ip_object);
}

/*
 * Is the thread still active (not terminating)?
 */
boolean_t
is_thread_active(
        thread_t th)
{
        return(th->active);
}

/*
 * Request the BSD AST, with interrupts disabled around the fast
 * AST update.
 */
void
astbsd_on(void)
{
        boolean_t reenable;

        reenable = ml_set_interrupts_enabled(FALSE);
        ast_on_fast(AST_BSD);
        (void)ml_set_interrupts_enabled(reenable);
}