/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/queue.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>

#undef thread_should_halt
#undef ipc_port_release

decl_simple_lock_data(extern,reaper_lock)
extern queue_head_t reaper_queue;

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];

thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
boolean_t is_thread_running(thread_act_t);
thread_shuttle_t getshuttle_thread(thread_act_t);
thread_act_t getact_thread(thread_shuttle_t);
vm_offset_t get_map_min(vm_map_t);
vm_offset_t get_map_max(vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
boolean_t thread_should_abort(thread_shuttle_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
kern_return_t get_signalact(task_t, thread_act_t *, thread_t *, int);
void astbsd_on(void);

/*
 *
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 *
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 *
 */
void *get_bsdthread_info(thread_act_t th)
{
	return(th->uthread);
}
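
/*
 * Illustrative sketch only: how a BSD-layer caller might pair these
 * accessors.  `struct proc' is an assumption about the BSD side, not
 * a definition from this file.
 *
 *	void
 *	proc_set_task(struct proc *p, task_t task)
 *	{
 *		set_bsdtask_info(task, (void *)p);
 *		assert(get_bsdtask_info(task) == (void *)p);
 *	}
 */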

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t	thr_act;

	thr_act = (thread_act_t)queue_first(&task->thr_acts);
	if (thr_act == (thread_act_t)&task->thr_acts)
		thr_act = THR_ACT_NULL;
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
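
/*
 * Usage sketch (hypothetical caller, not part of this file): because
 * get_firstthread() must not block, a caller that needs a stable
 * answer holds the task lock across the call.
 *
 *	thread_act_t
 *	task_first_act(task_t task)
 *	{
 *		thread_act_t act;
 *
 *		task_lock(task);
 *		act = get_firstthread(task);
 *		task_unlock(task);
 *		return(act);
 *	}
 */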

kern_return_t get_signalact(task_t task, thread_act_t *thact, thread_t *thshut, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
	}

	if (thact)
		*thact = thr_act;

	if (thshut)
		*thshut = thr_act ? thr_act->thread : THREAD_NULL;
	if (thr_act) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
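
/*
 * Usage sketch (hypothetical, illustrative name): delivering a
 * signal-style AST by letting get_signalact() choose a deliverable
 * activation.
 *
 *	kern_return_t
 *	task_post_bsd_ast(task_t task)
 *	{
 *		thread_act_t act;
 *
 *		return(get_signalact(task, &act, (thread_t *)0, 1));
 *	}
 *
 * On KERN_SUCCESS the chosen activation already had act_set_astbsd()
 * applied under its act lock; on KERN_FAILURE the task was inactive
 * or every activation was aborting.
 */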
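
/*
 * Like get_signalact(), but restricted to the single activation
 * `thact': succeed only if that activation is active and not being
 * aborted, optionally setting the BSD AST on it.
 */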
kern_return_t check_actforsig(task_t task, thread_act_t thact, thread_t *thshut, int setast)
{
	thread_act_t	inc;
	thread_act_t	ninc;
	thread_act_t	thr_act;
	thread_t	th;
	int		found = 0;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc = ninc) {
		if (inc != thact) {
			ninc = (thread_act_t)queue_next(&inc->thr_acts);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			found = 1;
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		break;
	}

	if (found) {
		if (thshut)
			*thshut = thr_act ? thr_act->thread : THREAD_NULL;
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}

/*
 *
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

/*
 *
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numacts(task_t t)
{
	return(t->thr_act_count);
}

/* does this machine need a 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}
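
/*
 * Usage sketch (hypothetical signal-delivery path; sendsig32()/sendsig64()
 * and their parameters are illustrative names, not functions defined
 * here): pick the save-area layout by register-set width.
 *
 *	if (is_64signalregset())
 *		sendsig64(p, catcher, sig);
 *	else
 *		sendsig32(p, catcher, sig);
 */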

/*
 * Reset the current task's map by taking a reference
 * on the new map.  The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	vm_map_t old_map;

	vm_map_reference(map);
	task_lock(task);
	old_map = task->map;
	task->map = map;
	task_unlock(task);
	return old_map;
}
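
/*
 * Usage sketch (hypothetical exec-style caller): the returned map
 * still carries the reference the task held, so the caller must
 * drop it.
 *
 *	void
 *	task_adopt_map(task_t task, vm_map_t new_map)
 *	{
 *		vm_map_t old_map;
 *
 *		old_map = swap_task_map(task, new_map);
 *		vm_map_deallocate(old_map);
 *	}
 */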

/*
 * Reset the current act map.
 * The caller donates us a reference to the new map
 * and we donate our reference to the old map back to the caller.
 */
vm_map_t
swap_act_map(thread_act_t thr_act, vm_map_t map)
{
	vm_map_t old_map;

	act_lock(thr_act);
	old_map = thr_act->map;
	thr_act->map = map;
	act_unlock(thr_act);
	return old_map;
}
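
/*
 * Note the asymmetry with swap_task_map() above: swap_act_map() takes
 * no reference itself, so a hypothetical caller passes in a map
 * reference it owns and inherits responsibility for the old one.
 *
 *	old_map = swap_act_map(thr_act, new_map);	new_map ref donated in
 *	vm_map_deallocate(old_map);			old_map ref donated out
 */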

/*
 *
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 *
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 *
 */
task_t get_threadtask(thread_act_t th)
{
	return(th->task);
}

/*
 *
 */
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 *
 */
boolean_t is_thread_running(thread_act_t thact)
{
	thread_t th = thact->thread;

	return((th->state & TH_RUN) == TH_RUN);
}

/*
 *
 */
thread_shuttle_t
getshuttle_thread(
	thread_act_t	th)
{
#ifdef	DEBUG
	assert(th->thread);
#endif
	return(th->thread);
}

/*
 *
 */
thread_act_t
getact_thread(
	thread_shuttle_t	th)
{
#ifdef	DEBUG
	assert(th->top_act);
#endif
	return(th->top_act);
}

/*
 *
 */
vm_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 *
 */
vm_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

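/*
 * Count the map entries in [start, end) of `map', descending into
 * submaps so that nested entries contribute to the total.
 */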
int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}

/*
 *
 */
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

/*
 *
 */
int
get_thread_userstop(
	thread_act_t	th)
{
	return(th->user_stop_count);
}

/*
 *
 */
boolean_t
thread_should_abort(
	thread_shuttle_t	th)
{
	return(!th->top_act || !th->top_act->active ||
	       (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike the above, it
 * also checks to see if the thread is safely aborted.  If so, it
 * returns that fact, and clears the condition (safe aborts only
 * should have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if (!th->top_act ||
	    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
	     th->interrupt_level != THREAD_UNINT))
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
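
/*
 * Usage sketch (hypothetical interruptible loop; work_remains() and
 * do_one_unit() are illustrative placeholders): a single poll both
 * observes and consumes a safe abort.
 *
 *	while (work_remains()) {
 *		if (current_thread_aborted())
 *			return(KERN_ABORTED);
 *		do_one_unit();
 *	}
 */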

/*
 *
 */
void
task_act_iterate_wth_args(
	task_t	task,
	void	(*func_callback)(thread_act_t, void *),
	void	*func_arg)
{
	thread_act_t inc, ninc;

	task_lock(task);
	for (inc = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc = ninc) {
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
		(void) (*func_callback)(inc, func_arg);
	}
	task_unlock(task);
}
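
/*
 * Usage sketch (hypothetical): counting a task's activations with the
 * iterator.  The callback runs with the task lock held, so it must not
 * block or take the task lock itself.
 *
 *	static void
 *	count_one_act(thread_act_t act, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int
 *	count_acts(task_t task)
 *	{
 *		int n = 0;
 *
 *		task_act_iterate_wth_args(task, count_one_act, &n);
 *		return(n);
 *	}
 */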

void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

boolean_t
is_thread_active(
	thread_shuttle_t	th)
{
	return(th->active);
}

kern_return_t
get_thread_waitresult(
	thread_shuttle_t	th)
{
	return(th->wait_result);
}

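/*
 * Post an AST_BSD for the current thread; interrupts are disabled
 * around ast_on_fast() so the pending-AST update cannot be torn by a
 * preemption or interrupt in between.
 */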
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}