/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/queue.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>

#undef thread_should_halt
#undef ipc_port_release

decl_simple_lock_data(extern,reaper_lock)
extern queue_head_t reaper_queue;
/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];

thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
boolean_t is_thread_running(thread_act_t);
thread_shuttle_t getshuttle_thread(thread_act_t);
thread_act_t getact_thread(thread_shuttle_t);
vm_offset_t get_map_min(vm_map_t);
vm_offset_t get_map_max(vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
boolean_t thread_should_abort(thread_shuttle_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
kern_return_t get_signalact(task_t, thread_act_t *, thread_t *, int);
void astbsd_on(void);
/*
 * Return the BSD-side data (the proc pointer) attached to a task.
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 * Attach BSD-side data to a task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the BSD uthread structure attached to a thread activation.
 */
void *get_bsdthread_info(thread_act_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t thr_act;

	thr_act = (thread_act_t)queue_first(&task->thr_acts);
	if (thr_act == (thread_act_t)&task->thr_acts)
		thr_act = THR_ACT_NULL;
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
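
/*
 * Illustrative only: a hypothetical BSD-side caller.  Because this
 * routine takes no locks, the caller must already guarantee that the
 * task cannot be reaped, e.g. by holding a reference on its proc:
 *
 *	thread_act_t act = get_firstthread(p->task);
 *
 *	if (act == THR_ACT_NULL)
 *		return (ESRCH);
 */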

/*
 * Find an activation in the task that can take a signal: it must be
 * active and not already unsafely aborted.  Optionally post an
 * AST_BSD on the chosen activation; return it and/or its shuttle
 * through the out parameters.
 */
kern_return_t get_signalact(task_t task, thread_act_t *thact, thread_t *thshut, int setast)
{
	thread_act_t inc;
	thread_act_t ninc;
	thread_act_t thr_act;
	thread_t th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
	}

	if (thact)
		*thact = thr_act;

	if (thshut)
		*thshut = thr_act ? thr_act->thread : THREAD_NULL;
	if (thr_act) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
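
/*
 * Illustrative only: a hedged sketch of how BSD signal-delivery code
 * might use get_signalact() to pick a recipient and mark it for AST
 * processing.  The surrounding proc handling is hypothetical:
 *
 *	thread_act_t target;
 *
 *	if (get_signalact(p->task, &target, NULL, 1) != KERN_SUCCESS)
 *		return;
 *
 * On success the chosen activation has AST_BSD posted and will notice
 * the pending signal on its way back to user mode.
 */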

/*
 * Like get_signalact(), but checks whether one specific activation,
 * thact, can take a signal.
 */
kern_return_t check_actforsig(task_t task, thread_act_t thact, thread_t *thshut, int setast)
{
	thread_act_t inc;
	thread_act_t ninc;
	thread_act_t thr_act;
	thread_t th;
	int found = 0;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc = ninc) {

		if (inc != thact) {
			ninc = (thread_act_t)queue_next(&inc->thr_acts);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			found = 1;
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		break;
	}

	if (found) {
		if (thshut)
			*thshut = thr_act ? thr_act->thread : THREAD_NULL;
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}

/*
 * Return the task's address map.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

/*
 * Return the task's IPC space.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

/*
 * Return the number of activations in the task.
 */
int get_task_numacts(task_t t)
{
	return(t->thr_act_count);
}

/*
 * Install a new map in the task, taking a reference on the new map.
 * The old map is returned with the reference this routine previously
 * held on it; the caller becomes responsible for releasing it.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	vm_map_t old_map;

	vm_map_reference(map);
	task_lock(task);
	old_map = task->map;
	task->map = map;
	task_unlock(task);
	return old_map;
}
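
/*
 * Illustrative only: a hypothetical exec-style caller that installs a
 * freshly built map and drops the reference it inherits on the old one
 * (vm_map_deallocate() releases a map reference):
 *
 *	vm_map_t old_map;
 *
 *	old_map = swap_task_map(task, new_map);
 *	vm_map_deallocate(old_map);
 */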

/*
 * Reset the activation's map.
 * The caller donates us a reference to the new map,
 * and we donate our reference to the old map in return.
 */
vm_map_t
swap_act_map(thread_act_t thr_act, vm_map_t map)
{
	vm_map_t old_map;

	act_lock(thr_act);
	old_map = thr_act->map;
	thr_act->map = map;
	act_unlock(thr_act);
	return old_map;
}

/*
 * Return the physical map backing the task's address map.
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the physical map backing an address map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 * Return the task an activation belongs to.
 */
task_t get_threadtask(thread_act_t th)
{
	return(th->task);
}

/*
 * Return whether the thread is an idle thread.
 */
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 * Return whether the activation's shuttle has TH_RUN set
 * (running or on a run queue).
 */
boolean_t is_thread_running(thread_act_t thact)
{
	thread_t th = thact->thread;
	return((th->state & TH_RUN) == TH_RUN);
}

/*
 * Return the shuttle currently bound to an activation.
 */
thread_shuttle_t
getshuttle_thread(
	thread_act_t th)
{
#ifdef DEBUG
	assert(th->thread);
#endif
	return(th->thread);
}

/*
 * Return the top activation of a shuttle.
 */
thread_act_t
getact_thread(
	thread_shuttle_t th)
{
#ifdef DEBUG
	assert(th->top_act);
#endif
	return(th->top_act);
}

/*
 * Return the lowest valid address in an address map.
 */
vm_offset_t
get_map_min(
	vm_map_t map)
{
	return(vm_map_min(map));
}

/*
 * Return the highest valid address in an address map.
 */
vm_offset_t
get_map_max(
	vm_map_t map)
{
	return(vm_map_max(map));
}

/*
 * Return the size of the map's mapped memory.
 */
vm_size_t
get_vmmap_size(
	vm_map_t map)
{
	return(map->size);
}

/*
 * Count the map entries that fall within [start, end) of a submap,
 * descending recursively into any nested submaps.
 */
int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}

/*
 * Count all entries in an address map, counting the contents of a
 * submap in place of the submap entry itself.
 */
int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
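
/*
 * Worked example: for a map with three entries whose second entry is
 * a submap containing four entries in the mapped range,
 * get_vmmap_entries() returns 2 + 4 = 6; the submap entry itself is
 * not counted, only its contents.
 */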

/*
 * Return the task's user stop count.
 */
int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

/*
 * Return the activation's user stop count.
 */
int
get_thread_userstop(
	thread_act_t th)
{
	return(th->user_stop_count);
}

/*
 * Return TRUE if the thread has no active top activation or has been
 * aborted unsafely (TH_ABORT set without TH_ABORT_SAFELY).
 */
boolean_t
thread_should_abort(
	thread_shuttle_t th)
{
	return(!th->top_act || !th->top_act->active ||
	       (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike the above, it
 * also checks to see if the thread was safely aborted.  If so, it
 * returns that fact and clears the condition (safe aborts should
 * have only a single effect, and a poll of the abort status
 * qualifies as one).
 */
boolean_t
current_thread_aborted(
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if (!th->top_act ||
	    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
	     th->interrupt_level != THREAD_UNINT))
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
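
/*
 * Illustrative only: a hedged sketch of the intended polling pattern,
 * e.g. inside a long-running interruptible loop in BSD code.  The loop
 * body and error handling are hypothetical:
 *
 *	while (more_work_to_do) {
 *		if (current_thread_aborted())
 *			return (EINTR);
 *		(do one unit of work)
 *	}
 */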

/*
 * Apply func_callback to every activation in the task.  The task
 * lock is held across the entire walk, including the callbacks.
 */
void
task_act_iterate_wth_args(
	task_t task,
	void (*func_callback)(thread_act_t, void *),
	void *func_arg)
{
	thread_act_t inc, ninc;

	task_lock(task);
	for (inc = (thread_act_t)queue_first(&task->thr_acts);
	     inc != (thread_act_t)&task->thr_acts;
	     inc = ninc) {
		ninc = (thread_act_t)queue_next(&inc->thr_acts);
		(void) (*func_callback)(inc, func_arg);
	}
	task_unlock(task);
}
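
/*
 * Illustrative only: a minimal sketch of an iterator callback.  The
 * counting callback is hypothetical; note that it runs with the task
 * lock held, so it must not block:
 *
 *	static void
 *	count_act(thread_act_t thr_act, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nacts = 0;
 *	task_act_iterate_wth_args(task, count_act, &nacts);
 */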

/*
 * Out-of-line port release for BSD; the macro of the same name is
 * #undef'd at the top of this file so the function can be defined.
 */
void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

/*
 * Return whether the thread shuttle is still active.
 */
boolean_t
is_thread_active(
	thread_shuttle_t th)
{
	return(th->active);
}

/*
 * Return the thread's current wait result.
 */
kern_return_t
get_thread_waitresult(
	thread_shuttle_t th)
{
	return(th->wait_result);
}

/*
 * Post an AST_BSD so that BSD AST handling runs before the current
 * thread returns to user mode; interrupts are disabled while the
 * AST state is updated.
 */
void
astbsd_on(void)
{
	boolean_t reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}