]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/bsd_kern.c
xnu-344.23.tar.gz
[apple/xnu.git] / osfmk / kern / bsd_kern.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
de355530
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
de355530
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
de355530
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22#include <mach/mach_types.h>
23#include <kern/queue.h>
24#include <kern/ast.h>
25#include <kern/thread.h>
26#include <kern/thread_act.h>
27#include <kern/task.h>
28#include <kern/spl.h>
29#include <kern/lock.h>
30#include <vm/vm_map.h>
31#include <vm/pmap.h>
32#include <ipc/ipc_port.h>
33#include <ipc/ipc_object.h>
34
35#undef thread_should_halt
36#undef ipc_port_release
1c79356b
A
37
38decl_simple_lock_data(extern,reaper_lock)
39extern queue_head_t reaper_queue;
40
41/* BSD KERN COMPONENT INTERFACE */
42
9bccf70c 43task_t bsd_init_task = TASK_NULL;
1c79356b
A
44char init_task_failure_data[1024];
45
46thread_act_t get_firstthread(task_t);
47vm_map_t get_task_map(task_t);
48ipc_space_t get_task_ipcspace(task_t);
49boolean_t is_kerneltask(task_t);
50boolean_t is_thread_idle(thread_t);
9bccf70c 51boolean_t is_thread_running(thread_act_t);
1c79356b
A
52thread_shuttle_t getshuttle_thread( thread_act_t);
53thread_act_t getact_thread( thread_shuttle_t);
54vm_offset_t get_map_min( vm_map_t);
55vm_offset_t get_map_max( vm_map_t);
56int get_task_userstop(task_t);
57int get_thread_userstop(thread_act_t);
1c79356b
A
58boolean_t thread_should_abort(thread_shuttle_t);
59boolean_t current_thread_aborted(void);
60void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
61void ipc_port_release(ipc_port_t);
1c79356b 62boolean_t is_thread_active(thread_t);
1c79356b
A
63kern_return_t get_thread_waitresult(thread_t);
64vm_size_t get_vmmap_size(vm_map_t);
65int get_vmmap_entries(vm_map_t);
66int get_task_numacts(task_t);
67thread_act_t get_firstthread(task_t task);
68kern_return_t get_signalact(task_t , thread_act_t *, thread_t *, int);
9bccf70c 69void astbsd_on(void);
1c79356b 70
1c79356b
A
71/*
72 *
73 */
74void *get_bsdtask_info(task_t t)
75{
76 return(t->bsd_info);
77}
78
79/*
80 *
81 */
82void set_bsdtask_info(task_t t,void * v)
83{
84 t->bsd_info=v;
85}
86
87/*
88 *
89 */
90void *get_bsdthread_info(thread_act_t th)
91{
92 return(th->uthread);
93}
94
95/*
96 * XXX: wait for BSD to fix signal code
97 * Until then, we cannot block here. We know the task
98 * can't go away, so we make sure it is still active after
99 * retrieving the first thread for extra safety.
100 */
101thread_act_t get_firstthread(task_t task)
102{
103 thread_act_t thr_act;
104
105 thr_act = (thread_act_t)queue_first(&task->thr_acts);
106 if (thr_act == (thread_act_t)&task->thr_acts)
107 thr_act = THR_ACT_NULL;
108 if (!task->active)
109 return(THR_ACT_NULL);
110 return(thr_act);
111}
112
113kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, int setast)
114{
115
116 thread_act_t inc;
117 thread_act_t ninc;
118 thread_act_t thr_act;
119 thread_t th;
120
121 task_lock(task);
122 if (!task->active) {
123 task_unlock(task);
124 return(KERN_FAILURE);
125 }
126
127 thr_act = THR_ACT_NULL;
128 for (inc = (thread_act_t)queue_first(&task->thr_acts);
129 inc != (thread_act_t)&task->thr_acts;
130 inc = ninc) {
131 th = act_lock_thread(inc);
9bccf70c
A
132 if ((inc->active) &&
133 ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
1c79356b
A
134 thr_act = inc;
135 break;
136 }
137 act_unlock_thread(inc);
138 ninc = (thread_act_t)queue_next(&inc->thr_acts);
139 }
140out:
141 if (thact)
142 *thact = thr_act;
143
144 if (thshut)
145 *thshut = thr_act? thr_act->thread: THREAD_NULL ;
146 if (thr_act) {
9bccf70c
A
147 if (setast)
148 act_set_astbsd(thr_act);
149
1c79356b
A
150 act_unlock_thread(thr_act);
151 }
152 task_unlock(task);
153
154 if (thr_act)
155 return(KERN_SUCCESS);
156 else
157 return(KERN_FAILURE);
158}
159
0b4e3aa0 160
9bccf70c 161kern_return_t check_actforsig(task_t task, thread_act_t thact, thread_t * thshut, int setast)
0b4e3aa0
A
162{
163
164 thread_act_t inc;
165 thread_act_t ninc;
166 thread_act_t thr_act;
167 thread_t th;
168 int found=0;
169
170 task_lock(task);
171 if (!task->active) {
172 task_unlock(task);
173 return(KERN_FAILURE);
174 }
175
176 thr_act = THR_ACT_NULL;
177 for (inc = (thread_act_t)queue_first(&task->thr_acts);
178 inc != (thread_act_t)&task->thr_acts;
179 inc = ninc) {
180
181 if (inc != thact) {
182 ninc = (thread_act_t)queue_next(&inc->thr_acts);
183 continue;
184 }
185 th = act_lock_thread(inc);
9bccf70c
A
186 if ((inc->active) &&
187 ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
0b4e3aa0
A
188 found = 1;
189 thr_act = inc;
190 break;
191 }
192 act_unlock_thread(inc);
193 /* ninc = (thread_act_t)queue_next(&inc->thr_acts); */
194 break;
195 }
196out:
197 if (found) {
198 if (thshut)
199 *thshut = thr_act? thr_act->thread: THREAD_NULL ;
9bccf70c
A
200 if (setast)
201 act_set_astbsd(thr_act);
202
0b4e3aa0
A
203 act_unlock_thread(thr_act);
204 }
205 task_unlock(task);
206
207 if (found)
208 return(KERN_SUCCESS);
209 else
210 return(KERN_FAILURE);
211}
212
1c79356b
A
213/*
214 *
215 */
216vm_map_t get_task_map(task_t t)
217{
218 return(t->map);
219}
220
221/*
222 *
223 */
224ipc_space_t get_task_ipcspace(task_t t)
225{
226 return(t->itk_space);
227}
228
229int get_task_numacts(task_t t)
230{
231 return(t->thr_act_count);
232}
233
234/*
235 * Reset the current task's map by taking a reference
236 * on the new map. The old map reference is returned.
237 */
238vm_map_t
239swap_task_map(task_t task,vm_map_t map)
240{
241 vm_map_t old_map;
242
243 vm_map_reference(map);
244 task_lock(task);
245 old_map = task->map;
246 task->map = map;
247 task_unlock(task);
248 return old_map;
249}
250
251/*
252 * Reset the current act map.
253 * The caller donates us a reference to the new map
254 * and we donote our reference to the old map to him.
255 */
256vm_map_t
257swap_act_map(thread_act_t thr_act,vm_map_t map)
258{
259 vm_map_t old_map;
260
261 act_lock(thr_act);
262 old_map = thr_act->map;
263 thr_act->map = map;
264 act_unlock(thr_act);
265 return old_map;
266}
267
268/*
269 *
270 */
271pmap_t get_task_pmap(task_t t)
272{
273 return(t->map->pmap);
274}
275
276/*
277 *
278 */
279pmap_t get_map_pmap(vm_map_t map)
280{
281 return(map->pmap);
282}
283/*
284 *
285 */
286task_t get_threadtask(thread_act_t th)
287{
288 return(th->task);
289}
290
291
292/*
293 *
294 */
295boolean_t is_thread_idle(thread_t th)
296{
297 return((th->state & TH_IDLE) == TH_IDLE);
298}
299
300/*
301 *
302 */
9bccf70c 303boolean_t is_thread_running(thread_act_t thact)
1c79356b 304{
9bccf70c 305 thread_t th = thact->thread;
1c79356b
A
306 return((th->state & TH_RUN) == TH_RUN);
307}
308
309/*
310 *
311 */
312thread_shuttle_t
313getshuttle_thread(
314 thread_act_t th)
315{
316#ifdef DEBUG
317 assert(th->thread);
318#endif
319 return(th->thread);
320}
321
322/*
323 *
324 */
325thread_act_t
326getact_thread(
327 thread_shuttle_t th)
328{
329#ifdef DEBUG
330 assert(th->top_act);
331#endif
332 return(th->top_act);
333}
334
335/*
336 *
337 */
338vm_offset_t
339get_map_min(
340 vm_map_t map)
341{
342 return(vm_map_min(map));
343}
344
345/*
346 *
347 */
348vm_offset_t
349get_map_max(
350 vm_map_t map)
351{
352 return(vm_map_max(map));
353}
354vm_size_t
355get_vmmap_size(
356 vm_map_t map)
357{
358 return(map->size);
359}
360
/*
 * get_vmsubmap_entries - count the map entries whose start falls in
 * [start, end) in the given (sub)map, descending recursively into
 * nested submaps (a submap entry contributes the count of the
 * entries it maps rather than 1).
 *
 * Holds the map lock for the duration of the walk; the recursion
 * acquires each submap's lock while the parent's lock is still held.
 * NOTE(review): start/end are vm_object_offset_t (64-bit) but are
 * compared against vme_start (vm_offset_t) — assumes the ranges fit
 * in a vm_offset_t; confirm on 32-bit configurations.
 */
int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	vm_map_lock(map);
	/* advance past entries that begin below the window of interest */
	entry = vm_map_first_entry(map);
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			/* recurse over the submap range this entry covers */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
391
/*
 * get_vmmap_entries - count all entries in the map, descending
 * recursively into submaps (a submap entry contributes the count of
 * the entries it maps rather than 1).
 *
 * Holds the map lock for the duration of the walk; submap locks are
 * taken by get_vmsubmap_entries() while this lock is still held.
 */
int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while(entry != vm_map_to_entry(map)) {
		if(entry->is_sub_map) {
			/* recurse over the submap range this entry covers */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
417
418/*
419 *
420 */
421/*
422 *
423 */
424int
425get_task_userstop(
426 task_t task)
427{
428 return(task->user_stop_count);
429}
430
431/*
432 *
433 */
434int
435get_thread_userstop(
436 thread_act_t th)
437{
438 return(th->user_stop_count);
439}
440
1c79356b
A
441/*
442 *
443 */
444boolean_t
445thread_should_abort(
446 thread_shuttle_t th)
447{
9bccf70c
A
448 return(!th->top_act || !th->top_act->active ||
449 (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
1c79356b
A
450}
451
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted (
	void)
{
	thread_t th = current_thread();
	spl_t s;

	/* hard abort pending and the thread is interruptible */
	if (!th->top_act ||
	    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
	     th->interrupt_level != THREAD_UNINT))
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		/* re-check and clear the safe-abort bits under the
		 * thread lock at splsched, since they may have changed */
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
481
482/*
483 *
484 */
485void
486task_act_iterate_wth_args(
487 task_t task,
488 void (*func_callback)(thread_act_t, void *),
489 void *func_arg)
490{
491 thread_act_t inc, ninc;
492
493 task_lock(task);
494 for (inc = (thread_act_t)queue_first(&task->thr_acts);
495 inc != (thread_act_t)&task->thr_acts;
496 inc = ninc) {
497 ninc = (thread_act_t)queue_next(&inc->thr_acts);
498 (void) (*func_callback)(inc, func_arg);
499 }
500 task_unlock(task);
501}
502
503void
504ipc_port_release(
505 ipc_port_t port)
506{
507 ipc_object_release(&(port)->ip_object);
508}
509
1c79356b
A
510boolean_t
511is_thread_active(
512 thread_shuttle_t th)
513{
514 return(th->active);
515}
516
1c79356b
A
517kern_return_t
518get_thread_waitresult(
519 thread_shuttle_t th)
520{
521 return(th->wait_result);
522}
523
9bccf70c
A
524void
525astbsd_on(void)
526{
527 boolean_t reenable;
1c79356b 528
9bccf70c
A
529 reenable = ml_set_interrupts_enabled(FALSE);
530 ast_on_fast(AST_BSD);
531 (void)ml_set_interrupts_enabled(reenable);
532}