/* osfmk/kern/bsd_kern.c — BSD kernel component interface (Apple XNU) */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 #include <mach/mach_types.h>
23 #include <kern/queue.h>
24 #include <kern/ast.h>
25 #include <kern/thread.h>
26 #include <kern/thread_act.h>
27 #include <kern/task.h>
28 #include <kern/spl.h>
29 #include <kern/lock.h>
30 #include <vm/vm_map.h>
31 #include <vm/pmap.h>
32 #include <ipc/ipc_port.h>
33 #include <ipc/ipc_object.h>
34
35 #undef thread_should_halt
36 #undef ipc_port_release
37 #undef thread_ast_set
38 #undef thread_ast_clear
39
40 decl_simple_lock_data(extern,reaper_lock)
41 extern queue_head_t reaper_queue;
42
43 /* BSD KERN COMPONENT INTERFACE */
44
45 vm_address_t bsd_init_task = 0;
46 char init_task_failure_data[1024];
47
48 thread_act_t get_firstthread(task_t);
49 vm_map_t get_task_map(task_t);
50 ipc_space_t get_task_ipcspace(task_t);
51 boolean_t is_kerneltask(task_t);
52 boolean_t is_thread_idle(thread_t);
53 boolean_t is_thread_running(thread_t);
54 thread_shuttle_t getshuttle_thread( thread_act_t);
55 thread_act_t getact_thread( thread_shuttle_t);
56 vm_offset_t get_map_min( vm_map_t);
57 vm_offset_t get_map_max( vm_map_t);
58 int get_task_userstop(task_t);
59 int get_thread_userstop(thread_act_t);
60 int inc_task_userstop(task_t);
61 boolean_t thread_should_abort(thread_shuttle_t);
62 boolean_t current_thread_aborted(void);
63 void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
64 void ipc_port_release(ipc_port_t);
65 void thread_ast_set(thread_act_t, ast_t);
66 void thread_ast_clear(thread_act_t, ast_t);
67 boolean_t is_thread_active(thread_t);
68 event_t get_thread_waitevent(thread_t);
69 kern_return_t get_thread_waitresult(thread_t);
70 vm_size_t get_vmmap_size(vm_map_t);
71 int get_vmmap_entries(vm_map_t);
72 int get_task_numacts(task_t);
73 thread_act_t get_firstthread(task_t task);
74 kern_return_t get_signalact(task_t , thread_act_t *, thread_t *, int);
75
76 /*
77 *
78 */
79 void *get_bsdtask_info(task_t t)
80 {
81 return(t->bsd_info);
82 }
83
84 /*
85 *
86 */
87 void set_bsdtask_info(task_t t,void * v)
88 {
89 t->bsd_info=v;
90 }
91
92 /*
93 *
94 */
95 void *get_bsdthread_info(thread_act_t th)
96 {
97 return(th->uthread);
98 }
99
100 /*
101 * XXX: wait for BSD to fix signal code
102 * Until then, we cannot block here. We know the task
103 * can't go away, so we make sure it is still active after
104 * retrieving the first thread for extra safety.
105 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t thr_act;

	/* Peek at the head of the activation list without any lock; an
	 * empty list yields the queue head itself, mapped to THR_ACT_NULL. */
	thr_act = (thread_act_t)queue_first(&task->thr_acts);
	if (thr_act == (thread_act_t)&task->thr_acts)
		thr_act = THR_ACT_NULL;
	/* Liveness is re-checked AFTER the peek (see the comment above:
	 * blocking is not allowed here, so this ordering is deliberate). */
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
117
118 kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, int setast)
119 {
120
121 thread_act_t inc;
122 thread_act_t ninc;
123 thread_act_t thr_act;
124 thread_t th;
125
126 task_lock(task);
127 if (!task->active) {
128 task_unlock(task);
129 return(KERN_FAILURE);
130 }
131
132 thr_act = THR_ACT_NULL;
133 for (inc = (thread_act_t)queue_first(&task->thr_acts);
134 inc != (thread_act_t)&task->thr_acts;
135 inc = ninc) {
136 th = act_lock_thread(inc);
137 if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) {
138 thr_act = inc;
139 break;
140 }
141 act_unlock_thread(inc);
142 ninc = (thread_act_t)queue_next(&inc->thr_acts);
143 }
144 out:
145 if (thact)
146 *thact = thr_act;
147
148 if (thshut)
149 *thshut = thr_act? thr_act->thread: THREAD_NULL ;
150 if (thr_act) {
151 if (setast) {
152 thread_ast_set(thr_act, AST_BSD);
153 if (current_act() == thr_act)
154 ast_on(AST_BSD);
155 }
156 act_unlock_thread(thr_act);
157 }
158 task_unlock(task);
159
160 if (thr_act)
161 return(KERN_SUCCESS);
162 else
163 return(KERN_FAILURE);
164 }
165
166
/*
 * Check whether one specific activation (`thact') of `task' is
 * currently eligible to take a signal: it must be on the task's act
 * list, active, and its shuttle not already aborting.  If `setast' is
 * non-zero, AST_BSD is posted to the act.  Returns KERN_SUCCESS only
 * when that particular act qualifies.
 *
 * NOTE(review): `thact' is declared thread_act_t * but is compared
 * directly against the thread_act_t list elements (`inc != thact');
 * callers appear to pass the act itself rather than a pointer to one —
 * confirm the intended parameter type against the call sites.
 */
kern_return_t check_actforsig(task_t task, thread_act_t * thact, thread_t * thshut, int setast)
{

	thread_act_t inc;
	thread_act_t ninc;
	thread_act_t thr_act;
	thread_t th;
	int found=0;

	task_lock(task);
	if (!task->active) {
		/* Task is terminating; nothing can take a signal. */
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->thr_acts);
			inc != (thread_act_t)&task->thr_acts;
			inc = ninc) {

		if (inc != thact) {
			/* Not the act we were asked about; keep walking. */
			ninc = (thread_act_t)queue_next(&inc->thr_acts);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) {
			found = 1;
			thr_act = inc;	/* leave act locked for the AST below */
			break;
		}
		act_unlock_thread(inc);
		/* ninc = (thread_act_t)queue_next(&inc->thr_acts); */
		/* Target act was found but is not eligible; stop looking. */
		break;
	}
out:
	if (found) {
		if (thshut)
			*thshut = thr_act? thr_act->thread: THREAD_NULL ;
		if (setast) {
			thread_ast_set(thr_act, AST_BSD);
			if (current_act() == thr_act)
				ast_on(AST_BSD);
		}
		/* Drop the act lock taken in the loop above. */
		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
219
220 /*
221 *
222 */
223 vm_map_t get_task_map(task_t t)
224 {
225 return(t->map);
226 }
227
228 /*
229 *
230 */
231 ipc_space_t get_task_ipcspace(task_t t)
232 {
233 return(t->itk_space);
234 }
235
236 int get_task_numacts(task_t t)
237 {
238 return(t->thr_act_count);
239 }
240
241 /*
242 * Reset the current task's map by taking a reference
243 * on the new map. The old map reference is returned.
244 */
245 vm_map_t
246 swap_task_map(task_t task,vm_map_t map)
247 {
248 vm_map_t old_map;
249
250 vm_map_reference(map);
251 task_lock(task);
252 old_map = task->map;
253 task->map = map;
254 task_unlock(task);
255 return old_map;
256 }
257
258 /*
259 * Reset the current act map.
260 * The caller donates us a reference to the new map
261 * and we donote our reference to the old map to him.
262 */
263 vm_map_t
264 swap_act_map(thread_act_t thr_act,vm_map_t map)
265 {
266 vm_map_t old_map;
267
268 act_lock(thr_act);
269 old_map = thr_act->map;
270 thr_act->map = map;
271 act_unlock(thr_act);
272 return old_map;
273 }
274
275 /*
276 *
277 */
278 pmap_t get_task_pmap(task_t t)
279 {
280 return(t->map->pmap);
281 }
282
283 /*
284 *
285 */
286 pmap_t get_map_pmap(vm_map_t map)
287 {
288 return(map->pmap);
289 }
290 /*
291 *
292 */
293 task_t get_threadtask(thread_act_t th)
294 {
295 return(th->task);
296 }
297
298
299 /*
300 *
301 */
302 boolean_t is_thread_idle(thread_t th)
303 {
304 return((th->state & TH_IDLE) == TH_IDLE);
305 }
306
307 /*
308 *
309 */
310 boolean_t is_thread_running(thread_t th)
311 {
312 return((th->state & TH_RUN) == TH_RUN);
313 }
314
315 /*
316 *
317 */
318 thread_shuttle_t
319 getshuttle_thread(
320 thread_act_t th)
321 {
322 #ifdef DEBUG
323 assert(th->thread);
324 #endif
325 return(th->thread);
326 }
327
328 /*
329 *
330 */
331 thread_act_t
332 getact_thread(
333 thread_shuttle_t th)
334 {
335 #ifdef DEBUG
336 assert(th->top_act);
337 #endif
338 return(th->top_act);
339 }
340
341 /*
342 *
343 */
344 vm_offset_t
345 get_map_min(
346 vm_map_t map)
347 {
348 return(vm_map_min(map));
349 }
350
351 /*
352 *
353 */
354 vm_offset_t
355 get_map_max(
356 vm_map_t map)
357 {
358 return(vm_map_max(map));
359 }
360 vm_size_t
361 get_vmmap_size(
362 vm_map_t map)
363 {
364 return(map->size);
365 }
366
367 int
368 get_vmsubmap_entries(
369 vm_map_t map,
370 vm_object_offset_t start,
371 vm_object_offset_t end)
372 {
373 int total_entries = 0;
374 vm_map_entry_t entry;
375
376 vm_map_lock(map);
377 entry = vm_map_first_entry(map);
378 while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
379 entry = entry->vme_next;
380 }
381
382 while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
383 if(entry->is_sub_map) {
384 total_entries +=
385 get_vmsubmap_entries(entry->object.sub_map,
386 entry->offset,
387 entry->offset +
388 (entry->vme_end - entry->vme_start));
389 } else {
390 total_entries += 1;
391 }
392 entry = entry->vme_next;
393 }
394 vm_map_unlock(map);
395 return(total_entries);
396 }
397
398 int
399 get_vmmap_entries(
400 vm_map_t map)
401 {
402 int total_entries = 0;
403 vm_map_entry_t entry;
404
405 vm_map_lock(map);
406 entry = vm_map_first_entry(map);
407
408 while(entry != vm_map_to_entry(map)) {
409 if(entry->is_sub_map) {
410 total_entries +=
411 get_vmsubmap_entries(entry->object.sub_map,
412 entry->offset,
413 entry->offset +
414 (entry->vme_end - entry->vme_start));
415 } else {
416 total_entries += 1;
417 }
418 entry = entry->vme_next;
419 }
420 vm_map_unlock(map);
421 return(total_entries);
422 }
423
424 /*
425 *
426 */
427 /*
428 *
429 */
430 int
431 get_task_userstop(
432 task_t task)
433 {
434 return(task->user_stop_count);
435 }
436
437 /*
438 *
439 */
440 int
441 get_thread_userstop(
442 thread_act_t th)
443 {
444 return(th->user_stop_count);
445 }
446
447 /*
448 *
449 */
450 int
451 inc_task_userstop(
452 task_t task)
453 {
454 int i=0;
455 i = task->user_stop_count;
456 task->user_stop_count++;
457 return(i);
458 }
459
460
461 /*
462 *
463 */
464 boolean_t
465 thread_should_abort(
466 thread_shuttle_t th)
467 {
468 return( (!th->top_act || !th->top_act->active ||
469 th->state & TH_ABORT));
470 }
471
472 /*
473 *
474 */
475 boolean_t
476 current_thread_aborted (
477 void)
478 {
479 thread_t th = current_thread();
480
481 return(!th->top_act ||
482 ((th->state & TH_ABORT) && (th->interruptible)));
483 }
484
485 /*
486 *
487 */
488 void
489 task_act_iterate_wth_args(
490 task_t task,
491 void (*func_callback)(thread_act_t, void *),
492 void *func_arg)
493 {
494 thread_act_t inc, ninc;
495
496 task_lock(task);
497 for (inc = (thread_act_t)queue_first(&task->thr_acts);
498 inc != (thread_act_t)&task->thr_acts;
499 inc = ninc) {
500 ninc = (thread_act_t)queue_next(&inc->thr_acts);
501 (void) (*func_callback)(inc, func_arg);
502 }
503 task_unlock(task);
504 }
505
/*
 * Release a reference on an IPC port by dropping the reference on its
 * embedded ipc_object.  (The #undef at the top of this file exposes
 * this out-of-line version for BSD callers.)
 */
void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}
512
/*
 * OR `reason' into the activation's pending-AST set.  Out-of-line
 * version for BSD callers (see the #undef at the top of this file).
 */
void
thread_ast_set(
	thread_act_t act,
	ast_t reason)
{
	act->ast |= reason;
}
/*
 * Clear `reason' from the activation's pending-AST set.  Out-of-line
 * version for BSD callers (see the #undef at the top of this file).
 */
void
thread_ast_clear(
	thread_act_t act,
	ast_t reason)
{
	act->ast &= ~(reason);
}
527
528 boolean_t
529 is_thread_active(
530 thread_shuttle_t th)
531 {
532 return(th->active);
533 }
534
535 event_t
536 get_thread_waitevent(
537 thread_shuttle_t th)
538 {
539 return(th->wait_event);
540 }
541
542 kern_return_t
543 get_thread_waitresult(
544 thread_shuttle_t th)
545 {
546 return(th->wait_result);
547 }
548
549