/* osfmk/kern/bsd_kern.c -- BSD <-> Mach kernel glue */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 #include <mach/mach_types.h>
23 #include <kern/queue.h>
24 #include <kern/ast.h>
25 #include <kern/thread.h>
26 #include <kern/thread_act.h>
27 #include <kern/task.h>
28 #include <kern/spl.h>
29 #include <kern/lock.h>
30 #include <vm/vm_map.h>
31 #include <vm/pmap.h>
32 #include <ipc/ipc_port.h>
33 #include <ipc/ipc_object.h>
34
35 #undef thread_should_halt
36 #undef ipc_port_release
37 #undef thread_ast_set
38 #undef thread_ast_clear
39
40 decl_simple_lock_data(extern,reaper_lock)
41 extern queue_head_t reaper_queue;
42
43 /* BSD KERN COMPONENT INTERFACE */
44
/* NOTE(review): written by the BSD layer during init-task bootstrap; 0 until set -- confirm against callers. */
vm_address_t bsd_init_task = 0;
/* Scratch buffer for a failure-reason string; presumably filled if launching init fails -- confirm. */
char init_task_failure_data[1024];
47
/*
 * Forward declarations for the BSD kernel-component interface
 * implemented in this file.
 */
thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
boolean_t is_thread_running(thread_t);
thread_shuttle_t getshuttle_thread( thread_act_t);
thread_act_t getact_thread( thread_shuttle_t);
vm_offset_t get_map_min( vm_map_t);
vm_offset_t get_map_max( vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
int inc_task_userstop(task_t);
boolean_t thread_should_abort(thread_shuttle_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
void thread_ast_set(thread_act_t, ast_t);
void thread_ast_clear(thread_act_t, ast_t);
boolean_t is_thread_active(thread_t);
event_t get_thread_waitevent(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
/* NOTE(review): duplicate of the get_firstthread declaration above */
thread_act_t get_firstthread(task_t task);
kern_return_t get_signalact(task_t , thread_act_t *, thread_t *, int);

kern_return_t bsd_refvm_object(vm_object_t object);
77
78
79 /*
80 *
81 */
82 void *get_bsdtask_info(task_t t)
83 {
84 return(t->bsd_info);
85 }
86
87 /*
88 *
89 */
90 void set_bsdtask_info(task_t t,void * v)
91 {
92 t->bsd_info=v;
93 }
94
95 /*
96 *
97 */
98 void *get_bsdthread_info(thread_act_t th)
99 {
100 return(th->uthread);
101 }
102
/*
 * Return the first activation on the task's activation queue, or
 * THR_ACT_NULL if the queue is empty or the task is no longer active.
 *
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 *
 * NOTE(review): the queue is read without holding the task lock;
 * this relies on the caller's guarantee that the task cannot
 * disappear underneath us.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t thr_act;

	/* queue_first() yields the queue head itself when the queue is empty */
	thr_act = (thread_act_t)queue_first(&task->thr_acts);
	if (thr_act == (thread_act_t)&task->thr_acts)
		thr_act = THR_ACT_NULL;
	/* re-check liveness after the (unlocked) queue read, per comment above */
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
120
121 kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, int setast)
122 {
123
124 thread_act_t inc;
125 thread_act_t ninc;
126 thread_act_t thr_act;
127 thread_t th;
128
129 task_lock(task);
130 if (!task->active) {
131 task_unlock(task);
132 return(KERN_FAILURE);
133 }
134
135 thr_act = THR_ACT_NULL;
136 for (inc = (thread_act_t)queue_first(&task->thr_acts);
137 inc != (thread_act_t)&task->thr_acts;
138 inc = ninc) {
139 th = act_lock_thread(inc);
140 if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) {
141 thr_act = inc;
142 break;
143 }
144 act_unlock_thread(inc);
145 ninc = (thread_act_t)queue_next(&inc->thr_acts);
146 }
147 out:
148 if (thact)
149 *thact = thr_act;
150
151 if (thshut)
152 *thshut = thr_act? thr_act->thread: THREAD_NULL ;
153 if (thr_act) {
154 if (setast) {
155 thread_ast_set(thr_act, AST_BSD);
156 if (current_act() == thr_act)
157 ast_on(AST_BSD);
158 }
159 act_unlock_thread(thr_act);
160 }
161 task_unlock(task);
162
163 if (thr_act)
164 return(KERN_SUCCESS);
165 else
166 return(KERN_FAILURE);
167 }
168
169 /*
170 *
171 */
172 vm_map_t get_task_map(task_t t)
173 {
174 return(t->map);
175 }
176
177 /*
178 *
179 */
180 ipc_space_t get_task_ipcspace(task_t t)
181 {
182 return(t->itk_space);
183 }
184
185 int get_task_numacts(task_t t)
186 {
187 return(t->thr_act_count);
188 }
189
190 /*
191 * Reset the current task's map by taking a reference
192 * on the new map. The old map reference is returned.
193 */
194 vm_map_t
195 swap_task_map(task_t task,vm_map_t map)
196 {
197 vm_map_t old_map;
198
199 vm_map_reference(map);
200 task_lock(task);
201 old_map = task->map;
202 task->map = map;
203 task_unlock(task);
204 return old_map;
205 }
206
207 /*
208 * Reset the current act map.
209 * The caller donates us a reference to the new map
210 * and we donote our reference to the old map to him.
211 */
212 vm_map_t
213 swap_act_map(thread_act_t thr_act,vm_map_t map)
214 {
215 vm_map_t old_map;
216
217 act_lock(thr_act);
218 old_map = thr_act->map;
219 thr_act->map = map;
220 act_unlock(thr_act);
221 return old_map;
222 }
223
224 /*
225 *
226 */
227 pmap_t get_task_pmap(task_t t)
228 {
229 return(t->map->pmap);
230 }
231
232 /*
233 *
234 */
235 pmap_t get_map_pmap(vm_map_t map)
236 {
237 return(map->pmap);
238 }
239 /*
240 *
241 */
242 task_t get_threadtask(thread_act_t th)
243 {
244 return(th->task);
245 }
246
247
248 /*
249 *
250 */
251 boolean_t is_thread_idle(thread_t th)
252 {
253 return((th->state & TH_IDLE) == TH_IDLE);
254 }
255
256 /*
257 *
258 */
259 boolean_t is_thread_running(thread_t th)
260 {
261 return((th->state & TH_RUN) == TH_RUN);
262 }
263
264 /*
265 *
266 */
267 thread_shuttle_t
268 getshuttle_thread(
269 thread_act_t th)
270 {
271 #ifdef DEBUG
272 assert(th->thread);
273 #endif
274 return(th->thread);
275 }
276
277 /*
278 *
279 */
280 thread_act_t
281 getact_thread(
282 thread_shuttle_t th)
283 {
284 #ifdef DEBUG
285 assert(th->top_act);
286 #endif
287 return(th->top_act);
288 }
289
290 /*
291 *
292 */
293 vm_offset_t
294 get_map_min(
295 vm_map_t map)
296 {
297 return(vm_map_min(map));
298 }
299
300 /*
301 *
302 */
303 vm_offset_t
304 get_map_max(
305 vm_map_t map)
306 {
307 return(vm_map_max(map));
308 }
309 vm_size_t
310 get_vmmap_size(
311 vm_map_t map)
312 {
313 return(map->size);
314 }
315
/*
 * Count the map entries overlapping [start, end) in the given map,
 * descending recursively into submaps: a submap entry contributes
 * the count of its backing entries rather than 1.
 *
 * NOTE(review): the submap is locked while the parent map's lock is
 * still held; this relies on map/submap lock ordering being safe to
 * take in that direction -- confirm against vm_map locking rules.
 */
int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);
	/* skip entries wholly below the range of interest */
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			/* recurse over the submap's backing offset range */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
346
347 int
348 get_vmmap_entries(
349 vm_map_t map)
350 {
351 int total_entries = 0;
352 vm_map_entry_t entry;
353
354 vm_map_lock(map);
355 entry = vm_map_first_entry(map);
356
357 while(entry != vm_map_to_entry(map)) {
358 if(entry->is_sub_map) {
359 total_entries +=
360 get_vmsubmap_entries(entry->object.sub_map,
361 entry->offset,
362 entry->offset +
363 (entry->vme_end - entry->vme_start));
364 } else {
365 total_entries += 1;
366 }
367 entry = entry->vme_next;
368 }
369 vm_map_unlock(map);
370 return(total_entries);
371 }
372
373 /*
374 *
375 */
376 /*
377 *
378 */
379 int
380 get_task_userstop(
381 task_t task)
382 {
383 return(task->user_stop_count);
384 }
385
386 /*
387 *
388 */
389 int
390 get_thread_userstop(
391 thread_act_t th)
392 {
393 return(th->user_stop_count);
394 }
395
396 /*
397 *
398 */
399 int
400 inc_task_userstop(
401 task_t task)
402 {
403 int i=0;
404 i = task->user_stop_count;
405 task->user_stop_count++;
406 return(i);
407 }
408
409
410 /*
411 *
412 */
413 boolean_t
414 thread_should_abort(
415 thread_shuttle_t th)
416 {
417 return( (!th->top_act || !th->top_act->active ||
418 th->state & TH_ABORT));
419 }
420
421 /*
422 *
423 */
424 boolean_t
425 current_thread_aborted (
426 void)
427 {
428 thread_t th = current_thread();
429
430 return(!th->top_act || (th->state & TH_ABORT));
431 }
432
433 /*
434 *
435 */
436 void
437 task_act_iterate_wth_args(
438 task_t task,
439 void (*func_callback)(thread_act_t, void *),
440 void *func_arg)
441 {
442 thread_act_t inc, ninc;
443
444 task_lock(task);
445 for (inc = (thread_act_t)queue_first(&task->thr_acts);
446 inc != (thread_act_t)&task->thr_acts;
447 inc = ninc) {
448 ninc = (thread_act_t)queue_next(&inc->thr_acts);
449 (void) (*func_callback)(inc, func_arg);
450 }
451 task_unlock(task);
452 }
453
454 void
455 ipc_port_release(
456 ipc_port_t port)
457 {
458 ipc_object_release(&(port)->ip_object);
459 }
460
461 void
462 thread_ast_set(
463 thread_act_t act,
464 ast_t reason)
465 {
466 act->ast |= reason;
467 }
468 void
469 thread_ast_clear(
470 thread_act_t act,
471 ast_t reason)
472 {
473 act->ast &= ~(reason);
474 }
475
476 boolean_t
477 is_thread_active(
478 thread_shuttle_t th)
479 {
480 return(th->active);
481 }
482
483 event_t
484 get_thread_waitevent(
485 thread_shuttle_t th)
486 {
487 return(th->wait_event);
488 }
489
490 kern_return_t
491 get_thread_waitresult(
492 thread_shuttle_t th)
493 {
494 return(th->wait_result);
495 }
496
497 kern_return_t
498 bsd_refvm_object(vm_object_t object)
499 {
500 vm_object_reference(object);
501 return(KERN_SUCCESS);
502 }
503