]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/bsd_kern.c
xnu-124.13.tar.gz
[apple/xnu.git] / osfmk / kern / bsd_kern.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22#include <mach/mach_types.h>
23#include <kern/queue.h>
24#include <kern/ast.h>
25#include <kern/thread.h>
26#include <kern/thread_act.h>
27#include <kern/task.h>
28#include <kern/spl.h>
29#include <kern/lock.h>
30#include <vm/vm_map.h>
31#include <vm/pmap.h>
32#include <ipc/ipc_port.h>
33#include <ipc/ipc_object.h>
34
35#undef thread_should_halt
36#undef ipc_port_release
37#undef thread_ast_set
38#undef thread_ast_clear
39
40decl_simple_lock_data(extern,reaper_lock)
41extern queue_head_t reaper_queue;
42
43/* BSD KERN COMPONENT INTERFACE */
44
45vm_address_t bsd_init_task = 0;
46char init_task_failure_data[1024];
47
48thread_act_t get_firstthread(task_t);
49vm_map_t get_task_map(task_t);
50ipc_space_t get_task_ipcspace(task_t);
51boolean_t is_kerneltask(task_t);
52boolean_t is_thread_idle(thread_t);
53boolean_t is_thread_running(thread_t);
54thread_shuttle_t getshuttle_thread( thread_act_t);
55thread_act_t getact_thread( thread_shuttle_t);
56vm_offset_t get_map_min( vm_map_t);
57vm_offset_t get_map_max( vm_map_t);
58int get_task_userstop(task_t);
59int get_thread_userstop(thread_act_t);
60int inc_task_userstop(task_t);
61boolean_t thread_should_abort(thread_shuttle_t);
62boolean_t current_thread_aborted(void);
63void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
64void ipc_port_release(ipc_port_t);
65void thread_ast_set(thread_act_t, ast_t);
66void thread_ast_clear(thread_act_t, ast_t);
67boolean_t is_thread_active(thread_t);
68event_t get_thread_waitevent(thread_t);
69kern_return_t get_thread_waitresult(thread_t);
70vm_size_t get_vmmap_size(vm_map_t);
71int get_vmmap_entries(vm_map_t);
72int get_task_numacts(task_t);
73thread_act_t get_firstthread(task_t task);
74kern_return_t get_signalact(task_t , thread_act_t *, thread_t *, int);
75
76kern_return_t bsd_refvm_object(vm_object_t object);
77
78
79/*
80 *
81 */
82void *get_bsdtask_info(task_t t)
83{
84 return(t->bsd_info);
85}
86
87/*
88 *
89 */
90void set_bsdtask_info(task_t t,void * v)
91{
92 t->bsd_info=v;
93}
94
95/*
96 *
97 */
98void *get_bsdthread_info(thread_act_t th)
99{
100 return(th->uthread);
101}
102
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 *
 * Returns the first activation on the task's act list, or
 * THR_ACT_NULL when the list is empty or the task has gone
 * inactive.  NOTE(review): called without the task lock held,
 * per the caveat above -- the active re-check is deliberate
 * and must stay after the list access.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t	thr_act;

	/* queue_first on an empty queue yields the queue head itself */
	thr_act = (thread_act_t)queue_first(&task->thr_acts);
	if (thr_act == (thread_act_t)&task->thr_acts)
		thr_act = THR_ACT_NULL;
	/* task may have been terminated while we peeked at the list */
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
120
121kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, int setast)
122{
123
124 thread_act_t inc;
125 thread_act_t ninc;
126 thread_act_t thr_act;
127 thread_t th;
128
129 task_lock(task);
130 if (!task->active) {
131 task_unlock(task);
132 return(KERN_FAILURE);
133 }
134
135 thr_act = THR_ACT_NULL;
136 for (inc = (thread_act_t)queue_first(&task->thr_acts);
137 inc != (thread_act_t)&task->thr_acts;
138 inc = ninc) {
139 th = act_lock_thread(inc);
140 if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) {
141 thr_act = inc;
142 break;
143 }
144 act_unlock_thread(inc);
145 ninc = (thread_act_t)queue_next(&inc->thr_acts);
146 }
147out:
148 if (thact)
149 *thact = thr_act;
150
151 if (thshut)
152 *thshut = thr_act? thr_act->thread: THREAD_NULL ;
153 if (thr_act) {
154 if (setast) {
155 thread_ast_set(thr_act, AST_BSD);
156 if (current_act() == thr_act)
157 ast_on(AST_BSD);
158 }
159 act_unlock_thread(thr_act);
160 }
161 task_unlock(task);
162
163 if (thr_act)
164 return(KERN_SUCCESS);
165 else
166 return(KERN_FAILURE);
167}
168
169/*
170 *
171 */
172vm_map_t get_task_map(task_t t)
173{
174 return(t->map);
175}
176
177/*
178 *
179 */
180ipc_space_t get_task_ipcspace(task_t t)
181{
182 return(t->itk_space);
183}
184
185int get_task_numacts(task_t t)
186{
187 return(t->thr_act_count);
188}
189
190/*
191 * Reset the current task's map by taking a reference
192 * on the new map. The old map reference is returned.
193 */
194vm_map_t
195swap_task_map(task_t task,vm_map_t map)
196{
197 vm_map_t old_map;
198
199 vm_map_reference(map);
200 task_lock(task);
201 old_map = task->map;
202 task->map = map;
203 task_unlock(task);
204 return old_map;
205}
206
207/*
208 * Reset the current act map.
209 * The caller donates us a reference to the new map
210 * and we donote our reference to the old map to him.
211 */
212vm_map_t
213swap_act_map(thread_act_t thr_act,vm_map_t map)
214{
215 vm_map_t old_map;
216
217 act_lock(thr_act);
218 old_map = thr_act->map;
219 thr_act->map = map;
220 act_unlock(thr_act);
221 return old_map;
222}
223
224/*
225 *
226 */
227pmap_t get_task_pmap(task_t t)
228{
229 return(t->map->pmap);
230}
231
232/*
233 *
234 */
235pmap_t get_map_pmap(vm_map_t map)
236{
237 return(map->pmap);
238}
239/*
240 *
241 */
242task_t get_threadtask(thread_act_t th)
243{
244 return(th->task);
245}
246
247
248/*
249 *
250 */
251boolean_t is_thread_idle(thread_t th)
252{
253 return((th->state & TH_IDLE) == TH_IDLE);
254}
255
256/*
257 *
258 */
259boolean_t is_thread_running(thread_t th)
260{
261 return((th->state & TH_RUN) == TH_RUN);
262}
263
264/*
265 *
266 */
267thread_shuttle_t
268getshuttle_thread(
269 thread_act_t th)
270{
271#ifdef DEBUG
272 assert(th->thread);
273#endif
274 return(th->thread);
275}
276
277/*
278 *
279 */
280thread_act_t
281getact_thread(
282 thread_shuttle_t th)
283{
284#ifdef DEBUG
285 assert(th->top_act);
286#endif
287 return(th->top_act);
288}
289
290/*
291 *
292 */
293vm_offset_t
294get_map_min(
295 vm_map_t map)
296{
297 return(vm_map_min(map));
298}
299
300/*
301 *
302 */
303vm_offset_t
304get_map_max(
305 vm_map_t map)
306{
307 return(vm_map_max(map));
308}
309vm_size_t
310get_vmmap_size(
311 vm_map_t map)
312{
313 return(map->size);
314}
315
/*
 * Count the entries of `map' whose start addresses fall in
 * [start, end), descending recursively into submaps so that
 * leaf entries are counted rather than the submap entry itself.
 * The map lock is held across the walk; a recursive call locks
 * the submap while the parent map's lock is still held.
 */
int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	vm_map_lock(map);
	/* skip entries that begin below the window */
	entry = vm_map_first_entry(map);
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			/* recurse over the submap range this entry covers */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
346
/*
 * Count all entries in a VM map, descending recursively into
 * submaps (via get_vmsubmap_entries) so that leaf entries are
 * counted rather than the submap entries themselves.  The map
 * lock is held across the walk.
 */
int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while(entry != vm_map_to_entry(map)) {
		if(entry->is_sub_map) {
			/* count the submap's entries over this entry's span */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(total_entries);
}
372
373/*
374 *
375 */
376/*
377 *
378 */
379int
380get_task_userstop(
381 task_t task)
382{
383 return(task->user_stop_count);
384}
385
386/*
387 *
388 */
389int
390get_thread_userstop(
391 thread_act_t th)
392{
393 return(th->user_stop_count);
394}
395
396/*
397 *
398 */
399int
400inc_task_userstop(
401 task_t task)
402{
403 int i=0;
404 i = task->user_stop_count;
405 task->user_stop_count++;
406 return(i);
407}
408
409
410/*
411 *
412 */
413boolean_t
414thread_should_abort(
415 thread_shuttle_t th)
416{
417 return( (!th->top_act || !th->top_act->active ||
418 th->state & TH_ABORT));
419}
420
421/*
422 *
423 */
424boolean_t
425current_thread_aborted (
426 void)
427{
428 thread_t th = current_thread();
429
e7c99d92
A
430 return(!th->top_act ||
431 ((th->state & TH_ABORT) && (th->interruptible)));
1c79356b
A
432}
433
434/*
435 *
436 */
437void
438task_act_iterate_wth_args(
439 task_t task,
440 void (*func_callback)(thread_act_t, void *),
441 void *func_arg)
442{
443 thread_act_t inc, ninc;
444
445 task_lock(task);
446 for (inc = (thread_act_t)queue_first(&task->thr_acts);
447 inc != (thread_act_t)&task->thr_acts;
448 inc = ninc) {
449 ninc = (thread_act_t)queue_next(&inc->thr_acts);
450 (void) (*func_callback)(inc, func_arg);
451 }
452 task_unlock(task);
453}
454
455void
456ipc_port_release(
457 ipc_port_t port)
458{
459 ipc_object_release(&(port)->ip_object);
460}
461
462void
463thread_ast_set(
464 thread_act_t act,
465 ast_t reason)
466{
467 act->ast |= reason;
468}
469void
470thread_ast_clear(
471 thread_act_t act,
472 ast_t reason)
473{
474 act->ast &= ~(reason);
475}
476
477boolean_t
478is_thread_active(
479 thread_shuttle_t th)
480{
481 return(th->active);
482}
483
484event_t
485get_thread_waitevent(
486 thread_shuttle_t th)
487{
488 return(th->wait_event);
489}
490
491kern_return_t
492get_thread_waitresult(
493 thread_shuttle_t th)
494{
495 return(th->wait_result);
496}
497
498kern_return_t
499bsd_refvm_object(vm_object_t object)
500{
501 vm_object_reference(object);
502 return(KERN_SUCCESS);
503}
504