]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/bsd_kern.c
xnu-517.tar.gz
[apple/xnu.git] / osfmk / kern / bsd_kern.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 7 *
43866e37
A
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
43866e37
A
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
1c79356b
A
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25#include <mach/mach_types.h>
26#include <kern/queue.h>
27#include <kern/ast.h>
28#include <kern/thread.h>
29#include <kern/thread_act.h>
30#include <kern/task.h>
31#include <kern/spl.h>
32#include <kern/lock.h>
33#include <vm/vm_map.h>
34#include <vm/pmap.h>
35#include <ipc/ipc_port.h>
36#include <ipc/ipc_object.h>
37
38#undef thread_should_halt
39#undef ipc_port_release
1c79356b 40
1c79356b
A
/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;	/* presumably set elsewhere when BSD's init task is created -- only initialized here */
char init_task_failure_data[1024];	/* buffer for failure details; filled elsewhere (not written in this file) */
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

/*
 * Forward declarations for the BSD-visible accessors defined in this file.
 * NOTE(review): get_firstthread is declared twice (here and again below,
 * after get_task_numacts) -- the duplicate is harmless but redundant.
 */
thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
vm_offset_t get_map_min( vm_map_t);
vm_offset_t get_map_max( vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
thread_act_t get_firstthread(task_t task);	/* NOTE(review): duplicate of the declaration above */
kern_return_t get_signalact(task_t , thread_act_t *, int);
void astbsd_on(void);
1c79356b
A
69/*
70 *
71 */
72void *get_bsdtask_info(task_t t)
73{
74 return(t->bsd_info);
75}
76
77/*
78 *
79 */
80void set_bsdtask_info(task_t t,void * v)
81{
82 t->bsd_info=v;
83}
84
85/*
86 *
87 */
88void *get_bsdthread_info(thread_act_t th)
89{
90 return(th->uthread);
91}
92
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 *
 * Returns the first activation on the task's thread queue, or
 * THR_ACT_NULL when the queue is empty or the task is inactive.
 * NOTE(review): no locks are taken, per the comment above -- callers
 * must guarantee the task cannot be torn down concurrently.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t thr_act;

	/* On an empty queue, queue_first() returns the queue head itself. */
	thr_act = (thread_act_t)queue_first(&task->threads);
	if (queue_end(&task->threads, (queue_entry_t)thr_act))
		thr_act = THR_ACT_NULL;
	/* Re-check activity AFTER the fetch, per the block comment above. */
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}
110
kern_return_t get_signalact(task_t task, thread_act_t * thact, int setast)
{
	/*
	 * Find an activation in 'task' eligible to take a signal: it must
	 * be active and must not have an unconditional abort pending (a
	 * "safe" abort, TH_ABORT|TH_ABORT_SAFELY, does not disqualify it).
	 * On success the activation is optionally returned through 'thact',
	 * and when 'setast' is non-zero an AST_BSD is posted on it.
	 *
	 * Returns KERN_SUCCESS when an eligible activation was found,
	 * KERN_FAILURE when the task is inactive or none qualifies.
	 */
	thread_act_t inc;
	thread_act_t ninc;
	thread_act_t thr_act;
	thread_t th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			/*
			 * Break with the act lock still held; it is
			 * released in the "if (thr_act)" block below.
			 */
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->task_threads);
	}
out:	/* NOTE(review): this label is never the target of a goto. */
	if (thact)
		*thact = thr_act;
	if (thr_act) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
154
0b4e3aa0 155
kern_return_t check_actforsig(task_t task, thread_act_t thact, int setast)
{
	/*
	 * Like get_signalact() above, but only considers the single
	 * activation 'thact': if it is on 'task', is active, and has no
	 * unconditional abort pending, optionally post an AST_BSD on it.
	 *
	 * Returns KERN_SUCCESS when 'thact' qualifies, KERN_FAILURE
	 * otherwise (including when the task is inactive).
	 */
	thread_act_t inc;
	thread_act_t ninc;
	thread_act_t thr_act;
	thread_t th;
	int found = 0;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {

		/* Walk the queue until we reach the requested activation. */
		if (inc != thact) {
			ninc = (thread_act_t)queue_next(&inc->task_threads);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			/* Break with the act lock held; released below. */
			found = 1;
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		/* ninc = (thread_act_t)queue_next(&inc->thr_acts); */
		break;	/* target found but ineligible -- stop looking */
	}
out:	/* NOTE(review): this label is never the target of a goto. */
	if (found) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}
205
1c79356b
A
206/*
207 *
208 */
209vm_map_t get_task_map(task_t t)
210{
211 return(t->map);
212}
213
214/*
215 *
216 */
217ipc_space_t get_task_ipcspace(task_t t)
218{
219 return(t->itk_space);
220}
221
222int get_task_numacts(task_t t)
223{
55e303ae
A
224 return(t->thread_count);
225}
226
227/* does this machine need 64bit register set for signal handler */
228int is_64signalregset(void)
229{
230 task_t t = current_task();
231 if(t->taskFeatures[0] & tf64BitData)
232 return(1);
233 else
234 return(0);
1c79356b
A
235}
236
237/*
55e303ae 238 * The old map reference is returned.
1c79356b
A
239 */
240vm_map_t
241swap_task_map(task_t task,vm_map_t map)
242{
55e303ae 243 thread_act_t act = current_act();
1c79356b
A
244 vm_map_t old_map;
245
55e303ae
A
246 if (task != act->task)
247 panic("swap_task_map");
248
1c79356b
A
249 task_lock(task);
250 old_map = task->map;
55e303ae 251 act->map = task->map = map;
1c79356b
A
252 task_unlock(task);
253 return old_map;
254}
255
1c79356b
A
256vm_map_t
257swap_act_map(thread_act_t thr_act,vm_map_t map)
258{
55e303ae 259 panic("swap_act_map");
1c79356b
A
260}
261
262/*
263 *
264 */
265pmap_t get_task_pmap(task_t t)
266{
267 return(t->map->pmap);
268}
269
270/*
271 *
272 */
273pmap_t get_map_pmap(vm_map_t map)
274{
275 return(map->pmap);
276}
277/*
278 *
279 */
280task_t get_threadtask(thread_act_t th)
281{
282 return(th->task);
283}
284
285
286/*
287 *
288 */
289boolean_t is_thread_idle(thread_t th)
290{
291 return((th->state & TH_IDLE) == TH_IDLE);
292}
293
294/*
295 *
296 */
55e303ae 297boolean_t is_thread_running(thread_t th)
1c79356b
A
298{
299 return((th->state & TH_RUN) == TH_RUN);
300}
301
302/*
303 *
304 */
55e303ae 305thread_t
1c79356b 306getshuttle_thread(
55e303ae 307 thread_t th)
1c79356b 308{
55e303ae 309 return(th);
1c79356b
A
310}
311
312/*
313 *
314 */
55e303ae 315thread_t
1c79356b 316getact_thread(
55e303ae 317 thread_t th)
1c79356b 318{
55e303ae 319 return(th);
1c79356b
A
320}
321
322/*
323 *
324 */
325vm_offset_t
326get_map_min(
327 vm_map_t map)
328{
329 return(vm_map_min(map));
330}
331
332/*
333 *
334 */
335vm_offset_t
336get_map_max(
337 vm_map_t map)
338{
339 return(vm_map_max(map));
340}
341vm_size_t
342get_vmmap_size(
343 vm_map_t map)
344{
345 return(map->size);
346}
347
/*
 * Count the entries of 'map' whose start address falls in [start, end),
 * descending recursively into submaps: a submap entry contributes the
 * count of its own contained entries (over the clipped range) rather
 * than 1.
 *
 * The map is locked for the walk unless we are inside the kernel
 * debugger (not_in_kdp == 0), where acquiring locks is unsafe.
 */
int
get_vmsubmap_entries(
	vm_map_t map,
	vm_object_offset_t start,
	vm_object_offset_t end)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp)
		vm_map_lock(map);
	/* Skip entries that lie entirely below the requested range. */
	entry = vm_map_first_entry(map);
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			/* Recurse over the submap's backing range. */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
380
/*
 * Count all entries of 'map', descending recursively into submaps
 * via get_vmsubmap_entries() (a submap entry contributes its own
 * entry count rather than 1).
 *
 * The map is locked for the walk unless we are inside the kernel
 * debugger (not_in_kdp == 0), where acquiring locks is unsafe.
 */
int
get_vmmap_entries(
	vm_map_t map)
{
	int total_entries = 0;
	vm_map_entry_t entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while(entry != vm_map_to_entry(map)) {
		if(entry->is_sub_map) {
			/* Recurse over the submap's backing range. */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
408
409/*
410 *
411 */
412/*
413 *
414 */
415int
416get_task_userstop(
417 task_t task)
418{
419 return(task->user_stop_count);
420}
421
422/*
423 *
424 */
425int
426get_thread_userstop(
427 thread_act_t th)
428{
429 return(th->user_stop_count);
430}
431
1c79356b
A
432/*
433 *
434 */
435boolean_t
436thread_should_abort(
55e303ae 437 thread_t th)
1c79356b 438{
55e303ae 439 return(!th->top_act ||
9bccf70c 440 (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
1c79356b
A
441}
442
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies.
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	/*
	 * Aborted if the activation is gone, or if an unconditional
	 * abort is pending and the thread is interruptible.
	 */
	if (!th->top_act ||
		((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
		 th->interrupt_level != THREAD_UNINT))
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		/* Re-check under the thread lock before clearing. */
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
472
473/*
474 *
475 */
476void
477task_act_iterate_wth_args(
478 task_t task,
479 void (*func_callback)(thread_act_t, void *),
480 void *func_arg)
481{
482 thread_act_t inc, ninc;
483
484 task_lock(task);
55e303ae
A
485 for (inc = (thread_act_t)queue_first(&task->threads);
486 !queue_end(&task->threads, (queue_entry_t)inc);
1c79356b 487 inc = ninc) {
55e303ae 488 ninc = (thread_act_t)queue_next(&inc->task_threads);
1c79356b
A
489 (void) (*func_callback)(inc, func_arg);
490 }
491 task_unlock(task);
492}
493
494void
495ipc_port_release(
496 ipc_port_t port)
497{
498 ipc_object_release(&(port)->ip_object);
499}
500
1c79356b
A
501boolean_t
502is_thread_active(
55e303ae 503 thread_t th)
1c79356b
A
504{
505 return(th->active);
506}
507
1c79356b
A
508kern_return_t
509get_thread_waitresult(
55e303ae 510 thread_t th)
1c79356b
A
511{
512 return(th->wait_result);
513}
514
9bccf70c
A
515void
516astbsd_on(void)
517{
518 boolean_t reenable;
1c79356b 519
9bccf70c
A
520 reenable = ml_set_interrupts_enabled(FALSE);
521 ast_on_fast(AST_BSD);
522 (void)ml_set_interrupts_enabled(reenable);
523}