/* [apple/xnu.git] osfmk/kern/bsd_kern.c (xnu-517.3.7) */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/queue.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
vm_offset_t get_map_min(vm_map_t);
vm_offset_t get_map_max(vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
kern_return_t get_signalact(task_t, thread_act_t *, int);
void astbsd_on(void);
/*
 * Return the BSD (proc) info pointer attached to the task.
 */
void *get_bsdtask_info(task_t t)
{
        return(t->bsd_info);
}

/*
 * Attach a BSD (proc) info pointer to the task.
 */
void set_bsdtask_info(task_t t, void *v)
{
        t->bsd_info = v;
}

/*
 * Return the BSD (uthread) info pointer attached to the activation.
 */
void *get_bsdthread_info(thread_act_t th)
{
        return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task cannot go away,
 * so for extra safety we still check that it is active after
 * retrieving the first thread.  (A hypothetical caller sketch follows
 * the function.)
 */
thread_act_t get_firstthread(task_t task)
{
        thread_act_t thr_act;

        thr_act = (thread_act_t)queue_first(&task->threads);
        if (queue_end(&task->threads, (queue_entry_t)thr_act))
                thr_act = THR_ACT_NULL;
        if (!task->active)
                return(THR_ACT_NULL);
        return(thr_act);
}
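
/*
 * Hypothetical caller sketch (not part of the original file): because
 * get_firstthread() cannot block and may observe a dying task, a
 * caller must treat THR_ACT_NULL as "nothing to target".
 */
static boolean_t
task_has_signalable_thread_sketch(task_t task)
{
        /* THR_ACT_NULL here means the task is empty or already exiting. */
        return (get_firstthread(task) != THR_ACT_NULL);
}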

kern_return_t get_signalact(task_t task, thread_act_t *thact, int setast)
{
        thread_act_t inc;
        thread_act_t ninc;
        thread_act_t thr_act;
        thread_t th;

        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return(KERN_FAILURE);
        }

        /* Find the first active activation that is not being unsafely aborted. */
        thr_act = THR_ACT_NULL;
        for (inc = (thread_act_t)queue_first(&task->threads);
             !queue_end(&task->threads, (queue_entry_t)inc);
             inc = ninc) {
                th = act_lock_thread(inc);
                if ((inc->active) &&
                    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
                        thr_act = inc;
                        break;
                }
                act_unlock_thread(inc);
                ninc = (thread_act_t)queue_next(&inc->task_threads);
        }

        if (thact)
                *thact = thr_act;
        if (thr_act) {
                if (setast)
                        act_set_astbsd(thr_act);

                act_unlock_thread(thr_act);
        }
        task_unlock(task);

        if (thr_act)
                return(KERN_SUCCESS);
        else
                return(KERN_FAILURE);
}
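
/*
 * Hypothetical caller sketch (not part of the original file): a BSD
 * signal-delivery path can let get_signalact() both choose a
 * deliverable activation and post AST_BSD to it in one critical
 * section by passing setast != 0.
 */
static kern_return_t
deliver_to_any_act_sketch(task_t task)
{
        thread_act_t act;

        /* On KERN_SUCCESS, act is an active, not-unsafely-aborted
         * activation and AST_BSD has already been posted to it. */
        return get_signalact(task, &act, 1);
}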

kern_return_t check_actforsig(task_t task, thread_act_t thact, int setast)
{
        thread_act_t inc;
        thread_act_t ninc;
        thread_act_t thr_act;
        thread_t th;
        int found = 0;

        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return(KERN_FAILURE);
        }

        /* Locate the requested activation and check that it can take a signal. */
        thr_act = THR_ACT_NULL;
        for (inc = (thread_act_t)queue_first(&task->threads);
             !queue_end(&task->threads, (queue_entry_t)inc);
             inc = ninc) {
                if (inc != thact) {
                        ninc = (thread_act_t)queue_next(&inc->task_threads);
                        continue;
                }
                th = act_lock_thread(inc);
                if ((inc->active) &&
                    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
                        found = 1;
                        thr_act = inc;
                        break;
                }
                act_unlock_thread(inc);
                break;
        }

        if (found) {
                if (setast)
                        act_set_astbsd(thr_act);

                act_unlock_thread(thr_act);
        }
        task_unlock(task);

        if (found)
                return(KERN_SUCCESS);
        else
                return(KERN_FAILURE);
}
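
/*
 * Hypothetical caller sketch (not part of the original file): a path
 * delivering a signal to one specific activation can use
 * check_actforsig() to verify the target and post the AST in the same
 * critical section.
 */
static boolean_t
post_sig_to_act_sketch(task_t task, thread_act_t target)
{
        /* setast != 0 asks check_actforsig() to post AST_BSD itself. */
        return (check_actforsig(task, target, 1) == KERN_SUCCESS);
}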

/*
 * Return the task's address-space map.
 */
vm_map_t get_task_map(task_t t)
{
        return(t->map);
}

/*
 * Return the task's IPC space.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
        return(t->itk_space);
}

int get_task_numacts(task_t t)
{
        return(t->thread_count);
}

/* Does the current task need the 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
        task_t t = current_task();

        if (t->taskFeatures[0] & tf64BitData)
                return(1);
        else
                return(0);
}

/*
 * Install a new address-space map on the task and the current
 * activation.  The old map reference is returned to the caller, who
 * becomes responsible for releasing it (see the sketch below).
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
        thread_act_t act = current_act();
        vm_map_t old_map;

        if (task != act->task)
                panic("swap_task_map");

        task_lock(task);
        old_map = task->map;
        act->map = task->map = map;
        task_unlock(task);
        return old_map;
}
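
/*
 * Hypothetical caller sketch (not part of the original file): an
 * exec-style path that installs a fresh map must drop the reference
 * swap_task_map() hands back, e.g. with vm_map_deallocate().
 */
static void
install_new_map_sketch(task_t task, vm_map_t new_map)
{
        vm_map_t old_map;

        old_map = swap_task_map(task, new_map);
        vm_map_deallocate(old_map);     /* release the returned reference */
}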

vm_map_t
swap_act_map(thread_act_t thr_act, vm_map_t map)
{
        /* Per-activation map swapping is not supported. */
        panic("swap_act_map");
        /* NOTREACHED */
}

/*
 * Return the physical map backing the task's address space.
 */
pmap_t get_task_pmap(task_t t)
{
        return(t->map->pmap);
}

/*
 * Return the physical map backing the given VM map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
        return(map->pmap);
}

/*
 * Return the task an activation belongs to.
 */
task_t get_threadtask(thread_act_t th)
{
        return(th->task);
}

/*
 * TRUE if the thread is an idle thread.
 */
boolean_t is_thread_idle(thread_t th)
{
        return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 * TRUE if the thread is runnable or running.
 */
boolean_t is_thread_running(thread_t th)
{
        return((th->state & TH_RUN) == TH_RUN);
}

/*
 * Shuttle and activation are represented by the same object here, so
 * this is an identity mapping kept for compatibility.
 */
thread_t
getshuttle_thread(
        thread_t th)
{
        return(th);
}

/*
 * Identity mapping, kept for compatibility (see above).
 */
thread_t
getact_thread(
        thread_t th)
{
        return(th);
}

/*
 * Return the lowest address in a VM map.
 */
vm_offset_t
get_map_min(
        vm_map_t map)
{
        return(vm_map_min(map));
}

/*
 * Return the highest address in a VM map.
 */
vm_offset_t
get_map_max(
        vm_map_t map)
{
        return(vm_map_max(map));
}

/*
 * Return the virtual size of a VM map.
 */
vm_size_t
get_vmmap_size(
        vm_map_t map)
{
        return(map->size);
}

/*
 * Count the map entries overlapping [start, end) in a (sub)map,
 * descending recursively into nested submaps.
 */
int
get_vmsubmap_entries(
        vm_map_t map,
        vm_object_offset_t start,
        vm_object_offset_t end)
{
        int total_entries = 0;
        vm_map_entry_t entry;

        if (not_in_kdp)
                vm_map_lock(map);
        entry = vm_map_first_entry(map);
        while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
                entry = entry->vme_next;
        }

        while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
                if (entry->is_sub_map) {
                        total_entries +=
                                get_vmsubmap_entries(entry->object.sub_map,
                                        entry->offset,
                                        entry->offset +
                                        (entry->vme_end - entry->vme_start));
                } else {
                        total_entries += 1;
                }
                entry = entry->vme_next;
        }
        if (not_in_kdp)
                vm_map_unlock(map);
        return(total_entries);
}

/*
 * Count all entries in a VM map, descending recursively into submaps.
 */
int
get_vmmap_entries(
        vm_map_t map)
{
        int total_entries = 0;
        vm_map_entry_t entry;

        if (not_in_kdp)
                vm_map_lock(map);
        entry = vm_map_first_entry(map);

        while (entry != vm_map_to_entry(map)) {
                if (entry->is_sub_map) {
                        total_entries +=
                                get_vmsubmap_entries(entry->object.sub_map,
                                        entry->offset,
                                        entry->offset +
                                        (entry->vme_end - entry->vme_start));
                } else {
                        total_entries += 1;
                }
                entry = entry->vme_next;
        }
        if (not_in_kdp)
                vm_map_unlock(map);
        return(total_entries);
}
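
/*
 * Hypothetical caller sketch (not part of the original file): a
 * core-dump style path might size an output buffer from the entry
 * count.  The count can go stale as soon as the map lock is dropped,
 * so it should be treated as an estimate.
 */
static int
estimate_region_count_sketch(task_t task)
{
        return get_vmmap_entries(get_task_map(task));
}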

/*
 * Return the task-level user-stop count.
 */
int
get_task_userstop(
        task_t task)
{
        return(task->user_stop_count);
}

/*
 * Return the per-activation user-stop count.
 */
int
get_thread_userstop(
        thread_act_t th)
{
        return(th->user_stop_count);
}

/*
 * TRUE when the thread has no top activation or is being aborted
 * unconditionally (TH_ABORT set without TH_ABORT_SAFELY).
 */
boolean_t
thread_should_abort(
        thread_t th)
{
        return(!th->top_act ||
               (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike the above, it
 * also checks to see if the thread was safely aborted; if so, it
 * returns that fact and clears the condition (safe aborts should
 * have only a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
        void)
{
        thread_t th = current_thread();
        spl_t s;

        if (!th->top_act ||
            ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
             th->interrupt_level != THREAD_UNINT))
                return (TRUE);
        if (th->state & TH_ABORT_SAFELY) {
                s = splsched();
                thread_lock(th);
                /* Re-check under the thread lock before consuming the abort. */
                if (th->state & TH_ABORT_SAFELY)
                        th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
                thread_unlock(th);
                splx(s);
        }
        return FALSE;
}
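
/*
 * Hypothetical polling sketch (not part of the original file): an
 * interruptible BSD sleep path might poll once per wakeup.  Because a
 * safe abort is consumed by the poll, only a single interrupted-sleep
 * indication results.
 */
static int
interruptible_wait_sketch(void)
{
        /* ... block waiting for the event of interest here ... */
        if (current_thread_aborted())
                return 1;       /* interrupted; a BSD caller would map this to EINTR */
        return 0;
}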

/*
 * Apply func_callback to every activation in the task.  The task lock
 * is held across the walk, so the callback must neither block nor
 * take the task lock itself.
 */
void
task_act_iterate_wth_args(
        task_t task,
        void (*func_callback)(thread_act_t, void *),
        void *func_arg)
{
        thread_act_t inc, ninc;

        task_lock(task);
        for (inc = (thread_act_t)queue_first(&task->threads);
             !queue_end(&task->threads, (queue_entry_t)inc);
             inc = ninc) {
                ninc = (thread_act_t)queue_next(&inc->task_threads);
                (void) (*func_callback)(inc, func_arg);
        }
        task_unlock(task);
}
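
/*
 * Hypothetical callback sketch (not part of the original file):
 * counting a task's activations with task_act_iterate_wth_args().
 * The callback runs under the task lock, so it is kept trivial.
 */
static void
count_act_sketch(thread_act_t act, void *arg)
{
        (void)act;
        (*(int *)arg)++;
}

static int
count_task_acts_sketch(task_t task)
{
        int n = 0;

        task_act_iterate_wth_args(task, count_act_sketch, &n);
        return n;
}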

/*
 * Release a reference on an IPC port.
 */
void
ipc_port_release(
        ipc_port_t port)
{
        ipc_object_release(&(port)->ip_object);
}

/*
 * TRUE if the thread has not terminated.
 */
boolean_t
is_thread_active(
        thread_t th)
{
        return(th->active);
}

/*
 * Return the thread's current wait result.
 */
kern_return_t
get_thread_waitresult(
        thread_t th)
{
        return(th->wait_result);
}

/*
 * Post AST_BSD on the current processor; interrupts are disabled
 * while the per-processor AST state is updated.
 */
void
astbsd_on(void)
{
        boolean_t reenable;

        reenable = ml_set_interrupts_enabled(FALSE);
        ast_on_fast(AST_BSD);
        (void)ml_set_interrupts_enabled(reenable);
}