/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
/*
 * Return the BSD process information (bsd_info) attached to a task.
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 * Attach BSD process information to a task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the BSD uthread structure attached to a thread.
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
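
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the BSD signal-delivery path can use get_signalact() to pick a
 * thread in the task that can take the signal and to mark it with
 * the BSD AST in one call.  "p" here is an assumed BSD proc with a
 * back-pointer to its Mach task:
 *
 *	thread_t target;
 *
 *	if (get_signalact(p->task, &target, 1) == KERN_SUCCESS) {
 *		... target is active, not unsafely aborted, and will
 *		... run the BSD AST handler at its next AST checkpoint
 *	}
 */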

kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
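
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * code that may block, or that runs outside the task's context,
 * should take its own map reference instead of calling
 * get_task_map(), and drop it when done (assuming the usual
 * reference-counted map interface, vm_map_deallocate()):
 *
 *	vm_map_t map = get_task_map_reference(task);
 *
 *	if (map != VM_MAP_NULL) {
 *		... use map, possibly blocking ...
 *		vm_map_deallocate(map);
 *	}
 */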

/*
 * Return the IPC space for a task.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

/*
 * Return the number of threads in a task.
 */
int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* Does the current task need the 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}

/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);
	return old_map;
}
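
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the reference on the returned old map now belongs to the caller,
 * who typically drops it once nothing else needs the old address
 * space (again assuming the reference-counted map interface):
 *
 *	vm_map_t old_map = swap_task_map(task, new_map);
 *
 *	... tear down any state tied to old_map ...
 *	vm_map_deallocate(old_map);
 */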

/*
 * Return the physical map for a task.
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the physical map backing a VM map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 * Return the task a thread belongs to.
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 * Is the thread an idle thread?
 */
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 * Is the thread running or runnable?
 */
boolean_t is_thread_running(thread_t th)
{
	return((th->state & TH_RUN) == TH_RUN);
}

/*
 * Compatibility shim: shuttles and activations have been merged
 * into a single thread abstraction, so the thread is returned as-is.
 */
thread_t
getshuttle_thread(
	thread_t	th)
{
	return(th);
}

/*
 * Compatibility shim: see getshuttle_thread() above.
 */
thread_t
getact_thread(
	thread_t	th)
{
	return(th);
}

/*
 * Return the minimum (lowest) address in a VM map.
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 * Return the maximum (highest) address in a VM map.
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}
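
/*
 * Count the entries in the given range of a (sub)map, descending
 * recursively into nested submaps so each leaf entry is counted
 * once.  The map lock is skipped when running under the kernel
 * debugger (kdp), where taking a lock could deadlock against a
 * frozen holder.
 */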
int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
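
/*
 * Count the entries in a task's VM map, counting each submap's
 * contents recursively via get_vmsubmap_entries() rather than the
 * submap entry itself.
 */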
int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 * Return the task's user stop count.
 */
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

/*
 * Return the per-thread user stop count.
 */
int
get_thread_userstop(
	thread_t	th)
{
	return(th->user_stop_count);
}

/*
 * Has the thread been aborted outright (TH_ABORT set without
 * TH_ABORT_SAFELY)?
 */
boolean_t
thread_should_abort(
	thread_t	th)
{
	return ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
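
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * an interruptible loop can poll current_thread_aborted() between
 * units of work; an outright abort interrupts the loop, while a
 * pending safe abort is consumed by the poll itself, so it affects
 * at most one poll:
 *
 *	while (work_remaining) {
 *		if (current_thread_aborted())
 *			return (EINTR);
 *		... do one interruptible unit of work ...
 *	}
 */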

/*
 * Call func_callback(thread, func_arg) on each thread in the task,
 * holding the task lock across the iteration.
 */
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}

void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

boolean_t
is_thread_active(
	thread_t th)
{
	return(th->active);
}
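
/*
 * Post the BSD AST on the current processor.  Interrupts are
 * disabled around ast_on_fast() so the AST is set on the processor
 * this thread is actually running on.
 */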
void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}