/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
/*
 *
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 *
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 *
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}
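
/*
 * Usage sketch (illustrative only, not part of this interface): a caller
 * that already holds a task reference can peek at the first thread without
 * blocking.  The names `some_task' and `t' below are hypothetical.
 *
 *	thread_t t = get_firstthread(some_task);
 *	if (t == THREAD_NULL) {
 *		// task is inactive or has no threads; nothing to signal
 *	}
 */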

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
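
/*
 * Illustrative caller (hypothetical; `p_task' and `target' are invented
 * names): the BSD signal code would pick a thread eligible to take a
 * signal and mark it with the BSD AST in one shot.
 *
 *	thread_t target;
 *	if (get_signalact(p_task, &target, 1) == KERN_SUCCESS) {
 *		// `target' now has AST_BSD set and will run the BSD AST
 *		// handler on its way back to user space.
 *	}
 */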

kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}
196 /*
197 * This is only safe to call from a thread executing in
198 * in the task's context or if the task is locked Otherwise,
199 * the map could be switched for the task (and freed) before
200 * we to return it here.
201 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
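
/*
 * Usage sketch (hypothetical caller; `some_task' is an invented name):
 * code that cannot guarantee it runs in the task's context should take a
 * real reference and drop it when done, rather than using the raw map.
 *
 *	vm_map_t map = get_task_map_reference(some_task);
 *	if (map != VM_MAP_NULL) {
 *		// ... inspect the map ...
 *		vm_map_deallocate(map);	// release the reference we took
 *	}
 */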

/*
 *
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* Does this machine need a 64-bit register set for its signal handler? */
int is_64signalregset(void)
{
	task_t t = current_task();
	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}
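
/*
 * Illustrative use (hypothetical, not in this file): a BSD signal
 * delivery path could consult this to decide which saved-state layout
 * to push on the user stack.
 *
 *	if (is_64signalregset()) {
 *		// build the 64-bit signal frame
 *	} else {
 *		// build the 32-bit layout
 *	}
 */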

/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);
	return old_map;
}
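
/*
 * Usage sketch (hypothetical; assumes a freshly created `new_map'):
 * the caller owns the reference returned on the old map and is
 * responsible for dropping it.
 *
 *	vm_map_t old = swap_task_map(current_task(), new_map);
 *	vm_map_deallocate(old);		// release the displaced map
 */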

/*
 *
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 *
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 *
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}

/*
 *
 */
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 *
 */
boolean_t is_thread_running(thread_t th)
{
	return((th->state & TH_RUN) == TH_RUN);
}

/*
 *
 */
thread_t
getshuttle_thread(
	thread_t	th)
{
	return(th);
}

/*
 *
 */
thread_t
getact_thread(
	thread_t	th)
{
	return(th);
}

/*
 *
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 *
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
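
/*
 * Illustrative caller (hypothetical; `some_task' is an invented name):
 * these counters walk the map and descend recursively into submaps, so
 * a flat count of all leaf entries in a task's address space is simply:
 *
 *	int n = get_vmmap_entries(get_task_map(some_task));
 */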

/*
 *
 */
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

/*
 *
 */
int
get_thread_userstop(
	thread_t	th)
{
	return(th->user_stop_count);
}

/*
 *
 */
boolean_t
thread_should_abort(
	thread_t	th)
{
	return ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
	void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
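
/*
 * Usage sketch (hypothetical): because a safe abort is consumed by the
 * poll, an interruptible loop typically checks once per iteration.
 *
 *	while (!done) {
 *		if (current_thread_aborted())
 *			return (EINTR);		// bail out once, cleanly
 *		// ... do a bounded unit of work ...
 *	}
 */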

/*
 *
 */
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
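
/*
 * Usage sketch (the callback below is hypothetical): apply a function
 * to every thread in a task while the task lock pins the thread list.
 *
 *	static void
 *	count_thread(thread_t th, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	task_act_iterate_wth_args(some_task, count_thread, &n);
 */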

void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

boolean_t
is_thread_active(
	thread_t	th)
{
	return(th->active);
}

void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}