/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <kern/ast.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <vm/vm_protos.h> /* last */

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_signalact(task_t, thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);

/*
 *
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 *
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 *
 */
void *get_bsdthread_info(thread_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	thread_t	thread = (thread_t)queue_first(&task->threads);

	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}
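
/*
 * Illustrative sketch, not part of the original file: get_firstthread()
 * itself takes no locks, so a caller that needs the thread list to stay
 * stable would presumably hold the task lock around the call, roughly as
 * below.  The helper name is hypothetical.
 */
#if 0	/* example only */
static thread_t
example_firstthread_locked(task_t task)
{
	thread_t	thread;

	task_lock(task);		/* keep task->threads stable */
	thread = get_firstthread(task);
	task_unlock(task);

	return (thread);		/* only a hint once the lock is dropped */
}
#endif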

kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int		setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t	inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
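
/*
 * Illustrative sketch, not part of the original file: one way the BSD
 * signal code might use get_signalact() to pick an active, non-aborted
 * thread and post the BSD AST on it in a single step.  The helper name
 * and return convention are hypothetical.
 */
#if 0	/* example only */
static int
example_pick_signal_thread(task_t task)
{
	thread_t	thread;

	if (get_signalact(task, &thread, 1) != KERN_SUCCESS)
		return (0);	/* task inactive, or no eligible thread */

	/*
	 * AST_BSD has been set on 'thread' via act_set_astbsd(); the
	 * thread is expected to notice it on its way back to user space
	 * (an assumption about the surrounding AST machinery).
	 */
	return (1);
}
#endif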


kern_return_t
check_actforsig(
	task_t		task,
	thread_t	thread,
	int		setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t	inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			if (inc->active &&
					(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}

/*
 * This is only safe to call from a thread executing in
 * the task's context or if the task is locked.  Otherwise,
 * the map could be switched for the task (and freed) before
 * we return it here.  (A usage sketch follows
 * get_task_map_reference() below.)
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

vm_map_t get_task_map_reference(task_t t)
{
	vm_map_t m;

	if (t == NULL)
		return VM_MAP_NULL;

	task_lock(t);
	if (!t->active) {
		task_unlock(t);
		return VM_MAP_NULL;
	}
	m = t->map;
	vm_map_reference_swap(m);
	task_unlock(t);
	return m;
}
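
/*
 * Illustrative sketch, not part of the original file: a caller that is
 * not running in the target task's context should prefer
 * get_task_map_reference() over get_task_map() and drop the reference
 * with vm_map_deallocate() when done.  The helper name is hypothetical.
 */
#if 0	/* example only */
static vm_map_size_t
example_task_map_size(task_t task)
{
	vm_map_t	map = get_task_map_reference(task);
	vm_map_size_t	size;

	if (map == VM_MAP_NULL)
		return (0);		/* task gone or inactive */

	size = get_vmmap_size(map);	/* safe: we hold our own map reference */
	vm_map_deallocate(map);		/* drop the reference taken above */

	return (size);
}
#endif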

/*
 *
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* Does this machine need a 64-bit register set for its signal handler? */
int is_64signalregset(void)
{
	task_t t = current_task();
	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}

/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_t thread = current_thread();
	vm_map_t old_map;

	if (task != thread->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	thread->map = task->map = map;
	task_unlock(task);
	return old_map;
}
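
/*
 * Illustrative sketch, not part of the original file: the old map comes
 * back with its reference still held, so the caller (e.g. an exec-style
 * map replacement) is responsible for releasing it, presumably with
 * vm_map_deallocate().  The helper name is hypothetical.
 */
#if 0	/* example only */
static void
example_install_new_map(task_t task, vm_map_t new_map)
{
	vm_map_t	old_map;

	old_map = swap_task_map(task, new_map);

	/* ... tear down anything still tied to old_map ... */

	vm_map_deallocate(old_map);	/* release the reference returned above */
}
#endif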

/*
 *
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 *
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}
/*
 *
 */
task_t get_threadtask(thread_t th)
{
	return(th->task);
}


/*
 *
 */
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 *
 */
boolean_t is_thread_running(thread_t th)
{
	return((th->state & TH_RUN) == TH_RUN);
}

/*
 *
 */
thread_t
getshuttle_thread(
	thread_t	th)
{
	return(th);
}

/*
 *
 */
thread_t
getact_thread(
	thread_t	th)
{
	return(th);
}

/*
 *
 */
vm_map_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 *
 */
vm_map_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}
vm_map_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

int
get_vmsubmap_entries(
	vm_map_t		map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

int
get_vmmap_entries(
	vm_map_t	map)
{
	int		total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 *
 */
int
get_task_userstop(
	task_t task)
{
	return(task->user_stop_count);
}

/*
 *
 */
int
get_thread_userstop(
	thread_t th)
{
	return(th->user_stop_count);
}

/*
 *
 */
boolean_t
thread_should_abort(
	thread_t th)
{
	return ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted(
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
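
/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * is to poll current_thread_aborted() from an interruptible loop so that
 * a safe abort terminates the work exactly once (the poll itself clears
 * the safe-abort state, as described above).  The helper name is
 * hypothetical.
 */
#if 0	/* example only */
static kern_return_t
example_interruptible_work(int units)
{
	int	i;

	for (i = 0; i < units; i++) {
		if (current_thread_aborted())
			return (KERN_ABORTED);	/* abort noticed (and cleared if safe) */

		/* ... one unit of interruptible work ... */
	}

	return (KERN_SUCCESS);
}
#endif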

/*
 *
 */
void
task_act_iterate_wth_args(
	task_t		task,
	void		(*func_callback)(thread_t, void *),
	void		*func_arg)
{
	thread_t	inc;

	task_lock(task);

	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		(void) (*func_callback)(inc, func_arg);
		inc = (thread_t)queue_next(&inc->task_threads);
	}

	task_unlock(task);
}
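
/*
 * Illustrative sketch, not part of the original file: a callback for
 * task_act_iterate_wth_args().  The iterator holds the task lock across
 * every call, so the callback must not try to take the task lock again
 * and should do only brief, non-blocking work (an assumption about the
 * intended use).  The callback and counter names are hypothetical.
 */
#if 0	/* example only */
static void
example_count_thread(__unused thread_t thread, void *arg)
{
	int	*count = (int *)arg;

	(*count)++;
}

/*
 * Usage:
 *	int nthreads = 0;
 *	task_act_iterate_wth_args(task, example_count_thread, &nthreads);
 */
#endif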

void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

boolean_t
is_thread_active(
	thread_t th)
{
	return(th->active);
}

void
astbsd_on(void)
{
	boolean_t	reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}