]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/bsd_kern.c
xnu-517.12.7.tar.gz
[apple/xnu.git] / osfmk / kern / bsd_kern.c
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
e5568f75
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
e5568f75
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
e5568f75
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22#include <mach/mach_types.h>
23#include <kern/queue.h>
24#include <kern/ast.h>
25#include <kern/thread.h>
26#include <kern/thread_act.h>
27#include <kern/task.h>
28#include <kern/spl.h>
29#include <kern/lock.h>
30#include <vm/vm_map.h>
31#include <vm/pmap.h>
32#include <ipc/ipc_port.h>
33#include <ipc/ipc_object.h>
34
35#undef thread_should_halt
36#undef ipc_port_release
1c79356b 37
1c79356b
A
/* BSD KERN COMPONENT INTERFACE */

task_t bsd_init_task = TASK_NULL;	/* set elsewhere during bootstrap — TODO confirm writer */
char init_task_failure_data[1024];	/* scratch buffer; filled elsewhere — not written in this file */
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

/* Forward declarations of the glue routines exported to the BSD side. */
thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
vm_offset_t get_map_min( vm_map_t);
vm_offset_t get_map_max( vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
/* NOTE(review): duplicate declaration of get_firstthread (also above). */
thread_act_t get_firstthread(task_t task);
kern_return_t get_signalact(task_t , thread_act_t *, int);
void astbsd_on(void);
1c79356b
A
66/*
67 *
68 */
69void *get_bsdtask_info(task_t t)
70{
71 return(t->bsd_info);
72}
73
74/*
75 *
76 */
77void set_bsdtask_info(task_t t,void * v)
78{
79 t->bsd_info=v;
80}
81
82/*
83 *
84 */
85void *get_bsdthread_info(thread_act_t th)
86{
87 return(th->uthread);
88}
89
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here. We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t	thr_act;

	/* NOTE(review): no task lock is taken here — the caller must
	 * guarantee the task stays referenced (see comment above). */
	thr_act = (thread_act_t)queue_first(&task->threads);
	if (queue_end(&task->threads, (queue_entry_t)thr_act))
		thr_act = THR_ACT_NULL;		/* thread list is empty */
	if (!task->active)
		return(THR_ACT_NULL);		/* task already terminated */
	return(thr_act);
}
107
55e303ae 108kern_return_t get_signalact(task_t task,thread_act_t * thact, int setast)
1c79356b
A
109{
110
111 thread_act_t inc;
112 thread_act_t ninc;
113 thread_act_t thr_act;
114 thread_t th;
115
116 task_lock(task);
117 if (!task->active) {
118 task_unlock(task);
119 return(KERN_FAILURE);
120 }
121
122 thr_act = THR_ACT_NULL;
55e303ae
A
123 for (inc = (thread_act_t)queue_first(&task->threads);
124 !queue_end(&task->threads, (queue_entry_t)inc);
1c79356b
A
125 inc = ninc) {
126 th = act_lock_thread(inc);
9bccf70c
A
127 if ((inc->active) &&
128 ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
1c79356b
A
129 thr_act = inc;
130 break;
131 }
132 act_unlock_thread(inc);
55e303ae 133 ninc = (thread_act_t)queue_next(&inc->task_threads);
1c79356b
A
134 }
135out:
136 if (thact)
137 *thact = thr_act;
1c79356b 138 if (thr_act) {
9bccf70c
A
139 if (setast)
140 act_set_astbsd(thr_act);
141
1c79356b
A
142 act_unlock_thread(thr_act);
143 }
144 task_unlock(task);
145
146 if (thr_act)
147 return(KERN_SUCCESS);
148 else
149 return(KERN_FAILURE);
150}
151
0b4e3aa0 152
55e303ae 153kern_return_t check_actforsig(task_t task, thread_act_t thact, int setast)
0b4e3aa0
A
154{
155
156 thread_act_t inc;
157 thread_act_t ninc;
158 thread_act_t thr_act;
159 thread_t th;
160 int found=0;
161
162 task_lock(task);
163 if (!task->active) {
164 task_unlock(task);
165 return(KERN_FAILURE);
166 }
167
168 thr_act = THR_ACT_NULL;
55e303ae
A
169 for (inc = (thread_act_t)queue_first(&task->threads);
170 !queue_end(&task->threads, (queue_entry_t)inc);
0b4e3aa0
A
171 inc = ninc) {
172
173 if (inc != thact) {
55e303ae 174 ninc = (thread_act_t)queue_next(&inc->task_threads);
0b4e3aa0
A
175 continue;
176 }
177 th = act_lock_thread(inc);
9bccf70c
A
178 if ((inc->active) &&
179 ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
0b4e3aa0
A
180 found = 1;
181 thr_act = inc;
182 break;
183 }
184 act_unlock_thread(inc);
185 /* ninc = (thread_act_t)queue_next(&inc->thr_acts); */
186 break;
187 }
188out:
189 if (found) {
9bccf70c
A
190 if (setast)
191 act_set_astbsd(thr_act);
192
0b4e3aa0
A
193 act_unlock_thread(thr_act);
194 }
195 task_unlock(task);
196
197 if (found)
198 return(KERN_SUCCESS);
199 else
200 return(KERN_FAILURE);
201}
202
1c79356b
A
203/*
204 *
205 */
206vm_map_t get_task_map(task_t t)
207{
208 return(t->map);
209}
210
211/*
212 *
213 */
214ipc_space_t get_task_ipcspace(task_t t)
215{
216 return(t->itk_space);
217}
218
219int get_task_numacts(task_t t)
220{
55e303ae
A
221 return(t->thread_count);
222}
223
224/* does this machine need 64bit register set for signal handler */
225int is_64signalregset(void)
226{
227 task_t t = current_task();
228 if(t->taskFeatures[0] & tf64BitData)
229 return(1);
230 else
231 return(0);
1c79356b
A
232}
233
/*
 * The old map reference is returned.
 */
vm_map_t
swap_task_map(task_t task,vm_map_t map)
{
	thread_act_t act = current_act();
	vm_map_t old_map;

	/* Only the current activation's own task may have its map swapped. */
	if (task != act->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	/* Keep the activation's cached map pointer in sync with the task's. */
	act->map = task->map = map;
	task_unlock(task);
	return old_map;
}
252
1c79356b
A
253vm_map_t
254swap_act_map(thread_act_t thr_act,vm_map_t map)
255{
55e303ae 256 panic("swap_act_map");
1c79356b
A
257}
258
259/*
260 *
261 */
262pmap_t get_task_pmap(task_t t)
263{
264 return(t->map->pmap);
265}
266
267/*
268 *
269 */
270pmap_t get_map_pmap(vm_map_t map)
271{
272 return(map->pmap);
273}
274/*
275 *
276 */
277task_t get_threadtask(thread_act_t th)
278{
279 return(th->task);
280}
281
282
283/*
284 *
285 */
286boolean_t is_thread_idle(thread_t th)
287{
288 return((th->state & TH_IDLE) == TH_IDLE);
289}
290
291/*
292 *
293 */
55e303ae 294boolean_t is_thread_running(thread_t th)
1c79356b
A
295{
296 return((th->state & TH_RUN) == TH_RUN);
297}
298
299/*
300 *
301 */
55e303ae 302thread_t
1c79356b 303getshuttle_thread(
55e303ae 304 thread_t th)
1c79356b 305{
55e303ae 306 return(th);
1c79356b
A
307}
308
309/*
310 *
311 */
55e303ae 312thread_t
1c79356b 313getact_thread(
55e303ae 314 thread_t th)
1c79356b 315{
55e303ae 316 return(th);
1c79356b
A
317}
318
319/*
320 *
321 */
322vm_offset_t
323get_map_min(
324 vm_map_t map)
325{
326 return(vm_map_min(map));
327}
328
329/*
330 *
331 */
332vm_offset_t
333get_map_max(
334 vm_map_t map)
335{
336 return(vm_map_max(map));
337}
338vm_size_t
339get_vmmap_size(
340 vm_map_t map)
341{
342 return(map->size);
343}
344
/*
 * Count the map entries of 'map' overlapping [start, end),
 * descending recursively into submaps so each leaf entry
 * counts once.
 */
int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	/* kdp (kernel debugger) runs with everything stopped — skip locks there */
	if (not_in_kdp)
	  vm_map_lock(map);
	entry = vm_map_first_entry(map);
	/* skip entries wholly below the requested range */
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			/* recurse over the submap's corresponding offset window */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
	  vm_map_unlock(map);
	return(total_entries);
}
377
/*
 * Count all map entries in 'map', descending recursively into
 * submaps (via get_vmsubmap_entries) so each leaf entry counts once.
 */
int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	/* kdp (kernel debugger) runs with everything stopped — skip locks there */
	if (not_in_kdp)
	  vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while(entry != vm_map_to_entry(map)) {
		if(entry->is_sub_map) {
			/* recurse over the submap's corresponding offset window */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
	  vm_map_unlock(map);
	return(total_entries);
}
405
406/*
407 *
408 */
409/*
410 *
411 */
412int
413get_task_userstop(
414 task_t task)
415{
416 return(task->user_stop_count);
417}
418
419/*
420 *
421 */
422int
423get_thread_userstop(
424 thread_act_t th)
425{
426 return(th->user_stop_count);
427}
428
1c79356b
A
429/*
430 *
431 */
432boolean_t
433thread_should_abort(
55e303ae 434 thread_t th)
1c79356b 435{
55e303ae 436 return(!th->top_act ||
9bccf70c 437 (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
1c79356b
A
438}
439
/*
 * This routine is like thread_should_abort() above. It checks to
 * see if the current thread is aborted. But unlike above, it also
 * checks to see if thread is safely aborted. If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies.
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	/* Unsafe abort (and not uninterruptible) => aborted. */
	if (!th->top_act ||
			((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
			 th->interrupt_level != THREAD_UNINT))
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		/* Re-check under the thread lock at splsched before clearing
		 * (the state may have changed since the unlocked test). */
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	/* NOTE(review): a consumed safe abort still returns FALSE here. */
	return FALSE;
}
469
470/*
471 *
472 */
473void
474task_act_iterate_wth_args(
475 task_t task,
476 void (*func_callback)(thread_act_t, void *),
477 void *func_arg)
478{
479 thread_act_t inc, ninc;
480
481 task_lock(task);
55e303ae
A
482 for (inc = (thread_act_t)queue_first(&task->threads);
483 !queue_end(&task->threads, (queue_entry_t)inc);
1c79356b 484 inc = ninc) {
55e303ae 485 ninc = (thread_act_t)queue_next(&inc->task_threads);
1c79356b
A
486 (void) (*func_callback)(inc, func_arg);
487 }
488 task_unlock(task);
489}
490
491void
492ipc_port_release(
493 ipc_port_t port)
494{
495 ipc_object_release(&(port)->ip_object);
496}
497
1c79356b
A
498boolean_t
499is_thread_active(
55e303ae 500 thread_t th)
1c79356b
A
501{
502 return(th->active);
503}
504
1c79356b
A
505kern_return_t
506get_thread_waitresult(
55e303ae 507 thread_t th)
1c79356b
A
508{
509 return(th->wait_result);
510}
511
9bccf70c
A
512void
513astbsd_on(void)
514{
515 boolean_t reenable;
1c79356b 516
9bccf70c
A
517 reenable = ml_set_interrupts_enabled(FALSE);
518 ast_on_fast(AST_BSD);
519 (void)ml_set_interrupts_enabled(reenable);
520}