osfmk/kern/bsd_kern.c (xnu-517.9.5)
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <kern/queue.h>
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/thread_act.h>
#include <kern/task.h>
#include <kern/spl.h>
#include <kern/lock.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_object.h>

#undef thread_should_halt
#undef ipc_port_release

/* BSD KERN COMPONENT INTERFACE */

task_t	bsd_init_task = TASK_NULL;
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
vm_offset_t get_map_min(vm_map_t);
vm_offset_t get_map_max(vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void (*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
kern_return_t get_signalact(task_t, thread_act_t *, int);
void astbsd_on(void);

/*
 * Return the BSD-private data of a task (its proc structure).
 */
void *get_bsdtask_info(task_t t)
{
	return(t->bsd_info);
}

/*
 * Set the BSD-private data of a task.
 */
void set_bsdtask_info(task_t t, void *v)
{
	t->bsd_info = v;
}

/*
 * Return the BSD-private data of an activation (its uthread structure).
 */
void *get_bsdthread_info(thread_act_t th)
{
	return(th->uthread);
}

/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_act_t get_firstthread(task_t task)
{
	thread_act_t	thr_act;

	thr_act = (thread_act_t)queue_first(&task->threads);
	if (queue_end(&task->threads, (queue_entry_t)thr_act))
		thr_act = THR_ACT_NULL;
	if (!task->active)
		return(THR_ACT_NULL);
	return(thr_act);
}

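/*
 * Find an activation in the task that can take a signal: it must be
 * active and not in the middle of an unsafe abort.  The match (or
 * THR_ACT_NULL) is returned in *thact, and the BSD AST is posted on
 * it when setast is non-zero.
 */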
kern_return_t get_signalact(task_t task, thread_act_t *thact, int setast)
{
	thread_act_t inc;
	thread_act_t ninc;
	thread_act_t thr_act;
	thread_t th;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		ninc = (thread_act_t)queue_next(&inc->task_threads);
	}
	if (thact)
		*thact = thr_act;
	if (thr_act) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (thr_act)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}


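/*
 * Like get_signalact(), but checks whether one specific activation
 * (thact) is in a state to take a signal, optionally posting the
 * BSD AST on it.
 */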
kern_return_t check_actforsig(task_t task, thread_act_t thact, int setast)
{
	thread_act_t inc;
	thread_act_t ninc;
	thread_act_t thr_act;
	thread_t th;
	int found = 0;

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	thr_act = THR_ACT_NULL;
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		if (inc != thact) {
			ninc = (thread_act_t)queue_next(&inc->task_threads);
			continue;
		}
		th = act_lock_thread(inc);
		if ((inc->active) &&
		    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
			found = 1;
			thr_act = inc;
			break;
		}
		act_unlock_thread(inc);
		/* the target activation cannot take a signal; stop looking */
		break;
	}
	if (found) {
		if (setast)
			act_set_astbsd(thr_act);

		act_unlock_thread(thr_act);
	}
	task_unlock(task);

	if (found)
		return(KERN_SUCCESS);
	else
		return(KERN_FAILURE);
}

/*
 * Return the address map of a task.
 */
vm_map_t get_task_map(task_t t)
{
	return(t->map);
}

/*
 * Return the IPC space of a task.
 */
ipc_space_t get_task_ipcspace(task_t t)
{
	return(t->itk_space);
}

int get_task_numacts(task_t t)
{
	return(t->thread_count);
}

/* Does the current task need the 64-bit register set for its signal handlers? */
int is_64signalregset(void)
{
	task_t t = current_task();

	if (t->taskFeatures[0] & tf64BitData)
		return(1);
	else
		return(0);
}

/*
 * Install a new address map on the current task (and on the current
 * activation).  The old map's reference is returned; the caller is
 * responsible for dropping it.
 */
vm_map_t
swap_task_map(task_t task, vm_map_t map)
{
	thread_act_t act = current_act();
	vm_map_t old_map;

	if (task != act->task)
		panic("swap_task_map");

	task_lock(task);
	old_map = task->map;
	act->map = task->map = map;
	task_unlock(task);
	return old_map;
}

vm_map_t
swap_act_map(thread_act_t thr_act, vm_map_t map)
{
	panic("swap_act_map");
	/*NOTREACHED*/
}

/*
 * Return the physical map of a task.
 */
pmap_t get_task_pmap(task_t t)
{
	return(t->map->pmap);
}

/*
 * Return the physical map backing an address map.
 */
pmap_t get_map_pmap(vm_map_t map)
{
	return(map->pmap);
}

/*
 * Return the task an activation belongs to.
 */
task_t get_threadtask(thread_act_t th)
{
	return(th->task);
}


/*
 * Return TRUE if the thread is idling.
 */
boolean_t is_thread_idle(thread_t th)
{
	return((th->state & TH_IDLE) == TH_IDLE);
}

/*
 * Return TRUE if the thread is running or on a run queue.
 */
boolean_t is_thread_running(thread_t th)
{
	return((th->state & TH_RUN) == TH_RUN);
}


/*
 * The shuttle and the activation are the same object here, so both
 * of these conversions are the identity.
 */
thread_t
getshuttle_thread(
	thread_t	th)
{
	return(th);
}

thread_t
getact_thread(
	thread_t	th)
{
	return(th);
}

/*
 * Return the lowest valid address in an address map.
 */
vm_offset_t
get_map_min(
	vm_map_t	map)
{
	return(vm_map_min(map));
}

/*
 * Return the highest valid address in an address map.
 */
vm_offset_t
get_map_max(
	vm_map_t	map)
{
	return(vm_map_max(map));
}

/*
 * Return the virtual size of an address map, in bytes.
 */
vm_size_t
get_vmmap_size(
	vm_map_t	map)
{
	return(map->size);
}

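/*
 * Count the VM map entries that start within [start, end) in a submap,
 * recursing into nested submaps.  The map is locked for the walk unless
 * we are running under the kernel debugger (kdp).
 */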
int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

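/*
 * Count every entry in an address map, expanding each submap entry
 * into the number of entries the submap contains.
 */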
int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (entry->is_sub_map) {
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}

/*
 * Return the user stop count of a task.
 */
int
get_task_userstop(
	task_t	task)
{
	return(task->user_stop_count);
}

/*
 * Return the user stop count of an activation.
 */
int
get_thread_userstop(
	thread_act_t	th)
{
	return(th->user_stop_count);
}

/*
 * Return TRUE if the thread has no activation left, or has an abort
 * pending that is not a safe abort.
 */
boolean_t
thread_should_abort(
	thread_t	th)
{
	return(!th->top_act ||
	       (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}

/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if the thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts should only
 * have a single effect, and a poll of the abort status qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	if (!th->top_act ||
	    ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
	     th->interrupt_level != THREAD_UNINT))
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
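
/*
 * Note that TH_ABORT_SAFELY is re-tested under the thread lock at
 * splsched() before it is cleared, so an abort posted between the
 * unlocked test and the clear cannot be lost.
 */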

/*
 * Call func_callback on every activation in the task, passing it
 * func_arg.  The task remains locked across the callbacks, and the
 * next activation is captured before each call.
 */
void
task_act_iterate_wth_args(
	task_t	task,
	void	(*func_callback)(thread_act_t, void *),
	void	*func_arg)
{
	thread_act_t inc, ninc;

	task_lock(task);
	for (inc = (thread_act_t)queue_first(&task->threads);
	     !queue_end(&task->threads, (queue_entry_t)inc);
	     inc = ninc) {
		ninc = (thread_act_t)queue_next(&inc->task_threads);
		(void) (*func_callback)(inc, func_arg);
	}
	task_unlock(task);
}
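
/*
 * Illustrative use (hypothetical caller, not part of this file):
 * count the activations in a task through the iterator.
 *
 *	static void
 *	count_act(thread_act_t act, void *arg)
 *	{
 *		(*(int *)arg)++;
 *	}
 *
 *	int nacts = 0;
 *	task_act_iterate_wth_args(task, count_act, &nacts);
 */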
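
/*
 * Release one reference on an IPC port.  The macro of the same name
 * is #undef'd at the top of this file so that this out-of-line
 * version is the one callers here get.
 */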
void
ipc_port_release(
	ipc_port_t port)
{
	ipc_object_release(&(port)->ip_object);
}

/*
 * Return TRUE if the thread has not yet terminated.
 */
boolean_t
is_thread_active(
	thread_t	th)
{
	return(th->active);
}

/*
 * Return the outcome of the thread's last wait.
 */
kern_return_t
get_thread_waitresult(
	thread_t	th)
{
	return(th->wait_result);
}

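/*
 * Request the BSD AST on the current processor.  Interrupts are
 * disabled around the update so the request cannot race with an
 * interrupt-level AST change.
 */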
void
astbsd_on(void)
{
	boolean_t reenable;

	reenable = ml_set_interrupts_enabled(FALSE);
	ast_on_fast(AST_BSD);
	(void)ml_set_interrupts_enabled(reenable);
}