]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | #include <mach/mach_types.h> | |
29 | ||
30 | #include <kern/kern_types.h> | |
31 | #include <kern/processor.h> | |
32 | #include <kern/thread.h> | |
33 | #include <kern/task.h> | |
34 | #include <kern/spl.h> | |
35 | #include <kern/lock.h> | |
36 | #include <kern/ast.h> | |
37 | #include <ipc/ipc_port.h> | |
38 | #include <ipc/ipc_object.h> | |
39 | #include <vm/vm_map.h> | |
40 | #include <vm/pmap.h> | |
41 | #include <vm/vm_protos.h> /* last */ | |
42 | ||
43 | #undef thread_should_halt | |
44 | #undef ipc_port_release | |
45 | ||
/* BSD KERN COMPONENT INTERFACE */

/* Task representing the BSD init process; set up during bootstrap. */
task_t	bsd_init_task = TASK_NULL;
/* Buffer for diagnostic data if init-task setup fails. */
char	init_task_failure_data[1024];
extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */

/* Forward declarations for routines defined in this file. */
thread_t get_firstthread(task_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_t);
boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
void ipc_port_release(ipc_port_t);
boolean_t is_thread_active(thread_t);
kern_return_t get_signalact(task_t , thread_t *, int);
int get_vmsubmap_entries(vm_map_t, vm_object_offset_t, vm_object_offset_t);
62 | ||
63 | /* | |
64 | * | |
65 | */ | |
66 | void *get_bsdtask_info(task_t t) | |
67 | { | |
68 | return(t->bsd_info); | |
69 | } | |
70 | ||
71 | /* | |
72 | * | |
73 | */ | |
74 | void set_bsdtask_info(task_t t,void * v) | |
75 | { | |
76 | t->bsd_info=v; | |
77 | } | |
78 | ||
79 | /* | |
80 | * | |
81 | */ | |
82 | void *get_bsdthread_info(thread_t th) | |
83 | { | |
84 | return(th->uthread); | |
85 | } | |
86 | ||
/*
 * XXX: wait for BSD to fix signal code
 * Until then, we cannot block here.  We know the task
 * can't go away, so we make sure it is still active after
 * retrieving the first thread for extra safety.
 */
thread_t get_firstthread(task_t task)
{
	/* Head of the task's thread list; no task lock taken (see XXX above). */
	thread_t	thread = (thread_t)queue_first(&task->threads);

	/* Empty list: queue_first() returns the queue head itself. */
	if (queue_end(&task->threads, (queue_entry_t)thread))
		thread = THREAD_NULL;

	/* Check activity AFTER fetching the thread, per the comment above. */
	if (!task->active)
		return (THREAD_NULL);

	return (thread);
}
105 | ||
/*
 * Find a thread in the task eligible to take a signal: one that is
 * active and not in unconditional abort (TH_ABORT set without
 * TH_ABORT_SAFELY).  Optionally posts an AST_BSD to the chosen
 * thread.  Returns KERN_SUCCESS with *result_out set (when non-NULL),
 * or KERN_FAILURE if the task is inactive or no thread qualifies.
 */
kern_return_t
get_signalact(
	task_t		task,
	thread_t	*result_out,
	int			setast)
{
	kern_return_t	result = KERN_SUCCESS;
	thread_t		inc, thread = THREAD_NULL;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/* Walk the thread list; on a match we break with inc's mutex HELD. */
	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		thread_mtx_lock(inc);
		if (inc->active &&
				(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
			thread = inc;
			break;
		}
		thread_mtx_unlock(inc);

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	/* Report the chosen thread (THREAD_NULL if none) to the caller. */
	if (result_out)
		*result_out = thread;

	if (thread) {
		if (setast)
			act_set_astbsd(thread);

		/* Release the mutex acquired in the loop above. */
		thread_mtx_unlock(thread);
	}
	else
		result = KERN_FAILURE;

	task_unlock(task);

	return (result);
}
152 | ||
153 | ||
/*
 * Check that the given thread belongs to the task and is eligible
 * to take a signal (active, not in unconditional abort).  Optionally
 * posts an AST_BSD to it.  Returns KERN_SUCCESS when eligible,
 * KERN_FAILURE otherwise (including when the task is inactive or the
 * thread is not on the task's list).
 */
kern_return_t
check_actforsig(
	task_t			task,
	thread_t		thread,
	int				setast)
{
	kern_return_t	result = KERN_FAILURE;
	thread_t		inc;

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/* Walk the task's thread list looking for the given thread. */
	for (inc = (thread_t)queue_first(&task->threads);
			!queue_end(&task->threads, (queue_entry_t)inc); ) {
		if (inc == thread) {
			thread_mtx_lock(inc);

			/*
			 * On success we break with the thread mutex still HELD;
			 * it is released after the optional AST below.
			 */
			if (inc->active &&
					(inc->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT) {
				result = KERN_SUCCESS;
				break;
			}

			thread_mtx_unlock(inc);
			break;
		}

		inc = (thread_t)queue_next(&inc->task_threads);
	}

	if (result == KERN_SUCCESS) {
		if (setast)
			act_set_astbsd(thread);

		/* Release the mutex taken in the loop above. */
		thread_mtx_unlock(thread);
	}

	task_unlock(task);

	return (result);
}
200 | ||
201 | /* | |
202 | * This is only safe to call from a thread executing in | |
203 | * in the task's context or if the task is locked Otherwise, | |
204 | * the map could be switched for the task (and freed) before | |
205 | * we to return it here. | |
206 | */ | |
207 | vm_map_t get_task_map(task_t t) | |
208 | { | |
209 | return(t->map); | |
210 | } | |
211 | ||
212 | vm_map_t get_task_map_reference(task_t t) | |
213 | { | |
214 | vm_map_t m; | |
215 | ||
216 | if (t == NULL) | |
217 | return VM_MAP_NULL; | |
218 | ||
219 | task_lock(t); | |
220 | if (!t->active) { | |
221 | task_unlock(t); | |
222 | return VM_MAP_NULL; | |
223 | } | |
224 | m = t->map; | |
225 | vm_map_reference_swap(m); | |
226 | task_unlock(t); | |
227 | return m; | |
228 | } | |
229 | ||
230 | /* | |
231 | * | |
232 | */ | |
233 | ipc_space_t get_task_ipcspace(task_t t) | |
234 | { | |
235 | return(t->itk_space); | |
236 | } | |
237 | ||
238 | int get_task_numacts(task_t t) | |
239 | { | |
240 | return(t->thread_count); | |
241 | } | |
242 | ||
243 | /* does this machine need 64bit register set for signal handler */ | |
244 | int is_64signalregset(void) | |
245 | { | |
246 | task_t t = current_task(); | |
247 | if(t->taskFeatures[0] & tf64BitData) | |
248 | return(1); | |
249 | else | |
250 | return(0); | |
251 | } | |
252 | ||
253 | /* | |
254 | * The old map reference is returned. | |
255 | */ | |
256 | vm_map_t | |
257 | swap_task_map(task_t task,vm_map_t map) | |
258 | { | |
259 | thread_t thread = current_thread(); | |
260 | vm_map_t old_map; | |
261 | ||
262 | if (task != thread->task) | |
263 | panic("swap_task_map"); | |
264 | ||
265 | task_lock(task); | |
266 | old_map = task->map; | |
267 | thread->map = task->map = map; | |
268 | task_unlock(task); | |
269 | return old_map; | |
270 | } | |
271 | ||
272 | /* | |
273 | * | |
274 | */ | |
275 | pmap_t get_task_pmap(task_t t) | |
276 | { | |
277 | return(t->map->pmap); | |
278 | } | |
279 | ||
280 | /* | |
281 | * | |
282 | */ | |
283 | pmap_t get_map_pmap(vm_map_t map) | |
284 | { | |
285 | return(map->pmap); | |
286 | } | |
287 | /* | |
288 | * | |
289 | */ | |
290 | task_t get_threadtask(thread_t th) | |
291 | { | |
292 | return(th->task); | |
293 | } | |
294 | ||
295 | ||
296 | /* | |
297 | * | |
298 | */ | |
299 | boolean_t is_thread_idle(thread_t th) | |
300 | { | |
301 | return((th->state & TH_IDLE) == TH_IDLE); | |
302 | } | |
303 | ||
304 | /* | |
305 | * | |
306 | */ | |
307 | boolean_t is_thread_running(thread_t th) | |
308 | { | |
309 | return((th->state & TH_RUN) == TH_RUN); | |
310 | } | |
311 | ||
312 | /* | |
313 | * | |
314 | */ | |
315 | thread_t | |
316 | getshuttle_thread( | |
317 | thread_t th) | |
318 | { | |
319 | return(th); | |
320 | } | |
321 | ||
322 | /* | |
323 | * | |
324 | */ | |
325 | thread_t | |
326 | getact_thread( | |
327 | thread_t th) | |
328 | { | |
329 | return(th); | |
330 | } | |
331 | ||
332 | /* | |
333 | * | |
334 | */ | |
335 | vm_map_offset_t | |
336 | get_map_min( | |
337 | vm_map_t map) | |
338 | { | |
339 | return(vm_map_min(map)); | |
340 | } | |
341 | ||
342 | /* | |
343 | * | |
344 | */ | |
345 | vm_map_offset_t | |
346 | get_map_max( | |
347 | vm_map_t map) | |
348 | { | |
349 | return(vm_map_max(map)); | |
350 | } | |
351 | vm_map_size_t | |
352 | get_vmmap_size( | |
353 | vm_map_t map) | |
354 | { | |
355 | return(map->size); | |
356 | } | |
357 | ||
/*
 * Count the map entries whose start lies in [start, end), descending
 * recursively into submaps: a submap entry contributes the count of
 * its nested entries over the corresponding offset window rather
 * than 1.  The map is locked for the walk unless we are running in
 * the kernel debugger (kdp).
 */
int
get_vmsubmap_entries(
	vm_map_t	map,
	vm_object_offset_t	start,
	vm_object_offset_t	end)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);
	/* Advance to the first entry whose start is at or beyond `start'. */
	while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
		entry = entry->vme_next;
	}

	while((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		if(entry->is_sub_map) {
			/* Recurse over the submap's matching offset window. */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
390 | ||
/*
 * Count all entries in the map, descending recursively into submaps
 * (each submap entry contributes the count of its nested entries
 * rather than 1).  The map is locked for the walk unless we are
 * running in the kernel debugger (kdp).
 */
int
get_vmmap_entries(
	vm_map_t	map)
{
	int	total_entries = 0;
	vm_map_entry_t	entry;

	if (not_in_kdp)
		vm_map_lock(map);
	entry = vm_map_first_entry(map);

	while(entry != vm_map_to_entry(map)) {
		if(entry->is_sub_map) {
			/* Recurse over the submap's matching offset window. */
			total_entries +=
				get_vmsubmap_entries(entry->object.sub_map,
					entry->offset,
					entry->offset +
					(entry->vme_end - entry->vme_start));
		} else {
			total_entries += 1;
		}
		entry = entry->vme_next;
	}
	if (not_in_kdp)
		vm_map_unlock(map);
	return(total_entries);
}
418 | ||
419 | /* | |
420 | * | |
421 | */ | |
422 | /* | |
423 | * | |
424 | */ | |
425 | int | |
426 | get_task_userstop( | |
427 | task_t task) | |
428 | { | |
429 | return(task->user_stop_count); | |
430 | } | |
431 | ||
432 | /* | |
433 | * | |
434 | */ | |
435 | int | |
436 | get_thread_userstop( | |
437 | thread_t th) | |
438 | { | |
439 | return(th->user_stop_count); | |
440 | } | |
441 | ||
442 | /* | |
443 | * | |
444 | */ | |
445 | boolean_t | |
446 | thread_should_abort( | |
447 | thread_t th) | |
448 | { | |
449 | return ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT); | |
450 | } | |
451 | ||
/*
 * This routine is like thread_should_abort() above.  It checks to
 * see if the current thread is aborted.  But unlike above, it also
 * checks to see if thread is safely aborted.  If so, it returns
 * that fact, and clears the condition (safe aborts only should
 * have a single effect, and a poll of the abort status
 * qualifies).
 */
boolean_t
current_thread_aborted (
		void)
{
	thread_t th = current_thread();
	spl_t s;

	/*
	 * Unconditional abort (TH_ABORT without TH_ABORT_SAFELY) is
	 * reported only when the thread is interruptible.
	 */
	if ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
			(th->options & TH_OPT_INTMASK) != THREAD_UNINT)
		return (TRUE);
	if (th->state & TH_ABORT_SAFELY) {
		/*
		 * Clear a safe abort under the thread lock at splsched;
		 * re-check the flag since it may have changed before the
		 * lock was acquired.
		 */
		s = splsched();
		thread_lock(th);
		if (th->state & TH_ABORT_SAFELY)
			th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
		thread_unlock(th);
		splx(s);
	}
	return FALSE;
}
480 | ||
481 | /* | |
482 | * | |
483 | */ | |
484 | void | |
485 | task_act_iterate_wth_args( | |
486 | task_t task, | |
487 | void (*func_callback)(thread_t, void *), | |
488 | void *func_arg) | |
489 | { | |
490 | thread_t inc; | |
491 | ||
492 | task_lock(task); | |
493 | ||
494 | for (inc = (thread_t)queue_first(&task->threads); | |
495 | !queue_end(&task->threads, (queue_entry_t)inc); ) { | |
496 | (void) (*func_callback)(inc, func_arg); | |
497 | inc = (thread_t)queue_next(&inc->task_threads); | |
498 | } | |
499 | ||
500 | task_unlock(task); | |
501 | } | |
502 | ||
503 | void | |
504 | ipc_port_release( | |
505 | ipc_port_t port) | |
506 | { | |
507 | ipc_object_release(&(port)->ip_object); | |
508 | } | |
509 | ||
510 | boolean_t | |
511 | is_thread_active( | |
512 | thread_t th) | |
513 | { | |
514 | return(th->active); | |
515 | } | |
516 | ||
517 | void | |
518 | astbsd_on(void) | |
519 | { | |
520 | boolean_t reenable; | |
521 | ||
522 | reenable = ml_set_interrupts_enabled(FALSE); | |
523 | ast_on_fast(AST_BSD); | |
524 | (void)ml_set_interrupts_enabled(reenable); | |
525 | } |