/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */

#include <mach_kdb.h>
#include <mach_host.h>
#include <mach_prof.h>
#include <fast_tas.h>
#include <task_swapper.h>
#include <platforms.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach/mach_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/ledger.h>
#include <kern/host.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <kern/profile.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#if	MACH_KDB
#include <ddb/db_sym.h>
#endif	/* MACH_KDB */

#if	TASK_SWAPPER
#include <kern/task_swap.h>
#endif	/* TASK_SWAPPER */

#ifdef __ppc__
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#endif

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <vm/task_working_set.h>

task_t	kernel_task;
zone_t	task_zone;

/* Forwards */

void		task_hold_locked(
			task_t		task);
void		task_wait_locked(
			task_t		task);
void		task_release_locked(
			task_t		task);
void		task_collect_scan(void);
void		task_free(
			task_t		task);
void		task_synchronizer_destroy_all(
			task_t		task);

kern_return_t	task_set_ledger(
			task_t		task,
			ledger_t	wired,
			ledger_t	paged);

void
task_init(void)
{
	task_zone = zinit(
			sizeof(struct task),
			TASK_MAX * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	eml_init();

	/*
	 * Create the kernel task as the first task.
	 * Task_create_local must assign to kernel_task as a side effect,
	 * for other initialization. (:-()
	 */
	if (task_create_local(
			TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
		panic("task_init\n");
	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;

#if	MACH_ASSERT
	if (watchacts & WA_TASK)
		printf("task_init: kernel_task = %x map=%x\n",
		       kernel_task, kernel_map);
#endif	/* MACH_ASSERT */
}

#if	MACH_HOST

#if 0
static void
task_freeze(
	task_t	task)
{
	task_lock(task);
	/*
	 * If may_assign is false, task is already being assigned,
	 * wait for that to finish.
	 */
	while (task->may_assign == FALSE) {
		wait_result_t res;

		task->assign_active = TRUE;
		res = thread_sleep_mutex((event_t) &task->assign_active,
					 &task->lock, THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
	}
	task->may_assign = FALSE;
	task_unlock(task);
	return;
}
#else
#define task_freeze(task)	assert(task->processor_set == &default_pset)
#endif

#if 0
static void
task_unfreeze(
	task_t	task)
{
	task_lock(task);
	assert(task->may_assign == FALSE);
	task->may_assign = TRUE;
	if (task->assign_active == TRUE) {
		task->assign_active = FALSE;
		thread_wakeup((event_t)&task->assign_active);
	}
	task_unlock(task);
	return;
}
#else
#define task_unfreeze(task)	assert(task->processor_set == &default_pset)
#endif

#endif	/* MACH_HOST */
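
/*
 * Note on the macro versions above: with processor-set migration
 * compiled out (the "#if 0" blocks), a task can never leave
 * default_pset, so "freezing" its assignment has nothing to do.
 * The macros therefore reduce to an assertion of that invariant,
 * while call sites below keep the freeze/unfreeze brackets so the
 * real implementation can be re-enabled without touching callers.
 */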

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	task_t		parent_task,
	vm_offset_t	map_base,
	vm_size_t	map_size,
	task_t		*child_task)
{
	kern_return_t	result;
	task_t		new_task;
	vm_map_t	old_map;

	/*
	 * Create the task.
	 */
	result = task_create_local(parent_task, FALSE, TRUE, &new_task);
	if (result != KERN_SUCCESS)
		return (result);

	/*
	 * Task_create_local creates the task with a user-space map.
	 * We attempt to replace the map and free it afterwards; else
	 * task_deallocate will free it (can NOT set map to null before
	 * task_deallocate, this impersonates a norma placeholder task).
	 * _Mark the memory as pageable_ -- this is what we
	 * want for images (like servers) loaded into the kernel.
	 */
	if (map_size == 0) {
		vm_map_deallocate(new_task->map);
		new_task->map = kernel_map;
		*child_task = new_task;
	} else {
		old_map = new_task->map;
		if ((result = kmem_suballoc(kernel_map, &map_base,
					    map_size, TRUE, FALSE,
					    &new_task->map)) != KERN_SUCCESS) {
			/*
			 * New task created with ref count of 2 -- decrement by
			 * one to force task deletion.
			 */
			printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
			       kernel_map, map_base, map_size);
			--new_task->ref_count;
			task_deallocate(new_task);
			return (result);
		}
		vm_map_deallocate(old_map);
		*child_task = new_task;
	}
	return (KERN_SUCCESS);
}
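
/*
 * Sketch of a hypothetical caller (not from this file): a loader
 * placing a kernel-resident server would use kernel_task_create
 * roughly as follows --
 *
 *	task_t child;
 *
 *	if (kernel_task_create(kernel_task, base, size, &child)
 *	    == KERN_SUCCESS) {
 *		// with map_size == 0 the child shares kernel_map;
 *		// otherwise it gets a pageable submap of kernel_map
 *		// covering [map_base, map_base + map_size).
 *	}
 */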

kern_return_t
task_create(
	task_t			parent_task,
	ledger_port_array_t	ledger_ports,
	mach_msg_type_number_t	num_ledger_ports,
	boolean_t		inherit_memory,
	task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	return task_create_local(
			parent_task, inherit_memory, FALSE, child_task);
}

kern_return_t
host_security_create_task_token(
	host_security_t		host_security,
	task_t			parent_task,
	security_token_t	sec_token,
	host_priv_t		host_priv,
	ledger_port_array_t	ledger_ports,
	mach_msg_type_number_t	num_ledger_ports,
	boolean_t		inherit_memory,
	task_t			*child_task)	/* OUT */
{
	kern_return_t		result;

	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	result = task_create_local(
			parent_task, inherit_memory, FALSE, child_task);

	if (result != KERN_SUCCESS)
		return(result);

	result = host_security_set_task_token(host_security,
					      *child_task,
					      sec_token,
					      host_priv);
	return(result);
}

kern_return_t
task_create_local(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	kernel_loaded,
	task_t		*child_task)	/* OUT */
{
	task_t		new_task;
	processor_set_t	pset;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0),
					round_page_32(VM_MIN_ADDRESS),
					trunc_page_32(VM_MAX_ADDRESS), TRUE);

	mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
	queue_init(&new_task->thr_acts);
	new_task->suspend_count = 0;
	new_task->thr_act_count = 0;
	new_task->res_act_count = 0;
	new_task->active_act_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->kernel_loaded = kernel_loaded;
	new_task->user_data = 0;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->syscalls_unix = 0;
	new_task->csw = 0;
	new_task->taskFeatures[0] = 0;	/* Init task features */
	new_task->taskFeatures[1] = 0;	/* Init task features */
	new_task->dynamic_working_set = 0;

	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
				0, TWS_HASH_STYLE_DEFAULT);

#ifdef MACH_BSD
	new_task->bsd_info = 0;
#endif /* MACH_BSD */

#ifdef __ppc__
	if (per_proc_info[0].pf.Available & pf64Bit)
		new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#endif

#if	TASK_SWAPPER
	new_task->swap_state = TASK_SW_IN;
	new_task->swap_flags = 0;
	new_task->swap_ast_waiting = 0;
	new_task->swap_stamp = sched_tick;
	new_task->swap_rss = 0;
	new_task->swap_nswap = 0;
#endif	/* TASK_SWAPPER */

	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if	MACH_HOST
	new_task->may_assign = TRUE;
	new_task->assign_active = FALSE;
#endif	/* MACH_HOST */
	eml_task_reference(new_task, parent_task);

	ipc_task_init(new_task, parent_task);

	new_task->total_user_time.seconds = 0;
	new_task->total_user_time.microseconds = 0;
	new_task->total_system_time.seconds = 0;
	new_task->total_system_time.microseconds = 0;

	task_prof_init(new_task);

	if (parent_task != TASK_NULL) {
#if	MACH_HOST
		/*
		 * Freeze the parent, so that parent_task->processor_set
		 * cannot change.
		 */
		task_freeze(parent_task);
#endif	/* MACH_HOST */
		pset = parent_task->processor_set;
		if (!pset->active)
			pset = &default_pset;

		new_task->sec_token = parent_task->sec_token;

		shared_region_mapping_ref(parent_task->system_shared_region);
		new_task->system_shared_region = parent_task->system_shared_region;

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
	}
	else {
		pset = &default_pset;

		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	if (kernel_task == TASK_NULL) {
		new_task->priority = MINPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	pset_lock(pset);
	pset_add_task(pset, new_task);
	pset_unlock(pset);
#if	MACH_HOST
	if (parent_task != TASK_NULL)
		task_unfreeze(parent_task);
#endif	/* MACH_HOST */

#if	FAST_TAS
	if (inherit_memory) {
		new_task->fast_tas_base = parent_task->fast_tas_base;
		new_task->fast_tas_end = parent_task->fast_tas_end;
	} else {
		new_task->fast_tas_base = (vm_offset_t)0;
		new_task->fast_tas_end = (vm_offset_t)0;
	}
#endif	/* FAST_TAS */

	ipc_task_enable(new_task);

#if	TASK_SWAPPER
	task_swapout_eligible(new_task);
#endif	/* TASK_SWAPPER */

#if	MACH_ASSERT
	if (watchacts & WA_TASK)
		printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
		       parent_task, inherit_memory, new_task);
#endif	/* MACH_ASSERT */

	*child_task = new_task;
	return(KERN_SUCCESS);
}
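
/*
 * Reference-count note: a task returned by task_create_local is born
 * with ref_count == 2 -- one reference for "being alive", which
 * task_terminate_internal eventually drops, and one owned by the
 * caller, to be dropped with task_deallocate(*child_task).
 */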

/*
 * task_deallocate:
 *
 * Drop a reference on a task.  The task need not be locked;
 * this routine takes the task lock itself to adjust the count,
 * and destroys the task once the last reference is gone.
 */
void
task_deallocate(
	task_t		task)
{
	processor_set_t	pset;
	int		refs;

	if (task == TASK_NULL)
		return;

	task_lock(task);
	refs = --task->ref_count;
	task_unlock(task);

	if (refs > 0)
		return;

#if	TASK_SWAPPER
	/* task_terminate guarantees that this task is off the list */
	assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
#endif	/* TASK_SWAPPER */

	if (task->dynamic_working_set)
		tws_hash_destroy((tws_hash_t)task->dynamic_working_set);

	eml_task_deallocate(task);

	ipc_task_terminate(task);

#if	MACH_HOST
	task_freeze(task);
#endif

	pset = task->processor_set;
	pset_lock(pset);
	pset_remove_task(pset, task);
	pset_unlock(pset);
	pset_deallocate(pset);

#if	MACH_HOST
	task_unfreeze(task);
#endif

	if (task->kernel_loaded)
		vm_map_remove(kernel_map, task->map->min_offset,
			      task->map->max_offset, VM_MAP_NO_FLAGS);
	vm_map_deallocate(task->map);
	is_release(task->itk_space);
	task_prof_deallocate(task);
	zfree(task_zone, (vm_offset_t) task);
}

void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL) {
		task_lock(task);
		task->ref_count++;
		task_unlock(task);
	}
}

boolean_t
task_reference_try(
	task_t		task)
{
	if (task != TASK_NULL) {
		if (task_lock_try(task)) {
			task->ref_count++;
			task_unlock(task);
			return TRUE;
		}
	}
	return FALSE;
}
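
/*
 * task_reference_try is the non-blocking variant of task_reference:
 * it takes a reference only if the task lock can be acquired without
 * sleeping, which makes it safe in contexts (e.g. scans that already
 * hold other locks) where blocking on task->lock could invert lock
 * ordering.  A typical (hypothetical) caller:
 *
 *	if (task_reference_try(task)) {
 *		... examine task ...
 *		task_deallocate(task);
 *	}
 */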

/*
 * task_terminate:
 *
 * Terminate the specified task.  See comments on thread_terminate
 * (kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t
task_terminate(
	task_t		task)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (task->bsd_info)
		return(KERN_FAILURE);
	return (task_terminate_internal(task));
}

kern_return_t
task_terminate_internal(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;
	boolean_t	interrupt_save;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

#if	TASK_SWAPPER
	/*
	 * If task is not resident (swapped out, or being swapped
	 * out), we want to bring it back in (this can block).
	 * NOTE: The only way that this can happen in the current
	 * system is if the task is swapped while it has a thread
	 * in exit(), and the thread does not hit a clean point
	 * to swap itself before getting here.
	 * Terminating other tasks is another way to reach this code,
	 * but it is not yet fully supported.
	 * The task_swapin is unconditional.  It used to be done
	 * only if the task is not resident.  Swapping in a
	 * resident task will prevent it from being swapped out
	 * while it terminates.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	/*
	 * Get the task locked and make sure that we are not racing
	 * with someone else trying to terminate us.
	 */
	if (task == cur_task) {
		task_lock(task);
	} else if (task < cur_task) {
		task_lock(task);
		task_lock(cur_task);
	} else {
		task_lock(cur_task);
		task_lock(task);
	}
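
	/*
	 * Locking the two tasks in ascending address order (above) gives
	 * every caller the same global ordering on task locks, so two
	 * threads concurrently terminating each other's tasks cannot
	 * deadlock on the pair of locks.
	 */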

	if (!task->active || !cur_thr_act->active) {
		/*
		 * Task or current act is already being terminated.
		 * Just return an error.  If we are dying, this will
		 * just get us to our AST special handler and that
		 * will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (cur_task != task)
			task_unlock(cur_task);
		return(KERN_FAILURE);
	}
	if (cur_task != task)
		task_unlock(cur_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 * Indicate that we want all the threads to stop executing
	 * at user space by holding the task (we would have held
	 * each thread independently in thread_terminate_internal -
	 * but this way we may be more likely to already find it
	 * held there).  Mark the task inactive, and prevent
	 * further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 * Terminate each activation in the task.
	 *
	 * Each terminated activation will run its special handler
	 * when its current kernel context is unwound.  That will
	 * clean up most of the thread resources.  Then it will be
	 * handed over to the reaper, who will finally remove the
	 * thread from the task list and free the structures.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		thread_terminate_internal(thr_act);
	}

	/*
	 * Clean up any virtual machine state/resources associated
	 * with the current activation because it may hold wiring
	 * and other references on resources we will be trying to
	 * release below.
	 */
	if (cur_thr_act->task == task)
		act_virtual_machine_destroy(cur_thr_act);

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 */
	if (!task->kernel_loaded)
		ipc_space_destroy(task->itk_space);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explicitly here.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	shared_region_mapping_dealloc(task->system_shared_region);

	/*
	 * Flush working set here to avoid I/O in reaper thread
	 */
	if (task->dynamic_working_set)
		tws_hash_ws_flush((tws_hash_t)
				  task->dynamic_working_set);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);	// notify the perfmon facility
#endif

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return(KERN_SUCCESS);
}

/*
 * task_halt - Shut the current task down (except for the current thread) in
 *	preparation for dramatic changes to the task (probably exec).
 *	We hold the task, terminate all other threads in the task and
 *	wait for them to terminate, clean up the portspace, and when
 *	all done, let the current thread go.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_act_t	thr_act, cur_thr_act;
	task_t		cur_task;

	assert(task != kernel_task);

	cur_thr_act = current_act();
	cur_task = cur_thr_act->task;

	if (task != cur_task) {
		return(KERN_INVALID_ARGUMENT);
	}

#if	TASK_SWAPPER
	/*
	 * If task is not resident (swapped out, or being swapped
	 * out), we want to bring it back in and make it unswappable.
	 * This can block, so do it early.
	 */
	task_swapin(task, TRUE);	/* TRUE means make it unswappable */
#endif	/* TASK_SWAPPER */

	task_lock(task);

	if (!task->active || !cur_thr_act->active) {
		/*
		 * Task or current thread is already being terminated.
		 * Hurry up and return out of the current kernel context
		 * so that we run our AST special handler to terminate
		 * ourselves.
		 */
		task_unlock(task);
		return(KERN_FAILURE);
	}

	if (task->thr_act_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 * Terminate all the other activations in the task.
		 *
		 * Each terminated activation will run its special handler
		 * when its current kernel context is unwound.  That will
		 * clean up most of the thread resources.  Then it will be
		 * handed over to the reaper, who will finally remove the
		 * thread from the task list and free the structures.
		 */
		queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
			if (thr_act != cur_thr_act)
				thread_terminate_internal(thr_act);
		}
		task_release_locked(task);
	}

	/*
	 * If the current thread has any virtual machine state
	 * associated with it, we need to explicitly clean that
	 * up now (because we did not terminate the current act)
	 * before we try to clean up the task VM and port spaces.
	 */
	act_virtual_machine_destroy(cur_thr_act);

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the contents of the IPC space, leaving just
	 * a reference for it.
	 */
	if (!task->kernel_loaded)
		ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	(void) vm_map_remove(task->map,
			     task->map->min_offset,
			     task->map->max_offset, VM_MAP_NO_FLAGS);

	return KERN_SUCCESS;
}

/*
 * task_hold_locked:
 *
 * Suspend execution of the specified task.
 * This is a recursive-style suspension of the task, a count of
 * suspends is maintained.
 *
 * CONDITIONS: the task is locked and active.
 */
void
task_hold_locked(
	register task_t	task)
{
	register thread_act_t	thr_act;

	assert(task->active);

	if (task->suspend_count++ > 0)
		return;

	/*
	 * Iterate through all the thread_acts and hold them.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		act_lock_thread(thr_act);
		thread_hold(thr_act);
		act_unlock_thread(thr_act);
	}
}

/*
 * task_hold:
 *
 * Same as the internal routine above, except that it must lock
 * and verify that the task is active.  This differs from task_suspend
 * in that it places a kernel hold on the task rather than just a
 * user-level hold.  This keeps users from over resuming and setting
 * it running out from under the kernel.
 *
 * CONDITIONS: the caller holds a reference on the task
 */
kern_return_t
task_hold(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_hold_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}

/*
 * Routine:	task_wait_locked
 *	Wait for all threads in task to stop.
 *
 * Conditions:
 *	Called with task locked, active, and held.
 */
void
task_wait_locked(
	register task_t	task)
{
	register thread_act_t	thr_act, cur_thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	cur_thr_act = current_act();
	/*
	 * Iterate through all the threads and wait for them to
	 * stop.  Do not wait for the current thread if it is within
	 * the task.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		if (thr_act != cur_thr_act) {
			thread_shuttle_t thr_shuttle;

			thr_shuttle = act_lock_thread(thr_act);
			thread_wait(thr_shuttle);
			act_unlock_thread(thr_act);
		}
	}
}

/*
 * task_release_locked:
 *
 * Release a kernel hold on a task.
 *
 * CONDITIONS: the task is locked and active
 */
void
task_release_locked(
	register task_t	task)
{
	register thread_act_t	thr_act;

	assert(task->active);
	assert(task->suspend_count > 0);

	if (--task->suspend_count > 0)
		return;

	/*
	 * Iterate through all the thread_acts and release them.
	 */
	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
		act_lock_thread(thr_act);
		thread_release(thr_act);
		act_unlock_thread(thr_act);
	}
}

/*
 * task_release:
 *
 * Same as the internal routine above, except that it must lock
 * and verify that the task is active.
 *
 * CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(task_t task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	task_release_locked(task);
	task_unlock(task);

	return(KERN_SUCCESS);
}

kern_return_t
task_threads(
	task_t			task,
	thread_act_array_t	*thr_act_list,
	mach_msg_type_number_t	*count)
{
	unsigned int		actual;	/* this many thr_acts */
	thread_act_t		thr_act;
	thread_act_t		*thr_acts;
	int			i, j;

	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);
			if (size != 0)
				kfree(addr, size);
			return KERN_FAILURE;
		}

		actual = task->thr_act_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the task is locked & active */
	thr_acts = (thread_act_t *) addr;

	for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
	     i < actual;
	     i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
		act_lock(thr_act);
		if (thr_act->ref_count > 0) {
			act_locked_act_reference(thr_act);
			thr_acts[j++] = thr_act;
		}
		act_unlock(thr_act);
	}
	assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));

	actual = j;
	size_needed = actual * sizeof(mach_port_t);

	/* can unlock task now that we've got the thr_act refs */
	task_unlock(task);

	if (actual == 0) {
		/* no thr_acts, so return null pointer and deallocate memory */

		*thr_act_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				for (i = 0; i < actual; i++)
					act_deallocate(thr_acts[i]);
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy((char *) addr, (char *) newaddr, size_needed);
			kfree(addr, size);
			thr_acts = (thread_act_t *) newaddr;
		}

		*thr_act_list = thr_acts;
		*count = actual;

		/* do the conversion that Mig should handle */

		for (i = 0; i < actual; i++)
			((ipc_port_t *) thr_acts)[i] =
				convert_act_to_port(thr_acts[i]);
	}

	return KERN_SUCCESS;
}
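
/*
 * The for(;;) loop above is the usual Mach "allocate, relock,
 * recheck" pattern: thr_act_count can change while the task is
 * unlocked for kalloc(), so the buffer size is revalidated against
 * the count on every iteration until it is large enough.
 */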

/*
 * Routine:	task_suspend
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return (KERN_FAILURE);
	}
	if ((task->user_stop_count)++ > 0) {
		/*
		 * If the stop count was positive, the task is
		 * already stopped and we can exit.
		 */
		task_unlock(task);
		return (KERN_SUCCESS);
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);
	task_unlock(task);
	return (KERN_SUCCESS);
}
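
/*
 * The stop count makes user-level suspensions strictly nest;
 * for example --
 *
 *	task_suspend(task);	// user_stop_count 0 -> 1, threads held
 *	task_suspend(task);	// user_stop_count 1 -> 2, already stopped
 *	task_resume(task);	// user_stop_count 2 -> 1, still stopped
 *	task_resume(task);	// user_stop_count 1 -> 0, threads released
 */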

/*
 * Routine:	task_resume
 *	Decrement the user-level suspension count, releasing the
 *	kernel hold on the task when the count drops to zero.
 *
 * Conditions:
 *	The caller holds a reference to the task
 */
kern_return_t
task_resume(register task_t task)
{
	register boolean_t	release;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	release = FALSE;
	task_lock(task);
	if (!task->active) {
		task_unlock(task);
		return(KERN_FAILURE);
	}
	if (task->user_stop_count > 0) {
		if (--(task->user_stop_count) == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);
		return(KERN_FAILURE);
	}

	/*
	 * Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);
	return(KERN_SUCCESS);
}

kern_return_t
host_security_set_task_token(
	host_security_t		host_security,
	task_t			task,
	security_token_t	sec_token,
	host_priv_t		host_priv)
{
	kern_return_t		kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = task_set_special_port(task,
				TASK_HOST_PORT,
				ipc_port_make_send(realhost.host_priv_self));
	} else {
		kr = task_set_special_port(task,
				TASK_HOST_PORT,
				ipc_port_make_send(realhost.host_self));
	}
	return(kr);
}

/*
 * Utility routine to set a ledger
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	task_info_t	task_info_in,		/* pointer to IN array */
	mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	vm_map_t	map;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO:
	{
		register task_basic_info_t	basic_info;

		if (*task_info_count < TASK_BASIC_INFO_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		basic_info = (task_basic_info_t) task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;

		basic_info->virtual_size = map->size;
		basic_info->resident_size = pmap_resident_count(map->pmap)
						* PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
					POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;
		basic_info->user_time.seconds
				= task->total_user_time.seconds;
		basic_info->user_time.microseconds
				= task->total_user_time.microseconds;
		basic_info->system_time.seconds
				= task->total_system_time.seconds;
		basic_info->system_time.microseconds
				= task->total_system_time.microseconds;
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t			thread;
		register thread_act_t			thr_act;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
			return (KERN_INVALID_ARGUMENT);
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);
		queue_iterate(&task->thr_acts, thr_act,
			      thread_act_t, thr_acts)
		{
			time_value_t	user_time, system_time;
			spl_t		s;

			thread = act_lock_thread(thr_act);

			/* JMM - add logic to skip threads that have migrated
			 * into this task?
			 */

			assert(thread);		/* Must have thread */
			s = splsched();
			thread_lock(thread);

			thread_read_times(thread, &user_time, &system_time);

			thread_unlock(thread);
			splx(s);
			act_unlock_thread(thr_act);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}
		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_SCHED_FIFO_INFO:
	{
		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		return(KERN_INVALID_POLICY);
	}

	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		if (task != kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = tick / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return(KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		if (task == kernel_task) {
			task_unlock(task);
			return(KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return(KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
			return(KERN_INVALID_ARGUMENT);
		}

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);
		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;
		events_info->csw = task->csw;
		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return(KERN_SUCCESS);
}
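
/*
 * For reference, a user-space caller would typically reach the
 * TASK_BASIC_INFO case above through the MIG-generated task_info
 * stub, e.g. (hypothetical user code):
 *
 *	task_basic_info_data_t info;
 *	mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
 *
 *	if (task_info(mach_task_self(), TASK_BASIC_INFO,
 *		      (task_info_t)&info, &count) == KERN_SUCCESS) {
 *		// info.resident_size, info.user_time, ... are valid
 *	}
 */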

/*
 * task_assign:
 *
 * Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	task_t		task,
	processor_set_t	new_pset,
	boolean_t	assign_threads)
{
#ifdef	lint
	task++; new_pset++; assign_threads++;
#endif	/* lint */
	return(KERN_FAILURE);
}
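
/*
 * The "#ifdef lint" increments above exist only to keep lint from
 * warning about unused arguments; processor-set reassignment itself
 * is unsupported, so the call unconditionally fails.
 */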

/*
 * task_assign_default:
 *
 * Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t		task,
	boolean_t	assign_threads)
{
	return (task_assign(task, &default_pset, assign_threads));
}

/*
 * task_get_assignment
 *
 * Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t		task,
	processor_set_t	*pset)
{
	if (!task->active)
		return(KERN_FAILURE);

	*pset = task->processor_set;
	pset_reference(*pset);
	return(KERN_SUCCESS);
}


/*
 * task_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task.  Policy must be a policy which is enabled for the
 * processor set.  Change contained threads if requested.
 */
kern_return_t
task_policy(
	task_t			task,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	count,
	boolean_t		set_limit,
	boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 * task_set_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task.  Policy can be any policy implemented by the
 * processor set, whether enabled or not.  Change contained threads
 * if requested.
 */
kern_return_t
task_set_policy(
	task_t			task,
	processor_set_t		pset,
	policy_t		policy_id,
	policy_base_t		base,
	mach_msg_type_number_t	base_count,
	policy_limit_t		limit,
	mach_msg_type_number_t	limit_count,
	boolean_t		change)
{
	return(KERN_FAILURE);
}

/*
 * task_collect_scan:
 *
 * Attempt to free resources owned by tasks.
 */
void
task_collect_scan(void)
{
	register task_t		task, prev_task;
	processor_set_t		pset = &default_pset;

	pset_lock(pset);
	pset->ref_count++;
	task = (task_t) queue_first(&pset->tasks);
	while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
		task_lock(task);
		if (task->ref_count > 0) {

			task_reference_locked(task);
			task_unlock(task);

#if	MACH_HOST
			/*
			 * While we still have the pset locked, freeze the
			 * task in this pset.  That way, when we get back
			 * from collecting it, we can dereference the
			 * pset_tasks chain for the task and be assured
			 * that we are still in this chain.
			 */
			task_freeze(task);
#endif

			pset_unlock(pset);

			pmap_collect(task->map->pmap);

			pset_lock(pset);
			prev_task = task;
			task = (task_t) queue_next(&task->pset_tasks);

#if	MACH_HOST
			task_unfreeze(prev_task);
#endif

			task_deallocate(prev_task);
		} else {
			task_unlock(task);
			task = (task_t) queue_next(&task->pset_tasks);
		}
	}

	pset_unlock(pset);

	pset_deallocate(pset);
}

/* Also disabled in vm/vm_pageout.c */
boolean_t task_collect_allowed = FALSE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;	/* in ticks */

/*
 * consider_task_collect:
 *
 * Called by the pageout daemon when the system needs more free pages.
 */
void
consider_task_collect(void)
{
	/*
	 * By default, don't attempt task collection more frequently
	 * than once per second.
	 */

	if (task_collect_max_rate == 0)
		task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;

	if (task_collect_allowed &&
	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
		task_collect_last_tick = sched_tick;
		task_collect_scan();
	}
}
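
/*
 * Rate-limit arithmetic: sched_tick advances (1 << SCHED_TICK_SHIFT)
 * times per second (8/sec with the usual SCHED_TICK_SHIFT of 3 -- an
 * assumption about the build configuration), so the "+ 1" default
 * above keeps collections slightly more than one second apart,
 * matching the comment in consider_task_collect.
 */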

kern_return_t
task_set_ras_pc(
	task_t		task,
	vm_offset_t	pc,
	vm_offset_t	endpc)
{
#if	FAST_TAS
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;

#else	/* FAST_TAS */
#ifdef	lint
	task++;
	pc++;
	endpc++;
#endif	/* lint */

	return KERN_FAILURE;

#endif	/* FAST_TAS */
}
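
/*
 * Background: FAST_TAS implements "restartable atomic sequences" for
 * processors without a cheap atomic test-and-set.  The range
 * [pc, endpc) recorded here is made to appear atomic by restarting
 * any thread preempted inside it at fast_tas_base; this routine only
 * records the range -- the restart logic lives in the
 * machine-dependent layer.
 */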

void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 * Destroy owned semaphores
	 */

	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 * Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}
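
/*
 * Each semaphore_destroy/lock_set_destroy call above unlinks the
 * object from the task's ownership list, so the queue_empty loops
 * make progress and terminate once everything owned is destroyed.
 */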

/*
 * task_set_port_space:
 *
 * Set port name space of task to specified size.
 */
kern_return_t
task_set_port_space(
	task_t		task,
	int		table_entries)
{
	kern_return_t	kr;

	is_write_lock(task->itk_space);
	kr = ipc_entry_grow_table(task->itk_space, table_entries);
	if (kr == KERN_SUCCESS)
		is_write_unlock(task->itk_space);
	return kr;
}
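
/*
 * Note the asymmetric unlocking above: there is no is_write_unlock on
 * the failure path because ipc_entry_grow_table is expected to return
 * with the space unlocked when it fails (this matches its contract in
 * ipc/ipc_entry.c; stated here as an assumption, not verified in this
 * file).
 */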

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
	if (t == kernel_task)
		return(TRUE);
	else
		return(t->kernel_loaded);
}

#undef current_task
task_t current_task(void)
{
	return (current_task_fast());
}