]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
2d21ac55 | 2 | * Copyright (c) 2000-2007 Apple Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
1c79356b | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
8f6c56a5 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
8f6c56a5 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_FREE_COPYRIGHT@ | |
30 | */ | |
31 | /* | |
32 | * Mach Operating System | |
33 | * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
35 | * | |
36 | * Permission to use, copy, modify and distribute this software and its | |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
41 | * | |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
45 | * | |
46 | * Carnegie Mellon requests users of this software to return to | |
47 | * | |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
52 | * | |
53 | * any improvements or extensions that they make and grant Carnegie Mellon | |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | * File: kern/task.c | |
58 | * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub, | |
59 | * David Black | |
60 | * | |
61 | * Task management primitives implementation. | |
62 | */ | |
63 | /* | |
64 | * Copyright (c) 1993 The University of Utah and | |
65 | * the Computer Systems Laboratory (CSL). All rights reserved. | |
66 | * | |
67 | * Permission to use, copy, modify and distribute this software and its | |
68 | * documentation is hereby granted, provided that both the copyright | |
69 | * notice and this permission notice appear in all copies of the | |
70 | * software, derivative works or modified versions, and any portions | |
71 | * thereof, and that both notices appear in supporting documentation. | |
72 | * | |
73 | * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS | |
74 | * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF | |
75 | * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
76 | * | |
77 | * CSL requests users of this software to return to csl-dist@cs.utah.edu any | |
78 | * improvements that they make and grant CSL redistribution rights. | |
79 | * | |
80 | */ | |
2d21ac55 A |
81 | /* |
82 | * NOTICE: This file was modified by McAfee Research in 2004 to introduce | |
83 | * support for mandatory and extensible security protections. This notice | |
84 | * is included in support of clause 2.2 (b) of the Apple Public License, | |
85 | * Version 2.0. | |
86 | * Copyright (c) 2005 SPARTA, Inc. | |
87 | */ | |
1c79356b A |
88 | |
89 | #include <mach_kdb.h> | |
1c79356b | 90 | #include <fast_tas.h> |
1c79356b A |
91 | #include <platforms.h> |
92 | ||
91447636 | 93 | #include <mach/mach_types.h> |
1c79356b | 94 | #include <mach/boolean.h> |
91447636 | 95 | #include <mach/host_priv.h> |
1c79356b A |
96 | #include <mach/machine/vm_types.h> |
97 | #include <mach/vm_param.h> | |
98 | #include <mach/semaphore.h> | |
99 | #include <mach/task_info.h> | |
100 | #include <mach/task_special_ports.h> | |
91447636 A |
101 | |
102 | #include <ipc/ipc_types.h> | |
1c79356b A |
103 | #include <ipc/ipc_space.h> |
104 | #include <ipc/ipc_entry.h> | |
91447636 A |
105 | |
106 | #include <kern/kern_types.h> | |
1c79356b A |
107 | #include <kern/mach_param.h> |
108 | #include <kern/misc_protos.h> | |
109 | #include <kern/task.h> | |
110 | #include <kern/thread.h> | |
111 | #include <kern/zalloc.h> | |
112 | #include <kern/kalloc.h> | |
113 | #include <kern/processor.h> | |
114 | #include <kern/sched_prim.h> /* for thread_wakeup */ | |
1c79356b A |
115 | #include <kern/ipc_tt.h> |
116 | #include <kern/ledger.h> | |
117 | #include <kern/host.h> | |
91447636 A |
118 | #include <kern/clock.h> |
119 | #include <kern/timer.h> | |
1c79356b A |
120 | #include <kern/assert.h> |
121 | #include <kern/sync_lock.h> | |
2d21ac55 | 122 | #include <kern/affinity.h> |
91447636 A |
123 | |
124 | #include <vm/pmap.h> | |
125 | #include <vm/vm_map.h> | |
126 | #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */ | |
127 | #include <vm/vm_pageout.h> | |
2d21ac55 | 128 | #include <vm/vm_protos.h> |
91447636 | 129 | |
1c79356b A |
130 | #if MACH_KDB |
131 | #include <ddb/db_sym.h> | |
132 | #endif /* MACH_KDB */ | |
133 | ||
55e303ae A |
134 | #ifdef __ppc__ |
135 | #include <ppc/exception.h> | |
136 | #include <ppc/hw_perfmon.h> | |
137 | #endif | |
138 | ||
1c79356b A |
139 | /* |
140 | * Exported interfaces | |
141 | */ | |
142 | ||
143 | #include <mach/task_server.h> | |
144 | #include <mach/mach_host_server.h> | |
145 | #include <mach/host_security_server.h> | |
91447636 | 146 | #include <mach/mach_port_server.h> |
2d21ac55 | 147 | #include <mach/security_server.h> |
91447636 | 148 | |
2d21ac55 A |
149 | #include <vm/vm_shared_region.h> |
150 | ||
151 | #if CONFIG_MACF_MACH | |
152 | #include <security/mac_mach_internal.h> | |
153 | #endif | |
1c79356b A |
154 | |
155 | task_t kernel_task; | |
156 | zone_t task_zone; | |
157 | ||
158 | /* Forwards */ | |
159 | ||
160 | void task_hold_locked( | |
161 | task_t task); | |
162 | void task_wait_locked( | |
163 | task_t task); | |
164 | void task_release_locked( | |
165 | task_t task); | |
1c79356b A |
166 | void task_free( |
167 | task_t task ); | |
168 | void task_synchronizer_destroy_all( | |
169 | task_t task); | |
1c79356b A |
170 | |
171 | kern_return_t task_set_ledger( | |
172 | task_t task, | |
173 | ledger_t wired, | |
174 | ledger_t paged); | |
175 | ||
/*
 * task_backing_store_privileged:
 *
 * Mark the given task as privileged with respect to the VM
 * backing store (VM_BACKING_STORE_PRIV), under the task lock.
 */
void
task_backing_store_privileged(
			task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}
185 | ||
/*
 * task_set_64bit:
 *
 * Switch a task between 32-bit and 64-bit user address spaces.
 * When dropping back to 32-bit, all mappings above the 32-bit
 * addressable range are removed since they would no longer be
 * reachable.  Idempotent: returns early if the task already has
 * the requested addressing mode.
 *
 * NOTE(review): no task lock is taken here — presumably callers
 * serialize this against the task's threads; confirm at call sites.
 */
void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
#ifdef __i386__
	thread_t thread;
#endif /* __i386__ */
	int	vm_flags = 0;

	if (is64bit) {
		if (task_has_64BitAddr(task))
			return;

		task_set_64BitAddr(task);
	} else {
		if ( !task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* remove regular VM map entries & pmap mappings */
		(void) vm_map_remove(task->map,
				(vm_map_offset_t) VM_MAX_ADDRESS,
				MACH_VM_MAX_ADDRESS,
				0);
#ifdef __ppc__
		/* LP64todo - make this clean */
		/*
		 * PPC51: ppc64 is limited to 51-bit addresses.
		 * Memory mapped above that limit is handled specially
		 * at the pmap level, so let pmap clean the commpage mapping
		 * explicitly...
		 */
		pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
		/* ... and avoid regular pmap cleanup */
		vm_flags |= VM_MAP_REMOVE_NO_PMAP_CLEANUP;
#endif /* __ppc__ */
		/* remove the higher VM mappings */
		(void) vm_map_remove(task->map,
				MACH_VM_MAX_ADDRESS,
				0xFFFFFFFFFFFFF000ULL,
				vm_flags);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#ifdef __i386__
	/* re-sync every thread's save-state flavor with the new mode */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		machine_thread_switch_addrmode(thread);
	}
#endif /* __i386__ */
}
247 | ||
1c79356b A |
248 | void |
249 | task_init(void) | |
250 | { | |
251 | task_zone = zinit( | |
252 | sizeof(struct task), | |
253 | TASK_MAX * sizeof(struct task), | |
254 | TASK_CHUNK * sizeof(struct task), | |
255 | "tasks"); | |
256 | ||
1c79356b A |
257 | /* |
258 | * Create the kernel task as the first task. | |
1c79356b | 259 | */ |
0c530ab8 | 260 | if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS) |
1c79356b | 261 | panic("task_init\n"); |
55e303ae | 262 | |
1c79356b A |
263 | vm_map_deallocate(kernel_task->map); |
264 | kernel_task->map = kernel_map; | |
1c79356b A |
265 | } |
266 | ||
1c79356b A |
267 | /* |
268 | * Create a task running in the kernel address space. It may | |
269 | * have its own map of size mem_size and may have ipc privileges. | |
270 | */ | |
271 | kern_return_t | |
272 | kernel_task_create( | |
91447636 A |
273 | __unused task_t parent_task, |
274 | __unused vm_offset_t map_base, | |
275 | __unused vm_size_t map_size, | |
276 | __unused task_t *child_task) | |
1c79356b | 277 | { |
55e303ae | 278 | return (KERN_INVALID_ARGUMENT); |
1c79356b A |
279 | } |
280 | ||
281 | kern_return_t | |
282 | task_create( | |
2d21ac55 | 283 | task_t parent_task, |
91447636 | 284 | __unused ledger_port_array_t ledger_ports, |
2d21ac55 A |
285 | __unused mach_msg_type_number_t num_ledger_ports, |
286 | __unused boolean_t inherit_memory, | |
287 | __unused task_t *child_task) /* OUT */ | |
1c79356b A |
288 | { |
289 | if (parent_task == TASK_NULL) | |
290 | return(KERN_INVALID_ARGUMENT); | |
291 | ||
2d21ac55 A |
292 | /* |
293 | * No longer supported: too many calls assume that a task has a valid | |
294 | * process attached. | |
295 | */ | |
296 | return(KERN_FAILURE); | |
1c79356b A |
297 | } |
298 | ||
299 | kern_return_t | |
300 | host_security_create_task_token( | |
91447636 | 301 | host_security_t host_security, |
2d21ac55 A |
302 | task_t parent_task, |
303 | __unused security_token_t sec_token, | |
304 | __unused audit_token_t audit_token, | |
305 | __unused host_priv_t host_priv, | |
91447636 A |
306 | __unused ledger_port_array_t ledger_ports, |
307 | __unused mach_msg_type_number_t num_ledger_ports, | |
2d21ac55 A |
308 | __unused boolean_t inherit_memory, |
309 | __unused task_t *child_task) /* OUT */ | |
1c79356b | 310 | { |
1c79356b A |
311 | if (parent_task == TASK_NULL) |
312 | return(KERN_INVALID_ARGUMENT); | |
313 | ||
314 | if (host_security == HOST_NULL) | |
315 | return(KERN_INVALID_SECURITY); | |
316 | ||
2d21ac55 A |
317 | /* |
318 | * No longer supported. | |
319 | */ | |
320 | return(KERN_FAILURE); | |
1c79356b A |
321 | } |
322 | ||
/*
 * task_create_internal:
 *
 * Allocate and initialize a new task.  If parent_task is non-null
 * the child inherits its security/audit tokens, ledgers, shared
 * region, wire limit and (optionally) its memory and affinity.
 * The new task is returned with two references: one for being
 * alive and one for the caller.
 *
 * Returns KERN_RESOURCE_SHORTAGE if the task zone is exhausted,
 * KERN_SUCCESS otherwise (*child_task is set only on success).
 */
kern_return_t
task_create_internal(
	task_t		parent_task,
	boolean_t	inherit_memory,
	boolean_t	is_64bit,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	vm_shared_region_t	shared_region;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	/* either fork the parent's address space or build a fresh one */
	if (inherit_memory)
		new_task->map = vm_map_fork(parent_task->map);
	else
		new_task->map = vm_map_create(pmap_create(0, is_64bit),
				(vm_map_offset_t)(VM_MIN_ADDRESS),
				(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	/* Inherit memlock limit from parent */
	if (parent_task)
		vm_map_set_user_wire_limit(new_task->map, parent_task->map->user_wire_limit);

	/* basic bookkeeping: lock, thread list, counters all start clean */
	mutex_init(&new_task->lock, 0);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->role = TASK_UNSPECIFIED;
	new_task->active = TRUE;
	new_task->user_data = NULL;
	new_task->faults = 0;
	new_task->cow_faults = 0;
	new_task->pageins = 0;
	new_task->messages_sent = 0;
	new_task->messages_received = 0;
	new_task->syscalls_mach = 0;
	new_task->priv_flags = 0;
	new_task->syscalls_unix=0;
	new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
	new_task->taskFeatures[0] = 0;				/* Init task features */
	new_task->taskFeatures[1] = 0;				/* Init task features */

#ifdef MACH_BSD
	/* BSD process linkage is attached later by the BSD layer */
	new_task->bsd_info = NULL;
#endif /* MACH_BSD */

#ifdef __i386__
	new_task->i386_ldt = 0;
#endif

#ifdef __ppc__
	if(BootProcInfo.pf.Available & pf64Bit) new_task->taskFeatures[0] |= tf64BitData;	/* If 64-bit machine, show we have 64-bit registers at least */
#endif

	/* task-owned synchronizer bookkeeping */
	queue_init(&new_task->semaphore_list);
	queue_init(&new_task->lock_set_list);
	new_task->semaphores_owned = 0;
	new_task->lock_sets_owned = 0;

#if CONFIG_MACF_MACH
	/*mutex_init(&new_task->labellock, ETAP_NO_TRACE);*/
	new_task->label = labelh_new(1);
	mac_task_label_init (&new_task->maclabel);
#endif

	/* set up the task's IPC space and ports */
	ipc_task_init(new_task, parent_task);

	new_task->total_user_time = 0;
	new_task->total_system_time = 0;

	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

	if (parent_task != TASK_NULL) {
		/* child inherits the parent's identity tokens */
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		/* inherit the parent's shared region */
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

		new_task->wired_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->wired_ledger_port));
		new_task->paged_ledger_port = ledger_copy(
			convert_port_to_ledger(parent_task->paged_ledger_port));
		/* addressing mode follows the parent */
		if(task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);

#ifdef __i386__
		if (inherit_memory && parent_task->i386_ldt)
			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
		if (inherit_memory && parent_task->affinity_space)
			task_affinity_create(parent_task, new_task);
	}
	else {
		/* parentless task: use kernel identity and root ledgers */
		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
	}

	/*
	 * The first task created is the kernel task itself, so it
	 * gets kernel priorities; everything after gets user ones.
	 */
	if (kernel_task == TASK_NULL) {
		new_task->priority = BASEPRI_KERNEL;
		new_task->max_priority = MAXPRI_KERNEL;
	}
	else {
		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;
	}

	/* publish the task on the global tasks list */
	mutex_lock(&tasks_threads_lock);
	queue_enter(&tasks, new_task, task_t, tasks);
	tasks_count++;
	mutex_unlock(&tasks_threads_lock);

	/* propagate backing-store privilege when store is low */
	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);

	ipc_task_enable(new_task);

	*child_task = new_task;
	return(KERN_SUCCESS);
}
458 | ||
459 | /* | |
91447636 | 460 | * task_deallocate: |
1c79356b | 461 | * |
91447636 | 462 | * Drop a reference on a task. |
1c79356b A |
463 | */ |
464 | void | |
9bccf70c | 465 | task_deallocate( |
1c79356b A |
466 | task_t task) |
467 | { | |
9bccf70c A |
468 | if (task == TASK_NULL) |
469 | return; | |
470 | ||
91447636 | 471 | if (task_deallocate_internal(task) > 0) |
9bccf70c | 472 | return; |
1c79356b | 473 | |
9bccf70c A |
474 | ipc_task_terminate(task); |
475 | ||
2d21ac55 A |
476 | if (task->affinity_space) |
477 | task_affinity_deallocate(task); | |
478 | ||
1c79356b A |
479 | vm_map_deallocate(task->map); |
480 | is_release(task->itk_space); | |
1c79356b | 481 | |
2d21ac55 A |
482 | #if CONFIG_MACF_MACH |
483 | labelh_release(task->label); | |
484 | #endif | |
91447636 | 485 | zfree(task_zone, task); |
1c79356b A |
486 | } |
487 | ||
0c530ab8 A |
488 | /* |
489 | * task_name_deallocate: | |
490 | * | |
491 | * Drop a reference on a task name. | |
492 | */ | |
493 | void | |
494 | task_name_deallocate( | |
495 | task_name_t task_name) | |
496 | { | |
497 | return(task_deallocate((task_t)task_name)); | |
498 | } | |
499 | ||
500 | ||
1c79356b A |
501 | /* |
502 | * task_terminate: | |
503 | * | |
504 | * Terminate the specified task. See comments on thread_terminate | |
505 | * (kern/thread.c) about problems with terminating the "current task." | |
506 | */ | |
507 | ||
508 | kern_return_t | |
509 | task_terminate( | |
510 | task_t task) | |
511 | { | |
512 | if (task == TASK_NULL) | |
91447636 A |
513 | return (KERN_INVALID_ARGUMENT); |
514 | ||
1c79356b | 515 | if (task->bsd_info) |
91447636 A |
516 | return (KERN_FAILURE); |
517 | ||
1c79356b A |
518 | return (task_terminate_internal(task)); |
519 | } | |
520 | ||
/*
 * task_terminate_internal:
 *
 * Core of task termination: stop and terminate every thread in
 * the task, tear down its synchronizers, IPC space and address
 * space, remove it from the global task list, and drop the
 * task's reference on itself.  Must never be called on the
 * kernel task.
 */
kern_return_t
task_terminate_internal(
	task_t			task)
{
	thread_t			thread, self;
	task_t				self_task;
	boolean_t			interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 * Get the task locked and make sure that we are not racing
	 * with someone else trying to terminate us.
	 * When two distinct tasks must be locked, take them in
	 * address order to avoid deadlock against a concurrent
	 * terminator doing the same.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active || !self->active) {
		/*
		 * Task or current act is already being terminated.
		 * Just return an error. If we are dying, this will
		 * just get us to our AST special handler and that
		 * will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 * Indicate that we want all the threads to stop executing
	 * at user space by holding the task (we would have held
	 * each thread independently in thread_terminate_internal -
	 * but this way we may be more likely to already find it
	 * held there).  Mark the task inactive, and prevent
	 * further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 * Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread_terminate_internal(thread);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	if (self_task == task)
		machine_thread_terminate_self();

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_destroy(task->itk_space);

#ifdef __ppc__
	/* LP64todo - make this clean */
	/*
	 * PPC51: ppc64 is limited to 51-bit addresses.
	 */
	pmap_unmap_sharedpage(task->map->pmap);	/* Unmap commpage */
#endif /* __ppc__ */

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explictly here.
	 */
	vm_map_remove(task->map,
		      task->map->min_offset,
		      task->map->max_offset,
		      VM_MAP_NO_FLAGS);

	/* release our shared region */
	vm_shared_region_set(task, NULL);

	/* take the task off the global list */
	mutex_lock(&tasks_threads_lock);
	queue_remove(&tasks, task, task_t, tasks);
	tasks_count--;
	mutex_unlock(&tasks_threads_lock);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

#if __ppc__
	perfmon_release_facility(task);	// notify the perfmon facility
#endif

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}
661 | ||
/*
 * task_halt:
 *
 * Shut the current task down (except for the current thread) in
 * preparation for dramatic changes to the task (probably exec).
 * We hold the task, terminate all other threads in the task and
 * wait for them to terminate, clean up the portspace, and when
 * all done, let the current thread go.
 *
 * Only valid for the caller's own task; returns
 * KERN_INVALID_ARGUMENT otherwise, and KERN_FAILURE if the task
 * or thread is already terminating.
 */
kern_return_t
task_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	/* a task may only halt itself */
	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active || !self->active) {
		/*
		 * Task or current thread is already being terminated.
		 * Hurry up and return out of the current kernel context
		 * so that we run our AST special handler to terminate
		 * ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->thread_count > 1) {
		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 * Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	machine_thread_terminate_self();

	task_unlock(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the contents of the IPC space, leaving just
	 * a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	return (KERN_SUCCESS);
}
747 | ||
748 | /* | |
749 | * task_hold_locked: | |
750 | * | |
751 | * Suspend execution of the specified task. | |
752 | * This is a recursive-style suspension of the task, a count of | |
753 | * suspends is maintained. | |
754 | * | |
755 | * CONDITIONS: the task is locked and active. | |
756 | */ | |
757 | void | |
758 | task_hold_locked( | |
91447636 | 759 | register task_t task) |
1c79356b | 760 | { |
91447636 | 761 | register thread_t thread; |
1c79356b A |
762 | |
763 | assert(task->active); | |
764 | ||
9bccf70c A |
765 | if (task->suspend_count++ > 0) |
766 | return; | |
1c79356b A |
767 | |
768 | /* | |
91447636 | 769 | * Iterate through all the threads and hold them. |
1c79356b | 770 | */ |
91447636 A |
771 | queue_iterate(&task->threads, thread, thread_t, task_threads) { |
772 | thread_mtx_lock(thread); | |
773 | thread_hold(thread); | |
774 | thread_mtx_unlock(thread); | |
1c79356b A |
775 | } |
776 | } | |
777 | ||
778 | /* | |
779 | * task_hold: | |
780 | * | |
781 | * Same as the internal routine above, except that is must lock | |
782 | * and verify that the task is active. This differs from task_suspend | |
783 | * in that it places a kernel hold on the task rather than just a | |
784 | * user-level hold. This keeps users from over resuming and setting | |
785 | * it running out from under the kernel. | |
786 | * | |
787 | * CONDITIONS: the caller holds a reference on the task | |
788 | */ | |
789 | kern_return_t | |
91447636 A |
790 | task_hold( |
791 | register task_t task) | |
1c79356b | 792 | { |
1c79356b A |
793 | if (task == TASK_NULL) |
794 | return (KERN_INVALID_ARGUMENT); | |
91447636 | 795 | |
1c79356b | 796 | task_lock(task); |
91447636 | 797 | |
1c79356b A |
798 | if (!task->active) { |
799 | task_unlock(task); | |
91447636 | 800 | |
1c79356b A |
801 | return (KERN_FAILURE); |
802 | } | |
1c79356b | 803 | |
91447636 A |
804 | task_hold_locked(task); |
805 | task_unlock(task); | |
806 | ||
807 | return (KERN_SUCCESS); | |
1c79356b A |
808 | } |
809 | ||
810 | /* | |
91447636 A |
811 | * task_wait_locked: |
812 | * | |
1c79356b A |
813 | * Wait for all threads in task to stop. |
814 | * | |
815 | * Conditions: | |
816 | * Called with task locked, active, and held. | |
817 | */ | |
818 | void | |
819 | task_wait_locked( | |
820 | register task_t task) | |
821 | { | |
91447636 | 822 | register thread_t thread, self; |
1c79356b A |
823 | |
824 | assert(task->active); | |
825 | assert(task->suspend_count > 0); | |
826 | ||
91447636 A |
827 | self = current_thread(); |
828 | ||
1c79356b | 829 | /* |
91447636 | 830 | * Iterate through all the threads and wait for them to |
1c79356b A |
831 | * stop. Do not wait for the current thread if it is within |
832 | * the task. | |
833 | */ | |
91447636 A |
834 | queue_iterate(&task->threads, thread, thread_t, task_threads) { |
835 | if (thread != self) | |
55e303ae | 836 | thread_wait(thread); |
1c79356b A |
837 | } |
838 | } | |
839 | ||
840 | /* | |
841 | * task_release_locked: | |
842 | * | |
843 | * Release a kernel hold on a task. | |
844 | * | |
845 | * CONDITIONS: the task is locked and active | |
846 | */ | |
847 | void | |
848 | task_release_locked( | |
91447636 | 849 | register task_t task) |
1c79356b | 850 | { |
91447636 | 851 | register thread_t thread; |
1c79356b A |
852 | |
853 | assert(task->active); | |
9bccf70c | 854 | assert(task->suspend_count > 0); |
1c79356b | 855 | |
9bccf70c A |
856 | if (--task->suspend_count > 0) |
857 | return; | |
1c79356b | 858 | |
91447636 A |
859 | queue_iterate(&task->threads, thread, thread_t, task_threads) { |
860 | thread_mtx_lock(thread); | |
861 | thread_release(thread); | |
862 | thread_mtx_unlock(thread); | |
1c79356b A |
863 | } |
864 | } | |
865 | ||
/*
 *	task_release:
 *
 *	Same as the internal routine above, except that it must lock
 *	and verify that the task is active.
 *
 * CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
	task_t		task)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	/* A terminated task cannot be released. */
	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task_release_locked(task);
	task_unlock(task);

	return (KERN_SUCCESS);
}
894 | ||
/*
 *	task_threads:
 *
 *	Return an array of send rights to the threads in the task.
 *	On success, *threads_out points to a kalloc'd array of ports
 *	(ownership transfers to the caller via MIG) and *count is the
 *	number of entries.
 */
kern_return_t
task_threads(
	task_t					task,
	thread_act_array_t		*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t				*thread_list;
	thread_t				thread;
	vm_size_t				size, size_needed;
	void					*addr;
	unsigned int			i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	/*
	 * The thread count can change while the task is unlocked for
	 * allocation, so loop: size the buffer, drop the lock, allocate,
	 * and retry until the buffer is big enough under the lock.
	 */
	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	/* Take a reference on each thread before the task lock is dropped. */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
				++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* Drop the refs taken above before failing. */
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that Mig should handle */

		/*
		 * convert_thread_to_port() consumes the thread reference,
		 * producing a send right in its place; the array is reused
		 * in place as an array of ports.
		 */
		for (i = 0; i < actual; ++i)
			((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
	}

	return (KERN_SUCCESS);
}
1003 | ||
/*
 *	task_suspend:
 *
 *	Implement a user-level suspension on a task.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 *
 *	The kernel task can never be suspended.
 */
kern_return_t
task_suspend(
	register task_t		task)
{
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		task_unlock(task);

		return (KERN_SUCCESS);
	}

	/*
	 *	Put a kernel-level hold on the threads in the task (all
	 *	user-level task suspensions added together represent a
	 *	single kernel-level hold).  We then wait for the threads
	 *	to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
1050 | ||
/*
 *	task_resume:
 *		Release a kernel hold on a task.
 *
 * Conditions:
 *	The caller holds a reference to the task
 *
 *	Undoes one task_suspend(); the kernel-level hold is dropped only
 *	when the user stop count reaches zero.  Resuming a task that is
 *	not suspended fails with KERN_FAILURE.
 */
kern_return_t
task_resume(
	register task_t	task)
{
	register boolean_t	release = FALSE;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (!task->active) {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	if (task->user_stop_count > 0) {
		/* Only the final resume releases the kernel hold. */
		if (--task->user_stop_count == 0)
			release = TRUE;
	}
	else {
		task_unlock(task);

		return (KERN_FAILURE);
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release)
		task_release_locked(task);

	task_unlock(task);

	return (KERN_SUCCESS);
}
1095 | ||
/*
 *	host_security_set_task_token:
 *
 *	Install new security and audit tokens in the task, then reset
 *	the task's host special port.  Requires the host security port
 *	as authorization.  If host_priv is supplied the task gets the
 *	privileged host port, otherwise the unprivileged one.
 */
kern_return_t
host_security_set_task_token(
        host_security_t  host_security,
        task_t		 task,
        security_token_t sec_token,
	audit_token_t	 audit_token,
	host_priv_t	 host_priv)
{
	ipc_port_t	 host_port;
	kern_return_t	 kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}
1127 | ||
/*
 * Utility routine to set a ledger
 *
 * Replaces the task's wired and/or paged ledger ports.  For each
 * non-null argument, the previous port's send right is released and
 * a copy of the new ledger is installed.
 */
kern_return_t
task_set_ledger(
	task_t		task,
	ledger_t	wired,
	ledger_t	paged)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	task_lock(task);
	if (wired) {
		ipc_port_release_send(task->wired_ledger_port);
		task->wired_ledger_port = ledger_copy(wired);
	}
	if (paged) {
		ipc_port_release_send(task->paged_ledger_port);
		task->paged_ledger_port = ledger_copy(paged);
	}
	task_unlock(task);

	return(KERN_SUCCESS);
}
1153 | ||
1154 | /* | |
1155 | * This routine was added, pretty much exclusively, for registering the | |
1156 | * RPC glue vector for in-kernel short circuited tasks. Rather than | |
1157 | * removing it completely, I have only disabled that feature (which was | |
1158 | * the only feature at the time). It just appears that we are going to | |
1159 | * want to add some user data to tasks in the future (i.e. bsd info, | |
1160 | * task names, etc...), so I left it in the formal task interface. | |
1161 | */ | |
1162 | kern_return_t | |
1163 | task_set_info( | |
1164 | task_t task, | |
1165 | task_flavor_t flavor, | |
91447636 A |
1166 | __unused task_info_t task_info_in, /* pointer to IN array */ |
1167 | __unused mach_msg_type_number_t task_info_count) | |
1c79356b | 1168 | { |
1c79356b A |
1169 | if (task == TASK_NULL) |
1170 | return(KERN_INVALID_ARGUMENT); | |
1171 | ||
1172 | switch (flavor) { | |
1173 | default: | |
1174 | return (KERN_INVALID_ARGUMENT); | |
1175 | } | |
1176 | return (KERN_SUCCESS); | |
1177 | } | |
1178 | ||
/*
 *	task_info:
 *
 *	Return statistics about the task, selected by flavor, into the
 *	caller-supplied buffer task_info_out.  *task_info_count gives the
 *	buffer capacity (in natural_t units) on entry and is set to the
 *	amount written on success.  Returns KERN_INVALID_ARGUMENT for an
 *	undersized buffer or unknown flavor.
 */
kern_return_t
task_info(
	task_t					task,
	task_flavor_t			flavor,
	task_info_t				task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	case TASK_BASIC2_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t				map;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
		    return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_32_t)task_info_out;

		/* The kernel task reports on the kernel map. */
		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = CAST_DOWN(vm_offset_t,map->size);
		if (flavor == TASK_BASIC2_INFO_32) {
			/*
			 * The "BASIC2" flavor gets the maximum resident
			 * size instead of the current resident size...
			 */
			basic_info->resident_size = pmap_resident_max(map->pmap);
		} else {
			basic_info->resident_size = pmap_resident_count(map->pmap);
		}
		/* pmap counts are in pages; convert to bytes. */
		basic_info->resident_size *= PAGE_SIZE;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time,
									(unsigned *)&basic_info->user_time.seconds,
									(unsigned *)&basic_info->user_time.microseconds);
		absolutetime_to_microtime(task->total_system_time,
									(unsigned *)&basic_info->system_time.seconds,
									(unsigned *)&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t				map;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
		    return (KERN_INVALID_ARGUMENT);

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap))
			* PAGE_SIZE_64;

		task_lock(task);
		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time,
									(unsigned *)&basic_info->user_time.seconds,
									(unsigned *)&basic_info->user_time.microseconds);
		absolutetime_to_microtime(task->total_system_time,
									(unsigned *)&basic_info->system_time.seconds,
									(unsigned *)&basic_info->system_time.microseconds);
		task_unlock(task);

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t					thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;

		task_lock(task);

		/* Sum live-thread times; terminated threads are not included. */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}

		task_unlock(task);

		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t			thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;

		task_lock(task);

		/* Task totals accumulate time from threads that have exited. */
		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			info->threads_system += tval;
			info->total_system += tval;
		}

		task_unlock(task);

		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{

		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return (KERN_INVALID_POLICY);
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;

		if (*task_info_count < POLICY_RR_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_base = (policy_rr_base_t) task_info_out;

		task_lock(task);
		/* Only the kernel task is treated as round-robin here. */
		if (task != kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		rr_base->base_priority = task->priority;
		task_unlock(task);

		rr_base->quantum = std_quantum_us / 1000;

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_base = (policy_timeshare_base_t) task_info_out;

		task_lock(task);
		/* The kernel task is not timeshare. */
		if (task == kernel_task) {
			task_unlock(task);
			return (KERN_INVALID_POLICY);
		}

		ts_base->base_priority = task->priority;
		task_unlock(task);

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		sec_token_p = (security_token_t *) task_info_out;

		task_lock(task);
		*sec_token_p = task->sec_token;
		task_unlock(task);

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
			return (KERN_INVALID_ARGUMENT);

		audit_token_p = (audit_token_t *) task_info_out;

		task_lock(task);
		*audit_token_p = task->audit_token;
		task_unlock(task);

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		return (KERN_INVALID_ARGUMENT);

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;
		register thread_t			thread;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		events_info = (task_events_info_t) task_info_out;

		task_lock(task);

		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;

		/* Context switches: task total plus each live thread's count. */
		events_info->csw = task->c_switch;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			events_info->csw += thread->c_switch;
		}

		task_unlock(task);

		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}
	case TASK_AFFINITY_TAG_INFO:
	{
		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		return task_affinity_info(task, task_info_out, task_info_count);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}
1470 | ||
2d21ac55 A |
/*
 *	task_vtimer_set:
 *
 *	Arm a virtual (per-thread CPU time) timer of the given kind for
 *	the task, snapshotting each thread's current timer value as the
 *	baseline that task_vtimer_update() will later measure against.
 */
void
task_vtimer_set(
	task_t		task,
	integer_t	which)
{
	thread_t	thread;

	/* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

	task_lock(task);

	task->vtimers |= which;

	switch (which) {

	case TASK_VTIMER_USER:
		/* Baseline: user time only. */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_user_save = timer_grab(&thread->user_timer);
		}
		break;

	case TASK_VTIMER_PROF:
		/* Baseline: user + system time. */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
		}
		break;

	case TASK_VTIMER_RLIM:
		/* Baseline: user + system time (CPU rlimit accounting). */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
		}
		break;
	}

	task_unlock(task);
}
1509 | ||
/*
 *	task_vtimer_clear:
 *
 *	Disarm the given virtual timer kind(s) for the current task by
 *	clearing the corresponding bits in task->vtimers.
 */
void
task_vtimer_clear(
	task_t		task,
	integer_t	which)
{
	assert(task == current_task());

	task_lock(task);

	task->vtimers &= ~which;

	task_unlock(task);
}
1523 | ||
/*
 *	task_vtimer_update:
 *
 *	Report, in *microsecs, the CPU time the current thread has
 *	accumulated for the given virtual timer kind since the last
 *	snapshot, and (for PROF/RLIM) advance the snapshot.
 *	The timer of kind `which' must be armed (see task_vtimer_set).
 */
void
task_vtimer_update(
__unused
	task_t		task,
	integer_t	which,
	uint32_t	*microsecs)
{
	thread_t	thread = current_thread();
	uint32_t	tdelt, secs;
	uint64_t	tsum;

	assert(task == current_task());

	assert(task->vtimers & which);

	tdelt = secs = 0;

	switch (which) {

	case TASK_VTIMER_USER:
		/* timer_delta() also advances the saved baseline. */
		tdelt = timer_delta(&thread->user_timer,
								&thread->vtimer_user_save);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = tsum - thread->vtimer_prof_save;
		thread->vtimer_prof_save = tsum;
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = tsum - thread->vtimer_rlim_save;
		thread->vtimer_rlim_save = tsum;
		break;
	}

	/* Convert the absolute-time delta to microseconds for the caller. */
	absolutetime_to_microtime(tdelt, &secs, microsecs);
}
1565 | ||
1c79356b A |
1566 | /* |
1567 | * task_assign: | |
1568 | * | |
1569 | * Change the assigned processor set for the task | |
1570 | */ | |
1571 | kern_return_t | |
1572 | task_assign( | |
91447636 A |
1573 | __unused task_t task, |
1574 | __unused processor_set_t new_pset, | |
1575 | __unused boolean_t assign_threads) | |
1c79356b | 1576 | { |
1c79356b A |
1577 | return(KERN_FAILURE); |
1578 | } | |
1579 | ||
1580 | /* | |
1581 | * task_assign_default: | |
1582 | * | |
1583 | * Version of task_assign to assign to default processor set. | |
1584 | */ | |
1585 | kern_return_t | |
1586 | task_assign_default( | |
1587 | task_t task, | |
1588 | boolean_t assign_threads) | |
1589 | { | |
2d21ac55 | 1590 | return (task_assign(task, &pset0, assign_threads)); |
1c79356b A |
1591 | } |
1592 | ||
1593 | /* | |
1594 | * task_get_assignment | |
1595 | * | |
1596 | * Return name of processor set that task is assigned to. | |
1597 | */ | |
1598 | kern_return_t | |
1599 | task_get_assignment( | |
1600 | task_t task, | |
1601 | processor_set_t *pset) | |
1602 | { | |
1603 | if (!task->active) | |
1604 | return(KERN_FAILURE); | |
1605 | ||
2d21ac55 A |
1606 | *pset = &pset0; |
1607 | ||
1608 | return (KERN_SUCCESS); | |
1c79356b A |
1609 | } |
1610 | ||
1611 | ||
1612 | /* | |
1613 | * task_policy | |
1614 | * | |
1615 | * Set scheduling policy and parameters, both base and limit, for | |
1616 | * the given task. Policy must be a policy which is enabled for the | |
1617 | * processor set. Change contained threads if requested. | |
1618 | */ | |
1619 | kern_return_t | |
1620 | task_policy( | |
91447636 A |
1621 | __unused task_t task, |
1622 | __unused policy_t policy_id, | |
1623 | __unused policy_base_t base, | |
1624 | __unused mach_msg_type_number_t count, | |
1625 | __unused boolean_t set_limit, | |
1626 | __unused boolean_t change) | |
1c79356b A |
1627 | { |
1628 | return(KERN_FAILURE); | |
1629 | } | |
1630 | ||
1631 | /* | |
1632 | * task_set_policy | |
1633 | * | |
1634 | * Set scheduling policy and parameters, both base and limit, for | |
1635 | * the given task. Policy can be any policy implemented by the | |
1636 | * processor set, whether enabled or not. Change contained threads | |
1637 | * if requested. | |
1638 | */ | |
1639 | kern_return_t | |
1640 | task_set_policy( | |
91447636 A |
1641 | __unused task_t task, |
1642 | __unused processor_set_t pset, | |
1643 | __unused policy_t policy_id, | |
1644 | __unused policy_base_t base, | |
1645 | __unused mach_msg_type_number_t base_count, | |
1646 | __unused policy_limit_t limit, | |
1647 | __unused mach_msg_type_number_t limit_count, | |
1648 | __unused boolean_t change) | |
1c79356b A |
1649 | { |
1650 | return(KERN_FAILURE); | |
1651 | } | |
1652 | ||
#if	FAST_TAS
/*
 *	task_set_ras_pc:
 *
 *	Register the restartable atomic sequence [pc, endpc) for the
 *	task's fast test-and-set support.
 */
kern_return_t
task_set_ras_pc(
 	task_t		task,
 	vm_offset_t	pc,
	vm_offset_t	endpc)
{
	extern int fast_tas_debug;

	if (fast_tas_debug) {
		printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
		       task, pc, endpc);
	}
	task_lock(task);
	task->fast_tas_base = pc;
	task->fast_tas_end = endpc;
	task_unlock(task);
	return KERN_SUCCESS;
}
#else	/* FAST_TAS */
/*
 *	task_set_ras_pc:
 *
 *	Stub when FAST_TAS support is not built in; always fails.
 */
kern_return_t
task_set_ras_pc(
	__unused task_t	task,
 	__unused vm_offset_t	pc,
	__unused vm_offset_t	endpc)
{
	return KERN_FAILURE;
}
#endif	/* FAST_TAS */
1c79356b A |
1682 | |
/*
 *	task_synchronizer_destroy_all:
 *
 *	Destroy every semaphore and lock set still owned by the task
 *	(used during task teardown).
 */
void
task_synchronizer_destroy_all(task_t task)
{
	semaphore_t	semaphore;
	lock_set_t	lock_set;

	/*
	 *  Destroy owned semaphores
	 */

	/*
	 * NOTE(review): each loop assumes the destroy call unlinks the
	 * object from the task's list, otherwise it would spin forever
	 * on the same head element — confirm against semaphore_destroy /
	 * lock_set_destroy.
	 */
	while (!queue_empty(&task->semaphore_list)) {
		semaphore = (semaphore_t) queue_first(&task->semaphore_list);
		(void) semaphore_destroy(task, semaphore);
	}

	/*
	 *  Destroy owned lock sets
	 */

	while (!queue_empty(&task->lock_set_list)) {
		lock_set = (lock_set_t) queue_first(&task->lock_set_list);
		(void) lock_set_destroy(task, lock_set);
	}
}
1707 | ||
1c79356b A |
1708 | /* |
1709 | * We need to export some functions to other components that | |
1710 | * are currently implemented in macros within the osfmk | |
1711 | * component. Just export them as functions of the same name. | |
1712 | */ | |
1713 | boolean_t is_kerneltask(task_t t) | |
1714 | { | |
1715 | if (t == kernel_task) | |
55e303ae A |
1716 | return (TRUE); |
1717 | ||
1718 | return (FALSE); | |
1c79356b A |
1719 | } |
1720 | ||
/*
 * Export current_task() as a real function: undo the macro so the
 * symbol exists for external callers, then wrap the fast inline form.
 */
#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}
91447636 A |
1727 | |
/*
 * Export task_reference() as a real function (the macro form is
 * undefined first).  Takes an additional reference on the task;
 * a TASK_NULL argument is silently ignored.
 */
#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t		task)
{
	if (task != TASK_NULL)
		task_reference_internal(task);
}
2d21ac55 A |
1737 | |
#if CONFIG_MACF_MACH
/*
 * Protect 2 task labels against modification by adding a reference on
 * both label handles.  The locks do not actually have to be held while
 * using the labels as only labels with one reference can be modified
 * in place.
 */

void
tasklabel_lock2(
	task_t a,
	task_t b)
{
	labelh_reference(a->label);
	labelh_reference(b->label);
}

/* Drop the references taken by tasklabel_lock2(). */
void
tasklabel_unlock2(
	task_t a,
	task_t b)
{
	labelh_release(a->label);
	labelh_release(b->label);
}

/*
 * Update the task's MAC label from pl, and propagate the update to
 * the label on the task's self port.
 */
void
mac_task_label_update_internal(
	struct label	*pl,
	struct task	*task)
{

	tasklabel_lock(task);
	/* labelh_modify() may replace the handle; store the result back. */
	task->label = labelh_modify(task->label);
	mac_task_label_update(pl, &task->maclabel);
	tasklabel_unlock(task);
	ip_lock(task->itk_self);
	mac_port_label_update_cred(pl, &task->itk_self->ip_label);
	ip_unlock(task->itk_self);
}

/*
 * Apply an arbitrary modification function f(label, arg) to the
 * task's MAC label under the task label lock.
 */
void
mac_task_label_modify(
	struct task	*task,
	void		*arg,
	void (*f)	(struct label *l, void *arg))
{

	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	(*f)(&task->maclabel, arg);
	tasklabel_unlock(task);
}

/* Return a pointer to the task's MAC label (no reference taken). */
struct label *
mac_task_get_label(struct task *task)
{
	return (&task->maclabel);
}
#endif