]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/task.c
xnu-2050.48.11.tar.gz
[apple/xnu.git] / osfmk / kern / task.c
CommitLineData
1c79356b 1/*
316670eb 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63/*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
2d21ac55
A
81/*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
1c79356b 88
1c79356b 89#include <fast_tas.h>
1c79356b
A
90#include <platforms.h>
91
91447636 92#include <mach/mach_types.h>
1c79356b 93#include <mach/boolean.h>
91447636 94#include <mach/host_priv.h>
1c79356b
A
95#include <mach/machine/vm_types.h>
96#include <mach/vm_param.h>
97#include <mach/semaphore.h>
98#include <mach/task_info.h>
99#include <mach/task_special_ports.h>
91447636
A
100
101#include <ipc/ipc_types.h>
1c79356b
A
102#include <ipc/ipc_space.h>
103#include <ipc/ipc_entry.h>
91447636
A
104
105#include <kern/kern_types.h>
1c79356b
A
106#include <kern/mach_param.h>
107#include <kern/misc_protos.h>
108#include <kern/task.h>
109#include <kern/thread.h>
110#include <kern/zalloc.h>
111#include <kern/kalloc.h>
112#include <kern/processor.h>
113#include <kern/sched_prim.h> /* for thread_wakeup */
1c79356b 114#include <kern/ipc_tt.h>
1c79356b 115#include <kern/host.h>
91447636
A
116#include <kern/clock.h>
117#include <kern/timer.h>
1c79356b
A
118#include <kern/assert.h>
119#include <kern/sync_lock.h>
2d21ac55 120#include <kern/affinity.h>
91447636
A
121
122#include <vm/pmap.h>
123#include <vm/vm_map.h>
124#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
125#include <vm/vm_pageout.h>
2d21ac55 126#include <vm/vm_protos.h>
91447636 127
1c79356b
A
128/*
129 * Exported interfaces
130 */
131
132#include <mach/task_server.h>
133#include <mach/mach_host_server.h>
134#include <mach/host_security_server.h>
91447636 135#include <mach/mach_port_server.h>
2d21ac55 136#include <mach/security_server.h>
91447636 137
2d21ac55
A
138#include <vm/vm_shared_region.h>
139
140#if CONFIG_MACF_MACH
141#include <security/mac_mach_internal.h>
142#endif
1c79356b 143
b0d623f7
A
144#if CONFIG_COUNTERS
145#include <pmc/pmc.h>
146#endif /* CONFIG_COUNTERS */
147
/* The first task created at boot; owns the kernel_map. */
task_t			kernel_task;
/* Zone from which all struct task allocations are made. */
zone_t			task_zone;
lck_attr_t      task_lck_attr;
lck_grp_t       task_lck_grp;
lck_grp_attr_t  task_lck_grp_attr;
#if CONFIG_EMBEDDED
/* Serializes manipulation of per-task watcher lists (embedded only). */
lck_mtx_t	task_watch_mtx;
#endif /* CONFIG_EMBEDDED */

/* Global accumulators for kernel-memory ledger totals of dead tasks. */
zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t		dead_task_statistics;
lck_spin_t		dead_task_statistics_lock;

/* Template used to instantiate each task's ledger; built in init_task_ledgers(). */
static ledger_template_t task_ledger_template = NULL;
/* Ledger entry indices; -1 means "not yet registered". */
struct _task_ledger_indices task_ledgers = {-1, -1, -1, -1, -1, -1, -1};
void init_task_ledgers(void);


int task_max = CONFIG_TASK_MAX;	/* Max number of tasks */
1c79356b 170
6d2010ae
A
/* externs for BSD kernel */
extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);

/* Forwards */

/* Kernel-hold every thread in a locked, active task. */
void		task_hold_locked(
			task_t		task);
/* Wait for all threads of a locked, held task to stop. */
void		task_wait_locked(
			task_t		task,
			boolean_t	until_not_runnable);
/* Undo task_hold_locked: release every thread in the task. */
void		task_release_locked(
			task_t		task);

void		task_free(
			task_t		task );
/* Destroy semaphores and lock sets owned by a dying task. */
void		task_synchronizer_destroy_all(
			task_t		task);

int check_for_tasksuspend(
			task_t task);
55e303ae
A
191void
192task_backing_store_privileged(
193 task_t task)
194{
195 task_lock(task);
196 task->priv_flags |= VM_BACKING_STORE_PRIV;
197 task_unlock(task);
198 return;
199}
200
91447636
A
201
/*
 * task_set_64bit:
 *
 * Switch a task between 32-bit and 64-bit user address spaces.
 * No-op if the task is already in the requested mode.  When
 * shrinking to 32-bit, all mappings above the 32-bit range are
 * torn down since they would be unreachable.  On x86, every
 * thread's machine state is then switched to the new address mode.
 */
void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
#if defined(__i386__) || defined(__x86_64__)
	thread_t thread;
#endif /* __i386__ */
	int	vm_flags = 0;

	if (is64bit) {
		/* Already 64-bit: nothing to do. */
		if (task_has_64BitAddr(task))
			return;

		task_set_64BitAddr(task);
	} else {
		/* Already 32-bit: nothing to do. */
		if ( !task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* remove regular VM map entries & pmap mappings */
		(void) vm_map_remove(task->map,
				(vm_map_offset_t) VM_MAX_ADDRESS,
				MACH_VM_MAX_ADDRESS,
				0);
		/* remove the higher VM mappings */
		(void) vm_map_remove(task->map,
				MACH_VM_MAX_ADDRESS,
				0xFFFFFFFFFFFFF000ULL,
				vm_flags);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#if defined(__i386__) || defined(__x86_64__)
	/* Flip every thread's machine address mode under the task lock. */
	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
	task_unlock(task);
#endif /* __i386__ */
}
254
b0d623f7
A
255
256void
257task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
258{
259 task_lock(task);
260 task->all_image_info_addr = addr;
261 task->all_image_info_size = size;
262 task_unlock(task);
263}
264
1c79356b
A
/*
 * task_init:
 *
 * One-time bootstrap of the task subsystem: initializes the lock
 * group/attributes, creates the task zone and per-task ledger
 * template, and creates the kernel task itself (which then adopts
 * kernel_map in place of the map task_create_internal gave it).
 */
void
task_init(void)
{

	lck_grp_attr_setdefault(&task_lck_grp_attr);
	lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
	lck_attr_setdefault(&task_lck_attr);
	lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
#if CONFIG_EMBEDDED
	lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr);
#endif /* CONFIG_EMBEDDED */

	task_zone = zinit(
			sizeof(struct task),
			task_max * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	/* Task structures must stay out of encrypted swap. */
	zone_change(task_zone, Z_NOENCRYPT, TRUE);

	/* Ledger template must exist before the first task is created. */
	init_task_ledgers();

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
#endif
		panic("task_init\n");

	/* Swap the freshly created map for the real kernel_map. */
	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
	lck_spin_init(&dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
}
301
1c79356b
A
302/*
303 * Create a task running in the kernel address space. It may
304 * have its own map of size mem_size and may have ipc privileges.
305 */
306kern_return_t
307kernel_task_create(
91447636
A
308 __unused task_t parent_task,
309 __unused vm_offset_t map_base,
310 __unused vm_size_t map_size,
311 __unused task_t *child_task)
1c79356b 312{
55e303ae 313 return (KERN_INVALID_ARGUMENT);
1c79356b
A
314}
315
316kern_return_t
317task_create(
2d21ac55 318 task_t parent_task,
91447636 319 __unused ledger_port_array_t ledger_ports,
2d21ac55
A
320 __unused mach_msg_type_number_t num_ledger_ports,
321 __unused boolean_t inherit_memory,
322 __unused task_t *child_task) /* OUT */
1c79356b
A
323{
324 if (parent_task == TASK_NULL)
325 return(KERN_INVALID_ARGUMENT);
326
2d21ac55
A
327 /*
328 * No longer supported: too many calls assume that a task has a valid
329 * process attached.
330 */
331 return(KERN_FAILURE);
1c79356b
A
332}
333
334kern_return_t
335host_security_create_task_token(
91447636 336 host_security_t host_security,
2d21ac55
A
337 task_t parent_task,
338 __unused security_token_t sec_token,
339 __unused audit_token_t audit_token,
340 __unused host_priv_t host_priv,
91447636
A
341 __unused ledger_port_array_t ledger_ports,
342 __unused mach_msg_type_number_t num_ledger_ports,
2d21ac55
A
343 __unused boolean_t inherit_memory,
344 __unused task_t *child_task) /* OUT */
1c79356b 345{
1c79356b
A
346 if (parent_task == TASK_NULL)
347 return(KERN_INVALID_ARGUMENT);
348
349 if (host_security == HOST_NULL)
350 return(KERN_INVALID_SECURITY);
351
2d21ac55
A
352 /*
353 * No longer supported.
354 */
355 return(KERN_FAILURE);
1c79356b
A
356}
357
316670eb
A
358void
359init_task_ledgers(void)
360{
361 ledger_template_t t;
362
363 assert(task_ledger_template == NULL);
364 assert(kernel_task == TASK_NULL);
365
366 if ((t = ledger_template_create("Per-task ledger")) == NULL)
367 panic("couldn't create task ledger template");
368
369 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
370 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
371 "physmem", "bytes");
372 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
373 "bytes");
374 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
375 "bytes");
376 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
377 "bytes");
4b17d6b6
A
378 task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
379 "count");
380 task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
381 "count");
316670eb
A
382
383 if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
384 (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
4b17d6b6
A
385 (task_ledgers.wired_mem < 0) || (task_ledgers.platform_idle_wakeups < 0) ||
386 (task_ledgers.interrupt_wakeups < 0)) {
316670eb
A
387 panic("couldn't create entries for task ledger template");
388 }
389
390 task_ledger_template = t;
391}
392
1c79356b 393kern_return_t
55e303ae 394task_create_internal(
1c79356b
A
395 task_t parent_task,
396 boolean_t inherit_memory,
0c530ab8 397 boolean_t is_64bit,
1c79356b
A
398 task_t *child_task) /* OUT */
399{
2d21ac55
A
400 task_t new_task;
401 vm_shared_region_t shared_region;
316670eb 402 ledger_t ledger = NULL;
1c79356b
A
403
404 new_task = (task_t) zalloc(task_zone);
405
406 if (new_task == TASK_NULL)
407 return(KERN_RESOURCE_SHORTAGE);
408
409 /* one ref for just being alive; one for our caller */
410 new_task->ref_count = 2;
411
316670eb
A
412 /* allocate with active entries */
413 assert(task_ledger_template != NULL);
414 if ((ledger = ledger_instantiate(task_ledger_template,
415 LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
416 zfree(task_zone, new_task);
417 return(KERN_RESOURCE_SHORTAGE);
418 }
419 new_task->ledger = ledger;
420
b0d623f7 421 /* if inherit_memory is true, parent_task MUST not be NULL */
1c79356b 422 if (inherit_memory)
316670eb 423 new_task->map = vm_map_fork(ledger, parent_task->map);
1c79356b 424 else
316670eb
A
425 new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
426 (vm_map_offset_t)(VM_MIN_ADDRESS),
427 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
1c79356b 428
2d21ac55
A
429 /* Inherit memlock limit from parent */
430 if (parent_task)
b0d623f7 431 vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
2d21ac55 432
b0d623f7 433 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
55e303ae 434 queue_init(&new_task->threads);
1c79356b 435 new_task->suspend_count = 0;
55e303ae 436 new_task->thread_count = 0;
55e303ae 437 new_task->active_thread_count = 0;
1c79356b 438 new_task->user_stop_count = 0;
0b4e3aa0 439 new_task->role = TASK_UNSPECIFIED;
1c79356b 440 new_task->active = TRUE;
b0d623f7 441 new_task->halting = FALSE;
2d21ac55 442 new_task->user_data = NULL;
1c79356b
A
443 new_task->faults = 0;
444 new_task->cow_faults = 0;
445 new_task->pageins = 0;
446 new_task->messages_sent = 0;
447 new_task->messages_received = 0;
448 new_task->syscalls_mach = 0;
55e303ae 449 new_task->priv_flags = 0;
1c79356b 450 new_task->syscalls_unix=0;
2d21ac55 451 new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
55e303ae
A
452 new_task->taskFeatures[0] = 0; /* Init task features */
453 new_task->taskFeatures[1] = 0; /* Init task features */
1c79356b 454
6d2010ae
A
455 zinfo_task_init(new_task);
456
1c79356b 457#ifdef MACH_BSD
2d21ac55 458 new_task->bsd_info = NULL;
1c79356b
A
459#endif /* MACH_BSD */
460
b0d623f7 461#if defined(__i386__) || defined(__x86_64__)
0c530ab8 462 new_task->i386_ldt = 0;
b0d623f7 463 new_task->task_debug = NULL;
0c530ab8
A
464#endif
465
55e303ae 466
1c79356b
A
467 queue_init(&new_task->semaphore_list);
468 queue_init(&new_task->lock_set_list);
469 new_task->semaphores_owned = 0;
470 new_task->lock_sets_owned = 0;
471
2d21ac55 472#if CONFIG_MACF_MACH
2d21ac55
A
473 new_task->label = labelh_new(1);
474 mac_task_label_init (&new_task->maclabel);
475#endif
1c79356b
A
476
477 ipc_task_init(new_task, parent_task);
478
91447636
A
479 new_task->total_user_time = 0;
480 new_task->total_system_time = 0;
1c79356b 481
2d21ac55 482 new_task->vtimers = 0;
1c79356b 483
2d21ac55
A
484 new_task->shared_region = NULL;
485
486 new_task->affinity_space = NULL;
1c79356b 487
b0d623f7
A
488#if CONFIG_COUNTERS
489 new_task->t_chud = 0U;
490#endif
491
316670eb
A
492 new_task->pidsuspended = FALSE;
493 new_task->frozen = FALSE;
494 new_task->rusage_cpu_flags = 0;
495 new_task->rusage_cpu_percentage = 0;
496 new_task->rusage_cpu_interval = 0;
497 new_task->rusage_cpu_deadline = 0;
498 new_task->rusage_cpu_callt = NULL;
499 new_task->proc_terminate = 0;
500#if CONFIG_EMBEDDED
501 queue_init(&new_task->task_watchers);
502 new_task->appstate = TASK_APPSTATE_ACTIVE;
503 new_task->num_taskwatchers = 0;
504 new_task->watchapplying = 0;
505#endif /* CONFIG_EMBEDDED */
506
db609669
A
507 new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;
508
2d21ac55 509 if (parent_task != TASK_NULL) {
1c79356b 510 new_task->sec_token = parent_task->sec_token;
55e303ae 511 new_task->audit_token = parent_task->audit_token;
1c79356b 512
2d21ac55
A
513 /* inherit the parent's shared region */
514 shared_region = vm_shared_region_get(parent_task);
515 vm_shared_region_set(new_task, shared_region);
1c79356b 516
91447636
A
517 if(task_has_64BitAddr(parent_task))
518 task_set_64BitAddr(new_task);
b0d623f7
A
519 new_task->all_image_info_addr = parent_task->all_image_info_addr;
520 new_task->all_image_info_size = parent_task->all_image_info_size;
0c530ab8 521
b0d623f7 522#if defined(__i386__) || defined(__x86_64__)
0c530ab8
A
523 if (inherit_memory && parent_task->i386_ldt)
524 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
525#endif
2d21ac55
A
526 if (inherit_memory && parent_task->affinity_space)
527 task_affinity_create(parent_task, new_task);
b0d623f7
A
528
529 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
6d2010ae
A
530 new_task->policystate = parent_task->policystate;
531 /* inherit the self action state */
316670eb 532 new_task->appliedstate = parent_task->appliedstate;
6d2010ae
A
533 new_task->ext_policystate = parent_task->ext_policystate;
534#if NOTYET
535 /* till the child lifecycle is cleared do not inherit external action */
316670eb 536 new_task->ext_appliedstate = parent_task->ext_appliedstate;
6d2010ae 537#else
316670eb 538 new_task->ext_appliedstate = default_task_null_policy;
6d2010ae 539#endif
1c79356b
A
540 }
541 else {
1c79356b 542 new_task->sec_token = KERNEL_SECURITY_TOKEN;
55e303ae 543 new_task->audit_token = KERNEL_AUDIT_TOKEN;
b0d623f7
A
544#ifdef __LP64__
545 if(is_64bit)
546 task_set_64BitAddr(new_task);
547#endif
6d2010ae
A
548 new_task->all_image_info_addr = (mach_vm_address_t)0;
549 new_task->all_image_info_size = (mach_vm_size_t)0;
b0d623f7
A
550
551 new_task->pset_hint = PROCESSOR_SET_NULL;
6d2010ae
A
552 new_task->policystate = default_task_proc_policy;
553 new_task->ext_policystate = default_task_proc_policy;
316670eb
A
554 new_task->appliedstate = default_task_null_policy;
555 new_task->ext_appliedstate = default_task_null_policy;
1c79356b
A
556 }
557
0b4e3aa0 558 if (kernel_task == TASK_NULL) {
55e303ae 559 new_task->priority = BASEPRI_KERNEL;
0b4e3aa0
A
560 new_task->max_priority = MAXPRI_KERNEL;
561 }
562 else {
563 new_task->priority = BASEPRI_DEFAULT;
564 new_task->max_priority = MAXPRI_USER;
565 }
6d2010ae
A
566
567 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
4b17d6b6 568 new_task->task_timer_wakeups_bin_1 = new_task->task_timer_wakeups_bin_2 = 0;
2d21ac55 569
b0d623f7 570 lck_mtx_lock(&tasks_threads_lock);
2d21ac55
A
571 queue_enter(&tasks, new_task, task_t, tasks);
572 tasks_count++;
b0d623f7 573 lck_mtx_unlock(&tasks_threads_lock);
1c79356b 574
55e303ae
A
575 if (vm_backing_store_low && parent_task != NULL)
576 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
1c79356b
A
577
578 ipc_task_enable(new_task);
579
1c79356b
A
580 *child_task = new_task;
581 return(KERN_SUCCESS);
582}
583
584/*
91447636 585 * task_deallocate:
1c79356b 586 *
91447636 587 * Drop a reference on a task.
1c79356b
A
588 */
/*
 * task_deallocate:
 *
 * Drop a reference on a task.  When the last reference goes away,
 * the task is removed from the terminated list, its machine state,
 * IPC space, map and ledger are torn down, its statistics are
 * folded into the dead-task accumulators, and the struct is freed.
 */
void
task_deallocate(
	task_t		task)
{
	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;

	if (task == TASK_NULL)
	    return;

	/* Non-zero means other references remain; nothing more to do. */
	if (task_deallocate_internal(task) > 0)
	    return;

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&terminated_tasks, task, task_t, tasks);
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	machine_task_terminate(task);

	ipc_task_terminate(task);

	if (task->affinity_space)
		task_affinity_deallocate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	/* Snapshot wakeup counts before the ledger is dropped. */
	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
		&interrupt_wakeups, &debit);
	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
		&platform_idle_wakeups, &debit);

	/* Accumulate statistics for dead tasks */
	lck_spin_lock(&dead_task_statistics_lock);
	dead_task_statistics.total_user_time += task->total_user_time;
	dead_task_statistics.total_system_time += task->total_system_time;

	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;

	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;

	lck_spin_unlock(&dead_task_statistics_lock);
	lck_mtx_destroy(&task->lock, &task_lck_grp);

#if CONFIG_MACF_MACH
	labelh_release(task->label);
#endif

	/* Fold kernel-memory ledger totals into the global accumulators. */
	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
	}
	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
	}
	ledger_dereference(task->ledger);
	zinfo_task_free(task);
	zfree(task_zone, task);
}
657
0c530ab8
A
658/*
659 * task_name_deallocate:
660 *
661 * Drop a reference on a task name.
662 */
663void
664task_name_deallocate(
665 task_name_t task_name)
666{
667 return(task_deallocate((task_t)task_name));
668}
669
670
1c79356b
A
671/*
672 * task_terminate:
673 *
674 * Terminate the specified task. See comments on thread_terminate
675 * (kern/thread.c) about problems with terminating the "current task."
676 */
677
678kern_return_t
679task_terminate(
680 task_t task)
681{
682 if (task == TASK_NULL)
91447636
A
683 return (KERN_INVALID_ARGUMENT);
684
1c79356b 685 if (task->bsd_info)
91447636
A
686 return (KERN_FAILURE);
687
1c79356b
A
688 return (task_terminate_internal(task));
689}
690
/*
 * task_terminate_internal:
 *
 * Tear down a task: hold and terminate all its threads, disable
 * its IPC, destroy its synchronizers and IPC space, unmap its
 * address space, move it to the terminated list, and drop the
 * task's self-reference.  Fails with KERN_FAILURE if the task is
 * already inactive (i.e. termination is already under way).
 */
kern_return_t
task_terminate_internal(
	task_t			task)
{
	thread_t			thread, self;
	task_t				self_task;
	boolean_t			interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 * Get the task locked and make sure that we are not racing
	 * with someone else trying to terminate us.
	 * When two distinct tasks must be locked, they are locked in
	 * address order to avoid deadlock against a concurrent caller.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active) {
		/*
		 * Task is already being terminated.
		 * Just return an error. If we are dying, this will
		 * just get us to our AST special handler and that
		 * will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 * Indicate that we want all the threads to stop executing
	 * at user space by holding the task (we would have held
	 * each thread independently in thread_terminate_internal -
	 * but this way we may be more likely to already find it
	 * held there).  Mark the task inactive, and prevent
	 * further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 * Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread_terminate_internal(thread);
	}

	task_unlock(task);

#if CONFIG_EMBEDDED
	/*
	 * remove all task watchers
	 */
	task_removewatchers(task);
#endif /* CONFIG_EMBEDDED */

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_terminate(task->itk_space);

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explictly here.
	 */
	vm_map_remove(task->map,
		      task->map->min_offset,
		      task->map->max_offset,
		      VM_MAP_NO_FLAGS);

	/* release our shared region */
	vm_shared_region_set(task, NULL);

	/* Move the task from the live list to the terminated list. */
	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&tasks, task, task_t, tasks);
	queue_enter(&terminated_tasks, task, task_t, tasks);
	tasks_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}
819
820/*
b0d623f7 821 * task_start_halt:
91447636
A
822 *
823 * Shut the current task down (except for the current thread) in
824 * preparation for dramatic changes to the task (probably exec).
b0d623f7
A
825 * We hold the task and mark all other threads in the task for
826 * termination.
1c79356b
A
827 */
/*
 * task_start_halt:
 *
 * Begin halting the current task in preparation for drastic
 * changes (probably exec): mark it halting, hold it, and
 * terminate every thread except the caller.  Only valid for the
 * caller's own task; fails if a halt or termination is already
 * in progress.
 */
kern_return_t
task_start_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	/* Only the task's own thread may initiate its halt. */
	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (task->halting || !task->active || !self->active) {
		/*
		 * Task or current thread is already being terminated.
		 * Hurry up and return out of the current kernel context
		 * so that we run our AST special handler to terminate
		 * ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task->halting = TRUE;

	if (task->thread_count > 1) {

		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 * Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}
	task_unlock(task);
	return KERN_SUCCESS;
}
880
881
882/*
883 * task_complete_halt:
884 *
885 * Complete task halt by waiting for threads to terminate, then clean
886 * up task resources (VM, port namespace, etc...) and then let the
887 * current thread go in the (practically empty) task context.
888 */
/*
 * task_complete_halt:
 *
 * Finish the halt begun by task_start_halt(): wait until the
 * other threads are reaped, then clean up task resources (machine
 * state, synchronizers, IPC space contents, address space) and
 * clear the halting flag so the (now nearly empty) task can be
 * reused by the caller.  Must be called on the task's own thread.
 */
void
task_complete_halt(task_t task)
{
	task_lock(task);
	assert(task->halting);
	assert(task == current_task());

	/*
	 * Wait for the other threads to get shut down.
	 * When the last other thread is reaped, we'll be
	 * woken up.
	 */
	if (task->thread_count > 1) {
		assert_wait((event_t)&task->halting, THREAD_UNINT);
		task_unlock(task);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		task_unlock(task);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup of task-level resources
	 * associated with the current thread before
	 * ripping apart the task.
	 */
	machine_task_terminate(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the contents of the IPC space, leaving just
	 * a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	task->halting = FALSE;
}
937
938/*
939 * task_hold_locked:
940 *
941 * Suspend execution of the specified task.
942 * This is a recursive-style suspension of the task, a count of
943 * suspends is maintained.
944 *
945 * CONDITIONS: the task is locked and active.
946 */
947void
948task_hold_locked(
91447636 949 register task_t task)
1c79356b 950{
91447636 951 register thread_t thread;
1c79356b
A
952
953 assert(task->active);
954
9bccf70c
A
955 if (task->suspend_count++ > 0)
956 return;
1c79356b
A
957
958 /*
91447636 959 * Iterate through all the threads and hold them.
1c79356b 960 */
91447636
A
961 queue_iterate(&task->threads, thread, thread_t, task_threads) {
962 thread_mtx_lock(thread);
963 thread_hold(thread);
964 thread_mtx_unlock(thread);
1c79356b
A
965 }
966}
967
968/*
969 * task_hold:
970 *
971 * Same as the internal routine above, except that is must lock
972 * and verify that the task is active. This differs from task_suspend
973 * in that it places a kernel hold on the task rather than just a
974 * user-level hold. This keeps users from over resuming and setting
975 * it running out from under the kernel.
976 *
977 * CONDITIONS: the caller holds a reference on the task
978 */
979kern_return_t
91447636
A
980task_hold(
981 register task_t task)
1c79356b 982{
1c79356b
A
983 if (task == TASK_NULL)
984 return (KERN_INVALID_ARGUMENT);
91447636 985
1c79356b 986 task_lock(task);
91447636 987
1c79356b
A
988 if (!task->active) {
989 task_unlock(task);
91447636 990
1c79356b
A
991 return (KERN_FAILURE);
992 }
1c79356b 993
91447636
A
994 task_hold_locked(task);
995 task_unlock(task);
996
997 return (KERN_SUCCESS);
1c79356b
A
998}
999
316670eb
A
1000kern_return_t
1001task_wait(
1002 task_t task,
1003 boolean_t until_not_runnable)
1004{
1005 if (task == TASK_NULL)
1006 return (KERN_INVALID_ARGUMENT);
1007
1008 task_lock(task);
1009
1010 if (!task->active) {
1011 task_unlock(task);
1012
1013 return (KERN_FAILURE);
1014 }
1015
1016 task_wait_locked(task, until_not_runnable);
1017 task_unlock(task);
1018
1019 return (KERN_SUCCESS);
1020}
1021
1c79356b 1022/*
91447636
A
1023 * task_wait_locked:
1024 *
1c79356b
A
1025 * Wait for all threads in task to stop.
1026 *
1027 * Conditions:
1028 * Called with task locked, active, and held.
1029 */
1030void
1031task_wait_locked(
316670eb
A
1032 register task_t task,
1033 boolean_t until_not_runnable)
1c79356b 1034{
91447636 1035 register thread_t thread, self;
1c79356b
A
1036
1037 assert(task->active);
1038 assert(task->suspend_count > 0);
1039
91447636
A
1040 self = current_thread();
1041
1c79356b 1042 /*
91447636 1043 * Iterate through all the threads and wait for them to
1c79356b
A
1044 * stop. Do not wait for the current thread if it is within
1045 * the task.
1046 */
91447636
A
1047 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1048 if (thread != self)
316670eb 1049 thread_wait(thread, until_not_runnable);
1c79356b
A
1050 }
1051}
1052
1053/*
1054 * task_release_locked:
1055 *
1056 * Release a kernel hold on a task.
1057 *
1058 * CONDITIONS: the task is locked and active
1059 */
1060void
1061task_release_locked(
91447636 1062 register task_t task)
1c79356b 1063{
91447636 1064 register thread_t thread;
1c79356b
A
1065
1066 assert(task->active);
9bccf70c 1067 assert(task->suspend_count > 0);
1c79356b 1068
9bccf70c
A
1069 if (--task->suspend_count > 0)
1070 return;
1c79356b 1071
91447636
A
1072 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1073 thread_mtx_lock(thread);
1074 thread_release(thread);
1075 thread_mtx_unlock(thread);
1c79356b
A
1076 }
1077}
1078
1079/*
1080 * task_release:
1081 *
1082 * Same as the internal routine above, except that it must lock
1083 * and verify that the task is active.
1084 *
1085 * CONDITIONS: The caller holds a reference to the task
1086 */
1087kern_return_t
91447636
A
1088task_release(
1089 task_t task)
1c79356b 1090{
1c79356b
A
1091 if (task == TASK_NULL)
1092 return (KERN_INVALID_ARGUMENT);
91447636 1093
1c79356b 1094 task_lock(task);
91447636 1095
1c79356b
A
1096 if (!task->active) {
1097 task_unlock(task);
91447636 1098
1c79356b
A
1099 return (KERN_FAILURE);
1100 }
1c79356b 1101
91447636
A
1102 task_release_locked(task);
1103 task_unlock(task);
1104
1105 return (KERN_SUCCESS);
1c79356b
A
1106}
1107
1108kern_return_t
1109task_threads(
91447636
A
1110 task_t task,
1111 thread_act_array_t *threads_out,
1c79356b
A
1112 mach_msg_type_number_t *count)
1113{
91447636 1114 mach_msg_type_number_t actual;
2d21ac55 1115 thread_t *thread_list;
91447636
A
1116 thread_t thread;
1117 vm_size_t size, size_needed;
1118 void *addr;
1119 unsigned int i, j;
1c79356b
A
1120
1121 if (task == TASK_NULL)
91447636 1122 return (KERN_INVALID_ARGUMENT);
1c79356b 1123
2d21ac55 1124 size = 0; addr = NULL;
1c79356b
A
1125
1126 for (;;) {
1127 task_lock(task);
1128 if (!task->active) {
1129 task_unlock(task);
91447636 1130
1c79356b
A
1131 if (size != 0)
1132 kfree(addr, size);
91447636
A
1133
1134 return (KERN_FAILURE);
1c79356b
A
1135 }
1136
55e303ae 1137 actual = task->thread_count;
1c79356b
A
1138
1139 /* do we have the memory we need? */
91447636 1140 size_needed = actual * sizeof (mach_port_t);
1c79356b
A
1141 if (size_needed <= size)
1142 break;
1143
1144 /* unlock the task and allocate more memory */
1145 task_unlock(task);
1146
1147 if (size != 0)
1148 kfree(addr, size);
1149
1150 assert(size_needed > 0);
1151 size = size_needed;
1152
1153 addr = kalloc(size);
1154 if (addr == 0)
91447636 1155 return (KERN_RESOURCE_SHORTAGE);
1c79356b
A
1156 }
1157
1158 /* OK, have memory and the task is locked & active */
2d21ac55 1159 thread_list = (thread_t *)addr;
91447636
A
1160
1161 i = j = 0;
1162
1163 for (thread = (thread_t)queue_first(&task->threads); i < actual;
1164 ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
1165 thread_reference_internal(thread);
2d21ac55 1166 thread_list[j++] = thread;
1c79356b 1167 }
91447636
A
1168
1169 assert(queue_end(&task->threads, (queue_entry_t)thread));
1c79356b
A
1170
1171 actual = j;
91447636 1172 size_needed = actual * sizeof (mach_port_t);
1c79356b 1173
91447636 1174 /* can unlock task now that we've got the thread refs */
1c79356b
A
1175 task_unlock(task);
1176
1177 if (actual == 0) {
91447636 1178 /* no threads, so return null pointer and deallocate memory */
1c79356b 1179
2d21ac55 1180 *threads_out = NULL;
1c79356b
A
1181 *count = 0;
1182
1183 if (size != 0)
1184 kfree(addr, size);
91447636
A
1185 }
1186 else {
1c79356b
A
1187 /* if we allocated too much, must copy */
1188
1189 if (size_needed < size) {
91447636 1190 void *newaddr;
1c79356b
A
1191
1192 newaddr = kalloc(size_needed);
1193 if (newaddr == 0) {
91447636 1194 for (i = 0; i < actual; ++i)
2d21ac55 1195 thread_deallocate(thread_list[i]);
1c79356b 1196 kfree(addr, size);
91447636 1197 return (KERN_RESOURCE_SHORTAGE);
1c79356b
A
1198 }
1199
91447636 1200 bcopy(addr, newaddr, size_needed);
1c79356b 1201 kfree(addr, size);
2d21ac55 1202 thread_list = (thread_t *)newaddr;
1c79356b
A
1203 }
1204
2d21ac55 1205 *threads_out = thread_list;
1c79356b
A
1206 *count = actual;
1207
1208 /* do the conversion that Mig should handle */
1209
91447636 1210 for (i = 0; i < actual; ++i)
2d21ac55 1211 ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
1c79356b
A
1212 }
1213
91447636 1214 return (KERN_SUCCESS);
1c79356b
A
1215}
1216
316670eb
A
1217static kern_return_t
1218place_task_hold (
1c79356b 1219 register task_t task)
316670eb 1220{
1c79356b 1221 if (!task->active) {
1c79356b
A
1222 return (KERN_FAILURE);
1223 }
91447636
A
1224
1225 if (task->user_stop_count++ > 0) {
1c79356b
A
1226 /*
1227 * If the stop count was positive, the task is
1228 * already stopped and we can exit.
1229 */
1c79356b
A
1230 return (KERN_SUCCESS);
1231 }
1232
1233 /*
1234 * Put a kernel-level hold on the threads in the task (all
1235 * user-level task suspensions added together represent a
1236 * single kernel-level hold). We then wait for the threads
1237 * to stop executing user code.
1238 */
1239 task_hold_locked(task);
316670eb
A
1240 task_wait_locked(task, TRUE);
1241
1242 return (KERN_SUCCESS);
1243}
1244
1245static kern_return_t
1246release_task_hold (
1247 register task_t task,
1248 boolean_t pidresume)
1249{
1250 register boolean_t release = FALSE;
1251
1252 if (!task->active) {
1253 return (KERN_FAILURE);
1254 }
1255
1256 if (pidresume) {
1257 if (task->pidsuspended == FALSE) {
1258 return (KERN_FAILURE);
1259 }
1260 task->pidsuspended = FALSE;
1261 }
1262
1263 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
1264 if (--task->user_stop_count == 0) {
1265 release = TRUE;
1266 }
1267 }
1268 else {
1269 return (KERN_FAILURE);
1270 }
1271
1272 /*
1273 * Release the task if necessary.
1274 */
1275 if (release)
1276 task_release_locked(task);
1277
1278 return (KERN_SUCCESS);
1279}
1280
1281/*
1282 * task_suspend:
1283 *
1284 * Implement a user-level suspension on a task.
1285 *
1286 * Conditions:
1287 * The caller holds a reference to the task
1288 */
1289kern_return_t
1290task_suspend(
1291 register task_t task)
1292{
1293 kern_return_t kr;
1294
1295 if (task == TASK_NULL || task == kernel_task)
1296 return (KERN_INVALID_ARGUMENT);
1297
1298 task_lock(task);
1299
1300 kr = place_task_hold(task);
91447636 1301
1c79356b 1302 task_unlock(task);
91447636 1303
316670eb 1304 return (kr);
1c79356b
A
1305}
1306
1307/*
91447636 1308 * task_resume:
1c79356b
A
1309 * Release a kernel hold on a task.
1310 *
1311 * Conditions:
1312 * The caller holds a reference to the task
1313 */
1314kern_return_t
91447636
A
1315task_resume(
1316 register task_t task)
1c79356b 1317{
316670eb 1318 kern_return_t kr;
1c79356b 1319
91447636
A
1320 if (task == TASK_NULL || task == kernel_task)
1321 return (KERN_INVALID_ARGUMENT);
1c79356b 1322
1c79356b 1323 task_lock(task);
91447636 1324
316670eb 1325 kr = release_task_hold(task, FALSE);
91447636 1326
316670eb 1327 task_unlock(task);
91447636 1328
316670eb
A
1329 return (kr);
1330}
1331
1332kern_return_t
1333task_pidsuspend_locked(task_t task)
1334{
1335 kern_return_t kr;
1336
1337 if (task->pidsuspended) {
1338 kr = KERN_FAILURE;
1339 goto out;
1c79356b 1340 }
91447636 1341
316670eb
A
1342 task->pidsuspended = TRUE;
1343
1344 kr = place_task_hold(task);
1345 if (kr != KERN_SUCCESS) {
1346 task->pidsuspended = FALSE;
1c79356b 1347 }
316670eb
A
1348out:
1349 return(kr);
1350}
1c79356b 1351
316670eb
A
1352
1353/*
1354 * task_pidsuspend:
1355 *
1356 * Suspends a task by placing a hold on its threads.
1357 *
1358 * Conditions:
1359 * The caller holds a reference to the task
1360 */
1361kern_return_t
1362task_pidsuspend(
1363 register task_t task)
1364{
1365 kern_return_t kr;
1366
1367 if (task == TASK_NULL || task == kernel_task)
1368 return (KERN_INVALID_ARGUMENT);
1369
1370 task_lock(task);
1371
1372 kr = task_pidsuspend_locked(task);
1c79356b
A
1373
1374 task_unlock(task);
91447636 1375
316670eb
A
1376 return (kr);
1377}
1378
1379/* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */
1380#define THAW_ON_RESUME 1
1381
1382/*
1383 * task_pidresume:
1384 * Resumes a previously suspended task.
1385 *
1386 * Conditions:
1387 * The caller holds a reference to the task
1388 */
1389kern_return_t
1390task_pidresume(
1391 register task_t task)
1392{
1393 kern_return_t kr;
1394#if (CONFIG_FREEZE && THAW_ON_RESUME)
1395 boolean_t frozen;
1396#endif
1397
1398 if (task == TASK_NULL || task == kernel_task)
1399 return (KERN_INVALID_ARGUMENT);
1400
1401 task_lock(task);
1402
1403#if (CONFIG_FREEZE && THAW_ON_RESUME)
1404 frozen = task->frozen;
1405 task->frozen = FALSE;
1406#endif
1407
1408 kr = release_task_hold(task, TRUE);
1409
1410 task_unlock(task);
1411
1412#if (CONFIG_FREEZE && THAW_ON_RESUME)
1413 if ((kr == KERN_SUCCESS) && (frozen == TRUE)) {
1414 kr = vm_map_thaw(task->map);
1415 }
1416#endif
1417
1418 return (kr);
1c79356b
A
1419}
1420
6d2010ae
A
1421#if CONFIG_FREEZE
1422
1423/*
1424 * task_freeze:
1425 *
316670eb 1426 * Freeze a task.
6d2010ae
A
1427 *
1428 * Conditions:
1429 * The caller holds a reference to the task
1430 */
1431kern_return_t
1432task_freeze(
1433 register task_t task,
1434 uint32_t *purgeable_count,
1435 uint32_t *wired_count,
1436 uint32_t *clean_count,
1437 uint32_t *dirty_count,
316670eb 1438 uint32_t dirty_budget,
6d2010ae
A
1439 boolean_t *shared,
1440 boolean_t walk_only)
1441{
316670eb
A
1442 kern_return_t kr;
1443
6d2010ae
A
1444 if (task == TASK_NULL || task == kernel_task)
1445 return (KERN_INVALID_ARGUMENT);
1446
316670eb
A
1447 task_lock(task);
1448
1449 if (task->frozen) {
1450 task_unlock(task);
1451 return (KERN_FAILURE);
1452 }
1453
1454 if (walk_only == FALSE) {
1455 task->frozen = TRUE;
1456 }
1457
1458 task_unlock(task);
1459
6d2010ae 1460 if (walk_only) {
316670eb 1461 kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
6d2010ae 1462 } else {
316670eb 1463 kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
6d2010ae
A
1464 }
1465
316670eb 1466 return (kr);
6d2010ae
A
1467}
1468
1469/*
1470 * task_thaw:
1471 *
1472 * Thaw a currently frozen task.
1473 *
1474 * Conditions:
1475 * The caller holds a reference to the task
1476 */
1477kern_return_t
1478task_thaw(
1479 register task_t task)
1480{
316670eb
A
1481 kern_return_t kr;
1482
6d2010ae
A
1483 if (task == TASK_NULL || task == kernel_task)
1484 return (KERN_INVALID_ARGUMENT);
1485
316670eb
A
1486 task_lock(task);
1487
1488 if (!task->frozen) {
1489 task_unlock(task);
1490 return (KERN_FAILURE);
1491 }
1492
1493 task->frozen = FALSE;
6d2010ae 1494
316670eb
A
1495 task_unlock(task);
1496
1497 kr = vm_map_thaw(task->map);
1498
1499 return (kr);
6d2010ae
A
1500}
1501
1502#endif /* CONFIG_FREEZE */
1503
1c79356b
A
1504kern_return_t
1505host_security_set_task_token(
1506 host_security_t host_security,
1507 task_t task,
1508 security_token_t sec_token,
55e303ae 1509 audit_token_t audit_token,
1c79356b
A
1510 host_priv_t host_priv)
1511{
55e303ae 1512 ipc_port_t host_port;
1c79356b
A
1513 kern_return_t kr;
1514
1515 if (task == TASK_NULL)
1516 return(KERN_INVALID_ARGUMENT);
1517
1518 if (host_security == HOST_NULL)
1519 return(KERN_INVALID_SECURITY);
1520
1521 task_lock(task);
1522 task->sec_token = sec_token;
55e303ae 1523 task->audit_token = audit_token;
1c79356b
A
1524 task_unlock(task);
1525
1526 if (host_priv != HOST_PRIV_NULL) {
55e303ae 1527 kr = host_get_host_priv_port(host_priv, &host_port);
1c79356b 1528 } else {
55e303ae 1529 kr = host_get_host_port(host_priv_self(), &host_port);
1c79356b 1530 }
55e303ae
A
1531 assert(kr == KERN_SUCCESS);
1532 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
1c79356b
A
1533 return(kr);
1534}
1535
1c79356b
A
1536/*
1537 * This routine was added, pretty much exclusively, for registering the
1538 * RPC glue vector for in-kernel short circuited tasks. Rather than
1539 * removing it completely, I have only disabled that feature (which was
1540 * the only feature at the time). It just appears that we are going to
1541 * want to add some user data to tasks in the future (i.e. bsd info,
1542 * task names, etc...), so I left it in the formal task interface.
1543 */
1544kern_return_t
1545task_set_info(
1546 task_t task,
1547 task_flavor_t flavor,
91447636
A
1548 __unused task_info_t task_info_in, /* pointer to IN array */
1549 __unused mach_msg_type_number_t task_info_count)
1c79356b 1550{
1c79356b
A
1551 if (task == TASK_NULL)
1552 return(KERN_INVALID_ARGUMENT);
1553
1554 switch (flavor) {
1555 default:
1556 return (KERN_INVALID_ARGUMENT);
1557 }
1558 return (KERN_SUCCESS);
1559}
1560
1561kern_return_t
1562task_info(
91447636
A
1563 task_t task,
1564 task_flavor_t flavor,
1565 task_info_t task_info_out,
1c79356b
A
1566 mach_msg_type_number_t *task_info_count)
1567{
b0d623f7
A
1568 kern_return_t error = KERN_SUCCESS;
1569
1c79356b 1570 if (task == TASK_NULL)
91447636 1571 return (KERN_INVALID_ARGUMENT);
1c79356b 1572
b0d623f7
A
1573 task_lock(task);
1574
1575 if ((task != current_task()) && (!task->active)) {
1576 task_unlock(task);
1577 return (KERN_INVALID_ARGUMENT);
1578 }
1579
1c79356b
A
1580 switch (flavor) {
1581
91447636 1582 case TASK_BASIC_INFO_32:
2d21ac55 1583 case TASK_BASIC2_INFO_32:
91447636
A
1584 {
1585 task_basic_info_32_t basic_info;
b0d623f7
A
1586 vm_map_t map;
1587 clock_sec_t secs;
1588 clock_usec_t usecs;
1c79356b 1589
b0d623f7
A
1590 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
1591 error = KERN_INVALID_ARGUMENT;
1592 break;
1593 }
1c79356b 1594
91447636 1595 basic_info = (task_basic_info_32_t)task_info_out;
1c79356b 1596
91447636 1597 map = (task == kernel_task)? kernel_map: task->map;
b0d623f7 1598 basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
2d21ac55
A
1599 if (flavor == TASK_BASIC2_INFO_32) {
1600 /*
1601 * The "BASIC2" flavor gets the maximum resident
1602 * size instead of the current resident size...
1603 */
1604 basic_info->resident_size = pmap_resident_max(map->pmap);
1605 } else {
1606 basic_info->resident_size = pmap_resident_count(map->pmap);
1607 }
1608 basic_info->resident_size *= PAGE_SIZE;
1c79356b 1609
0b4e3aa0
A
1610 basic_info->policy = ((task != kernel_task)?
1611 POLICY_TIMESHARE: POLICY_RR);
1c79356b 1612 basic_info->suspend_count = task->user_stop_count;
91447636 1613
b0d623f7
A
1614 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1615 basic_info->user_time.seconds =
1616 (typeof(basic_info->user_time.seconds))secs;
1617 basic_info->user_time.microseconds = usecs;
1618
1619 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1620 basic_info->system_time.seconds =
1621 (typeof(basic_info->system_time.seconds))secs;
1622 basic_info->system_time.microseconds = usecs;
1c79356b 1623
91447636 1624 *task_info_count = TASK_BASIC_INFO_32_COUNT;
1c79356b 1625 break;
91447636 1626 }
1c79356b 1627
91447636
A
1628 case TASK_BASIC_INFO_64:
1629 {
1630 task_basic_info_64_t basic_info;
b0d623f7
A
1631 vm_map_t map;
1632 clock_sec_t secs;
1633 clock_usec_t usecs;
1c79356b 1634
b0d623f7
A
1635 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
1636 error = KERN_INVALID_ARGUMENT;
1637 break;
1638 }
91447636
A
1639
1640 basic_info = (task_basic_info_64_t)task_info_out;
1641
1642 map = (task == kernel_task)? kernel_map: task->map;
1643 basic_info->virtual_size = map->size;
2d21ac55
A
1644 basic_info->resident_size =
1645 (mach_vm_size_t)(pmap_resident_count(map->pmap))
1646 * PAGE_SIZE_64;
91447636 1647
91447636
A
1648 basic_info->policy = ((task != kernel_task)?
1649 POLICY_TIMESHARE: POLICY_RR);
1650 basic_info->suspend_count = task->user_stop_count;
1651
b0d623f7
A
1652 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1653 basic_info->user_time.seconds =
1654 (typeof(basic_info->user_time.seconds))secs;
1655 basic_info->user_time.microseconds = usecs;
1656
1657 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1658 basic_info->system_time.seconds =
1659 (typeof(basic_info->system_time.seconds))secs;
1660 basic_info->system_time.microseconds = usecs;
91447636
A
1661
1662 *task_info_count = TASK_BASIC_INFO_64_COUNT;
1663 break;
1664 }
1665
316670eb
A
1666 case MACH_TASK_BASIC_INFO:
1667 {
1668 mach_task_basic_info_t basic_info;
1669 vm_map_t map;
1670 clock_sec_t secs;
1671 clock_usec_t usecs;
1672
1673 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
1674 error = KERN_INVALID_ARGUMENT;
1675 break;
1676 }
1677
1678 basic_info = (mach_task_basic_info_t)task_info_out;
1679
1680 map = (task == kernel_task) ? kernel_map : task->map;
1681
1682 basic_info->virtual_size = map->size;
1683
1684 basic_info->resident_size =
1685 (mach_vm_size_t)(pmap_resident_count(map->pmap));
1686 basic_info->resident_size *= PAGE_SIZE_64;
1687
1688 basic_info->resident_size_max =
1689 (mach_vm_size_t)(pmap_resident_max(map->pmap));
1690 basic_info->resident_size_max *= PAGE_SIZE_64;
1691
1692 basic_info->policy = ((task != kernel_task) ?
1693 POLICY_TIMESHARE : POLICY_RR);
1694
1695 basic_info->suspend_count = task->user_stop_count;
1696
1697 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
1698 basic_info->user_time.seconds =
1699 (typeof(basic_info->user_time.seconds))secs;
1700 basic_info->user_time.microseconds = usecs;
1701
1702 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
1703 basic_info->system_time.seconds =
1704 (typeof(basic_info->system_time.seconds))secs;
1705 basic_info->system_time.microseconds = usecs;
1706
1707 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
1708 break;
1709 }
1710
91447636
A
1711 case TASK_THREAD_TIMES_INFO:
1712 {
1713 register task_thread_times_info_t times_info;
1714 register thread_t thread;
1715
b0d623f7
A
1716 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
1717 error = KERN_INVALID_ARGUMENT;
1718 break;
1719 }
1c79356b
A
1720
1721 times_info = (task_thread_times_info_t) task_info_out;
1722 times_info->user_time.seconds = 0;
1723 times_info->user_time.microseconds = 0;
1724 times_info->system_time.seconds = 0;
1725 times_info->system_time.microseconds = 0;
1726
1c79356b 1727
91447636
A
1728 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1729 time_value_t user_time, system_time;
1c79356b
A
1730
1731 thread_read_times(thread, &user_time, &system_time);
1732
1c79356b
A
1733 time_value_add(&times_info->user_time, &user_time);
1734 time_value_add(&times_info->system_time, &system_time);
1735 }
91447636 1736
1c79356b
A
1737
1738 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
1739 break;
91447636
A
1740 }
1741
1742 case TASK_ABSOLUTETIME_INFO:
1743 {
1744 task_absolutetime_info_t info;
1745 register thread_t thread;
1746
b0d623f7
A
1747 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
1748 error = KERN_INVALID_ARGUMENT;
1749 break;
1750 }
91447636
A
1751
1752 info = (task_absolutetime_info_t)task_info_out;
1753 info->threads_user = info->threads_system = 0;
1754
91447636
A
1755
1756 info->total_user = task->total_user_time;
1757 info->total_system = task->total_system_time;
1758
1759 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1760 uint64_t tval;
316670eb
A
1761 spl_t x;
1762
1763 x = splsched();
1764 thread_lock(thread);
91447636
A
1765
1766 tval = timer_grab(&thread->user_timer);
1767 info->threads_user += tval;
1768 info->total_user += tval;
1769
1770 tval = timer_grab(&thread->system_timer);
316670eb
A
1771 if (thread->precise_user_kernel_time) {
1772 info->threads_system += tval;
1773 info->total_system += tval;
1774 } else {
1775 /* system_timer may represent either sys or user */
1776 info->threads_user += tval;
1777 info->total_user += tval;
1778 }
1779
1780 thread_unlock(thread);
1781 splx(x);
91447636
A
1782 }
1783
91447636
A
1784
1785 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
1786 break;
1787 }
1c79356b 1788
b0d623f7
A
1789 case TASK_DYLD_INFO:
1790 {
1791 task_dyld_info_t info;
1792
6d2010ae
A
1793 /*
1794 * We added the format field to TASK_DYLD_INFO output. For
1795 * temporary backward compatibility, accept the fact that
1796 * clients may ask for the old version - distinquished by the
1797 * size of the expected result structure.
1798 */
1799#define TASK_LEGACY_DYLD_INFO_COUNT \
1800 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
1801
1802 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
b0d623f7
A
1803 error = KERN_INVALID_ARGUMENT;
1804 break;
1805 }
6d2010ae 1806
b0d623f7
A
1807 info = (task_dyld_info_t)task_info_out;
1808 info->all_image_info_addr = task->all_image_info_addr;
1809 info->all_image_info_size = task->all_image_info_size;
6d2010ae
A
1810
1811 /* only set format on output for those expecting it */
1812 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
1813 info->all_image_info_format = task_has_64BitAddr(task) ?
1814 TASK_DYLD_ALL_IMAGE_INFO_64 :
1815 TASK_DYLD_ALL_IMAGE_INFO_32 ;
1816 *task_info_count = TASK_DYLD_INFO_COUNT;
1817 } else {
1818 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
1819 }
b0d623f7
A
1820 break;
1821 }
1822
6d2010ae
A
1823 case TASK_EXTMOD_INFO:
1824 {
1825 task_extmod_info_t info;
1826 void *p;
1827
1828 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
1829 error = KERN_INVALID_ARGUMENT;
1830 break;
1831 }
1832
1833 info = (task_extmod_info_t)task_info_out;
1834
1835 p = get_bsdtask_info(task);
1836 if (p) {
1837 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
1838 } else {
1839 bzero(info->task_uuid, sizeof(info->task_uuid));
1840 }
1841 info->extmod_statistics = task->extmod_statistics;
1842 *task_info_count = TASK_EXTMOD_INFO_COUNT;
1843
1844 break;
1845 }
1846
1847 case TASK_KERNELMEMORY_INFO:
1848 {
1849 task_kernelmemory_info_t tkm_info;
316670eb 1850 ledger_amount_t credit, debit;
6d2010ae
A
1851
1852 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
1853 error = KERN_INVALID_ARGUMENT;
1854 break;
1855 }
1856
1857 tkm_info = (task_kernelmemory_info_t) task_info_out;
316670eb
A
1858 tkm_info->total_palloc = 0;
1859 tkm_info->total_pfree = 0;
1860 tkm_info->total_salloc = 0;
1861 tkm_info->total_sfree = 0;
6d2010ae
A
1862
1863 if (task == kernel_task) {
1864 /*
1865 * All shared allocs/frees from other tasks count against
1866 * the kernel private memory usage. If we are looking up
1867 * info for the kernel task, gather from everywhere.
1868 */
1869 task_unlock(task);
1870
1871 /* start by accounting for all the terminated tasks against the kernel */
1872 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
1873 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
6d2010ae
A
1874
1875 /* count all other task/thread shared alloc/free against the kernel */
1876 lck_mtx_lock(&tasks_threads_lock);
316670eb
A
1877
1878 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
6d2010ae
A
1879 queue_iterate(&tasks, task, task_t, tasks) {
1880 if (task == kernel_task) {
316670eb
A
1881 if (ledger_get_entries(task->ledger,
1882 task_ledgers.tkm_private, &credit,
1883 &debit) == KERN_SUCCESS) {
1884 tkm_info->total_palloc += credit;
1885 tkm_info->total_pfree += debit;
1886 }
6d2010ae 1887 }
316670eb
A
1888 if (!ledger_get_entries(task->ledger,
1889 task_ledgers.tkm_shared, &credit, &debit)) {
1890 tkm_info->total_palloc += credit;
1891 tkm_info->total_pfree += debit;
6d2010ae 1892 }
6d2010ae
A
1893 }
1894 lck_mtx_unlock(&tasks_threads_lock);
1895 } else {
316670eb
A
1896 if (!ledger_get_entries(task->ledger,
1897 task_ledgers.tkm_private, &credit, &debit)) {
1898 tkm_info->total_palloc = credit;
1899 tkm_info->total_pfree = debit;
1900 }
1901 if (!ledger_get_entries(task->ledger,
1902 task_ledgers.tkm_shared, &credit, &debit)) {
1903 tkm_info->total_salloc = credit;
1904 tkm_info->total_sfree = debit;
6d2010ae
A
1905 }
1906 task_unlock(task);
1907 }
1908
1909 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
1910 return KERN_SUCCESS;
1911 }
1912
91447636
A
1913 /* OBSOLETE */
1914 case TASK_SCHED_FIFO_INFO:
1915 {
1c79356b 1916
b0d623f7
A
1917 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
1918 error = KERN_INVALID_ARGUMENT;
1919 break;
1920 }
1c79356b 1921
b0d623f7 1922 error = KERN_INVALID_POLICY;
6d2010ae 1923 break;
91447636 1924 }
1c79356b 1925
91447636
A
1926 /* OBSOLETE */
1927 case TASK_SCHED_RR_INFO:
1928 {
1c79356b 1929 register policy_rr_base_t rr_base;
6d2010ae
A
1930 uint32_t quantum_time;
1931 uint64_t quantum_ns;
1c79356b 1932
b0d623f7
A
1933 if (*task_info_count < POLICY_RR_BASE_COUNT) {
1934 error = KERN_INVALID_ARGUMENT;
1935 break;
1936 }
1c79356b
A
1937
1938 rr_base = (policy_rr_base_t) task_info_out;
1939
0b4e3aa0 1940 if (task != kernel_task) {
b0d623f7
A
1941 error = KERN_INVALID_POLICY;
1942 break;
1c79356b
A
1943 }
1944
1945 rr_base->base_priority = task->priority;
1c79356b 1946
6d2010ae
A
1947 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
1948 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
1949
1950 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
1c79356b
A
1951
1952 *task_info_count = POLICY_RR_BASE_COUNT;
1953 break;
91447636 1954 }
1c79356b 1955
91447636
A
1956 /* OBSOLETE */
1957 case TASK_SCHED_TIMESHARE_INFO:
1958 {
1c79356b
A
1959 register policy_timeshare_base_t ts_base;
1960
b0d623f7
A
1961 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
1962 error = KERN_INVALID_ARGUMENT;
1963 break;
1964 }
1c79356b
A
1965
1966 ts_base = (policy_timeshare_base_t) task_info_out;
1967
0b4e3aa0 1968 if (task == kernel_task) {
b0d623f7
A
1969 error = KERN_INVALID_POLICY;
1970 break;
1c79356b
A
1971 }
1972
1973 ts_base->base_priority = task->priority;
1c79356b
A
1974
1975 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
1976 break;
91447636 1977 }
1c79356b 1978
91447636
A
1979 case TASK_SECURITY_TOKEN:
1980 {
1981 register security_token_t *sec_token_p;
1c79356b 1982
b0d623f7
A
1983 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
1984 error = KERN_INVALID_ARGUMENT;
1985 break;
1986 }
1c79356b
A
1987
1988 sec_token_p = (security_token_t *) task_info_out;
1989
1c79356b 1990 *sec_token_p = task->sec_token;
1c79356b
A
1991
1992 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
91447636
A
1993 break;
1994 }
1c79356b 1995
91447636
A
1996 case TASK_AUDIT_TOKEN:
1997 {
1998 register audit_token_t *audit_token_p;
55e303ae 1999
b0d623f7
A
2000 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
2001 error = KERN_INVALID_ARGUMENT;
2002 break;
2003 }
55e303ae
A
2004
2005 audit_token_p = (audit_token_t *) task_info_out;
2006
55e303ae 2007 *audit_token_p = task->audit_token;
55e303ae
A
2008
2009 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
91447636
A
2010 break;
2011 }
55e303ae 2012
91447636 2013 case TASK_SCHED_INFO:
b0d623f7 2014 error = KERN_INVALID_ARGUMENT;
6d2010ae 2015 break;
1c79356b 2016
91447636
A
2017 case TASK_EVENTS_INFO:
2018 {
1c79356b 2019 register task_events_info_t events_info;
2d21ac55 2020 register thread_t thread;
1c79356b 2021
b0d623f7
A
2022 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
2023 error = KERN_INVALID_ARGUMENT;
2024 break;
2025 }
1c79356b
A
2026
2027 events_info = (task_events_info_t) task_info_out;
2028
2d21ac55 2029
1c79356b
A
2030 events_info->faults = task->faults;
2031 events_info->pageins = task->pageins;
2032 events_info->cow_faults = task->cow_faults;
2033 events_info->messages_sent = task->messages_sent;
2034 events_info->messages_received = task->messages_received;
2035 events_info->syscalls_mach = task->syscalls_mach;
2036 events_info->syscalls_unix = task->syscalls_unix;
2d21ac55
A
2037
2038 events_info->csw = task->c_switch;
2039
2040 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6d2010ae
A
2041 events_info->csw += thread->c_switch;
2042 events_info->syscalls_mach += thread->syscalls_mach;
2043 events_info->syscalls_unix += thread->syscalls_unix;
2d21ac55
A
2044 }
2045
1c79356b
A
2046
2047 *task_info_count = TASK_EVENTS_INFO_COUNT;
2048 break;
91447636 2049 }
2d21ac55
A
2050 case TASK_AFFINITY_TAG_INFO:
2051 {
b0d623f7
A
2052 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
2053 error = KERN_INVALID_ARGUMENT;
2054 break;
2055 }
2d21ac55 2056
b0d623f7 2057 error = task_affinity_info(task, task_info_out, task_info_count);
6d2010ae 2058 break;
2d21ac55 2059 }
4b17d6b6
A
2060
2061 case TASK_POWER_INFO:
2062 {
2063 task_power_info_t info;
2064 thread_t thread;
2065 ledger_amount_t tmp;
2066
2067 if (*task_info_count < TASK_POWER_INFO_COUNT) {
2068 error = KERN_INVALID_ARGUMENT;
2069 break;
2070 }
2071
2072 info = (task_power_info_t)task_info_out;
2073
2074 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
2075 (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
2076 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
2077 (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
2078
2079 info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
2080 info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
2081
2082 info->total_user = task->total_user_time;
2083 info->total_system = task->total_system_time;
2084
2085 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2086 uint64_t tval;
2087 spl_t x;
2088
2089 if ((task == kernel_task) && (thread->priority == IDLEPRI) && (thread->sched_pri == IDLEPRI))
2090 continue;
2091 x = splsched();
2092 thread_lock(thread);
2093
2094 info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
2095 info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
2096
2097 tval = timer_grab(&thread->user_timer);
2098 info->total_user += tval;
2099
2100 tval = timer_grab(&thread->system_timer);
2101 if (thread->precise_user_kernel_time) {
2102 info->total_system += tval;
2103 } else {
2104 /* system_timer may represent either sys or user */
2105 info->total_user += tval;
2106 }
2107
2108 thread_unlock(thread);
2109 splx(x);
2110 }
2111 break;
2112 }
2113
91447636 2114 default:
b0d623f7 2115 error = KERN_INVALID_ARGUMENT;
1c79356b
A
2116 }
2117
b0d623f7
A
2118 task_unlock(task);
2119 return (error);
1c79356b
A
2120}
2121
2d21ac55
A
2122void
2123task_vtimer_set(
2124 task_t task,
2125 integer_t which)
2126{
2127 thread_t thread;
316670eb 2128 spl_t x;
2d21ac55
A
2129
2130 /* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */
2131
2132 task_lock(task);
2133
2134 task->vtimers |= which;
2135
2136 switch (which) {
2137
2138 case TASK_VTIMER_USER:
2139 queue_iterate(&task->threads, thread, thread_t, task_threads) {
316670eb
A
2140 x = splsched();
2141 thread_lock(thread);
2142 if (thread->precise_user_kernel_time)
2143 thread->vtimer_user_save = timer_grab(&thread->user_timer);
2144 else
2145 thread->vtimer_user_save = timer_grab(&thread->system_timer);
2146 thread_unlock(thread);
2147 splx(x);
2d21ac55
A
2148 }
2149 break;
2150
2151 case TASK_VTIMER_PROF:
2152 queue_iterate(&task->threads, thread, thread_t, task_threads) {
316670eb
A
2153 x = splsched();
2154 thread_lock(thread);
2d21ac55
A
2155 thread->vtimer_prof_save = timer_grab(&thread->user_timer);
2156 thread->vtimer_prof_save += timer_grab(&thread->system_timer);
316670eb
A
2157 thread_unlock(thread);
2158 splx(x);
2d21ac55
A
2159 }
2160 break;
2161
2162 case TASK_VTIMER_RLIM:
2163 queue_iterate(&task->threads, thread, thread_t, task_threads) {
316670eb
A
2164 x = splsched();
2165 thread_lock(thread);
2d21ac55
A
2166 thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
2167 thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
316670eb
A
2168 thread_unlock(thread);
2169 splx(x);
2d21ac55
A
2170 }
2171 break;
2172 }
2173
2174 task_unlock(task);
2175}
2176
2177void
2178task_vtimer_clear(
2179 task_t task,
2180 integer_t which)
2181{
2182 assert(task == current_task());
2183
2184 task_lock(task);
2185
2186 task->vtimers &= ~which;
2187
2188 task_unlock(task);
2189}
2190
2191void
2192task_vtimer_update(
2193__unused
2194 task_t task,
2195 integer_t which,
2196 uint32_t *microsecs)
2197{
2198 thread_t thread = current_thread();
b0d623f7
A
2199 uint32_t tdelt;
2200 clock_sec_t secs;
2d21ac55
A
2201 uint64_t tsum;
2202
2203 assert(task == current_task());
2204
2205 assert(task->vtimers & which);
2206
b0d623f7 2207 secs = tdelt = 0;
2d21ac55
A
2208
2209 switch (which) {
2210
2211 case TASK_VTIMER_USER:
316670eb
A
2212 if (thread->precise_user_kernel_time) {
2213 tdelt = (uint32_t)timer_delta(&thread->user_timer,
2214 &thread->vtimer_user_save);
2215 } else {
2216 tdelt = (uint32_t)timer_delta(&thread->system_timer,
2d21ac55 2217 &thread->vtimer_user_save);
316670eb 2218 }
b0d623f7 2219 absolutetime_to_microtime(tdelt, &secs, microsecs);
2d21ac55
A
2220 break;
2221
2222 case TASK_VTIMER_PROF:
2223 tsum = timer_grab(&thread->user_timer);
2224 tsum += timer_grab(&thread->system_timer);
b0d623f7
A
2225 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
2226 absolutetime_to_microtime(tdelt, &secs, microsecs);
2227 /* if the time delta is smaller than a usec, ignore */
2228 if (*microsecs != 0)
2229 thread->vtimer_prof_save = tsum;
2d21ac55
A
2230 break;
2231
2232 case TASK_VTIMER_RLIM:
2233 tsum = timer_grab(&thread->user_timer);
2234 tsum += timer_grab(&thread->system_timer);
b0d623f7 2235 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
2d21ac55 2236 thread->vtimer_rlim_save = tsum;
b0d623f7 2237 absolutetime_to_microtime(tdelt, &secs, microsecs);
2d21ac55
A
2238 break;
2239 }
2240
2d21ac55
A
2241}
2242
1c79356b
A
2243/*
2244 * task_assign:
2245 *
2246 * Change the assigned processor set for the task
2247 */
2248kern_return_t
2249task_assign(
91447636
A
2250 __unused task_t task,
2251 __unused processor_set_t new_pset,
2252 __unused boolean_t assign_threads)
1c79356b 2253{
1c79356b
A
2254 return(KERN_FAILURE);
2255}
2256
2257/*
2258 * task_assign_default:
2259 *
2260 * Version of task_assign to assign to default processor set.
2261 */
2262kern_return_t
2263task_assign_default(
2264 task_t task,
2265 boolean_t assign_threads)
2266{
2d21ac55 2267 return (task_assign(task, &pset0, assign_threads));
1c79356b
A
2268}
2269
2270/*
2271 * task_get_assignment
2272 *
2273 * Return name of processor set that task is assigned to.
2274 */
2275kern_return_t
2276task_get_assignment(
2277 task_t task,
2278 processor_set_t *pset)
2279{
2280 if (!task->active)
2281 return(KERN_FAILURE);
2282
2d21ac55
A
2283 *pset = &pset0;
2284
2285 return (KERN_SUCCESS);
1c79356b
A
2286}
2287
2288
2289/*
2290 * task_policy
2291 *
2292 * Set scheduling policy and parameters, both base and limit, for
2293 * the given task. Policy must be a policy which is enabled for the
2294 * processor set. Change contained threads if requested.
2295 */
2296kern_return_t
2297task_policy(
91447636
A
2298 __unused task_t task,
2299 __unused policy_t policy_id,
2300 __unused policy_base_t base,
2301 __unused mach_msg_type_number_t count,
2302 __unused boolean_t set_limit,
2303 __unused boolean_t change)
1c79356b
A
2304{
2305 return(KERN_FAILURE);
2306}
2307
2308/*
2309 * task_set_policy
2310 *
2311 * Set scheduling policy and parameters, both base and limit, for
2312 * the given task. Policy can be any policy implemented by the
2313 * processor set, whether enabled or not. Change contained threads
2314 * if requested.
2315 */
2316kern_return_t
2317task_set_policy(
91447636
A
2318 __unused task_t task,
2319 __unused processor_set_t pset,
2320 __unused policy_t policy_id,
2321 __unused policy_base_t base,
2322 __unused mach_msg_type_number_t base_count,
2323 __unused policy_limit_t limit,
2324 __unused mach_msg_type_number_t limit_count,
2325 __unused boolean_t change)
1c79356b
A
2326{
2327 return(KERN_FAILURE);
2328}
2329
91447636 2330#if FAST_TAS
1c79356b
A
2331kern_return_t
2332task_set_ras_pc(
2333 task_t task,
2334 vm_offset_t pc,
2335 vm_offset_t endpc)
2336{
1c79356b
A
2337 extern int fast_tas_debug;
2338
2339 if (fast_tas_debug) {
2340 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
2341 task, pc, endpc);
2342 }
2343 task_lock(task);
2344 task->fast_tas_base = pc;
2345 task->fast_tas_end = endpc;
2346 task_unlock(task);
2347 return KERN_SUCCESS;
91447636 2348}
1c79356b 2349#else /* FAST_TAS */
91447636
A
2350kern_return_t
2351task_set_ras_pc(
2352 __unused task_t task,
2353 __unused vm_offset_t pc,
2354 __unused vm_offset_t endpc)
2355{
1c79356b 2356 return KERN_FAILURE;
1c79356b 2357}
91447636 2358#endif /* FAST_TAS */
1c79356b
A
2359
2360void
2361task_synchronizer_destroy_all(task_t task)
2362{
2363 semaphore_t semaphore;
2364 lock_set_t lock_set;
2365
2366 /*
2367 * Destroy owned semaphores
2368 */
2369
2370 while (!queue_empty(&task->semaphore_list)) {
2371 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
2372 (void) semaphore_destroy(task, semaphore);
2373 }
2374
2375 /*
2376 * Destroy owned lock sets
2377 */
2378
2379 while (!queue_empty(&task->lock_set_list)) {
2380 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
2381 (void) lock_set_destroy(task, lock_set);
2382 }
2383}
2384
b0d623f7
A
2385/*
2386 * Install default (machine-dependent) initial thread state
2387 * on the task. Subsequent thread creation will have this initial
2388 * state set on the thread by machine_thread_inherit_taskwide().
2389 * Flavors and structures are exactly the same as those to thread_set_state()
2390 */
2391kern_return_t
2392task_set_state(
2393 task_t task,
2394 int flavor,
2395 thread_state_t state,
2396 mach_msg_type_number_t state_count)
2397{
2398 kern_return_t ret;
2399
2400 if (task == TASK_NULL) {
2401 return (KERN_INVALID_ARGUMENT);
2402 }
2403
2404 task_lock(task);
2405
2406 if (!task->active) {
2407 task_unlock(task);
2408 return (KERN_FAILURE);
2409 }
2410
2411 ret = machine_task_set_state(task, flavor, state, state_count);
2412
2413 task_unlock(task);
2414 return ret;
2415}
2416
2417/*
2418 * Examine the default (machine-dependent) initial thread state
2419 * on the task, as set by task_set_state(). Flavors and structures
2420 * are exactly the same as those passed to thread_get_state().
2421 */
2422kern_return_t
2423task_get_state(
2424 task_t task,
2425 int flavor,
2426 thread_state_t state,
2427 mach_msg_type_number_t *state_count)
2428{
2429 kern_return_t ret;
2430
2431 if (task == TASK_NULL) {
2432 return (KERN_INVALID_ARGUMENT);
2433 }
2434
2435 task_lock(task);
2436
2437 if (!task->active) {
2438 task_unlock(task);
2439 return (KERN_FAILURE);
2440 }
2441
2442 ret = machine_task_get_state(task, flavor, state, state_count);
2443
2444 task_unlock(task);
2445 return ret;
2446}
2447
2448
1c79356b
A
2449/*
2450 * We need to export some functions to other components that
2451 * are currently implemented in macros within the osfmk
2452 * component. Just export them as functions of the same name.
2453 */
2454boolean_t is_kerneltask(task_t t)
2455{
2456 if (t == kernel_task)
55e303ae
A
2457 return (TRUE);
2458
2459 return (FALSE);
1c79356b
A
2460}
2461
b0d623f7
A
2462int
2463check_for_tasksuspend(task_t task)
2464{
2465
2466 if (task == TASK_NULL)
2467 return (0);
2468
2469 return (task->suspend_count > 0);
2470}
2471
1c79356b 2472#undef current_task
91447636
A
2473task_t current_task(void);
2474task_t current_task(void)
1c79356b
A
2475{
2476 return (current_task_fast());
2477}
91447636
A
2478
2479#undef task_reference
2480void task_reference(task_t task);
2481void
2482task_reference(
2483 task_t task)
2484{
2485 if (task != TASK_NULL)
2486 task_reference_internal(task);
2487}
2d21ac55 2488
6d2010ae
A
2489/*
2490 * This routine is called always with task lock held.
2491 * And it returns a thread handle without reference as the caller
2492 * operates on it under the task lock held.
2493 */
2494thread_t
2495task_findtid(task_t task, uint64_t tid)
2496{
2497 thread_t thread= THREAD_NULL;
2498
2499 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2500 if (thread->thread_id == tid)
316670eb 2501 return(thread);
6d2010ae 2502 }
316670eb 2503 return(THREAD_NULL);
6d2010ae
A
2504}
2505
2506
2d21ac55
A
2507#if CONFIG_MACF_MACH
2508/*
2509 * Protect 2 task labels against modification by adding a reference on
2510 * both label handles. The locks do not actually have to be held while
2511 * using the labels as only labels with one reference can be modified
2512 * in place.
2513 */
2514
2515void
2516tasklabel_lock2(
2517 task_t a,
2518 task_t b)
2519{
2520 labelh_reference(a->label);
2521 labelh_reference(b->label);
2522}
2523
2524void
2525tasklabel_unlock2(
2526 task_t a,
2527 task_t b)
2528{
2529 labelh_release(a->label);
2530 labelh_release(b->label);
2531}
2532
2533void
2534mac_task_label_update_internal(
2535 struct label *pl,
2536 struct task *task)
2537{
2538
2539 tasklabel_lock(task);
2540 task->label = labelh_modify(task->label);
2541 mac_task_label_update(pl, &task->maclabel);
2542 tasklabel_unlock(task);
2543 ip_lock(task->itk_self);
2544 mac_port_label_update_cred(pl, &task->itk_self->ip_label);
2545 ip_unlock(task->itk_self);
2546}
2547
2548void
2549mac_task_label_modify(
2550 struct task *task,
2551 void *arg,
2552 void (*f) (struct label *l, void *arg))
2553{
2554
2555 tasklabel_lock(task);
2556 task->label = labelh_modify(task->label);
2557 (*f)(&task->maclabel, arg);
2558 tasklabel_unlock(task);
2559}
2560
2561struct label *
2562mac_task_get_label(struct task *task)
2563{
2564 return (&task->maclabel);
2565}
2566#endif