]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/task.c
xnu-2050.22.13.tar.gz
[apple/xnu.git] / osfmk / kern / task.c
CommitLineData
1c79356b 1/*
316670eb 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63/*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
2d21ac55
A
81/*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
1c79356b 88
1c79356b 89#include <fast_tas.h>
1c79356b
A
90#include <platforms.h>
91
91447636 92#include <mach/mach_types.h>
1c79356b 93#include <mach/boolean.h>
91447636 94#include <mach/host_priv.h>
1c79356b
A
95#include <mach/machine/vm_types.h>
96#include <mach/vm_param.h>
97#include <mach/semaphore.h>
98#include <mach/task_info.h>
99#include <mach/task_special_ports.h>
91447636
A
100
101#include <ipc/ipc_types.h>
1c79356b
A
102#include <ipc/ipc_space.h>
103#include <ipc/ipc_entry.h>
91447636
A
104
105#include <kern/kern_types.h>
1c79356b
A
106#include <kern/mach_param.h>
107#include <kern/misc_protos.h>
108#include <kern/task.h>
109#include <kern/thread.h>
110#include <kern/zalloc.h>
111#include <kern/kalloc.h>
112#include <kern/processor.h>
113#include <kern/sched_prim.h> /* for thread_wakeup */
1c79356b 114#include <kern/ipc_tt.h>
1c79356b 115#include <kern/host.h>
91447636
A
116#include <kern/clock.h>
117#include <kern/timer.h>
1c79356b
A
118#include <kern/assert.h>
119#include <kern/sync_lock.h>
2d21ac55 120#include <kern/affinity.h>
91447636
A
121
122#include <vm/pmap.h>
123#include <vm/vm_map.h>
124#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
125#include <vm/vm_pageout.h>
2d21ac55 126#include <vm/vm_protos.h>
91447636 127
1c79356b
A
128/*
129 * Exported interfaces
130 */
131
132#include <mach/task_server.h>
133#include <mach/mach_host_server.h>
134#include <mach/host_security_server.h>
91447636 135#include <mach/mach_port_server.h>
2d21ac55 136#include <mach/security_server.h>
91447636 137
2d21ac55
A
138#include <vm/vm_shared_region.h>
139
140#if CONFIG_MACF_MACH
141#include <security/mac_mach_internal.h>
142#endif
1c79356b 143
b0d623f7
A
144#if CONFIG_COUNTERS
145#include <pmc/pmc.h>
146#endif /* CONFIG_COUNTERS */
147
148task_t kernel_task;
149zone_t task_zone;
150lck_attr_t task_lck_attr;
151lck_grp_t task_lck_grp;
152lck_grp_attr_t task_lck_grp_attr;
316670eb
A
153#if CONFIG_EMBEDDED
154lck_mtx_t task_watch_mtx;
155#endif /* CONFIG_EMBEDDED */
b0d623f7 156
6d2010ae
A
157zinfo_usage_store_t tasks_tkm_private;
158zinfo_usage_store_t tasks_tkm_shared;
159
316670eb
A
160static ledger_template_t task_ledger_template = NULL;
161struct _task_ledger_indices task_ledgers = {-1, -1, -1, -1, -1};
162void init_task_ledgers(void);
163
164
b0d623f7 165int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
1c79356b 166
6d2010ae
A
167/* externs for BSD kernel */
168extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
169
1c79356b
A
170/* Forwards */
171
172void task_hold_locked(
173 task_t task);
174void task_wait_locked(
316670eb
A
175 task_t task,
176 boolean_t until_not_runnable);
1c79356b
A
177void task_release_locked(
178 task_t task);
1c79356b
A
179void task_free(
180 task_t task );
181void task_synchronizer_destroy_all(
182 task_t task);
1c79356b 183
b0d623f7
A
184int check_for_tasksuspend(
185 task_t task);
186
55e303ae
A
187void
188task_backing_store_privileged(
189 task_t task)
190{
191 task_lock(task);
192 task->priv_flags |= VM_BACKING_STORE_PRIV;
193 task_unlock(task);
194 return;
195}
196
91447636
A
197
198void
199task_set_64bit(
200 task_t task,
201 boolean_t is64bit)
202{
b0d623f7 203#if defined(__i386__) || defined(__x86_64__)
2d21ac55
A
204 thread_t thread;
205#endif /* __i386__ */
206 int vm_flags = 0;
0c530ab8
A
207
208 if (is64bit) {
2d21ac55
A
209 if (task_has_64BitAddr(task))
210 return;
0c530ab8 211
91447636 212 task_set_64BitAddr(task);
91447636 213 } else {
2d21ac55
A
214 if ( !task_has_64BitAddr(task))
215 return;
0c530ab8 216
91447636
A
217 /*
218 * Deallocate all memory previously allocated
219 * above the 32-bit address space, since it won't
220 * be accessible anymore.
221 */
2d21ac55
A
222 /* remove regular VM map entries & pmap mappings */
223 (void) vm_map_remove(task->map,
224 (vm_map_offset_t) VM_MAX_ADDRESS,
225 MACH_VM_MAX_ADDRESS,
226 0);
2d21ac55 227 /* remove the higher VM mappings */
91447636 228 (void) vm_map_remove(task->map,
91447636 229 MACH_VM_MAX_ADDRESS,
2d21ac55
A
230 0xFFFFFFFFFFFFF000ULL,
231 vm_flags);
91447636 232 task_clear_64BitAddr(task);
91447636 233 }
0c530ab8
A
234 /* FIXME: On x86, the thread save state flavor can diverge from the
235 * task's 64-bit feature flag due to the 32-bit/64-bit register save
236 * state dichotomy. Since we can be pre-empted in this interval,
237 * certain routines may observe the thread as being in an inconsistent
238 * state with respect to its task's 64-bitness.
239 */
b0d623f7
A
240#if defined(__i386__) || defined(__x86_64__)
241 task_lock(task);
0c530ab8 242 queue_iterate(&task->threads, thread, thread_t, task_threads) {
b0d623f7 243 thread_mtx_lock(thread);
2d21ac55 244 machine_thread_switch_addrmode(thread);
b0d623f7 245 thread_mtx_unlock(thread);
0c530ab8 246 }
b0d623f7 247 task_unlock(task);
2d21ac55 248#endif /* __i386__ */
91447636
A
249}
250
b0d623f7
A
251
252void
253task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
254{
255 task_lock(task);
256 task->all_image_info_addr = addr;
257 task->all_image_info_size = size;
258 task_unlock(task);
259}
260
1c79356b
A
261void
262task_init(void)
263{
b0d623f7
A
264
265 lck_grp_attr_setdefault(&task_lck_grp_attr);
266 lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
267 lck_attr_setdefault(&task_lck_attr);
268 lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
316670eb
A
269#if CONFIG_EMBEDDED
270 lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr);
271#endif /* CONFIG_EMBEDDED */
b0d623f7 272
1c79356b
A
273 task_zone = zinit(
274 sizeof(struct task),
b0d623f7 275 task_max * sizeof(struct task),
1c79356b
A
276 TASK_CHUNK * sizeof(struct task),
277 "tasks");
6d2010ae 278
0b4c1975 279 zone_change(task_zone, Z_NOENCRYPT, TRUE);
1c79356b 280
316670eb
A
281 init_task_ledgers();
282
1c79356b
A
283 /*
284 * Create the kernel task as the first task.
1c79356b 285 */
b0d623f7
A
286#ifdef __LP64__
287 if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
288#else
0c530ab8 289 if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
b0d623f7 290#endif
1c79356b 291 panic("task_init\n");
55e303ae 292
1c79356b
A
293 vm_map_deallocate(kernel_task->map);
294 kernel_task->map = kernel_map;
316670eb 295
1c79356b
A
296}
297
1c79356b
A
298/*
299 * Create a task running in the kernel address space. It may
300 * have its own map of size mem_size and may have ipc privileges.
301 */
302kern_return_t
303kernel_task_create(
91447636
A
304 __unused task_t parent_task,
305 __unused vm_offset_t map_base,
306 __unused vm_size_t map_size,
307 __unused task_t *child_task)
1c79356b 308{
55e303ae 309 return (KERN_INVALID_ARGUMENT);
1c79356b
A
310}
311
312kern_return_t
313task_create(
2d21ac55 314 task_t parent_task,
91447636 315 __unused ledger_port_array_t ledger_ports,
2d21ac55
A
316 __unused mach_msg_type_number_t num_ledger_ports,
317 __unused boolean_t inherit_memory,
318 __unused task_t *child_task) /* OUT */
1c79356b
A
319{
320 if (parent_task == TASK_NULL)
321 return(KERN_INVALID_ARGUMENT);
322
2d21ac55
A
323 /*
324 * No longer supported: too many calls assume that a task has a valid
325 * process attached.
326 */
327 return(KERN_FAILURE);
1c79356b
A
328}
329
330kern_return_t
331host_security_create_task_token(
91447636 332 host_security_t host_security,
2d21ac55
A
333 task_t parent_task,
334 __unused security_token_t sec_token,
335 __unused audit_token_t audit_token,
336 __unused host_priv_t host_priv,
91447636
A
337 __unused ledger_port_array_t ledger_ports,
338 __unused mach_msg_type_number_t num_ledger_ports,
2d21ac55
A
339 __unused boolean_t inherit_memory,
340 __unused task_t *child_task) /* OUT */
1c79356b 341{
1c79356b
A
342 if (parent_task == TASK_NULL)
343 return(KERN_INVALID_ARGUMENT);
344
345 if (host_security == HOST_NULL)
346 return(KERN_INVALID_SECURITY);
347
2d21ac55
A
348 /*
349 * No longer supported.
350 */
351 return(KERN_FAILURE);
1c79356b
A
352}
353
316670eb
A
354void
355init_task_ledgers(void)
356{
357 ledger_template_t t;
358
359 assert(task_ledger_template == NULL);
360 assert(kernel_task == TASK_NULL);
361
362 if ((t = ledger_template_create("Per-task ledger")) == NULL)
363 panic("couldn't create task ledger template");
364
365 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
366 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
367 "physmem", "bytes");
368 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
369 "bytes");
370 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
371 "bytes");
372 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
373 "bytes");
374
375 if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
376 (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
377 (task_ledgers.wired_mem < 0)) {
378 panic("couldn't create entries for task ledger template");
379 }
380
381 task_ledger_template = t;
382}
383
1c79356b 384kern_return_t
55e303ae 385task_create_internal(
1c79356b
A
386 task_t parent_task,
387 boolean_t inherit_memory,
0c530ab8 388 boolean_t is_64bit,
1c79356b
A
389 task_t *child_task) /* OUT */
390{
2d21ac55
A
391 task_t new_task;
392 vm_shared_region_t shared_region;
316670eb 393 ledger_t ledger = NULL;
1c79356b
A
394
395 new_task = (task_t) zalloc(task_zone);
396
397 if (new_task == TASK_NULL)
398 return(KERN_RESOURCE_SHORTAGE);
399
400 /* one ref for just being alive; one for our caller */
401 new_task->ref_count = 2;
402
316670eb
A
403 /* allocate with active entries */
404 assert(task_ledger_template != NULL);
405 if ((ledger = ledger_instantiate(task_ledger_template,
406 LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
407 zfree(task_zone, new_task);
408 return(KERN_RESOURCE_SHORTAGE);
409 }
410 new_task->ledger = ledger;
411
b0d623f7 412 /* if inherit_memory is true, parent_task MUST not be NULL */
1c79356b 413 if (inherit_memory)
316670eb 414 new_task->map = vm_map_fork(ledger, parent_task->map);
1c79356b 415 else
316670eb
A
416 new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
417 (vm_map_offset_t)(VM_MIN_ADDRESS),
418 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
1c79356b 419
2d21ac55
A
420 /* Inherit memlock limit from parent */
421 if (parent_task)
b0d623f7 422 vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
2d21ac55 423
b0d623f7 424 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
55e303ae 425 queue_init(&new_task->threads);
1c79356b 426 new_task->suspend_count = 0;
55e303ae 427 new_task->thread_count = 0;
55e303ae 428 new_task->active_thread_count = 0;
1c79356b 429 new_task->user_stop_count = 0;
0b4e3aa0 430 new_task->role = TASK_UNSPECIFIED;
1c79356b 431 new_task->active = TRUE;
b0d623f7 432 new_task->halting = FALSE;
2d21ac55 433 new_task->user_data = NULL;
1c79356b
A
434 new_task->faults = 0;
435 new_task->cow_faults = 0;
436 new_task->pageins = 0;
437 new_task->messages_sent = 0;
438 new_task->messages_received = 0;
439 new_task->syscalls_mach = 0;
55e303ae 440 new_task->priv_flags = 0;
1c79356b 441 new_task->syscalls_unix=0;
2d21ac55 442 new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
55e303ae
A
443 new_task->taskFeatures[0] = 0; /* Init task features */
444 new_task->taskFeatures[1] = 0; /* Init task features */
1c79356b 445
6d2010ae
A
446 zinfo_task_init(new_task);
447
1c79356b 448#ifdef MACH_BSD
2d21ac55 449 new_task->bsd_info = NULL;
1c79356b
A
450#endif /* MACH_BSD */
451
b0d623f7 452#if defined(__i386__) || defined(__x86_64__)
0c530ab8 453 new_task->i386_ldt = 0;
b0d623f7 454 new_task->task_debug = NULL;
0c530ab8
A
455#endif
456
55e303ae 457
1c79356b
A
458 queue_init(&new_task->semaphore_list);
459 queue_init(&new_task->lock_set_list);
460 new_task->semaphores_owned = 0;
461 new_task->lock_sets_owned = 0;
462
2d21ac55 463#if CONFIG_MACF_MACH
2d21ac55
A
464 new_task->label = labelh_new(1);
465 mac_task_label_init (&new_task->maclabel);
466#endif
1c79356b
A
467
468 ipc_task_init(new_task, parent_task);
469
91447636
A
470 new_task->total_user_time = 0;
471 new_task->total_system_time = 0;
1c79356b 472
2d21ac55 473 new_task->vtimers = 0;
1c79356b 474
2d21ac55
A
475 new_task->shared_region = NULL;
476
477 new_task->affinity_space = NULL;
1c79356b 478
b0d623f7
A
479#if CONFIG_COUNTERS
480 new_task->t_chud = 0U;
481#endif
482
316670eb
A
483 new_task->pidsuspended = FALSE;
484 new_task->frozen = FALSE;
485 new_task->rusage_cpu_flags = 0;
486 new_task->rusage_cpu_percentage = 0;
487 new_task->rusage_cpu_interval = 0;
488 new_task->rusage_cpu_deadline = 0;
489 new_task->rusage_cpu_callt = NULL;
490 new_task->proc_terminate = 0;
491#if CONFIG_EMBEDDED
492 queue_init(&new_task->task_watchers);
493 new_task->appstate = TASK_APPSTATE_ACTIVE;
494 new_task->num_taskwatchers = 0;
495 new_task->watchapplying = 0;
496#endif /* CONFIG_EMBEDDED */
497
db609669
A
498 new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;
499
2d21ac55 500 if (parent_task != TASK_NULL) {
1c79356b 501 new_task->sec_token = parent_task->sec_token;
55e303ae 502 new_task->audit_token = parent_task->audit_token;
1c79356b 503
2d21ac55
A
504 /* inherit the parent's shared region */
505 shared_region = vm_shared_region_get(parent_task);
506 vm_shared_region_set(new_task, shared_region);
1c79356b 507
91447636
A
508 if(task_has_64BitAddr(parent_task))
509 task_set_64BitAddr(new_task);
b0d623f7
A
510 new_task->all_image_info_addr = parent_task->all_image_info_addr;
511 new_task->all_image_info_size = parent_task->all_image_info_size;
0c530ab8 512
b0d623f7 513#if defined(__i386__) || defined(__x86_64__)
0c530ab8
A
514 if (inherit_memory && parent_task->i386_ldt)
515 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
516#endif
2d21ac55
A
517 if (inherit_memory && parent_task->affinity_space)
518 task_affinity_create(parent_task, new_task);
b0d623f7
A
519
520 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
6d2010ae
A
521 new_task->policystate = parent_task->policystate;
522 /* inherit the self action state */
316670eb 523 new_task->appliedstate = parent_task->appliedstate;
6d2010ae
A
524 new_task->ext_policystate = parent_task->ext_policystate;
525#if NOTYET
526 /* till the child lifecycle is cleared do not inherit external action */
316670eb 527 new_task->ext_appliedstate = parent_task->ext_appliedstate;
6d2010ae 528#else
316670eb 529 new_task->ext_appliedstate = default_task_null_policy;
6d2010ae 530#endif
1c79356b
A
531 }
532 else {
1c79356b 533 new_task->sec_token = KERNEL_SECURITY_TOKEN;
55e303ae 534 new_task->audit_token = KERNEL_AUDIT_TOKEN;
b0d623f7
A
535#ifdef __LP64__
536 if(is_64bit)
537 task_set_64BitAddr(new_task);
538#endif
6d2010ae
A
539 new_task->all_image_info_addr = (mach_vm_address_t)0;
540 new_task->all_image_info_size = (mach_vm_size_t)0;
b0d623f7
A
541
542 new_task->pset_hint = PROCESSOR_SET_NULL;
6d2010ae
A
543 new_task->policystate = default_task_proc_policy;
544 new_task->ext_policystate = default_task_proc_policy;
316670eb
A
545 new_task->appliedstate = default_task_null_policy;
546 new_task->ext_appliedstate = default_task_null_policy;
1c79356b
A
547 }
548
0b4e3aa0 549 if (kernel_task == TASK_NULL) {
55e303ae 550 new_task->priority = BASEPRI_KERNEL;
0b4e3aa0
A
551 new_task->max_priority = MAXPRI_KERNEL;
552 }
553 else {
554 new_task->priority = BASEPRI_DEFAULT;
555 new_task->max_priority = MAXPRI_USER;
556 }
6d2010ae
A
557
558 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
2d21ac55 559
b0d623f7 560 lck_mtx_lock(&tasks_threads_lock);
2d21ac55
A
561 queue_enter(&tasks, new_task, task_t, tasks);
562 tasks_count++;
b0d623f7 563 lck_mtx_unlock(&tasks_threads_lock);
1c79356b 564
55e303ae
A
565 if (vm_backing_store_low && parent_task != NULL)
566 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
1c79356b
A
567
568 ipc_task_enable(new_task);
569
1c79356b
A
570 *child_task = new_task;
571 return(KERN_SUCCESS);
572}
573
574/*
91447636 575 * task_deallocate:
1c79356b 576 *
91447636 577 * Drop a reference on a task.
1c79356b
A
578 */
579void
9bccf70c 580task_deallocate(
1c79356b
A
581 task_t task)
582{
316670eb
A
583 ledger_amount_t credit, debit;
584
9bccf70c
A
585 if (task == TASK_NULL)
586 return;
587
91447636 588 if (task_deallocate_internal(task) > 0)
9bccf70c 589 return;
1c79356b 590
6d2010ae
A
591 lck_mtx_lock(&tasks_threads_lock);
592 queue_remove(&terminated_tasks, task, task_t, tasks);
593 lck_mtx_unlock(&tasks_threads_lock);
594
316670eb
A
595 /*
596 * Give the machine dependent code a chance
597 * to perform cleanup before ripping apart
598 * the task.
599 */
600 machine_task_terminate(task);
601
9bccf70c
A
602 ipc_task_terminate(task);
603
2d21ac55
A
604 if (task->affinity_space)
605 task_affinity_deallocate(task);
606
1c79356b
A
607 vm_map_deallocate(task->map);
608 is_release(task->itk_space);
1c79356b 609
b0d623f7
A
610 lck_mtx_destroy(&task->lock, &task_lck_grp);
611
2d21ac55
A
612#if CONFIG_MACF_MACH
613 labelh_release(task->label);
614#endif
316670eb
A
615
616 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
617 &debit)) {
618 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
619 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
620 }
621 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
622 &debit)) {
623 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
624 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
625 }
626 ledger_dereference(task->ledger);
6d2010ae 627 zinfo_task_free(task);
91447636 628 zfree(task_zone, task);
1c79356b
A
629}
630
0c530ab8
A
631/*
632 * task_name_deallocate:
633 *
634 * Drop a reference on a task name.
635 */
636void
637task_name_deallocate(
638 task_name_t task_name)
639{
640 return(task_deallocate((task_t)task_name));
641}
642
643
1c79356b
A
644/*
645 * task_terminate:
646 *
647 * Terminate the specified task. See comments on thread_terminate
648 * (kern/thread.c) about problems with terminating the "current task."
649 */
650
651kern_return_t
652task_terminate(
653 task_t task)
654{
655 if (task == TASK_NULL)
91447636
A
656 return (KERN_INVALID_ARGUMENT);
657
1c79356b 658 if (task->bsd_info)
91447636
A
659 return (KERN_FAILURE);
660
1c79356b
A
661 return (task_terminate_internal(task));
662}
663
664kern_return_t
665task_terminate_internal(
91447636 666 task_t task)
1c79356b 667{
91447636
A
668 thread_t thread, self;
669 task_t self_task;
670 boolean_t interrupt_save;
1c79356b
A
671
672 assert(task != kernel_task);
673
91447636
A
674 self = current_thread();
675 self_task = self->task;
1c79356b
A
676
677 /*
678 * Get the task locked and make sure that we are not racing
679 * with someone else trying to terminate us.
680 */
91447636 681 if (task == self_task)
1c79356b 682 task_lock(task);
91447636
A
683 else
684 if (task < self_task) {
1c79356b 685 task_lock(task);
91447636
A
686 task_lock(self_task);
687 }
688 else {
689 task_lock(self_task);
1c79356b
A
690 task_lock(task);
691 }
692
6d2010ae 693 if (!task->active) {
1c79356b 694 /*
6d2010ae 695 * Task is already being terminated.
1c79356b
A
696 * Just return an error. If we are dying, this will
697 * just get us to our AST special handler and that
698 * will get us to finalize the termination of ourselves.
699 */
700 task_unlock(task);
91447636
A
701 if (self_task != task)
702 task_unlock(self_task);
703
704 return (KERN_FAILURE);
1c79356b 705 }
91447636
A
706
707 if (self_task != task)
708 task_unlock(self_task);
1c79356b 709
e7c99d92
A
710 /*
711 * Make sure the current thread does not get aborted out of
712 * the waits inside these operations.
713 */
9bccf70c 714 interrupt_save = thread_interrupt_level(THREAD_UNINT);
e7c99d92 715
1c79356b
A
716 /*
717 * Indicate that we want all the threads to stop executing
718 * at user space by holding the task (we would have held
719 * each thread independently in thread_terminate_internal -
720 * but this way we may be more likely to already find it
721 * held there). Mark the task inactive, and prevent
722 * further task operations via the task port.
723 */
724 task_hold_locked(task);
725 task->active = FALSE;
726 ipc_task_disable(task);
727
728 /*
91447636
A
729 * Terminate each thread in the task.
730 */
731 queue_iterate(&task->threads, thread, thread_t, task_threads) {
732 thread_terminate_internal(thread);
1c79356b 733 }
e7c99d92 734
316670eb
A
735 task_unlock(task);
736
737#if CONFIG_EMBEDDED
e7c99d92 738 /*
316670eb 739 * remove all task watchers
e7c99d92 740 */
316670eb
A
741 task_removewatchers(task);
742#endif /* CONFIG_EMBEDDED */
1c79356b
A
743
744 /*
745 * Destroy all synchronizers owned by the task.
746 */
747 task_synchronizer_destroy_all(task);
748
1c79356b
A
749 /*
750 * Destroy the IPC space, leaving just a reference for it.
751 */
316670eb 752 ipc_space_terminate(task->itk_space);
1c79356b 753
0c530ab8
A
754 if (vm_map_has_4GB_pagezero(task->map))
755 vm_map_clear_4GB_pagezero(task->map);
91447636 756
1c79356b
A
757 /*
758 * If the current thread is a member of the task
759 * being terminated, then the last reference to
760 * the task will not be dropped until the thread
761 * is finally reaped. To avoid incurring the
762 * expense of removing the address space regions
763 * at reap time, we do it explictly here.
764 */
2d21ac55
A
765 vm_map_remove(task->map,
766 task->map->min_offset,
767 task->map->max_offset,
768 VM_MAP_NO_FLAGS);
1c79356b 769
2d21ac55
A
770 /* release our shared region */
771 vm_shared_region_set(task, NULL);
9bccf70c 772
b0d623f7 773 lck_mtx_lock(&tasks_threads_lock);
2d21ac55 774 queue_remove(&tasks, task, task_t, tasks);
6d2010ae 775 queue_enter(&terminated_tasks, task, task_t, tasks);
2d21ac55 776 tasks_count--;
b0d623f7 777 lck_mtx_unlock(&tasks_threads_lock);
9bccf70c 778
1c79356b 779 /*
e7c99d92
A
780 * We no longer need to guard against being aborted, so restore
781 * the previous interruptible state.
782 */
9bccf70c 783 thread_interrupt_level(interrupt_save);
e7c99d92
A
784
785 /*
786 * Get rid of the task active reference on itself.
1c79356b 787 */
1c79356b
A
788 task_deallocate(task);
789
91447636 790 return (KERN_SUCCESS);
1c79356b
A
791}
792
793/*
b0d623f7 794 * task_start_halt:
91447636
A
795 *
796 * Shut the current task down (except for the current thread) in
797 * preparation for dramatic changes to the task (probably exec).
b0d623f7
A
798 * We hold the task and mark all other threads in the task for
799 * termination.
1c79356b
A
800 */
801kern_return_t
b0d623f7 802task_start_halt(
1c79356b
A
803 task_t task)
804{
91447636 805 thread_t thread, self;
1c79356b
A
806
807 assert(task != kernel_task);
808
91447636 809 self = current_thread();
1c79356b 810
91447636
A
811 if (task != self->task)
812 return (KERN_INVALID_ARGUMENT);
1c79356b
A
813
814 task_lock(task);
815
b0d623f7 816 if (task->halting || !task->active || !self->active) {
1c79356b
A
817 /*
818 * Task or current thread is already being terminated.
819 * Hurry up and return out of the current kernel context
820 * so that we run our AST special handler to terminate
821 * ourselves.
822 */
823 task_unlock(task);
91447636
A
824
825 return (KERN_FAILURE);
1c79356b
A
826 }
827
b0d623f7
A
828 task->halting = TRUE;
829
55e303ae 830 if (task->thread_count > 1) {
b0d623f7 831
1c79356b
A
832 /*
833 * Mark all the threads to keep them from starting any more
834 * user-level execution. The thread_terminate_internal code
835 * would do this on a thread by thread basis anyway, but this
836 * gives us a better chance of not having to wait there.
837 */
838 task_hold_locked(task);
839
840 /*
91447636 841 * Terminate all the other threads in the task.
1c79356b 842 */
91447636
A
843 queue_iterate(&task->threads, thread, thread_t, task_threads) {
844 if (thread != self)
845 thread_terminate_internal(thread);
1c79356b 846 }
91447636 847
1c79356b
A
848 task_release_locked(task);
849 }
b0d623f7
A
850 task_unlock(task);
851 return KERN_SUCCESS;
852}
853
854
855/*
856 * task_complete_halt:
857 *
858 * Complete task halt by waiting for threads to terminate, then clean
859 * up task resources (VM, port namespace, etc...) and then let the
860 * current thread go in the (practically empty) task context.
861 */
862void
863task_complete_halt(task_t task)
864{
865 task_lock(task);
866 assert(task->halting);
867 assert(task == current_task());
e7c99d92 868
b0d623f7
A
869 /*
870 * Wait for the other threads to get shut down.
871 * When the last other thread is reaped, we'll be
316670eb 872 * woken up.
b0d623f7
A
873 */
874 if (task->thread_count > 1) {
875 assert_wait((event_t)&task->halting, THREAD_UNINT);
876 task_unlock(task);
877 thread_block(THREAD_CONTINUE_NULL);
878 } else {
879 task_unlock(task);
880 }
1c79356b 881
316670eb
A
882 /*
883 * Give the machine dependent code a chance
884 * to perform cleanup of task-level resources
885 * associated with the current thread before
886 * ripping apart the task.
887 */
888 machine_task_terminate(task);
889
1c79356b
A
890 /*
891 * Destroy all synchronizers owned by the task.
892 */
893 task_synchronizer_destroy_all(task);
894
895 /*
9bccf70c
A
896 * Destroy the contents of the IPC space, leaving just
897 * a reference for it.
e7c99d92 898 */
55e303ae 899 ipc_space_clean(task->itk_space);
1c79356b
A
900
901 /*
902 * Clean out the address space, as we are going to be
903 * getting a new one.
904 */
91447636
A
905 vm_map_remove(task->map, task->map->min_offset,
906 task->map->max_offset, VM_MAP_NO_FLAGS);
1c79356b 907
b0d623f7 908 task->halting = FALSE;
1c79356b
A
909}
910
911/*
912 * task_hold_locked:
913 *
914 * Suspend execution of the specified task.
915 * This is a recursive-style suspension of the task, a count of
916 * suspends is maintained.
917 *
918 * CONDITIONS: the task is locked and active.
919 */
920void
921task_hold_locked(
91447636 922 register task_t task)
1c79356b 923{
91447636 924 register thread_t thread;
1c79356b
A
925
926 assert(task->active);
927
9bccf70c
A
928 if (task->suspend_count++ > 0)
929 return;
1c79356b
A
930
931 /*
91447636 932 * Iterate through all the threads and hold them.
1c79356b 933 */
91447636
A
934 queue_iterate(&task->threads, thread, thread_t, task_threads) {
935 thread_mtx_lock(thread);
936 thread_hold(thread);
937 thread_mtx_unlock(thread);
1c79356b
A
938 }
939}
940
941/*
942 * task_hold:
943 *
944 * Same as the internal routine above, except that is must lock
945 * and verify that the task is active. This differs from task_suspend
946 * in that it places a kernel hold on the task rather than just a
947 * user-level hold. This keeps users from over resuming and setting
948 * it running out from under the kernel.
949 *
950 * CONDITIONS: the caller holds a reference on the task
951 */
952kern_return_t
91447636
A
953task_hold(
954 register task_t task)
1c79356b 955{
1c79356b
A
956 if (task == TASK_NULL)
957 return (KERN_INVALID_ARGUMENT);
91447636 958
1c79356b 959 task_lock(task);
91447636 960
1c79356b
A
961 if (!task->active) {
962 task_unlock(task);
91447636 963
1c79356b
A
964 return (KERN_FAILURE);
965 }
1c79356b 966
91447636
A
967 task_hold_locked(task);
968 task_unlock(task);
969
970 return (KERN_SUCCESS);
1c79356b
A
971}
972
316670eb
A
973kern_return_t
974task_wait(
975 task_t task,
976 boolean_t until_not_runnable)
977{
978 if (task == TASK_NULL)
979 return (KERN_INVALID_ARGUMENT);
980
981 task_lock(task);
982
983 if (!task->active) {
984 task_unlock(task);
985
986 return (KERN_FAILURE);
987 }
988
989 task_wait_locked(task, until_not_runnable);
990 task_unlock(task);
991
992 return (KERN_SUCCESS);
993}
994
1c79356b 995/*
91447636
A
996 * task_wait_locked:
997 *
1c79356b
A
998 * Wait for all threads in task to stop.
999 *
1000 * Conditions:
1001 * Called with task locked, active, and held.
1002 */
1003void
1004task_wait_locked(
316670eb
A
1005 register task_t task,
1006 boolean_t until_not_runnable)
1c79356b 1007{
91447636 1008 register thread_t thread, self;
1c79356b
A
1009
1010 assert(task->active);
1011 assert(task->suspend_count > 0);
1012
91447636
A
1013 self = current_thread();
1014
1c79356b 1015 /*
91447636 1016 * Iterate through all the threads and wait for them to
1c79356b
A
1017 * stop. Do not wait for the current thread if it is within
1018 * the task.
1019 */
91447636
A
1020 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1021 if (thread != self)
316670eb 1022 thread_wait(thread, until_not_runnable);
1c79356b
A
1023 }
1024}
1025
1026/*
1027 * task_release_locked:
1028 *
1029 * Release a kernel hold on a task.
1030 *
1031 * CONDITIONS: the task is locked and active
1032 */
1033void
1034task_release_locked(
91447636 1035 register task_t task)
1c79356b 1036{
91447636 1037 register thread_t thread;
1c79356b
A
1038
1039 assert(task->active);
9bccf70c 1040 assert(task->suspend_count > 0);
1c79356b 1041
9bccf70c
A
1042 if (--task->suspend_count > 0)
1043 return;
1c79356b 1044
91447636
A
1045 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1046 thread_mtx_lock(thread);
1047 thread_release(thread);
1048 thread_mtx_unlock(thread);
1c79356b
A
1049 }
1050}
1051
1052/*
1053 * task_release:
1054 *
1055 * Same as the internal routine above, except that it must lock
1056 * and verify that the task is active.
1057 *
1058 * CONDITIONS: The caller holds a reference to the task
1059 */
1060kern_return_t
91447636
A
1061task_release(
1062 task_t task)
1c79356b 1063{
1c79356b
A
1064 if (task == TASK_NULL)
1065 return (KERN_INVALID_ARGUMENT);
91447636 1066
1c79356b 1067 task_lock(task);
91447636 1068
1c79356b
A
1069 if (!task->active) {
1070 task_unlock(task);
91447636 1071
1c79356b
A
1072 return (KERN_FAILURE);
1073 }
1c79356b 1074
91447636
A
1075 task_release_locked(task);
1076 task_unlock(task);
1077
1078 return (KERN_SUCCESS);
1c79356b
A
1079}
1080
/*
 *	task_threads:
 *
 *	Return an array of send rights for all the threads in the task.
 *	The buffer is kalloc'd with the task unlocked, then the thread
 *	count is re-checked under the task lock, looping until the
 *	buffer is large enough.  Each collected thread gains a reference
 *	that is later consumed by convert_thread_to_port().
 *
 *	NOTE(review): the buffer is sized in sizeof(mach_port_t) units
 *	but first holds thread_t pointers, which are converted in place
 *	below — this relies on the two types having the same size.
 */
kern_return_t
task_threads(
	task_t					task,
	thread_act_array_t		*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t				*thread_list;
	thread_t				thread;
	vm_size_t				size, size_needed;
	void					*addr;
	unsigned int			i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	/* loop: allocate unlocked, re-validate size under the lock */
	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	/* take a reference on each thread while still under the lock */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
				++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the refs taken above before bailing out */
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that Mig should handle */

		/* in-place: each thread ref is consumed by the port conversion */
		for (i = 0; i < actual; ++i)
			((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
	}

	return (KERN_SUCCESS);
}
1189
316670eb
A
1190static kern_return_t
1191place_task_hold (
1c79356b 1192 register task_t task)
316670eb 1193{
1c79356b 1194 if (!task->active) {
1c79356b
A
1195 return (KERN_FAILURE);
1196 }
91447636
A
1197
1198 if (task->user_stop_count++ > 0) {
1c79356b
A
1199 /*
1200 * If the stop count was positive, the task is
1201 * already stopped and we can exit.
1202 */
1c79356b
A
1203 return (KERN_SUCCESS);
1204 }
1205
1206 /*
1207 * Put a kernel-level hold on the threads in the task (all
1208 * user-level task suspensions added together represent a
1209 * single kernel-level hold). We then wait for the threads
1210 * to stop executing user code.
1211 */
1212 task_hold_locked(task);
316670eb
A
1213 task_wait_locked(task, TRUE);
1214
1215 return (KERN_SUCCESS);
1216}
1217
1218static kern_return_t
1219release_task_hold (
1220 register task_t task,
1221 boolean_t pidresume)
1222{
1223 register boolean_t release = FALSE;
1224
1225 if (!task->active) {
1226 return (KERN_FAILURE);
1227 }
1228
1229 if (pidresume) {
1230 if (task->pidsuspended == FALSE) {
1231 return (KERN_FAILURE);
1232 }
1233 task->pidsuspended = FALSE;
1234 }
1235
1236 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
1237 if (--task->user_stop_count == 0) {
1238 release = TRUE;
1239 }
1240 }
1241 else {
1242 return (KERN_FAILURE);
1243 }
1244
1245 /*
1246 * Release the task if necessary.
1247 */
1248 if (release)
1249 task_release_locked(task);
1250
1251 return (KERN_SUCCESS);
1252}
1253
1254/*
1255 * task_suspend:
1256 *
1257 * Implement a user-level suspension on a task.
1258 *
1259 * Conditions:
1260 * The caller holds a reference to the task
1261 */
1262kern_return_t
1263task_suspend(
1264 register task_t task)
1265{
1266 kern_return_t kr;
1267
1268 if (task == TASK_NULL || task == kernel_task)
1269 return (KERN_INVALID_ARGUMENT);
1270
1271 task_lock(task);
1272
1273 kr = place_task_hold(task);
91447636 1274
1c79356b 1275 task_unlock(task);
91447636 1276
316670eb 1277 return (kr);
1c79356b
A
1278}
1279
1280/*
91447636 1281 * task_resume:
1c79356b
A
1282 * Release a kernel hold on a task.
1283 *
1284 * Conditions:
1285 * The caller holds a reference to the task
1286 */
1287kern_return_t
91447636
A
1288task_resume(
1289 register task_t task)
1c79356b 1290{
316670eb 1291 kern_return_t kr;
1c79356b 1292
91447636
A
1293 if (task == TASK_NULL || task == kernel_task)
1294 return (KERN_INVALID_ARGUMENT);
1c79356b 1295
1c79356b 1296 task_lock(task);
91447636 1297
316670eb 1298 kr = release_task_hold(task, FALSE);
91447636 1299
316670eb 1300 task_unlock(task);
91447636 1301
316670eb
A
1302 return (kr);
1303}
1304
1305kern_return_t
1306task_pidsuspend_locked(task_t task)
1307{
1308 kern_return_t kr;
1309
1310 if (task->pidsuspended) {
1311 kr = KERN_FAILURE;
1312 goto out;
1c79356b 1313 }
91447636 1314
316670eb
A
1315 task->pidsuspended = TRUE;
1316
1317 kr = place_task_hold(task);
1318 if (kr != KERN_SUCCESS) {
1319 task->pidsuspended = FALSE;
1c79356b 1320 }
316670eb
A
1321out:
1322 return(kr);
1323}
1c79356b 1324
316670eb
A
1325
1326/*
1327 * task_pidsuspend:
1328 *
1329 * Suspends a task by placing a hold on its threads.
1330 *
1331 * Conditions:
1332 * The caller holds a reference to the task
1333 */
1334kern_return_t
1335task_pidsuspend(
1336 register task_t task)
1337{
1338 kern_return_t kr;
1339
1340 if (task == TASK_NULL || task == kernel_task)
1341 return (KERN_INVALID_ARGUMENT);
1342
1343 task_lock(task);
1344
1345 kr = task_pidsuspend_locked(task);
1c79356b
A
1346
1347 task_unlock(task);
91447636 1348
316670eb
A
1349 return (kr);
1350}
1351
1352/* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */
1353#define THAW_ON_RESUME 1
1354
/*
 *	task_pidresume:
 *		Resumes a previously pid-suspended task.
 *
 * Conditions:
 *		The caller holds a reference to the task
 */
kern_return_t
task_pidresume(
	register task_t	task)
{
	kern_return_t	 kr;
#if (CONFIG_FREEZE && THAW_ON_RESUME)
	boolean_t frozen;
#endif

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

#if (CONFIG_FREEZE && THAW_ON_RESUME)
	/* snapshot and clear the frozen state while holding the task lock */
	frozen = task->frozen;
	task->frozen = FALSE;
#endif

	/* TRUE: pid-level resume; also clears task->pidsuspended */
	kr = release_task_hold(task, TRUE);

	task_unlock(task);

#if (CONFIG_FREEZE && THAW_ON_RESUME)
	/* if the task was frozen, thaw its map before it runs again */
	if ((kr == KERN_SUCCESS) && (frozen == TRUE)) {
		kr = vm_map_thaw(task->map);
	}
#endif

	return (kr);
}
1393
6d2010ae
A
1394#if CONFIG_FREEZE
1395
1396/*
1397 * task_freeze:
1398 *
316670eb 1399 * Freeze a task.
6d2010ae
A
1400 *
1401 * Conditions:
1402 * The caller holds a reference to the task
1403 */
1404kern_return_t
1405task_freeze(
1406 register task_t task,
1407 uint32_t *purgeable_count,
1408 uint32_t *wired_count,
1409 uint32_t *clean_count,
1410 uint32_t *dirty_count,
316670eb 1411 uint32_t dirty_budget,
6d2010ae
A
1412 boolean_t *shared,
1413 boolean_t walk_only)
1414{
316670eb
A
1415 kern_return_t kr;
1416
6d2010ae
A
1417 if (task == TASK_NULL || task == kernel_task)
1418 return (KERN_INVALID_ARGUMENT);
1419
316670eb
A
1420 task_lock(task);
1421
1422 if (task->frozen) {
1423 task_unlock(task);
1424 return (KERN_FAILURE);
1425 }
1426
1427 if (walk_only == FALSE) {
1428 task->frozen = TRUE;
1429 }
1430
1431 task_unlock(task);
1432
6d2010ae 1433 if (walk_only) {
316670eb 1434 kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
6d2010ae 1435 } else {
316670eb 1436 kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
6d2010ae
A
1437 }
1438
316670eb 1439 return (kr);
6d2010ae
A
1440}
1441
1442/*
1443 * task_thaw:
1444 *
1445 * Thaw a currently frozen task.
1446 *
1447 * Conditions:
1448 * The caller holds a reference to the task
1449 */
1450kern_return_t
1451task_thaw(
1452 register task_t task)
1453{
316670eb
A
1454 kern_return_t kr;
1455
6d2010ae
A
1456 if (task == TASK_NULL || task == kernel_task)
1457 return (KERN_INVALID_ARGUMENT);
1458
316670eb
A
1459 task_lock(task);
1460
1461 if (!task->frozen) {
1462 task_unlock(task);
1463 return (KERN_FAILURE);
1464 }
1465
1466 task->frozen = FALSE;
6d2010ae 1467
316670eb
A
1468 task_unlock(task);
1469
1470 kr = vm_map_thaw(task->map);
1471
1472 return (kr);
6d2010ae
A
1473}
1474
1475#endif /* CONFIG_FREEZE */
1476
1c79356b
A
1477kern_return_t
1478host_security_set_task_token(
1479 host_security_t host_security,
1480 task_t task,
1481 security_token_t sec_token,
55e303ae 1482 audit_token_t audit_token,
1c79356b
A
1483 host_priv_t host_priv)
1484{
55e303ae 1485 ipc_port_t host_port;
1c79356b
A
1486 kern_return_t kr;
1487
1488 if (task == TASK_NULL)
1489 return(KERN_INVALID_ARGUMENT);
1490
1491 if (host_security == HOST_NULL)
1492 return(KERN_INVALID_SECURITY);
1493
1494 task_lock(task);
1495 task->sec_token = sec_token;
55e303ae 1496 task->audit_token = audit_token;
1c79356b
A
1497 task_unlock(task);
1498
1499 if (host_priv != HOST_PRIV_NULL) {
55e303ae 1500 kr = host_get_host_priv_port(host_priv, &host_port);
1c79356b 1501 } else {
55e303ae 1502 kr = host_get_host_port(host_priv_self(), &host_port);
1c79356b 1503 }
55e303ae
A
1504 assert(kr == KERN_SUCCESS);
1505 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
1c79356b
A
1506 return(kr);
1507}
1508
1c79356b
A
1509/*
1510 * This routine was added, pretty much exclusively, for registering the
1511 * RPC glue vector for in-kernel short circuited tasks. Rather than
1512 * removing it completely, I have only disabled that feature (which was
1513 * the only feature at the time). It just appears that we are going to
1514 * want to add some user data to tasks in the future (i.e. bsd info,
1515 * task names, etc...), so I left it in the formal task interface.
1516 */
1517kern_return_t
1518task_set_info(
1519 task_t task,
1520 task_flavor_t flavor,
91447636
A
1521 __unused task_info_t task_info_in, /* pointer to IN array */
1522 __unused mach_msg_type_number_t task_info_count)
1c79356b 1523{
1c79356b
A
1524 if (task == TASK_NULL)
1525 return(KERN_INVALID_ARGUMENT);
1526
1527 switch (flavor) {
1528 default:
1529 return (KERN_INVALID_ARGUMENT);
1530 }
1531 return (KERN_SUCCESS);
1532}
1533
/*
 *	task_info:
 *
 *	Return information about the given task according to 'flavor'.
 *	On entry *task_info_count is the caller's buffer size (in
 *	natural_t units); on success it is set to the count actually
 *	used.  Most flavors copy out under the task lock and fall
 *	through to the common unlock at the bottom; the
 *	TASK_KERNELMEMORY_INFO case manages the lock itself and
 *	returns directly.
 */
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	kern_return_t error = KERN_SUCCESS;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	/* the current task may query itself even while terminating */
	if ((task != current_task()) && (!task->active)) {
		task_unlock(task);
		return (KERN_INVALID_ARGUMENT);
	}

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	case TASK_BASIC2_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t				map;
		clock_sec_t				secs;
		clock_usec_t			usecs;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
		if (flavor == TASK_BASIC2_INFO_32) {
			/*
			 * The "BASIC2" flavor gets the maximum resident
			 * size instead of the current resident size...
			 */
			basic_info->resident_size = pmap_resident_max(map->pmap);
		} else {
			basic_info->resident_size = pmap_resident_count(map->pmap);
		}
		basic_info->resident_size *= PAGE_SIZE;

		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds = 
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t				map;
		clock_sec_t				secs;
		clock_usec_t			usecs;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size  = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap))
			* PAGE_SIZE_64;

		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case MACH_TASK_BASIC_INFO:
	{
		mach_task_basic_info_t  basic_info;
		vm_map_t                map;
		clock_sec_t             secs;
		clock_usec_t            usecs;

		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		basic_info = (mach_task_basic_info_t)task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;

		basic_info->virtual_size  = map->size;

		basic_info->resident_size =
		    (mach_vm_size_t)(pmap_resident_count(map->pmap));
		basic_info->resident_size *= PAGE_SIZE_64;

		basic_info->resident_size_max =
		    (mach_vm_size_t)(pmap_resident_max(map->pmap));
		basic_info->resident_size_max *= PAGE_SIZE_64;

		basic_info->policy = ((task != kernel_task) ?
		                      POLICY_TIMESHARE : POLICY_RR);

		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
		    (typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
		    (typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t					thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;


		/* accumulate per-thread user/system times for live threads */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			time_value_t	user_time, system_time;

			thread_read_times(thread, &user_time, &system_time);

			time_value_add(&times_info->user_time, &user_time);
			time_value_add(&times_info->system_time, &system_time);
		}


		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t			thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;


		/* start from the task's accumulated (terminated-thread) totals */
		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;
			spl_t 		x;

			x = splsched();
			thread_lock(thread);

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			if (thread->precise_user_kernel_time) {
				info->threads_system += tval;
				info->total_system += tval;
			} else {
				/* system_timer may represent either sys or user */
				info->threads_user += tval;
				info->total_user += tval;
			}

			thread_unlock(thread);
			splx(x);
		}


		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	case TASK_DYLD_INFO:
	{
		task_dyld_info_t info;

		/*
		 * We added the format field to TASK_DYLD_INFO output.  For
		 * temporary backward compatibility, accept the fact that
		 * clients may ask for the old version - distinquished by the
		 * size of the expected result structure.
		 */
#define TASK_LEGACY_DYLD_INFO_COUNT \
		offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)

		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_dyld_info_t)task_info_out;
		info->all_image_info_addr = task->all_image_info_addr;
		info->all_image_info_size = task->all_image_info_size;

		/* only set format on output for those expecting it */
		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
			info->all_image_info_format = task_has_64BitAddr(task) ?
				                 TASK_DYLD_ALL_IMAGE_INFO_64 : 
				                 TASK_DYLD_ALL_IMAGE_INFO_32 ;
			*task_info_count = TASK_DYLD_INFO_COUNT;
		} else {
			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
		}
		break;
	}

	case TASK_EXTMOD_INFO:
	{
		task_extmod_info_t info;
		void *p;

		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_extmod_info_t)task_info_out;

		/* take the UUID from the BSD proc when one is attached */
		p = get_bsdtask_info(task);
		if (p) {
			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
		} else {
			bzero(info->task_uuid, sizeof(info->task_uuid));
		}
		info->extmod_statistics = task->extmod_statistics;
		*task_info_count = TASK_EXTMOD_INFO_COUNT;

		break;
	}

	case TASK_KERNELMEMORY_INFO:
	{
		task_kernelmemory_info_t	tkm_info;
		ledger_amount_t			credit, debit;

		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
		   error = KERN_INVALID_ARGUMENT;
		   break;
		}

		tkm_info = (task_kernelmemory_info_t) task_info_out;
		tkm_info->total_palloc = 0;
		tkm_info->total_pfree = 0;
		tkm_info->total_salloc = 0;
		tkm_info->total_sfree = 0;

		if (task == kernel_task) {
			/*
			 * All shared allocs/frees from other tasks count against
			 * the kernel private memory usage.  If we are looking up
			 * info for the kernel task, gather from everywhere.
			 */
			task_unlock(task);

			/* start by accounting for all the terminated tasks against the kernel */
			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;

			/* count all other task/thread shared alloc/free against the kernel */
			lck_mtx_lock(&tasks_threads_lock);

			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
			queue_iterate(&tasks, task, task_t, tasks) {
				if (task == kernel_task) {
					if (ledger_get_entries(task->ledger,
					    task_ledgers.tkm_private, &credit,
					    &debit) == KERN_SUCCESS) {
						tkm_info->total_palloc += credit;
						tkm_info->total_pfree += debit;
					}
				}
				if (!ledger_get_entries(task->ledger,
				    task_ledgers.tkm_shared, &credit, &debit)) {
					tkm_info->total_palloc += credit;
					tkm_info->total_pfree += debit;
				}
			}
			lck_mtx_unlock(&tasks_threads_lock);
		} else {
			if (!ledger_get_entries(task->ledger,
			    task_ledgers.tkm_private, &credit, &debit)) {
				tkm_info->total_palloc = credit;
				tkm_info->total_pfree = debit;
			}
			if (!ledger_get_entries(task->ledger,
			    task_ledgers.tkm_shared, &credit, &debit)) {
				tkm_info->total_salloc = credit;
				tkm_info->total_sfree = debit;
			}
			task_unlock(task);
		}

		/* this case has already dropped the task lock: return directly */
		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
		return KERN_SUCCESS;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{

		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		error = KERN_INVALID_POLICY;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		if (*task_info_count < POLICY_RR_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		rr_base = (policy_rr_base_t) task_info_out;

		/* only the kernel task reports as round-robin here */
		if (task != kernel_task) {
			error = KERN_INVALID_POLICY;
			break;
		}

		rr_base->base_priority = task->priority;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		ts_base = (policy_timeshare_base_t) task_info_out;

		/* the kernel task is not timeshare */
		if (task == kernel_task) {
			error = KERN_INVALID_POLICY;
			break;
		}

		ts_base->base_priority = task->priority;

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		sec_token_p = (security_token_t *) task_info_out;

		*sec_token_p = task->sec_token;

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}

	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		audit_token_p = (audit_token_t *) task_info_out;

		*audit_token_p = task->audit_token;

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}

	case TASK_SCHED_INFO:
		error = KERN_INVALID_ARGUMENT;
		break;

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;
		register thread_t			thread;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
		   error = KERN_INVALID_ARGUMENT;
		   break;
		}

		events_info = (task_events_info_t) task_info_out;


		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;

		events_info->csw = task->c_switch;

		/* add in the per-thread counters of the live threads */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			events_info->csw	   += thread->c_switch;
			events_info->syscalls_mach += thread->syscalls_mach;
			events_info->syscalls_unix += thread->syscalls_unix;
		}


		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}
	case TASK_AFFINITY_TAG_INFO:
	{
		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		error = task_affinity_info(task, task_info_out, task_info_count);
		break;
	}
	default:
		error = KERN_INVALID_ARGUMENT;
	}

	task_unlock(task);
	return (error);
}
2040
2d21ac55
A
/*
 *	task_vtimer_set:
 *
 *	Arm the given virtual timer type(s) on the task and record, for
 *	every thread, a baseline snapshot of the relevant timers so that
 *	task_vtimer_update() can later compute deltas.
 */
void
task_vtimer_set(
	task_t		task,
	integer_t	which)
{
	thread_t	thread;
	spl_t		x;

	/* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

	task_lock(task);

	task->vtimers |= which;

	switch (which) {

	case TASK_VTIMER_USER:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			/* without precise accounting, system_timer holds user time too */
			if (thread->precise_user_kernel_time)
				thread->vtimer_user_save = timer_grab(&thread->user_timer);
			else
				thread->vtimer_user_save = timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_PROF:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			/* profiling baseline covers user + system time */
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_RLIM:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			/* rlimit baseline covers user + system time */
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;
	}

	task_unlock(task);
}
2095
2096void
2097task_vtimer_clear(
2098 task_t task,
2099 integer_t which)
2100{
2101 assert(task == current_task());
2102
2103 task_lock(task);
2104
2105 task->vtimers &= ~which;
2106
2107 task_unlock(task);
2108}
2109
/*
 *	task_vtimer_update:
 *
 *	Report, in *microsecs, the time the current thread has accrued
 *	against the given virtual timer type since the last snapshot,
 *	and advance the snapshot.  Must be called on the current task
 *	with the corresponding vtimer armed.
 */
void
task_vtimer_update(
__unused
	task_t		task,
	integer_t	which,
	uint32_t	*microsecs)
{
	thread_t	thread = current_thread();
	uint32_t	tdelt;
	clock_sec_t	secs;
	uint64_t	tsum;

	assert(task == current_task());

	assert(task->vtimers & which);

	secs = tdelt = 0;

	switch (which) {

	case TASK_VTIMER_USER:
		/* without precise accounting, system_timer holds user time too */
		if (thread->precise_user_kernel_time) {
			tdelt = (uint32_t)timer_delta(&thread->user_timer,
								&thread->vtimer_user_save);
		} else {
			tdelt = (uint32_t)timer_delta(&thread->system_timer,
								&thread->vtimer_user_save);
		}
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		/* if the time delta is smaller than a usec, ignore */
		if (*microsecs != 0)
			thread->vtimer_prof_save = tsum;
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
		thread->vtimer_rlim_save = tsum;
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;
	}

}
2161
1c79356b
A
2162/*
2163 * task_assign:
2164 *
2165 * Change the assigned processor set for the task
2166 */
2167kern_return_t
2168task_assign(
91447636
A
2169 __unused task_t task,
2170 __unused processor_set_t new_pset,
2171 __unused boolean_t assign_threads)
1c79356b 2172{
1c79356b
A
2173 return(KERN_FAILURE);
2174}
2175
2176/*
2177 * task_assign_default:
2178 *
2179 * Version of task_assign to assign to default processor set.
2180 */
2181kern_return_t
2182task_assign_default(
2183 task_t task,
2184 boolean_t assign_threads)
2185{
2d21ac55 2186 return (task_assign(task, &pset0, assign_threads));
1c79356b
A
2187}
2188
2189/*
2190 * task_get_assignment
2191 *
2192 * Return name of processor set that task is assigned to.
2193 */
2194kern_return_t
2195task_get_assignment(
2196 task_t task,
2197 processor_set_t *pset)
2198{
2199 if (!task->active)
2200 return(KERN_FAILURE);
2201
2d21ac55
A
2202 *pset = &pset0;
2203
2204 return (KERN_SUCCESS);
1c79356b
A
2205}
2206
2207
2208/*
2209 * task_policy
2210 *
2211 * Set scheduling policy and parameters, both base and limit, for
2212 * the given task. Policy must be a policy which is enabled for the
2213 * processor set. Change contained threads if requested.
2214 */
2215kern_return_t
2216task_policy(
91447636
A
2217 __unused task_t task,
2218 __unused policy_t policy_id,
2219 __unused policy_base_t base,
2220 __unused mach_msg_type_number_t count,
2221 __unused boolean_t set_limit,
2222 __unused boolean_t change)
1c79356b
A
2223{
2224 return(KERN_FAILURE);
2225}
2226
2227/*
2228 * task_set_policy
2229 *
2230 * Set scheduling policy and parameters, both base and limit, for
2231 * the given task. Policy can be any policy implemented by the
2232 * processor set, whether enabled or not. Change contained threads
2233 * if requested.
2234 */
2235kern_return_t
2236task_set_policy(
91447636
A
2237 __unused task_t task,
2238 __unused processor_set_t pset,
2239 __unused policy_t policy_id,
2240 __unused policy_base_t base,
2241 __unused mach_msg_type_number_t base_count,
2242 __unused policy_limit_t limit,
2243 __unused mach_msg_type_number_t limit_count,
2244 __unused boolean_t change)
1c79356b
A
2245{
2246 return(KERN_FAILURE);
2247}
2248
91447636 2249#if FAST_TAS
1c79356b
A
2250kern_return_t
2251task_set_ras_pc(
2252 task_t task,
2253 vm_offset_t pc,
2254 vm_offset_t endpc)
2255{
1c79356b
A
2256 extern int fast_tas_debug;
2257
2258 if (fast_tas_debug) {
2259 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
2260 task, pc, endpc);
2261 }
2262 task_lock(task);
2263 task->fast_tas_base = pc;
2264 task->fast_tas_end = endpc;
2265 task_unlock(task);
2266 return KERN_SUCCESS;
91447636 2267}
1c79356b 2268#else /* FAST_TAS */
91447636
A
2269kern_return_t
2270task_set_ras_pc(
2271 __unused task_t task,
2272 __unused vm_offset_t pc,
2273 __unused vm_offset_t endpc)
2274{
1c79356b 2275 return KERN_FAILURE;
1c79356b 2276}
91447636 2277#endif /* FAST_TAS */
1c79356b
A
2278
2279void
2280task_synchronizer_destroy_all(task_t task)
2281{
2282 semaphore_t semaphore;
2283 lock_set_t lock_set;
2284
2285 /*
2286 * Destroy owned semaphores
2287 */
2288
2289 while (!queue_empty(&task->semaphore_list)) {
2290 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
2291 (void) semaphore_destroy(task, semaphore);
2292 }
2293
2294 /*
2295 * Destroy owned lock sets
2296 */
2297
2298 while (!queue_empty(&task->lock_set_list)) {
2299 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
2300 (void) lock_set_destroy(task, lock_set);
2301 }
2302}
2303
b0d623f7
A
2304/*
2305 * Install default (machine-dependent) initial thread state
2306 * on the task. Subsequent thread creation will have this initial
2307 * state set on the thread by machine_thread_inherit_taskwide().
2308 * Flavors and structures are exactly the same as those to thread_set_state()
2309 */
2310kern_return_t
2311task_set_state(
2312 task_t task,
2313 int flavor,
2314 thread_state_t state,
2315 mach_msg_type_number_t state_count)
2316{
2317 kern_return_t ret;
2318
2319 if (task == TASK_NULL) {
2320 return (KERN_INVALID_ARGUMENT);
2321 }
2322
2323 task_lock(task);
2324
2325 if (!task->active) {
2326 task_unlock(task);
2327 return (KERN_FAILURE);
2328 }
2329
2330 ret = machine_task_set_state(task, flavor, state, state_count);
2331
2332 task_unlock(task);
2333 return ret;
2334}
2335
2336/*
2337 * Examine the default (machine-dependent) initial thread state
2338 * on the task, as set by task_set_state(). Flavors and structures
2339 * are exactly the same as those passed to thread_get_state().
2340 */
2341kern_return_t
2342task_get_state(
2343 task_t task,
2344 int flavor,
2345 thread_state_t state,
2346 mach_msg_type_number_t *state_count)
2347{
2348 kern_return_t ret;
2349
2350 if (task == TASK_NULL) {
2351 return (KERN_INVALID_ARGUMENT);
2352 }
2353
2354 task_lock(task);
2355
2356 if (!task->active) {
2357 task_unlock(task);
2358 return (KERN_FAILURE);
2359 }
2360
2361 ret = machine_task_get_state(task, flavor, state, state_count);
2362
2363 task_unlock(task);
2364 return ret;
2365}
2366
2367
1c79356b
A
2368/*
2369 * We need to export some functions to other components that
2370 * are currently implemented in macros within the osfmk
2371 * component. Just export them as functions of the same name.
2372 */
2373boolean_t is_kerneltask(task_t t)
2374{
2375 if (t == kernel_task)
55e303ae
A
2376 return (TRUE);
2377
2378 return (FALSE);
1c79356b
A
2379}
2380
b0d623f7
A
2381int
2382check_for_tasksuspend(task_t task)
2383{
2384
2385 if (task == TASK_NULL)
2386 return (0);
2387
2388 return (task->suspend_count > 0);
2389}
2390
1c79356b 2391#undef current_task
91447636
A
2392task_t current_task(void);
2393task_t current_task(void)
1c79356b
A
2394{
2395 return (current_task_fast());
2396}
91447636
A
2397
2398#undef task_reference
2399void task_reference(task_t task);
2400void
2401task_reference(
2402 task_t task)
2403{
2404 if (task != TASK_NULL)
2405 task_reference_internal(task);
2406}
2d21ac55 2407
6d2010ae
A
2408/*
2409 * This routine is called always with task lock held.
2410 * And it returns a thread handle without reference as the caller
2411 * operates on it under the task lock held.
2412 */
2413thread_t
2414task_findtid(task_t task, uint64_t tid)
2415{
2416 thread_t thread= THREAD_NULL;
2417
2418 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2419 if (thread->thread_id == tid)
316670eb 2420 return(thread);
6d2010ae 2421 }
316670eb 2422 return(THREAD_NULL);
6d2010ae
A
2423}
2424
2425
2d21ac55
A
2426#if CONFIG_MACF_MACH
2427/*
2428 * Protect 2 task labels against modification by adding a reference on
2429 * both label handles. The locks do not actually have to be held while
2430 * using the labels as only labels with one reference can be modified
2431 * in place.
2432 */
2433
2434void
2435tasklabel_lock2(
2436 task_t a,
2437 task_t b)
2438{
2439 labelh_reference(a->label);
2440 labelh_reference(b->label);
2441}
2442
2443void
2444tasklabel_unlock2(
2445 task_t a,
2446 task_t b)
2447{
2448 labelh_release(a->label);
2449 labelh_release(b->label);
2450}
2451
2452void
2453mac_task_label_update_internal(
2454 struct label *pl,
2455 struct task *task)
2456{
2457
2458 tasklabel_lock(task);
2459 task->label = labelh_modify(task->label);
2460 mac_task_label_update(pl, &task->maclabel);
2461 tasklabel_unlock(task);
2462 ip_lock(task->itk_self);
2463 mac_port_label_update_cred(pl, &task->itk_self->ip_label);
2464 ip_unlock(task->itk_self);
2465}
2466
2467void
2468mac_task_label_modify(
2469 struct task *task,
2470 void *arg,
2471 void (*f) (struct label *l, void *arg))
2472{
2473
2474 tasklabel_lock(task);
2475 task->label = labelh_modify(task->label);
2476 (*f)(&task->maclabel, arg);
2477 tasklabel_unlock(task);
2478}
2479
2480struct label *
2481mac_task_get_label(struct task *task)
2482{
2483 return (&task->maclabel);
2484}
2485#endif