]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/task.c
xnu-2050.18.24.tar.gz
[apple/xnu.git] / osfmk / kern / task.c
CommitLineData
1c79356b 1/*
316670eb 2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63/*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
2d21ac55
A
81/*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
1c79356b 88
1c79356b 89#include <fast_tas.h>
1c79356b
A
90#include <platforms.h>
91
91447636 92#include <mach/mach_types.h>
1c79356b 93#include <mach/boolean.h>
91447636 94#include <mach/host_priv.h>
1c79356b
A
95#include <mach/machine/vm_types.h>
96#include <mach/vm_param.h>
97#include <mach/semaphore.h>
98#include <mach/task_info.h>
99#include <mach/task_special_ports.h>
91447636
A
100
101#include <ipc/ipc_types.h>
1c79356b
A
102#include <ipc/ipc_space.h>
103#include <ipc/ipc_entry.h>
91447636
A
104
105#include <kern/kern_types.h>
1c79356b
A
106#include <kern/mach_param.h>
107#include <kern/misc_protos.h>
108#include <kern/task.h>
109#include <kern/thread.h>
110#include <kern/zalloc.h>
111#include <kern/kalloc.h>
112#include <kern/processor.h>
113#include <kern/sched_prim.h> /* for thread_wakeup */
1c79356b 114#include <kern/ipc_tt.h>
1c79356b 115#include <kern/host.h>
91447636
A
116#include <kern/clock.h>
117#include <kern/timer.h>
1c79356b
A
118#include <kern/assert.h>
119#include <kern/sync_lock.h>
2d21ac55 120#include <kern/affinity.h>
91447636
A
121
122#include <vm/pmap.h>
123#include <vm/vm_map.h>
124#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
125#include <vm/vm_pageout.h>
2d21ac55 126#include <vm/vm_protos.h>
91447636 127
1c79356b
A
128/*
129 * Exported interfaces
130 */
131
132#include <mach/task_server.h>
133#include <mach/mach_host_server.h>
134#include <mach/host_security_server.h>
91447636 135#include <mach/mach_port_server.h>
2d21ac55 136#include <mach/security_server.h>
91447636 137
2d21ac55
A
138#include <vm/vm_shared_region.h>
139
140#if CONFIG_MACF_MACH
141#include <security/mac_mach_internal.h>
142#endif
1c79356b 143
b0d623f7
A
144#if CONFIG_COUNTERS
145#include <pmc/pmc.h>
146#endif /* CONFIG_COUNTERS */
147
148task_t kernel_task;
149zone_t task_zone;
150lck_attr_t task_lck_attr;
151lck_grp_t task_lck_grp;
152lck_grp_attr_t task_lck_grp_attr;
316670eb
A
153#if CONFIG_EMBEDDED
154lck_mtx_t task_watch_mtx;
155#endif /* CONFIG_EMBEDDED */
b0d623f7 156
6d2010ae
A
157zinfo_usage_store_t tasks_tkm_private;
158zinfo_usage_store_t tasks_tkm_shared;
159
316670eb
A
160static ledger_template_t task_ledger_template = NULL;
161struct _task_ledger_indices task_ledgers = {-1, -1, -1, -1, -1};
162void init_task_ledgers(void);
163
164
b0d623f7 165int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
1c79356b 166
6d2010ae
A
167/* externs for BSD kernel */
168extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
169
1c79356b
A
170/* Forwards */
171
172void task_hold_locked(
173 task_t task);
174void task_wait_locked(
316670eb
A
175 task_t task,
176 boolean_t until_not_runnable);
1c79356b
A
177void task_release_locked(
178 task_t task);
1c79356b
A
179void task_free(
180 task_t task );
181void task_synchronizer_destroy_all(
182 task_t task);
1c79356b 183
b0d623f7
A
184int check_for_tasksuspend(
185 task_t task);
186
55e303ae
A
187void
188task_backing_store_privileged(
189 task_t task)
190{
191 task_lock(task);
192 task->priv_flags |= VM_BACKING_STORE_PRIV;
193 task_unlock(task);
194 return;
195}
196
91447636
A
197
/*
 * task_set_64bit:
 *
 * Flip a task between 32-bit and 64-bit user address spaces.
 * Going 32-bit tears down any mappings above the 32-bit range;
 * on x86 every thread's register save-state addrmode is switched
 * to match.  No-op if the task is already in the requested mode.
 */
void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
#if defined(__i386__) || defined(__x86_64__)
	thread_t thread;
#endif /* __i386__ */
	int	vm_flags = 0;

	if (is64bit) {
		/* Already 64-bit: nothing to do. */
		if (task_has_64BitAddr(task))
			return;

		task_set_64BitAddr(task);
	} else {
		/* Already 32-bit: nothing to do. */
		if ( !task_has_64BitAddr(task))
			return;

		/*
		 * Deallocate all memory previously allocated
		 * above the 32-bit address space, since it won't
		 * be accessible anymore.
		 */
		/* remove regular VM map entries & pmap mappings */
		(void) vm_map_remove(task->map,
				     (vm_map_offset_t) VM_MAX_ADDRESS,
				     MACH_VM_MAX_ADDRESS,
				     0);
		/* remove the higher VM mappings */
		(void) vm_map_remove(task->map,
				     MACH_VM_MAX_ADDRESS,
				     0xFFFFFFFFFFFFF000ULL,
				     vm_flags);
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */
#if defined(__i386__) || defined(__x86_64__)
	/* Switch each thread's machine save-state flavor under its mutex. */
	task_lock(task);
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);
	}
	task_unlock(task);
#endif /* __i386__ */
}
250
b0d623f7
A
251
252void
253task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
254{
255 task_lock(task);
256 task->all_image_info_addr = addr;
257 task->all_image_info_size = size;
258 task_unlock(task);
259}
260
1c79356b
A
/*
 * task_init:
 *
 * Boot-time initialization of the task subsystem: lock group/attrs,
 * the task zone, the per-task ledger template, and finally the
 * kernel task itself (whose map is replaced with kernel_map).
 */
void
task_init(void)
{

	lck_grp_attr_setdefault(&task_lck_grp_attr);
	lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
	lck_attr_setdefault(&task_lck_attr);
	lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
#if CONFIG_EMBEDDED
	lck_mtx_init(&task_watch_mtx, &task_lck_grp, &task_lck_attr);
#endif /* CONFIG_EMBEDDED */

	/* Zone backing all struct task allocations; capped at task_max. */
	task_zone = zinit(
			sizeof(struct task),
			task_max * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	zone_change(task_zone, Z_NOENCRYPT, TRUE);

	/* Ledger template must exist before the first task is created. */
	init_task_ledgers();

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, FALSE, TRUE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
#endif
		panic("task_init\n");

	/* Discard the freshly created map; the kernel task uses kernel_map. */
	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;

}
297
1c79356b
A
298/*
299 * Create a task running in the kernel address space. It may
300 * have its own map of size mem_size and may have ipc privileges.
301 */
302kern_return_t
303kernel_task_create(
91447636
A
304 __unused task_t parent_task,
305 __unused vm_offset_t map_base,
306 __unused vm_size_t map_size,
307 __unused task_t *child_task)
1c79356b 308{
55e303ae 309 return (KERN_INVALID_ARGUMENT);
1c79356b
A
310}
311
312kern_return_t
313task_create(
2d21ac55 314 task_t parent_task,
91447636 315 __unused ledger_port_array_t ledger_ports,
2d21ac55
A
316 __unused mach_msg_type_number_t num_ledger_ports,
317 __unused boolean_t inherit_memory,
318 __unused task_t *child_task) /* OUT */
1c79356b
A
319{
320 if (parent_task == TASK_NULL)
321 return(KERN_INVALID_ARGUMENT);
322
2d21ac55
A
323 /*
324 * No longer supported: too many calls assume that a task has a valid
325 * process attached.
326 */
327 return(KERN_FAILURE);
1c79356b
A
328}
329
330kern_return_t
331host_security_create_task_token(
91447636 332 host_security_t host_security,
2d21ac55
A
333 task_t parent_task,
334 __unused security_token_t sec_token,
335 __unused audit_token_t audit_token,
336 __unused host_priv_t host_priv,
91447636
A
337 __unused ledger_port_array_t ledger_ports,
338 __unused mach_msg_type_number_t num_ledger_ports,
2d21ac55
A
339 __unused boolean_t inherit_memory,
340 __unused task_t *child_task) /* OUT */
1c79356b 341{
1c79356b
A
342 if (parent_task == TASK_NULL)
343 return(KERN_INVALID_ARGUMENT);
344
345 if (host_security == HOST_NULL)
346 return(KERN_INVALID_SECURITY);
347
2d21ac55
A
348 /*
349 * No longer supported.
350 */
351 return(KERN_FAILURE);
1c79356b
A
352}
353
316670eb
A
354void
355init_task_ledgers(void)
356{
357 ledger_template_t t;
358
359 assert(task_ledger_template == NULL);
360 assert(kernel_task == TASK_NULL);
361
362 if ((t = ledger_template_create("Per-task ledger")) == NULL)
363 panic("couldn't create task ledger template");
364
365 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
366 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
367 "physmem", "bytes");
368 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
369 "bytes");
370 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
371 "bytes");
372 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
373 "bytes");
374
375 if ((task_ledgers.cpu_time < 0) || (task_ledgers.tkm_private < 0) ||
376 (task_ledgers.tkm_shared < 0) || (task_ledgers.phys_mem < 0) ||
377 (task_ledgers.wired_mem < 0)) {
378 panic("couldn't create entries for task ledger template");
379 }
380
381 task_ledger_template = t;
382}
383
1c79356b 384kern_return_t
55e303ae 385task_create_internal(
1c79356b
A
386 task_t parent_task,
387 boolean_t inherit_memory,
0c530ab8 388 boolean_t is_64bit,
1c79356b
A
389 task_t *child_task) /* OUT */
390{
2d21ac55
A
391 task_t new_task;
392 vm_shared_region_t shared_region;
316670eb 393 ledger_t ledger = NULL;
1c79356b
A
394
395 new_task = (task_t) zalloc(task_zone);
396
397 if (new_task == TASK_NULL)
398 return(KERN_RESOURCE_SHORTAGE);
399
400 /* one ref for just being alive; one for our caller */
401 new_task->ref_count = 2;
402
316670eb
A
403 /* allocate with active entries */
404 assert(task_ledger_template != NULL);
405 if ((ledger = ledger_instantiate(task_ledger_template,
406 LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
407 zfree(task_zone, new_task);
408 return(KERN_RESOURCE_SHORTAGE);
409 }
410 new_task->ledger = ledger;
411
b0d623f7 412 /* if inherit_memory is true, parent_task MUST not be NULL */
1c79356b 413 if (inherit_memory)
316670eb 414 new_task->map = vm_map_fork(ledger, parent_task->map);
1c79356b 415 else
316670eb
A
416 new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
417 (vm_map_offset_t)(VM_MIN_ADDRESS),
418 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
1c79356b 419
2d21ac55
A
420 /* Inherit memlock limit from parent */
421 if (parent_task)
b0d623f7 422 vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
2d21ac55 423
b0d623f7 424 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
55e303ae 425 queue_init(&new_task->threads);
1c79356b 426 new_task->suspend_count = 0;
55e303ae 427 new_task->thread_count = 0;
55e303ae 428 new_task->active_thread_count = 0;
1c79356b 429 new_task->user_stop_count = 0;
0b4e3aa0 430 new_task->role = TASK_UNSPECIFIED;
1c79356b 431 new_task->active = TRUE;
b0d623f7 432 new_task->halting = FALSE;
2d21ac55 433 new_task->user_data = NULL;
1c79356b
A
434 new_task->faults = 0;
435 new_task->cow_faults = 0;
436 new_task->pageins = 0;
437 new_task->messages_sent = 0;
438 new_task->messages_received = 0;
439 new_task->syscalls_mach = 0;
55e303ae 440 new_task->priv_flags = 0;
1c79356b 441 new_task->syscalls_unix=0;
2d21ac55 442 new_task->c_switch = new_task->p_switch = new_task->ps_switch = 0;
55e303ae
A
443 new_task->taskFeatures[0] = 0; /* Init task features */
444 new_task->taskFeatures[1] = 0; /* Init task features */
1c79356b 445
6d2010ae
A
446 zinfo_task_init(new_task);
447
1c79356b 448#ifdef MACH_BSD
2d21ac55 449 new_task->bsd_info = NULL;
1c79356b
A
450#endif /* MACH_BSD */
451
b0d623f7 452#if defined(__i386__) || defined(__x86_64__)
0c530ab8 453 new_task->i386_ldt = 0;
b0d623f7 454 new_task->task_debug = NULL;
0c530ab8
A
455#endif
456
55e303ae 457
1c79356b
A
458 queue_init(&new_task->semaphore_list);
459 queue_init(&new_task->lock_set_list);
460 new_task->semaphores_owned = 0;
461 new_task->lock_sets_owned = 0;
462
2d21ac55 463#if CONFIG_MACF_MACH
2d21ac55
A
464 new_task->label = labelh_new(1);
465 mac_task_label_init (&new_task->maclabel);
466#endif
1c79356b
A
467
468 ipc_task_init(new_task, parent_task);
469
91447636
A
470 new_task->total_user_time = 0;
471 new_task->total_system_time = 0;
1c79356b 472
2d21ac55 473 new_task->vtimers = 0;
1c79356b 474
2d21ac55
A
475 new_task->shared_region = NULL;
476
477 new_task->affinity_space = NULL;
1c79356b 478
b0d623f7
A
479#if CONFIG_COUNTERS
480 new_task->t_chud = 0U;
481#endif
482
316670eb
A
483 new_task->pidsuspended = FALSE;
484 new_task->frozen = FALSE;
485 new_task->rusage_cpu_flags = 0;
486 new_task->rusage_cpu_percentage = 0;
487 new_task->rusage_cpu_interval = 0;
488 new_task->rusage_cpu_deadline = 0;
489 new_task->rusage_cpu_callt = NULL;
490 new_task->proc_terminate = 0;
491#if CONFIG_EMBEDDED
492 queue_init(&new_task->task_watchers);
493 new_task->appstate = TASK_APPSTATE_ACTIVE;
494 new_task->num_taskwatchers = 0;
495 new_task->watchapplying = 0;
496#endif /* CONFIG_EMBEDDED */
497
2d21ac55 498 if (parent_task != TASK_NULL) {
1c79356b 499 new_task->sec_token = parent_task->sec_token;
55e303ae 500 new_task->audit_token = parent_task->audit_token;
1c79356b 501
2d21ac55
A
502 /* inherit the parent's shared region */
503 shared_region = vm_shared_region_get(parent_task);
504 vm_shared_region_set(new_task, shared_region);
1c79356b 505
91447636
A
506 if(task_has_64BitAddr(parent_task))
507 task_set_64BitAddr(new_task);
b0d623f7
A
508 new_task->all_image_info_addr = parent_task->all_image_info_addr;
509 new_task->all_image_info_size = parent_task->all_image_info_size;
0c530ab8 510
b0d623f7 511#if defined(__i386__) || defined(__x86_64__)
0c530ab8
A
512 if (inherit_memory && parent_task->i386_ldt)
513 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
514#endif
2d21ac55
A
515 if (inherit_memory && parent_task->affinity_space)
516 task_affinity_create(parent_task, new_task);
b0d623f7
A
517
518 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
6d2010ae
A
519 new_task->policystate = parent_task->policystate;
520 /* inherit the self action state */
316670eb 521 new_task->appliedstate = parent_task->appliedstate;
6d2010ae
A
522 new_task->ext_policystate = parent_task->ext_policystate;
523#if NOTYET
524 /* till the child lifecycle is cleared do not inherit external action */
316670eb 525 new_task->ext_appliedstate = parent_task->ext_appliedstate;
6d2010ae 526#else
316670eb 527 new_task->ext_appliedstate = default_task_null_policy;
6d2010ae 528#endif
1c79356b
A
529 }
530 else {
1c79356b 531 new_task->sec_token = KERNEL_SECURITY_TOKEN;
55e303ae 532 new_task->audit_token = KERNEL_AUDIT_TOKEN;
b0d623f7
A
533#ifdef __LP64__
534 if(is_64bit)
535 task_set_64BitAddr(new_task);
536#endif
6d2010ae
A
537 new_task->all_image_info_addr = (mach_vm_address_t)0;
538 new_task->all_image_info_size = (mach_vm_size_t)0;
b0d623f7
A
539
540 new_task->pset_hint = PROCESSOR_SET_NULL;
6d2010ae
A
541 new_task->policystate = default_task_proc_policy;
542 new_task->ext_policystate = default_task_proc_policy;
316670eb
A
543 new_task->appliedstate = default_task_null_policy;
544 new_task->ext_appliedstate = default_task_null_policy;
1c79356b
A
545 }
546
0b4e3aa0 547 if (kernel_task == TASK_NULL) {
55e303ae 548 new_task->priority = BASEPRI_KERNEL;
0b4e3aa0
A
549 new_task->max_priority = MAXPRI_KERNEL;
550 }
551 else {
552 new_task->priority = BASEPRI_DEFAULT;
553 new_task->max_priority = MAXPRI_USER;
554 }
6d2010ae
A
555
556 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
2d21ac55 557
b0d623f7 558 lck_mtx_lock(&tasks_threads_lock);
2d21ac55
A
559 queue_enter(&tasks, new_task, task_t, tasks);
560 tasks_count++;
b0d623f7 561 lck_mtx_unlock(&tasks_threads_lock);
1c79356b 562
55e303ae
A
563 if (vm_backing_store_low && parent_task != NULL)
564 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
1c79356b
A
565
566 ipc_task_enable(new_task);
567
1c79356b
A
568 *child_task = new_task;
569 return(KERN_SUCCESS);
570}
571
572/*
91447636 573 * task_deallocate:
1c79356b 574 *
91447636 575 * Drop a reference on a task.
1c79356b
A
576 */
/*
 * task_deallocate:
 *
 * Drop a reference on a task.  When the last reference goes away,
 * unlink the task from the terminated list, tear down its machine
 * state, IPC, VM map and ledger, roll its tkm counters into the
 * global totals, and free the task structure back to its zone.
 */
void
task_deallocate(
	task_t		task)
{
	ledger_amount_t credit, debit;

	if (task == TASK_NULL)
	    return;

	/* Non-final reference: nothing else to do. */
	if (task_deallocate_internal(task) > 0)
		return;

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&terminated_tasks, task, task_t, tasks);
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup before ripping apart
	 * the task.
	 */
	machine_task_terminate(task);

	ipc_task_terminate(task);

	if (task->affinity_space)
		task_affinity_deallocate(task);

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	lck_mtx_destroy(&task->lock, &task_lck_grp);

#if CONFIG_MACF_MACH
	labelh_release(task->label);
#endif

	/*
	 * Fold the dying task's kernel-memory ledger entries into the
	 * system-wide zinfo totals before dropping the ledger.
	 */
	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
	}
	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
	}
	ledger_dereference(task->ledger);
	zinfo_task_free(task);
	zfree(task_zone, task);
}
628
0c530ab8
A
629/*
630 * task_name_deallocate:
631 *
632 * Drop a reference on a task name.
633 */
634void
635task_name_deallocate(
636 task_name_t task_name)
637{
638 return(task_deallocate((task_t)task_name));
639}
640
641
1c79356b
A
642/*
643 * task_terminate:
644 *
645 * Terminate the specified task. See comments on thread_terminate
646 * (kern/thread.c) about problems with terminating the "current task."
647 */
648
649kern_return_t
650task_terminate(
651 task_t task)
652{
653 if (task == TASK_NULL)
91447636
A
654 return (KERN_INVALID_ARGUMENT);
655
1c79356b 656 if (task->bsd_info)
91447636
A
657 return (KERN_FAILURE);
658
1c79356b
A
659 return (task_terminate_internal(task));
660}
661
/*
 * task_terminate_internal:
 *
 * Fully terminate a task: hold and terminate every thread, disable
 * and destroy its IPC, tear down its VM, move it to the terminated
 * list, and drop the task's self-reference.  Locks the target and
 * (when different) the calling task in address order to avoid
 * deadlock with a concurrent termination in the other direction.
 */
kern_return_t
task_terminate_internal(
	task_t			task)
{
	thread_t			thread, self;
	task_t				self_task;
	boolean_t			interrupt_save;

	assert(task != kernel_task);

	self = current_thread();
	self_task = self->task;

	/*
	 * Get the task locked and make sure that we are not racing
	 * with someone else trying to terminate us.
	 */
	if (task == self_task)
		task_lock(task);
	else
	if (task < self_task) {
		/* lock-ordering: always take the lower address first */
		task_lock(task);
		task_lock(self_task);
	}
	else {
		task_lock(self_task);
		task_lock(task);
	}

	if (!task->active) {
		/*
		 * Task is already being terminated.
		 * Just return an error. If we are dying, this will
		 * just get us to our AST special handler and that
		 * will get us to finalize the termination of ourselves.
		 */
		task_unlock(task);
		if (self_task != task)
			task_unlock(self_task);

		return (KERN_FAILURE);
	}

	if (self_task != task)
		task_unlock(self_task);

	/*
	 * Make sure the current thread does not get aborted out of
	 * the waits inside these operations.
	 */
	interrupt_save = thread_interrupt_level(THREAD_UNINT);

	/*
	 * Indicate that we want all the threads to stop executing
	 * at user space by holding the task (we would have held
	 * each thread independently in thread_terminate_internal -
	 * but this way we may be more likely to already find it
	 * held there). Mark the task inactive, and prevent
	 * further task operations via the task port.
	 */
	task_hold_locked(task);
	task->active = FALSE;
	ipc_task_disable(task);

	/*
	 * Terminate each thread in the task.
	 */
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
			thread_terminate_internal(thread);
	}

	task_unlock(task);

#if CONFIG_EMBEDDED
	/*
	 * remove all task watchers
	 */
	task_removewatchers(task);
#endif /* CONFIG_EMBEDDED */

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the IPC space, leaving just a reference for it.
	 */
	ipc_space_terminate(task->itk_space);

	if (vm_map_has_4GB_pagezero(task->map))
		vm_map_clear_4GB_pagezero(task->map);

	/*
	 * If the current thread is a member of the task
	 * being terminated, then the last reference to
	 * the task will not be dropped until the thread
	 * is finally reaped.  To avoid incurring the
	 * expense of removing the address space regions
	 * at reap time, we do it explictly here.
	 */
	vm_map_remove(task->map,
		      task->map->min_offset,
		      task->map->max_offset,
		      VM_MAP_NO_FLAGS);

	/* release our shared region */
	vm_shared_region_set(task, NULL);

	/* Move the task from the live list to the terminated list. */
	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&tasks, task, task_t, tasks);
	queue_enter(&terminated_tasks, task, task_t, tasks);
	tasks_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 * We no longer need to guard against being aborted, so restore
	 * the previous interruptible state.
	 */
	thread_interrupt_level(interrupt_save);

	/*
	 * Get rid of the task active reference on itself.
	 */
	task_deallocate(task);

	return (KERN_SUCCESS);
}
790
791/*
b0d623f7 792 * task_start_halt:
91447636
A
793 *
794 * Shut the current task down (except for the current thread) in
795 * preparation for dramatic changes to the task (probably exec).
b0d623f7
A
796 * We hold the task and mark all other threads in the task for
797 * termination.
1c79356b
A
798 */
/*
 * task_start_halt:
 *
 * Shut the current task down (except for the current thread) in
 * preparation for dramatic changes to the task (probably exec).
 * We hold the task and mark all other threads in the task for
 * termination.  Only callable on the caller's own task; fails if a
 * halt or termination is already in progress.
 */
kern_return_t
task_start_halt(
	task_t		task)
{
	thread_t	thread, self;

	assert(task != kernel_task);

	self = current_thread();

	/* May only halt one's own task. */
	if (task != self->task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	if (task->halting || !task->active || !self->active) {
		/*
		 * Task or current thread is already being terminated.
		 * Hurry up and return out of the current kernel context
		 * so that we run our AST special handler to terminate
		 * ourselves.
		 */
		task_unlock(task);

		return (KERN_FAILURE);
	}

	task->halting = TRUE;

	if (task->thread_count > 1) {

		/*
		 * Mark all the threads to keep them from starting any more
		 * user-level execution.  The thread_terminate_internal code
		 * would do this on a thread by thread basis anyway, but this
		 * gives us a better chance of not having to wait there.
		 */
		task_hold_locked(task);

		/*
		 * Terminate all the other threads in the task.
		 */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != self)
				thread_terminate_internal(thread);
		}

		task_release_locked(task);
	}
	task_unlock(task);
	return KERN_SUCCESS;
}
851
852
853/*
854 * task_complete_halt:
855 *
856 * Complete task halt by waiting for threads to terminate, then clean
857 * up task resources (VM, port namespace, etc...) and then let the
858 * current thread go in the (practically empty) task context.
859 */
/*
 * task_complete_halt:
 *
 * Complete task halt by waiting for threads to terminate, then clean
 * up task resources (VM, port namespace, etc...) and then let the
 * current thread go in the (practically empty) task context.
 */
void
task_complete_halt(task_t task)
{
	task_lock(task);
	assert(task->halting);
	assert(task == current_task());

	/*
	 * Wait for the other threads to get shut down.
	 * When the last other thread is reaped, we'll be
	 * woken up.
	 */
	if (task->thread_count > 1) {
		/* sleep on &task->halting until the last sibling is reaped */
		assert_wait((event_t)&task->halting, THREAD_UNINT);
		task_unlock(task);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		task_unlock(task);
	}

	/*
	 * Give the machine dependent code a chance
	 * to perform cleanup of task-level resources
	 * associated with the current thread before
	 * ripping apart the task.
	 */
	machine_task_terminate(task);

	/*
	 * Destroy all synchronizers owned by the task.
	 */
	task_synchronizer_destroy_all(task);

	/*
	 * Destroy the contents of the IPC space, leaving just
	 * a reference for it.
	 */
	ipc_space_clean(task->itk_space);

	/*
	 * Clean out the address space, as we are going to be
	 * getting a new one.
	 */
	vm_map_remove(task->map, task->map->min_offset,
		      task->map->max_offset, VM_MAP_NO_FLAGS);

	task->halting = FALSE;
}
908
909/*
910 * task_hold_locked:
911 *
912 * Suspend execution of the specified task.
913 * This is a recursive-style suspension of the task, a count of
914 * suspends is maintained.
915 *
916 * CONDITIONS: the task is locked and active.
917 */
918void
919task_hold_locked(
91447636 920 register task_t task)
1c79356b 921{
91447636 922 register thread_t thread;
1c79356b
A
923
924 assert(task->active);
925
9bccf70c
A
926 if (task->suspend_count++ > 0)
927 return;
1c79356b
A
928
929 /*
91447636 930 * Iterate through all the threads and hold them.
1c79356b 931 */
91447636
A
932 queue_iterate(&task->threads, thread, thread_t, task_threads) {
933 thread_mtx_lock(thread);
934 thread_hold(thread);
935 thread_mtx_unlock(thread);
1c79356b
A
936 }
937}
938
939/*
940 * task_hold:
941 *
942 * Same as the internal routine above, except that is must lock
943 * and verify that the task is active. This differs from task_suspend
944 * in that it places a kernel hold on the task rather than just a
945 * user-level hold. This keeps users from over resuming and setting
946 * it running out from under the kernel.
947 *
948 * CONDITIONS: the caller holds a reference on the task
949 */
950kern_return_t
91447636
A
951task_hold(
952 register task_t task)
1c79356b 953{
1c79356b
A
954 if (task == TASK_NULL)
955 return (KERN_INVALID_ARGUMENT);
91447636 956
1c79356b 957 task_lock(task);
91447636 958
1c79356b
A
959 if (!task->active) {
960 task_unlock(task);
91447636 961
1c79356b
A
962 return (KERN_FAILURE);
963 }
1c79356b 964
91447636
A
965 task_hold_locked(task);
966 task_unlock(task);
967
968 return (KERN_SUCCESS);
1c79356b
A
969}
970
316670eb
A
971kern_return_t
972task_wait(
973 task_t task,
974 boolean_t until_not_runnable)
975{
976 if (task == TASK_NULL)
977 return (KERN_INVALID_ARGUMENT);
978
979 task_lock(task);
980
981 if (!task->active) {
982 task_unlock(task);
983
984 return (KERN_FAILURE);
985 }
986
987 task_wait_locked(task, until_not_runnable);
988 task_unlock(task);
989
990 return (KERN_SUCCESS);
991}
992
1c79356b 993/*
91447636
A
994 * task_wait_locked:
995 *
1c79356b
A
996 * Wait for all threads in task to stop.
997 *
998 * Conditions:
999 * Called with task locked, active, and held.
1000 */
1001void
1002task_wait_locked(
316670eb
A
1003 register task_t task,
1004 boolean_t until_not_runnable)
1c79356b 1005{
91447636 1006 register thread_t thread, self;
1c79356b
A
1007
1008 assert(task->active);
1009 assert(task->suspend_count > 0);
1010
91447636
A
1011 self = current_thread();
1012
1c79356b 1013 /*
91447636 1014 * Iterate through all the threads and wait for them to
1c79356b
A
1015 * stop. Do not wait for the current thread if it is within
1016 * the task.
1017 */
91447636
A
1018 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1019 if (thread != self)
316670eb 1020 thread_wait(thread, until_not_runnable);
1c79356b
A
1021 }
1022}
1023
1024/*
1025 * task_release_locked:
1026 *
1027 * Release a kernel hold on a task.
1028 *
1029 * CONDITIONS: the task is locked and active
1030 */
1031void
1032task_release_locked(
91447636 1033 register task_t task)
1c79356b 1034{
91447636 1035 register thread_t thread;
1c79356b
A
1036
1037 assert(task->active);
9bccf70c 1038 assert(task->suspend_count > 0);
1c79356b 1039
9bccf70c
A
1040 if (--task->suspend_count > 0)
1041 return;
1c79356b 1042
91447636
A
1043 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1044 thread_mtx_lock(thread);
1045 thread_release(thread);
1046 thread_mtx_unlock(thread);
1c79356b
A
1047 }
1048}
1049
1050/*
1051 * task_release:
1052 *
1053 * Same as the internal routine above, except that it must lock
1054 * and verify that the task is active.
1055 *
1056 * CONDITIONS: The caller holds a reference to the task
1057 */
1058kern_return_t
91447636
A
1059task_release(
1060 task_t task)
1c79356b 1061{
1c79356b
A
1062 if (task == TASK_NULL)
1063 return (KERN_INVALID_ARGUMENT);
91447636 1064
1c79356b 1065 task_lock(task);
91447636 1066
1c79356b
A
1067 if (!task->active) {
1068 task_unlock(task);
91447636 1069
1c79356b
A
1070 return (KERN_FAILURE);
1071 }
1c79356b 1072
91447636
A
1073 task_release_locked(task);
1074 task_unlock(task);
1075
1076 return (KERN_SUCCESS);
1c79356b
A
1077}
1078
/*
 *	task_threads:
 *
 *	Return an array of send rights for all threads in the task.
 *	The caller receives one reference (converted to a port) per
 *	thread; ownership of the returned kalloc'ed array transfers
 *	to the caller (normally consumed by MIG).
 */
kern_return_t
task_threads(
	task_t					task,
	thread_act_array_t		*threads_out,
	mach_msg_type_number_t	*count)
{
	mach_msg_type_number_t	actual;
	thread_t				*thread_list;
	thread_t				thread;
	vm_size_t				size, size_needed;
	void					*addr;
	unsigned int			i, j;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	size = 0; addr = NULL;

	/*
	 * Allocate-and-retry loop: the thread count can change while
	 * the task is unlocked for allocation, so loop until the
	 * buffer sized under the lock is large enough.
	 */
	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0)
				kfree(addr, size);

			return (KERN_FAILURE);
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof (mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return (KERN_RESOURCE_SHORTAGE);
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	/*
	 * Take an internal reference on each thread while still
	 * holding the task lock, so the threads cannot disappear
	 * once the lock is dropped below.
	 */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
				++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof (mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	}
	else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* Drop the per-thread references taken above. */
				for (i = 0; i < actual; ++i)
					thread_deallocate(thread_list[i]);
				kfree(addr, size);
				return (KERN_RESOURCE_SHORTAGE);
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that Mig should handle */

		/*
		 * Convert each thread reference to a port right in place;
		 * convert_thread_to_port consumes the thread reference.
		 */
		for (i = 0; i < actual; ++i)
			((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
	}

	return (KERN_SUCCESS);
}
1187
316670eb
A
1188static kern_return_t
1189place_task_hold (
1c79356b 1190 register task_t task)
316670eb 1191{
1c79356b 1192 if (!task->active) {
1c79356b
A
1193 return (KERN_FAILURE);
1194 }
91447636
A
1195
1196 if (task->user_stop_count++ > 0) {
1c79356b
A
1197 /*
1198 * If the stop count was positive, the task is
1199 * already stopped and we can exit.
1200 */
1c79356b
A
1201 return (KERN_SUCCESS);
1202 }
1203
1204 /*
1205 * Put a kernel-level hold on the threads in the task (all
1206 * user-level task suspensions added together represent a
1207 * single kernel-level hold). We then wait for the threads
1208 * to stop executing user code.
1209 */
1210 task_hold_locked(task);
316670eb
A
1211 task_wait_locked(task, TRUE);
1212
1213 return (KERN_SUCCESS);
1214}
1215
1216static kern_return_t
1217release_task_hold (
1218 register task_t task,
1219 boolean_t pidresume)
1220{
1221 register boolean_t release = FALSE;
1222
1223 if (!task->active) {
1224 return (KERN_FAILURE);
1225 }
1226
1227 if (pidresume) {
1228 if (task->pidsuspended == FALSE) {
1229 return (KERN_FAILURE);
1230 }
1231 task->pidsuspended = FALSE;
1232 }
1233
1234 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
1235 if (--task->user_stop_count == 0) {
1236 release = TRUE;
1237 }
1238 }
1239 else {
1240 return (KERN_FAILURE);
1241 }
1242
1243 /*
1244 * Release the task if necessary.
1245 */
1246 if (release)
1247 task_release_locked(task);
1248
1249 return (KERN_SUCCESS);
1250}
1251
1252/*
1253 * task_suspend:
1254 *
1255 * Implement a user-level suspension on a task.
1256 *
1257 * Conditions:
1258 * The caller holds a reference to the task
1259 */
1260kern_return_t
1261task_suspend(
1262 register task_t task)
1263{
1264 kern_return_t kr;
1265
1266 if (task == TASK_NULL || task == kernel_task)
1267 return (KERN_INVALID_ARGUMENT);
1268
1269 task_lock(task);
1270
1271 kr = place_task_hold(task);
91447636 1272
1c79356b 1273 task_unlock(task);
91447636 1274
316670eb 1275 return (kr);
1c79356b
A
1276}
1277
1278/*
91447636 1279 * task_resume:
1c79356b
A
1280 * Release a kernel hold on a task.
1281 *
1282 * Conditions:
1283 * The caller holds a reference to the task
1284 */
1285kern_return_t
91447636
A
1286task_resume(
1287 register task_t task)
1c79356b 1288{
316670eb 1289 kern_return_t kr;
1c79356b 1290
91447636
A
1291 if (task == TASK_NULL || task == kernel_task)
1292 return (KERN_INVALID_ARGUMENT);
1c79356b 1293
1c79356b 1294 task_lock(task);
91447636 1295
316670eb 1296 kr = release_task_hold(task, FALSE);
91447636 1297
316670eb 1298 task_unlock(task);
91447636 1299
316670eb
A
1300 return (kr);
1301}
1302
1303kern_return_t
1304task_pidsuspend_locked(task_t task)
1305{
1306 kern_return_t kr;
1307
1308 if (task->pidsuspended) {
1309 kr = KERN_FAILURE;
1310 goto out;
1c79356b 1311 }
91447636 1312
316670eb
A
1313 task->pidsuspended = TRUE;
1314
1315 kr = place_task_hold(task);
1316 if (kr != KERN_SUCCESS) {
1317 task->pidsuspended = FALSE;
1c79356b 1318 }
316670eb
A
1319out:
1320 return(kr);
1321}
1c79356b 1322
316670eb
A
1323
1324/*
1325 * task_pidsuspend:
1326 *
1327 * Suspends a task by placing a hold on its threads.
1328 *
1329 * Conditions:
1330 * The caller holds a reference to the task
1331 */
1332kern_return_t
1333task_pidsuspend(
1334 register task_t task)
1335{
1336 kern_return_t kr;
1337
1338 if (task == TASK_NULL || task == kernel_task)
1339 return (KERN_INVALID_ARGUMENT);
1340
1341 task_lock(task);
1342
1343 kr = task_pidsuspend_locked(task);
1c79356b
A
1344
1345 task_unlock(task);
91447636 1346
316670eb
A
1347 return (kr);
1348}
1349
1350/* If enabled, we bring all the frozen pages back in prior to resumption; otherwise, they're faulted back in on demand */
1351#define THAW_ON_RESUME 1
1352
/*
 *	task_pidresume:
 *		Resumes a previously pid-suspended task.
 *
 *	With THAW_ON_RESUME, a task that was frozen while suspended is
 *	thawed here as well (otherwise frozen pages fault back in on
 *	demand).
 *		
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t 
task_pidresume(
	register task_t	task)
{
	kern_return_t	 kr;
#if (CONFIG_FREEZE && THAW_ON_RESUME)
	boolean_t frozen;
#endif

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

#if (CONFIG_FREEZE && THAW_ON_RESUME)
	/* Snapshot and clear the frozen state under the task lock. */
	frozen = task->frozen;
	task->frozen = FALSE;
#endif

	kr = release_task_hold(task, TRUE);

	task_unlock(task);

#if (CONFIG_FREEZE && THAW_ON_RESUME)
	/*
	 * Thaw outside the task lock.  NOTE(review): task->map is read
	 * here without the lock -- presumably safe because the caller's
	 * task reference keeps the map alive; confirm against callers.
	 */
	if ((kr == KERN_SUCCESS) && (frozen == TRUE)) {
		kr = vm_map_thaw(task->map);
	}
#endif

	return (kr);
}
1391
6d2010ae
A
1392#if CONFIG_FREEZE
1393
/*
 *	task_freeze:
 *
 *	Freeze a task: compress/swap out its pages via the VM map
 *	freezer.  With walk_only, just tally the page counts without
 *	freezing or marking the task frozen.
 *
 * Conditions:
 * 	The caller holds a reference to the task
 */
kern_return_t
task_freeze(
	register task_t    task,
	uint32_t           *purgeable_count,
	uint32_t           *wired_count,
	uint32_t           *clean_count,
	uint32_t           *dirty_count,
	uint32_t           dirty_budget,
	boolean_t          *shared,
	boolean_t          walk_only)
{
	kern_return_t kr;
	
	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	/* Refuse to double-freeze. */
	if (task->frozen) {
		task_unlock(task);
		return (KERN_FAILURE);
	}

	/* Mark frozen under the lock; the actual freeze happens below,
	 * after the lock is dropped. */
	if (walk_only == FALSE) {
		task->frozen = TRUE;
	}

	task_unlock(task);

	if (walk_only) {
		kr = vm_map_freeze_walk(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
	} else {
		kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
	}

	return (kr);
}
1439
1440/*
1441 * task_thaw:
1442 *
1443 * Thaw a currently frozen task.
1444 *
1445 * Conditions:
1446 * The caller holds a reference to the task
1447 */
1448kern_return_t
1449task_thaw(
1450 register task_t task)
1451{
316670eb
A
1452 kern_return_t kr;
1453
6d2010ae
A
1454 if (task == TASK_NULL || task == kernel_task)
1455 return (KERN_INVALID_ARGUMENT);
1456
316670eb
A
1457 task_lock(task);
1458
1459 if (!task->frozen) {
1460 task_unlock(task);
1461 return (KERN_FAILURE);
1462 }
1463
1464 task->frozen = FALSE;
6d2010ae 1465
316670eb
A
1466 task_unlock(task);
1467
1468 kr = vm_map_thaw(task->map);
1469
1470 return (kr);
6d2010ae
A
1471}
1472
1473#endif /* CONFIG_FREEZE */
1474
1c79356b
A
/*
 *	host_security_set_task_token:
 *
 *	Install new security and audit tokens on a task (requires the
 *	host security port), and reset the task's host special port to
 *	either the privileged host port or the plain host port depending
 *	on whether the caller supplied host_priv.
 */
kern_return_t
host_security_set_task_token(
        host_security_t  host_security,
        task_t		 task,
        security_token_t sec_token,
        audit_token_t	 audit_token,
	host_priv_t	 host_priv)
{
	ipc_port_t	 host_port;
	kern_return_t	 kr;

	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	/* Swap in the new tokens under the task lock. */
	task_lock(task);
	task->sec_token = sec_token;
	task->audit_token = audit_token;
	task_unlock(task);

	if (host_priv != HOST_PRIV_NULL) {
		kr = host_get_host_priv_port(host_priv, &host_port);
	} else {
		kr = host_get_host_port(host_priv_self(), &host_port);
	}
	assert(kr == KERN_SUCCESS);
	/* task_set_special_port consumes the port right on success. */
	kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
	return(kr);
}
1506
1c79356b
A
/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short circuited tasks.  Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time).  It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 *
 * No settable flavors are currently implemented, so every flavor
 * (and a null task) yields KERN_INVALID_ARGUMENT.
 */
kern_return_t
task_set_info(
	task_t		task,
	task_flavor_t	flavor,
	__unused task_info_t	task_info_in,		/* pointer to IN array */
	__unused mach_msg_type_number_t	task_info_count)
{
	if (task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	/* Placeholder switch: every flavor is currently rejected. */
	switch (flavor) {
	    default:
		return (KERN_INVALID_ARGUMENT);
	}
	/* NOTREACHED: kept for when settable flavors are added. */
	return (KERN_SUCCESS);
}
1531
/*
 *	task_info:
 *
 *	Return information about the task according to the requested
 *	flavor.  On entry *task_info_count is the caller's buffer size
 *	in natural_t units; on success it is updated to the size of the
 *	data actually returned.  The task lock is held across the whole
 *	switch (except where a case explicitly drops it).
 */
kern_return_t
task_info(
	task_t			task,
	task_flavor_t		flavor,
	task_info_t		task_info_out,
	mach_msg_type_number_t	*task_info_count)
{
	kern_return_t error = KERN_SUCCESS;

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	task_lock(task);

	/* A task other than the caller's must still be active. */
	if ((task != current_task()) && (!task->active)) {
		task_unlock(task);
		return (KERN_INVALID_ARGUMENT);
	}

	switch (flavor) {

	case TASK_BASIC_INFO_32:
	case TASK_BASIC2_INFO_32:
	{
		task_basic_info_32_t	basic_info;
		vm_map_t				map;
		clock_sec_t				secs;
		clock_usec_t			usecs;

		if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		basic_info = (task_basic_info_32_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
		if (flavor == TASK_BASIC2_INFO_32) {
			/*
			 * The "BASIC2" flavor gets the maximum resident
			 * size instead of the current resident size...
			 */
			basic_info->resident_size = pmap_resident_max(map->pmap);
		} else {
			basic_info->resident_size = pmap_resident_count(map->pmap);
		}
		basic_info->resident_size *= PAGE_SIZE;

		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds = 
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_32_COUNT;
		break;
	}

	case TASK_BASIC_INFO_64:
	{
		task_basic_info_64_t	basic_info;
		vm_map_t				map;
		clock_sec_t				secs;
		clock_usec_t			usecs;

		if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		basic_info = (task_basic_info_64_t)task_info_out;

		map = (task == kernel_task)? kernel_map: task->map;
		basic_info->virtual_size  = map->size;
		basic_info->resident_size =
			(mach_vm_size_t)(pmap_resident_count(map->pmap))
			* PAGE_SIZE_64;

		basic_info->policy = ((task != kernel_task)?
										  POLICY_TIMESHARE: POLICY_RR);
		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
			(typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
			(typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = TASK_BASIC_INFO_64_COUNT;
		break;
	}

	case MACH_TASK_BASIC_INFO:
	{
		mach_task_basic_info_t  basic_info;
		vm_map_t                map;
		clock_sec_t             secs;
		clock_usec_t            usecs;

		if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		basic_info = (mach_task_basic_info_t)task_info_out;

		map = (task == kernel_task) ? kernel_map : task->map;

		basic_info->virtual_size  = map->size;

		basic_info->resident_size =
		    (mach_vm_size_t)(pmap_resident_count(map->pmap));
		basic_info->resident_size *= PAGE_SIZE_64;

		basic_info->resident_size_max =
		    (mach_vm_size_t)(pmap_resident_max(map->pmap));
		basic_info->resident_size_max *= PAGE_SIZE_64;

		basic_info->policy = ((task != kernel_task) ?
		                      POLICY_TIMESHARE : POLICY_RR);

		basic_info->suspend_count = task->user_stop_count;

		absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
		basic_info->user_time.seconds =
		    (typeof(basic_info->user_time.seconds))secs;
		basic_info->user_time.microseconds = usecs;

		absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
		basic_info->system_time.seconds =
		    (typeof(basic_info->system_time.seconds))secs;
		basic_info->system_time.microseconds = usecs;

		*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
		break;
	}

	case TASK_THREAD_TIMES_INFO:
	{
		register task_thread_times_info_t	times_info;
		register thread_t					thread;

		if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		times_info = (task_thread_times_info_t) task_info_out;
		times_info->user_time.seconds = 0;
		times_info->user_time.microseconds = 0;
		times_info->system_time.seconds = 0;
		times_info->system_time.microseconds = 0;


		/* Sum the per-thread times of live threads. */
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
		    time_value_t	user_time, system_time;

		    thread_read_times(thread, &user_time, &system_time);

		    time_value_add(&times_info->user_time, &user_time);
		    time_value_add(&times_info->system_time, &system_time);
		}


		*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
		break;
	}

	case TASK_ABSOLUTETIME_INFO:
	{
		task_absolutetime_info_t	info;
		register thread_t			thread;

		if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_absolutetime_info_t)task_info_out;
		info->threads_user = info->threads_system = 0;


		/* Start from terminated-thread totals, then add live threads. */
		info->total_user = task->total_user_time;
		info->total_system = task->total_system_time;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			uint64_t	tval;
			spl_t 		x;

			x = splsched();
			thread_lock(thread);

			tval = timer_grab(&thread->user_timer);
			info->threads_user += tval;
			info->total_user += tval;

			tval = timer_grab(&thread->system_timer);
			if (thread->precise_user_kernel_time) {
				info->threads_system += tval;
				info->total_system += tval;
			} else {
				/* system_timer may represent either sys or user */
				info->threads_user += tval;
				info->total_user += tval;
			}

			thread_unlock(thread);
			splx(x);
		}


		*task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
		break;
	}

	case TASK_DYLD_INFO:
	{
		task_dyld_info_t info;

		/*
		 * We added the format field to TASK_DYLD_INFO output.  For
		 * temporary backward compatibility, accept the fact that
		 * clients may ask for the old version - distinquished by the
		 * size of the expected result structure.
		 */
#define TASK_LEGACY_DYLD_INFO_COUNT \
		offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)

		if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_dyld_info_t)task_info_out;
		info->all_image_info_addr = task->all_image_info_addr;
		info->all_image_info_size = task->all_image_info_size;

		/* only set format on output for those expecting it */
		if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
			info->all_image_info_format = task_has_64BitAddr(task) ?
				                 TASK_DYLD_ALL_IMAGE_INFO_64 : 
				                 TASK_DYLD_ALL_IMAGE_INFO_32 ;
			*task_info_count = TASK_DYLD_INFO_COUNT;
		} else {
			*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
		}
		break;
	}

	case TASK_EXTMOD_INFO:
	{
		task_extmod_info_t info;
		void *p;

		if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		info = (task_extmod_info_t)task_info_out;

		/* UUID comes from the BSD proc when one is attached. */
		p = get_bsdtask_info(task);
		if (p) {
			proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
		} else {
			bzero(info->task_uuid, sizeof(info->task_uuid));
		}
		info->extmod_statistics = task->extmod_statistics;
		*task_info_count = TASK_EXTMOD_INFO_COUNT;

		break;
	}

	case TASK_KERNELMEMORY_INFO:
	{
		task_kernelmemory_info_t	tkm_info;
		ledger_amount_t			credit, debit;

		if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
		   error = KERN_INVALID_ARGUMENT;
		   break;
		}

		tkm_info = (task_kernelmemory_info_t) task_info_out;
		tkm_info->total_palloc = 0;
		tkm_info->total_pfree = 0;
		tkm_info->total_salloc = 0;
		tkm_info->total_sfree = 0;

		if (task == kernel_task) {
			/*
			 * All shared allocs/frees from other tasks count against
			 * the kernel private memory usage.  If we are looking up
			 * info for the kernel task, gather from everywhere.
			 */
			task_unlock(task);

			/* start by accounting for all the terminated tasks against the kernel */
			tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
			tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;

			/* count all other task/thread shared alloc/free against the kernel */
			lck_mtx_lock(&tasks_threads_lock);

			/* XXX this really shouldn't be using the function parameter 'task' as a local var! */
			queue_iterate(&tasks, task, task_t, tasks) {
				if (task == kernel_task) {
					if (ledger_get_entries(task->ledger,
					    task_ledgers.tkm_private, &credit,
					    &debit) == KERN_SUCCESS) {
						tkm_info->total_palloc += credit;
						tkm_info->total_pfree += debit;
					}
				}
				if (!ledger_get_entries(task->ledger,
				    task_ledgers.tkm_shared, &credit, &debit)) {
					tkm_info->total_palloc += credit;
					tkm_info->total_pfree += debit;
				}
			}
			lck_mtx_unlock(&tasks_threads_lock);
		} else {
			if (!ledger_get_entries(task->ledger,
			    task_ledgers.tkm_private, &credit, &debit)) {
				tkm_info->total_palloc = credit;
				tkm_info->total_pfree = debit;
			}
			if (!ledger_get_entries(task->ledger,
			    task_ledgers.tkm_shared, &credit, &debit)) {
				tkm_info->total_salloc = credit;
				tkm_info->total_sfree = debit;
			}
			task_unlock(task);
		}

		/* NOTE: the task lock has already been dropped on both paths
		 * above, so this case returns directly instead of breaking. */
		*task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
		return KERN_SUCCESS;
	}

	/* OBSOLETE */
	case TASK_SCHED_FIFO_INFO:
	{

		if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		error = KERN_INVALID_POLICY;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_RR_INFO:
	{
		register policy_rr_base_t	rr_base;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		if (*task_info_count < POLICY_RR_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		rr_base = (policy_rr_base_t) task_info_out;

		if (task != kernel_task) {
			error = KERN_INVALID_POLICY;
			break;
		}

		rr_base->base_priority = task->priority;

		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
		
		rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		*task_info_count = POLICY_RR_BASE_COUNT;
		break;
	}

	/* OBSOLETE */
	case TASK_SCHED_TIMESHARE_INFO:
	{
		register policy_timeshare_base_t	ts_base;

		if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		ts_base = (policy_timeshare_base_t) task_info_out;

		if (task == kernel_task) {
			error = KERN_INVALID_POLICY;
			break;
		}

		ts_base->base_priority = task->priority;

		*task_info_count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	}

	case TASK_SECURITY_TOKEN:
	{
		register security_token_t	*sec_token_p;

		if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		sec_token_p = (security_token_t *) task_info_out;

		*sec_token_p = task->sec_token;

		*task_info_count = TASK_SECURITY_TOKEN_COUNT;
		break;
	}
            
	case TASK_AUDIT_TOKEN:
	{
		register audit_token_t	*audit_token_p;

		if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		audit_token_p = (audit_token_t *) task_info_out;

		*audit_token_p = task->audit_token;

		*task_info_count = TASK_AUDIT_TOKEN_COUNT;
		break;
	}
            
	case TASK_SCHED_INFO:
		error = KERN_INVALID_ARGUMENT;
		break;

	case TASK_EVENTS_INFO:
	{
		register task_events_info_t	events_info;
		register thread_t			thread;

		if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
		   error = KERN_INVALID_ARGUMENT;
		   break;
		}

		events_info = (task_events_info_t) task_info_out;


		events_info->faults = task->faults;
		events_info->pageins = task->pageins;
		events_info->cow_faults = task->cow_faults;
		events_info->messages_sent = task->messages_sent;
		events_info->messages_received = task->messages_received;
		events_info->syscalls_mach = task->syscalls_mach;
		events_info->syscalls_unix = task->syscalls_unix;

		/* Task counters hold terminated-thread totals; add in the
		 * live threads' counters. */
		events_info->csw = task->c_switch;

		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			events_info->csw	   += thread->c_switch;
			events_info->syscalls_mach += thread->syscalls_mach;
			events_info->syscalls_unix += thread->syscalls_unix;
		}


		*task_info_count = TASK_EVENTS_INFO_COUNT;
		break;
	}
	case TASK_AFFINITY_TAG_INFO:
	{
		if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
		    error = KERN_INVALID_ARGUMENT;
		    break;
		}

		error = task_affinity_info(task, task_info_out, task_info_count);
		break;
	}
	default:
		error = KERN_INVALID_ARGUMENT;
	}

	task_unlock(task);
	return (error);
}
2038
2d21ac55
A
/*
 *	task_vtimer_set:
 *
 *	Arm a per-task virtual timer of the given kind and snapshot the
 *	current per-thread timer values into each thread's corresponding
 *	vtimer_*_save field, so later task_vtimer_update() calls can
 *	compute deltas from this point.
 */
void
task_vtimer_set(
	task_t		task,
	integer_t	which)
{
	thread_t	thread;
	spl_t		x;

	/* assert(task == current_task()); */ /* bogus assert 4803227 4807483 */

	task_lock(task);

	task->vtimers |= which;

	switch (which) {

	case TASK_VTIMER_USER:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			/* Without precise accounting the system timer holds
			 * combined time, so snapshot that instead. */
			if (thread->precise_user_kernel_time)
				thread->vtimer_user_save = timer_grab(&thread->user_timer);
			else
				thread->vtimer_user_save = timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_PROF:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			/* Profiling timer counts user + system time. */
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_RLIM:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			/* CPU rlimit timer also counts user + system time. */
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;
	}

	task_unlock(task);
}
2093
2094void
2095task_vtimer_clear(
2096 task_t task,
2097 integer_t which)
2098{
2099 assert(task == current_task());
2100
2101 task_lock(task);
2102
2103 task->vtimers &= ~which;
2104
2105 task_unlock(task);
2106}
2107
/*
 *	task_vtimer_update:
 *
 *	Report, in *microsecs, how much time the current thread has
 *	accumulated on the given virtual timer since the last snapshot
 *	(taken by task_vtimer_set or a prior update).  Only the calling
 *	thread's timers are consulted.
 */
void
task_vtimer_update(
__unused
	task_t		task,
	integer_t	which,
	uint32_t	*microsecs)
{
	thread_t	thread = current_thread();
	uint32_t	tdelt;
	clock_sec_t	secs;
	uint64_t	tsum;

	assert(task == current_task());

	assert(task->vtimers & which);

	secs = tdelt = 0;

	switch (which) {

	case TASK_VTIMER_USER:
		/* timer_delta() also advances vtimer_user_save. */
		if (thread->precise_user_kernel_time) {
			tdelt = (uint32_t)timer_delta(&thread->user_timer,
								&thread->vtimer_user_save);
		} else {
			tdelt = (uint32_t)timer_delta(&thread->system_timer,
								&thread->vtimer_user_save);
		}
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		/* if the time delta is smaller than a usec, ignore */
		if (*microsecs != 0)
			thread->vtimer_prof_save = tsum;
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
		thread->vtimer_rlim_save = tsum;
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;
	}

}
2159
1c79356b
A
2160/*
2161 * task_assign:
2162 *
2163 * Change the assigned processor set for the task
2164 */
2165kern_return_t
2166task_assign(
91447636
A
2167 __unused task_t task,
2168 __unused processor_set_t new_pset,
2169 __unused boolean_t assign_threads)
1c79356b 2170{
1c79356b
A
2171 return(KERN_FAILURE);
2172}
2173
2174/*
2175 * task_assign_default:
2176 *
2177 * Version of task_assign to assign to default processor set.
2178 */
2179kern_return_t
2180task_assign_default(
2181 task_t task,
2182 boolean_t assign_threads)
2183{
2d21ac55 2184 return (task_assign(task, &pset0, assign_threads));
1c79356b
A
2185}
2186
2187/*
2188 * task_get_assignment
2189 *
2190 * Return name of processor set that task is assigned to.
2191 */
2192kern_return_t
2193task_get_assignment(
2194 task_t task,
2195 processor_set_t *pset)
2196{
2197 if (!task->active)
2198 return(KERN_FAILURE);
2199
2d21ac55
A
2200 *pset = &pset0;
2201
2202 return (KERN_SUCCESS);
1c79356b
A
2203}
2204
2205
2206/*
2207 * task_policy
2208 *
2209 * Set scheduling policy and parameters, both base and limit, for
2210 * the given task. Policy must be a policy which is enabled for the
2211 * processor set. Change contained threads if requested.
2212 */
2213kern_return_t
2214task_policy(
91447636
A
2215 __unused task_t task,
2216 __unused policy_t policy_id,
2217 __unused policy_base_t base,
2218 __unused mach_msg_type_number_t count,
2219 __unused boolean_t set_limit,
2220 __unused boolean_t change)
1c79356b
A
2221{
2222 return(KERN_FAILURE);
2223}
2224
2225/*
2226 * task_set_policy
2227 *
2228 * Set scheduling policy and parameters, both base and limit, for
2229 * the given task. Policy can be any policy implemented by the
2230 * processor set, whether enabled or not. Change contained threads
2231 * if requested.
2232 */
2233kern_return_t
2234task_set_policy(
91447636
A
2235 __unused task_t task,
2236 __unused processor_set_t pset,
2237 __unused policy_t policy_id,
2238 __unused policy_base_t base,
2239 __unused mach_msg_type_number_t base_count,
2240 __unused policy_limit_t limit,
2241 __unused mach_msg_type_number_t limit_count,
2242 __unused boolean_t change)
1c79356b
A
2243{
2244 return(KERN_FAILURE);
2245}
2246
91447636 2247#if FAST_TAS
1c79356b
A
2248kern_return_t
2249task_set_ras_pc(
2250 task_t task,
2251 vm_offset_t pc,
2252 vm_offset_t endpc)
2253{
1c79356b
A
2254 extern int fast_tas_debug;
2255
2256 if (fast_tas_debug) {
2257 printf("task 0x%x: setting fast_tas to [0x%x, 0x%x]\n",
2258 task, pc, endpc);
2259 }
2260 task_lock(task);
2261 task->fast_tas_base = pc;
2262 task->fast_tas_end = endpc;
2263 task_unlock(task);
2264 return KERN_SUCCESS;
91447636 2265}
1c79356b 2266#else /* FAST_TAS */
91447636
A
2267kern_return_t
2268task_set_ras_pc(
2269 __unused task_t task,
2270 __unused vm_offset_t pc,
2271 __unused vm_offset_t endpc)
2272{
1c79356b 2273 return KERN_FAILURE;
1c79356b 2274}
91447636 2275#endif /* FAST_TAS */
1c79356b
A
2276
2277void
2278task_synchronizer_destroy_all(task_t task)
2279{
2280 semaphore_t semaphore;
2281 lock_set_t lock_set;
2282
2283 /*
2284 * Destroy owned semaphores
2285 */
2286
2287 while (!queue_empty(&task->semaphore_list)) {
2288 semaphore = (semaphore_t) queue_first(&task->semaphore_list);
2289 (void) semaphore_destroy(task, semaphore);
2290 }
2291
2292 /*
2293 * Destroy owned lock sets
2294 */
2295
2296 while (!queue_empty(&task->lock_set_list)) {
2297 lock_set = (lock_set_t) queue_first(&task->lock_set_list);
2298 (void) lock_set_destroy(task, lock_set);
2299 }
2300}
2301
b0d623f7
A
2302/*
2303 * Install default (machine-dependent) initial thread state
2304 * on the task. Subsequent thread creation will have this initial
2305 * state set on the thread by machine_thread_inherit_taskwide().
2306 * Flavors and structures are exactly the same as those to thread_set_state()
2307 */
2308kern_return_t
2309task_set_state(
2310 task_t task,
2311 int flavor,
2312 thread_state_t state,
2313 mach_msg_type_number_t state_count)
2314{
2315 kern_return_t ret;
2316
2317 if (task == TASK_NULL) {
2318 return (KERN_INVALID_ARGUMENT);
2319 }
2320
2321 task_lock(task);
2322
2323 if (!task->active) {
2324 task_unlock(task);
2325 return (KERN_FAILURE);
2326 }
2327
2328 ret = machine_task_set_state(task, flavor, state, state_count);
2329
2330 task_unlock(task);
2331 return ret;
2332}
2333
2334/*
2335 * Examine the default (machine-dependent) initial thread state
2336 * on the task, as set by task_set_state(). Flavors and structures
2337 * are exactly the same as those passed to thread_get_state().
2338 */
2339kern_return_t
2340task_get_state(
2341 task_t task,
2342 int flavor,
2343 thread_state_t state,
2344 mach_msg_type_number_t *state_count)
2345{
2346 kern_return_t ret;
2347
2348 if (task == TASK_NULL) {
2349 return (KERN_INVALID_ARGUMENT);
2350 }
2351
2352 task_lock(task);
2353
2354 if (!task->active) {
2355 task_unlock(task);
2356 return (KERN_FAILURE);
2357 }
2358
2359 ret = machine_task_get_state(task, flavor, state, state_count);
2360
2361 task_unlock(task);
2362 return ret;
2363}
2364
2365
1c79356b
A
2366/*
2367 * We need to export some functions to other components that
2368 * are currently implemented in macros within the osfmk
2369 * component. Just export them as functions of the same name.
2370 */
2371boolean_t is_kerneltask(task_t t)
2372{
2373 if (t == kernel_task)
55e303ae
A
2374 return (TRUE);
2375
2376 return (FALSE);
1c79356b
A
2377}
2378
b0d623f7
A
2379int
2380check_for_tasksuspend(task_t task)
2381{
2382
2383 if (task == TASK_NULL)
2384 return (0);
2385
2386 return (task->suspend_count > 0);
2387}
2388
/*
 * Out-of-line version of the current_task() macro, exported for
 * components that cannot use the macro form.  The #undef removes the
 * macro so a real function of the same name can be defined; the body
 * simply invokes the fast inline path.
 */
#undef current_task
task_t current_task(void);
task_t current_task(void)
{
	return (current_task_fast());
}
91447636
A
2395
2396#undef task_reference
2397void task_reference(task_t task);
2398void
2399task_reference(
2400 task_t task)
2401{
2402 if (task != TASK_NULL)
2403 task_reference_internal(task);
2404}
2d21ac55 2405
6d2010ae
A
2406/*
2407 * This routine is called always with task lock held.
2408 * And it returns a thread handle without reference as the caller
2409 * operates on it under the task lock held.
2410 */
2411thread_t
2412task_findtid(task_t task, uint64_t tid)
2413{
2414 thread_t thread= THREAD_NULL;
2415
2416 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2417 if (thread->thread_id == tid)
316670eb 2418 return(thread);
6d2010ae 2419 }
316670eb 2420 return(THREAD_NULL);
6d2010ae
A
2421}
2422
2423
2d21ac55
A
2424#if CONFIG_MACF_MACH
2425/*
2426 * Protect 2 task labels against modification by adding a reference on
2427 * both label handles. The locks do not actually have to be held while
2428 * using the labels as only labels with one reference can be modified
2429 * in place.
2430 */
2431
2432void
2433tasklabel_lock2(
2434 task_t a,
2435 task_t b)
2436{
2437 labelh_reference(a->label);
2438 labelh_reference(b->label);
2439}
2440
void
tasklabel_unlock2(
	task_t a,
	task_t b)
{
	/* Drop the label-handle references taken by tasklabel_lock2(). */
	labelh_release(a->label);
	labelh_release(b->label);
}
2449
/*
 * Update the task's MAC label from `pl', then propagate the change to
 * the label of the task's self port (itk_self).
 */
void
mac_task_label_update_internal(
	struct label *pl,
	struct task *task)
{

	tasklabel_lock(task);
	/* labelh_modify() presumably yields a handle safe to write — see label code. */
	task->label = labelh_modify(task->label);
	mac_task_label_update(pl, &task->maclabel);
	tasklabel_unlock(task);
	/* Mirror the update onto the self port's label under the port lock. */
	ip_lock(task->itk_self);
	mac_port_label_update_cred(pl, &task->itk_self->ip_label);
	ip_unlock(task->itk_self);
}
2464
/*
 * Apply the caller-supplied callback `f' to the task's MAC label,
 * holding the task label lock; `arg' is passed through to `f'.
 */
void
mac_task_label_modify(
	struct task *task,
	void *arg,
	void (*f) (struct label *l, void *arg))
{

	tasklabel_lock(task);
	task->label = labelh_modify(task->label);
	(*f)(&task->maclabel, arg);
	tasklabel_unlock(task);
}
2477
/* Return a pointer to the task's embedded MAC label (no reference taken). */
struct label *
mac_task_get_label(struct task *task)
{
	return (&task->maclabel);
}
2483#endif