X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..21362eb3e66fd2c787aee132bce100a44d71a99c:/osfmk/kern/task.c

diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c
index 0eaf47eaa..62176da70 100644
--- a/osfmk/kern/task.c
+++ b/osfmk/kern/task.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_FREE_COPYRIGHT@
@@ -77,19 +83,22 @@
 #include
 #include
 #include
-#include
 #include
+#include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
-#include
-#include
+
+#include
 #include
 #include
+
+#include
 #include
 #include
 #include
@@ -98,22 +107,29 @@
 #include
 #include
 #include        /* for thread_wakeup */
-#include
-#include        /*** ??? fix so this can be removed ***/
 #include
 #include
 #include
-#include        /* for kernel_map, ipc_kernel_map */
+#include
+#include
 #include
 #include
 #include
+
+#include
+#include
+#include        /* for kernel_map, ipc_kernel_map */
+#include
+#include        /* for vm_map_remove_commpage64 */
+
 #if MACH_KDB
 #include
 #endif  /* MACH_KDB */
 
-#if TASK_SWAPPER
-#include
-#endif  /* TASK_SWAPPER */
+#ifdef __ppc__
+#include
+#include
+#endif
 
 /*
  * Exported interfaces
 */
@@ -122,6 +138,10 @@
 #include
 #include
 #include
+#include
+
+#include
+#include
 
 task_t          kernel_task;
 zone_t          task_zone;
@@ -134,19 +154,71 @@ void task_wait_locked(
            task_t      task);
 void       task_release_locked(
            task_t      task);
-void       task_collect_scan(void);
 void       task_free(
            task_t      task );
 void       task_synchronizer_destroy_all(
            task_t      task);
-void       task_subsystem_destroy_all(
-           task_t      task);
 
 kern_return_t  task_set_ledger(
            task_t      task,
            ledger_t    wired,
            ledger_t    paged);
 
+void
+task_backing_store_privileged(
+        task_t task)
+{
+    task_lock(task);
+    task->priv_flags |= VM_BACKING_STORE_PRIV;
+    task_unlock(task);
+    return;
+}
+
+void
+task_working_set_disable(task_t task)
+{
+    struct tws_hash *ws;
+
+    task_lock(task);
+    ws = task->dynamic_working_set;
+    task->dynamic_working_set = NULL;
+    task_unlock(task);
+    if (ws) {
+        tws_hash_ws_flush(ws);
+        tws_hash_destroy(ws);
+    }
+}
+
+void
+task_set_64bit(
+        task_t task,
+        boolean_t is64bit)
+{
+    if(is64bit) {
+        /* LP64todo - no task working set for 64-bit */
+        task_set_64BitAddr(task);
+        task_working_set_disable(task);
+        task->map->max_offset = MACH_VM_MAX_ADDRESS;
+    } else {
+        /*
+         * Deallocate all memory previously allocated
+         * above the 32-bit address space, since it won't
+         * be accessible anymore.
+         */
+        /* LP64todo - make this clean */
+#ifdef __ppc__
+        vm_map_remove_commpage64(task->map);
+        pmap_unmap_sharedpage(task->map->pmap);    /* Unmap commpage */
+#endif
+        (void) vm_map_remove(task->map,
+                (vm_map_offset_t) VM_MAX_ADDRESS,
+                MACH_VM_MAX_ADDRESS,
+                VM_MAP_NO_FLAGS);
+        task_clear_64BitAddr(task);
+        task->map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS;
+    }
+}
+
 void
 task_init(void)
 {
@@ -156,28 +228,20 @@ task_init(void)
            TASK_CHUNK * sizeof(struct task),
            "tasks");
 
-   eml_init();
-
    /*
     * Create the kernel task as the first task.
-    * Task_create_local must assign to kernel_task as a side effect,
-    * for other initialization. (:-()
     */
-   if (task_create_local(
-           TASK_NULL, FALSE, FALSE, &kernel_task) != KERN_SUCCESS)
+   if (task_create_internal(TASK_NULL, FALSE, &kernel_task) != KERN_SUCCESS)
        panic("task_init\n");
+
    vm_map_deallocate(kernel_task->map);
    kernel_task->map = kernel_map;
-
-#if MACH_ASSERT
-   if (watchacts & WA_TASK)
-       printf("task_init: kernel_task = %x map=%x\n",
-          kernel_task, kernel_map);
-#endif /* MACH_ASSERT */
 }
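A minimal usage sketch for the task_set_64bit() routine added above. The exec-style caller shown here (switch_task_width) is hypothetical, not part of this diff; it only illustrates that the routine both flips the task's 64-bit address flag and grows or trims the map ceiling, so it must run before the new image is mapped.

    /*
     * Illustrative sketch, assuming kernel context; not from this diff.
     */
    static void
    switch_task_width(task_t task, boolean_t wants_64bit)
    {
        /* Widen or narrow the address space before mapping the new image. */
        task_set_64bit(task, wants_64bit);

        /* task->map->max_offset now reflects the chosen width. */
    }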
 
 #if MACH_HOST
-void
+
+#if 0
+static void
 task_freeze(
    task_t task)
 {
@@ -187,18 +251,23 @@ task_freeze(
     * wait for that to finish.
     */
    while (task->may_assign == FALSE) {
+       wait_result_t res;
+
        task->assign_active = TRUE;
-       thread_sleep_mutex((event_t) &task->assign_active,
-                   &task->lock, THREAD_INTERRUPTIBLE);
-       task_lock(task);
+       res = thread_sleep_mutex((event_t) &task->assign_active,
+                   &task->lock, THREAD_UNINT);
+       assert(res == THREAD_AWAKENED);
    }
    task->may_assign = FALSE;
    task_unlock(task);
-   return;
 }
+#else
+#define thread_freeze(thread)  assert(task->processor_set == &default_pset)
+#endif
 
-void
+#if 0
+static void
 task_unfreeze(
    task_t task)
 {
@@ -210,9 +279,12 @@ task_unfreeze(
        thread_wakeup((event_t)&task->assign_active);
    }
    task_unlock(task);
-   return;
 }
+#else
+#define thread_unfreeze(thread)    assert(task->processor_set == &default_pset)
+#endif
+
 #endif /* MACH_HOST */
 
 /*
@@ -221,80 +293,40 @@ task_unfreeze(
  */
 kern_return_t
 kernel_task_create(
-   task_t      parent_task,
-   vm_offset_t map_base,
-   vm_size_t   map_size,
-   task_t      *child_task)
+   __unused task_t     parent_task,
+   __unused vm_offset_t    map_base,
+   __unused vm_size_t  map_size,
+   __unused task_t     *child_task)
 {
-   kern_return_t   result;
-   task_t      new_task;
-   vm_map_t    old_map;
-
-   /*
-    * Create the task.
-    */
-   result = task_create_local(parent_task, FALSE, TRUE, &new_task);
-   if (result != KERN_SUCCESS)
-       return (result);
-
-   /*
-    * Task_create_local creates the task with a user-space map.
-    * We attempt to replace the map and free it afterwards; else
-    * task_deallocate will free it (can NOT set map to null before
-    * task_deallocate, this impersonates a norma placeholder task).
-    * _Mark the memory as pageable_ -- this is what we
-    * want for images (like servers) loaded into the kernel.
-    */
-   if (map_size == 0) {
-       vm_map_deallocate(new_task->map);
-       new_task->map = kernel_map;
-       *child_task = new_task;
-   } else {
-       old_map = new_task->map;
-       if ((result = kmem_suballoc(kernel_map, &map_base,
-                       map_size, TRUE, FALSE,
-                       &new_task->map)) != KERN_SUCCESS) {
-           /*
-            * New task created with ref count of 2 -- decrement by
-            * one to force task deletion.
-            */
-           printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n",
-                  kernel_map, map_base, map_size);
-           --new_task->ref_count;
-           task_deallocate(new_task);
-           return (result);
-       }
-       vm_map_deallocate(old_map);
-       *child_task = new_task;
-   }
-   return (KERN_SUCCESS);
+   return (KERN_INVALID_ARGUMENT);
 }
- */ - printf("kmem_suballoc(%x,%x,%x,1,0,&new) Fails\n", - kernel_map, map_base, map_size); - --new_task->ref_count; - task_deallocate(new_task); - return (result); - } - vm_map_deallocate(old_map); - *child_task = new_task; - } - return (KERN_SUCCESS); + return (KERN_INVALID_ARGUMENT); } kern_return_t task_create( - task_t parent_task, - ledger_port_array_t ledger_ports, - mach_msg_type_number_t num_ledger_ports, - boolean_t inherit_memory, - task_t *child_task) /* OUT */ + task_t parent_task, + __unused ledger_port_array_t ledger_ports, + __unused mach_msg_type_number_t num_ledger_ports, + boolean_t inherit_memory, + task_t *child_task) /* OUT */ { if (parent_task == TASK_NULL) return(KERN_INVALID_ARGUMENT); - return task_create_local( - parent_task, inherit_memory, FALSE, child_task); + return task_create_internal( + parent_task, inherit_memory, child_task); } kern_return_t host_security_create_task_token( - host_security_t host_security, - task_t parent_task, - security_token_t sec_token, - host_priv_t host_priv, - ledger_port_array_t ledger_ports, - mach_msg_type_number_t num_ledger_ports, - boolean_t inherit_memory, - task_t *child_task) /* OUT */ + host_security_t host_security, + task_t parent_task, + security_token_t sec_token, + audit_token_t audit_token, + host_priv_t host_priv, + __unused ledger_port_array_t ledger_ports, + __unused mach_msg_type_number_t num_ledger_ports, + boolean_t inherit_memory, + task_t *child_task) /* OUT */ { kern_return_t result; @@ -304,8 +336,8 @@ host_security_create_task_token( if (host_security == HOST_NULL) return(KERN_INVALID_SECURITY); - result = task_create_local( - parent_task, inherit_memory, FALSE, child_task); + result = task_create_internal( + parent_task, inherit_memory, child_task); if (result != KERN_SUCCESS) return(result); @@ -313,6 +345,7 @@ host_security_create_task_token( result = host_security_set_task_token(host_security, *child_task, sec_token, + audit_token, host_priv); if (result != KERN_SUCCESS) @@ -322,10 +355,9 @@ host_security_create_task_token( } kern_return_t -task_create_local( +task_create_internal( task_t parent_task, boolean_t inherit_memory, - boolean_t kernel_loaded, task_t *child_task) /* OUT */ { task_t new_task; @@ -343,20 +375,17 @@ task_create_local( new_task->map = vm_map_fork(parent_task->map); else new_task->map = vm_map_create(pmap_create(0), - round_page(VM_MIN_ADDRESS), - trunc_page(VM_MAX_ADDRESS), TRUE); + (vm_map_offset_t)(VM_MIN_ADDRESS), + (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE); - mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW); - queue_init(&new_task->subsystem_list); - queue_init(&new_task->thr_acts); + mutex_init(&new_task->lock, 0); + queue_init(&new_task->threads); new_task->suspend_count = 0; - new_task->thr_act_count = 0; - new_task->res_act_count = 0; - new_task->active_act_count = 0; + new_task->thread_count = 0; + new_task->active_thread_count = 0; new_task->user_stop_count = 0; - new_task->importance = 0; + new_task->role = TASK_UNSPECIFIED; new_task->active = TRUE; - new_task->kernel_loaded = kernel_loaded; new_task->user_data = 0; new_task->faults = 0; new_task->cow_faults = 0; @@ -364,21 +393,23 @@ task_create_local( new_task->messages_sent = 0; new_task->messages_received = 0; new_task->syscalls_mach = 0; + new_task->priv_flags = 0; new_task->syscalls_unix=0; new_task->csw=0; + new_task->taskFeatures[0] = 0; /* Init task features */ + new_task->taskFeatures[1] = 0; /* Init task features */ + new_task->dynamic_working_set = 0; + + task_working_set_create(new_task, 
+               0, TWS_HASH_STYLE_DEFAULT);
 #ifdef MACH_BSD
    new_task->bsd_info = 0;
 #endif /* MACH_BSD */
 
-#if TASK_SWAPPER
-   new_task->swap_state = TASK_SW_IN;
-   new_task->swap_flags = 0;
-   new_task->swap_ast_waiting = 0;
-   new_task->swap_stamp = sched_tick;
-   new_task->swap_rss = 0;
-   new_task->swap_nswap = 0;
-#endif /* TASK_SWAPPER */
+#ifdef __ppc__
+   if(BootProcInfo.pf.Available & pf64Bit) new_task->taskFeatures[0] |= tf64BitData;    /* If 64-bit machine, show we have 64-bit registers at least */
+#endif
 
    queue_init(&new_task->semaphore_list);
    queue_init(&new_task->lock_set_list);
@@ -389,14 +420,11 @@
    new_task->may_assign = TRUE;
    new_task->assign_active = FALSE;
 #endif /* MACH_HOST */
-   eml_task_reference(new_task, parent_task);
 
    ipc_task_init(new_task, parent_task);
 
-   new_task->total_user_time.seconds = 0;
-   new_task->total_user_time.microseconds = 0;
-   new_task->total_system_time.seconds = 0;
-   new_task->total_system_time.microseconds = 0;
+   new_task->total_user_time = 0;
+   new_task->total_system_time = 0;
 
    task_prof_init(new_task);
 
@@ -412,12 +440,8 @@
        if (!pset->active)
            pset = &default_pset;
 
-       new_task->policy = parent_task->policy;
-
-       new_task->priority = parent_task->priority;
-       new_task->max_priority = parent_task->max_priority;
-
        new_task->sec_token = parent_task->sec_token;
+       new_task->audit_token = parent_task->audit_token;
 
        shared_region_mapping_ref(parent_task->system_shared_region);
        new_task->system_shared_region = parent_task->system_shared_region;
@@ -426,28 +450,27 @@
            convert_port_to_ledger(parent_task->wired_ledger_port));
        new_task->paged_ledger_port = ledger_copy(
            convert_port_to_ledger(parent_task->paged_ledger_port));
+       if(task_has_64BitAddr(parent_task))
+           task_set_64BitAddr(new_task);
    }
    else {
        pset = &default_pset;
 
-       if (kernel_task == TASK_NULL) {
-           new_task->policy = POLICY_RR;
-
-           new_task->priority = MINPRI_KERNBAND;
-           new_task->max_priority = MAXPRI_KERNBAND;
-       }
-       else {
-           new_task->policy = POLICY_TIMESHARE;
-
-           new_task->priority = BASEPRI_DEFAULT;
-           new_task->max_priority = MAXPRI_HIGHBAND;
-       }
-
        new_task->sec_token = KERNEL_SECURITY_TOKEN;
+       new_task->audit_token = KERNEL_AUDIT_TOKEN;
        new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
        new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
    }
 
+   if (kernel_task == TASK_NULL) {
+       new_task->priority = BASEPRI_KERNEL;
+       new_task->max_priority = MAXPRI_KERNEL;
+   }
+   else {
+       new_task->priority = BASEPRI_DEFAULT;
+       new_task->max_priority = MAXPRI_USER;
+   }
+
    pset_lock(pset);
    pset_add_task(pset, new_task);
    pset_unlock(pset);
@@ -456,147 +479,45 @@
    task_unfreeze(parent_task);
 #endif /* MACH_HOST */
 
-#if FAST_TAS
-   if (inherit_memory) {
-       new_task->fast_tas_base = parent_task->fast_tas_base;
-       new_task->fast_tas_end = parent_task->fast_tas_end;
-   } else {
-       new_task->fast_tas_base = (vm_offset_t)0;
-       new_task->fast_tas_end = (vm_offset_t)0;
-   }
-#endif /* FAST_TAS */
+   if (vm_backing_store_low && parent_task != NULL)
+       new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
 
    ipc_task_enable(new_task);
 
-#if TASK_SWAPPER
-   task_swapout_eligible(new_task);
-#endif /* TASK_SWAPPER */
-
-#if MACH_ASSERT
-   if (watchacts & WA_TASK)
-       printf("*** task_create_local(par=%x inh=%x) == 0x%x\n",
-          parent_task, inherit_memory, new_task);
-#endif /* MACH_ASSERT */
-
    *child_task = new_task;
    return(KERN_SUCCESS);
 }
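The user-visible entry point that funnels into task_create_internal() above is the MIG routine task_create(). A hedged user-space sketch, assuming the Mach API of this era (the ledger array may be passed as NULL, and inherit_memory FALSE skips the vm_map_fork() of the caller's map):

    #include <mach/mach.h>
    #include <mach/mach_error.h>
    #include <stdio.h>

    int
    main(void)
    {
        task_t child = TASK_NULL;
        kern_return_t kr;

        /* FALSE: do not fork the caller's address space into the child. */
        kr = task_create(mach_task_self(), NULL, 0, FALSE, &child);
        if (kr != KERN_SUCCESS) {
            fprintf(stderr, "task_create: %s\n", mach_error_string(kr));
            return 1;
        }

        /* The child has no threads yet; tear it down when done. */
        (void) task_terminate(child);
        mach_port_deallocate(mach_task_self(), child);
        return 0;
    }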
 
 /*
- * task_free:
+ * task_deallocate:
  *
- * Called by task_deallocate when the task's reference count drops to zero.
- * Task is locked.
+ * Drop a reference on a task.
  */
 void
-task_free(
+task_deallocate(
    task_t      task)
 {
-   processor_set_t pset;
-
-#if MACH_ASSERT
-   assert(task != 0);
-   if (watchacts & (WA_EXIT|WA_TASK))
-       printf("task_free(%x(%d)) map ref %d\n", task, task->ref_count,
-          task->map->ref_count);
-#endif /* MACH_ASSERT */
+   processor_set_t pset;
 
-#if TASK_SWAPPER
-   /* task_terminate guarantees that this task is off the list */
-   assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
-#endif /* TASK_SWAPPER */
+   if (task == TASK_NULL)
+       return;
 
-   eml_task_deallocate(task);
+   if (task_deallocate_internal(task) > 0)
+       return;
 
-   /*
-    * Temporarily restore the reference we dropped above, then
-    * freeze the task so that the task->processor_set field
-    * cannot change. In the !MACH_HOST case, the logic can be
-    * simplified, since the default_pset is the only pset.
-    */
-   ++task->ref_count;
-   task_unlock(task);
-#if MACH_HOST
-   task_freeze(task);
-#endif /* MACH_HOST */
+
    pset = task->processor_set;
-   pset_lock(pset);
-   task_lock(task);
-   if (--task->ref_count > 0) {
-       /*
-        * A new reference appeared (probably from the pset).
-        * Back out. Must unfreeze inline since we'already
-        * dropped our reference.
-        */
-#if MACH_HOST
-       assert(task->may_assign == FALSE);
-       task->may_assign = TRUE;
-       if (task->assign_active == TRUE) {
-           task->assign_active = FALSE;
-           thread_wakeup((event_t)&task->assign_active);
-       }
-#endif /* MACH_HOST */
-       task_unlock(task);
-       pset_unlock(pset);
-       return;
-   }
-   pset_remove_task(pset,task);
-   task_unlock(task);
-   pset_unlock(pset);
    pset_deallocate(pset);
+
+   if(task->dynamic_working_set)
+       tws_hash_destroy(task->dynamic_working_set);
 
    ipc_task_terminate(task);
 
-   shared_region_mapping_dealloc(task->system_shared_region);
-
-   if (task->kernel_loaded)
-       vm_map_remove(kernel_map, task->map->min_offset,
-                 task->map->max_offset, VM_MAP_NO_FLAGS);
    vm_map_deallocate(task->map);
    is_release(task->itk_space);
-   task_prof_deallocate(task);
-   zfree(task_zone, (vm_offset_t) task);
-}
-
-void
-task_deallocate(
-   task_t      task)
-{
-   if (task != TASK_NULL) {
-       int c;
-
-       task_lock(task);
-       c = --task->ref_count;
-       if (c == 0)
-           task_free(task);    /* unlocks task */
-       else
-           task_unlock(task);
-   }
-}
-
-void
-task_reference(
-   task_t      task)
-{
-   if (task != TASK_NULL) {
-       task_lock(task);
-       task->ref_count++;
-       task_unlock(task);
-   }
-}
 
-boolean_t
-task_reference_try(
-   task_t      task)
-{
-   if (task != TASK_NULL) {
-       if (task_lock_try(task)) {
-           task->ref_count++;
-           task_unlock(task);
-           return TRUE;
-       }
-   }
-   return FALSE;
+   task_prof_deallocate(task);
+   zfree(task_zone, task);
 }
 
 /*
@@ -611,57 +532,45 @@
 task_terminate(
    task_t      task)
 {
    if (task == TASK_NULL)
-       return(KERN_INVALID_ARGUMENT);
+       return (KERN_INVALID_ARGUMENT);
+
    if (task->bsd_info)
-       return(KERN_FAILURE);
+       return (KERN_FAILURE);
+
    return (task_terminate_internal(task));
 }
 
 kern_return_t
 task_terminate_internal(
-   task_t      task)
+   task_t          task)
 {
-   thread_act_t    thr_act, cur_thr_act;
-   task_t      cur_task;
+   processor_set_t     pset;
+   thread_t        thread, self;
+   task_t          self_task;
+   boolean_t       interrupt_save;
 
    assert(task != kernel_task);
 
-   cur_thr_act = current_act();
-   cur_task = cur_thr_act->task;
-
-#if TASK_SWAPPER
-   /*
-    * If task is not resident (swapped out, or being swapped
-    * out), we want to bring it back in (this can block).
-    * NOTE: The only way that this can happen in the current
-    * system is if the task is swapped while it has a thread
-    * in exit(), and the thread does not hit a clean point
-    * to swap itself before getting here.
-    * Terminating other tasks is another way to this code, but
-    * it is not yet fully supported.
-    * The task_swapin is unconditional. It used to be done
-    * only if the task is not resident. Swapping in a
-    * resident task will prevent it from being swapped out
-    * while it terminates.
-    */
-   task_swapin(task, TRUE);    /* TRUE means make it unswappable */
-#endif /* TASK_SWAPPER */
+   self = current_thread();
+   self_task = self->task;
 
    /*
     * Get the task locked and make sure that we are not racing
     * with someone else trying to terminate us.
     */
-   if (task == cur_task) {
+   if (task == self_task)
        task_lock(task);
-   } else if (task < cur_task) {
+   else
+   if (task < self_task) {
        task_lock(task);
-       task_lock(cur_task);
-   } else {
-       task_lock(cur_task);
+       task_lock(self_task);
+   }
+   else {
+       task_lock(self_task);
        task_lock(task);
    }
 
-   if (!task->active || !cur_thr_act->active) {
+   if (!task->active || !self->active) {
        /*
         * Task or current act is already being terminated.
         * Just return an error. If we are dying, this will
@@ -669,12 +578,20 @@ task_terminate_internal(
         * will get us to finalize the termination of ourselves.
         */
        task_unlock(task);
-       if (cur_task != task)
-           task_unlock(cur_task);
-       return(KERN_FAILURE);
+
+       if (self_task != task)
+           task_unlock(self_task);
+
+       return (KERN_FAILURE);
    }
-   if (cur_task != task)
-       task_unlock(cur_task);
+
+   if (self_task != task)
+       task_unlock(self_task);
+
+   /*
+    * Make sure the current thread does not get aborted out of
+    * the waits inside these operations.
+    */
+   interrupt_save = thread_interrupt_level(THREAD_UNINT);
 
    /*
     * Indicate that we want all the threads to stop executing
@@ -689,46 +606,37 @@ task_terminate_internal(
    ipc_task_disable(task);
 
    /*
-    * Terminate each activation in the task.
-    *
-    * Each terminated activation will run it's special handler
-    * when its current kernel context is unwound. That will
-    * clean up most of the thread resources. Then it will be
-    * handed over to the reaper, who will finally remove the
-    * thread from the task list and free the structures.
-    *
-    * We can't terminate the current activation yet, because
-    * it has to wait for the others in an interruptible state.
-    * We may also block interruptibly during the rest of the
-    * cleanup. Wait until the very last to terminate ourself.
-    *
-    * But if we have virtual machine state, we need to clean
-    * that up now, because it may be holding wirings the task's
-    * map that would get stuck in the vm_map_remove() below.
-    */
-   queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
-       if (thr_act != cur_thr_act)
-           thread_terminate_internal(thr_act);
-       else
-           act_virtual_machine_destroy(thr_act);
+    * Terminate each thread in the task.
+    */
+   queue_iterate(&task->threads, thread, thread_t, task_threads) {
+       thread_terminate_internal(thread);
    }
-   task_unlock(task);
 
    /*
-    * Destroy all synchronizers owned by the task.
+    * Give the machine dependent code a chance
+    * to perform cleanup before ripping apart
+    * the task.
    */
-   task_synchronizer_destroy_all(task);
+   if (self_task == task)
+       machine_thread_terminate_self();
+
+   task_unlock(task);
 
    /*
-    * Deallocate all subsystems owned by the task.
+    * Destroy all synchronizers owned by the task.
    */
-   task_subsystem_destroy_all(task);
+   task_synchronizer_destroy_all(task);
 
    /*
     * Destroy the IPC space, leaving just a reference for it.
     */
-   if (!task->kernel_loaded)
-       ipc_space_destroy(task->itk_space);
+   ipc_space_destroy(task->itk_space);
+
+/* LP64todo - make this clean */
+#ifdef __ppc__
+   vm_map_remove_commpage64(task->map);
+   pmap_unmap_sharedpage(task->map->pmap);    /* Unmap commpage */
+#endif
 
    /*
    * If the current thread is a member of the task
@@ -738,56 +646,65 @@ task_terminate_internal(
    * expense of removing the address space regions
    * at reap time, we do it explictly here.
    */
-   (void) vm_map_remove(task->map,
-                task->map->min_offset,
-                task->map->max_offset, VM_MAP_NO_FLAGS);
+   vm_map_remove(task->map, task->map->min_offset,
+             task->map->max_offset, VM_MAP_NO_FLAGS);
+
+   shared_region_mapping_dealloc(task->system_shared_region);
+
+   /*
+    * Flush working set here to avoid I/O in reaper thread
+    */
+   if (task->dynamic_working_set)
+       tws_hash_ws_flush(task->dynamic_working_set);
+
+   pset = task->processor_set;
+   pset_lock(pset);
+   pset_remove_task(pset,task);
+   pset_unlock(pset);
+
+   /*
+    * We no longer need to guard against being aborted, so restore
+    * the previous interruptible state.
+    */
+   thread_interrupt_level(interrupt_save);
+
+#if __ppc__
+   perfmon_release_facility(task); // notify the perfmon facility
+#endif
 
    /*
-    * Finally, mark ourself for termination and then
-    * deallocate the task's reference to itself.
+    * Get rid of the task active reference on itself.
    */
-   if (task == cur_task)
-       thread_terminate(cur_thr_act);
    task_deallocate(task);
 
-   return(KERN_SUCCESS);
+   return (KERN_SUCCESS);
 }
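The interrupt_save bracket introduced above is a reusable idiom, not specific to task teardown: raise the interrupt level around waits that must not be aborted part-way, then restore the caller's level. A generic sketch (kernel context assumed; the helper name is hypothetical):

    static void
    teardown_uninterruptible(void)
    {
        wait_interrupt_t saved;

        /* Block aborts for the duration of the teardown waits... */
        saved = thread_interrupt_level(THREAD_UNINT);

        /* ... perform waits that must not be interrupted ... */

        /* ... then restore the caller's interruptible state. */
        (void) thread_interrupt_level(saved);
    }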
 
 /*
- * task_halt - Shut the current task down (except for the current thread) in
- * preparation for dramatic changes to the task (probably exec).
- * We hold the task, terminate all other threads in the task and
- * wait for them to terminate, clean up the portspace, and when
- * all done, let the current thread go.
+ * task_halt:
+ *
+ *     Shut the current task down (except for the current thread) in
+ *     preparation for dramatic changes to the task (probably exec).
+ *     We hold the task, terminate all other threads in the task and
+ *     wait for them to terminate, clean up the portspace, and when
+ *     all done, let the current thread go.
 */
 kern_return_t
 task_halt(
    task_t      task)
 {
-   thread_act_t    thr_act, cur_thr_act;
-   task_t      cur_task;
+   thread_t    thread, self;
 
    assert(task != kernel_task);
 
-   cur_thr_act = current_act();
-   cur_task = cur_thr_act->task;
+   self = current_thread();
 
-   if (task != cur_task) {
-       return(KERN_INVALID_ARGUMENT);
-   }
-
-#if TASK_SWAPPER
-   /*
-    * If task is not resident (swapped out, or being swapped
-    * out), we want to bring it back in and make it unswappable.
-    * This can block, so do it early.
-    */
-   task_swapin(task, TRUE);    /* TRUE means make it unswappable */
-#endif /* TASK_SWAPPER */
+   if (task != self->task)
+       return (KERN_INVALID_ARGUMENT);
 
    task_lock(task);
 
-   if (!task->active || !cur_thr_act->active) {
+   if (!task->active || !self->active) {
        /*
         * Task or current thread is already being terminated.
         * Hurry up and return out of the current kernel context
@@ -795,10 +712,11 @@ task_halt(
         * ourselves.
         */
        task_unlock(task);
-       return(KERN_FAILURE);
+
+       return (KERN_FAILURE);
    }
 
-   if (task->thr_act_count > 1) {
+   if (task->thread_count > 1) {
        /*
         * Mark all the threads to keep them from starting any more
         * user-level execution. The thread_terminate_internal code
@@ -808,55 +726,44 @@ task_halt(
        task_hold_locked(task);
 
        /*
-        * Terminate all the other activations in the task.
-        *
-        * Each terminated activation will run it's special handler
-        * when its current kernel context is unwound. That will
-        * clean up most of the thread resources. Then it will be
-        * handed over to the reaper, who will finally remove the
-        * thread from the task list and free the structures.
-        *
-        * If the current thread has any virtual machine state
-        * associated with it, clean that up now before we try
-        * to clean up the task VM and port spaces.
+        * Terminate all the other threads in the task.
        */
-       queue_iterate(&task->thr_acts, thr_act, thread_act_t,thr_acts) {
-           if (thr_act != cur_thr_act)
-               thread_terminate_internal(thr_act);
-           else
-               act_virtual_machine_destroy(thr_act);
+       queue_iterate(&task->threads, thread, thread_t, task_threads) {
+           if (thread != self)
+               thread_terminate_internal(thread);
        }
+
        task_release_locked(task);
    }
-   task_unlock(task);
 
    /*
-    * Destroy all synchronizers owned by the task.
+    * Give the machine dependent code a chance
+    * to perform cleanup before ripping apart
+    * the task.
    */
-   task_synchronizer_destroy_all(task);
+   machine_thread_terminate_self();
+
+   task_unlock(task);
 
    /*
-    * Deallocate all subsystems owned by the task.
+    * Destroy all synchronizers owned by the task.
    */
-   task_subsystem_destroy_all(task);
+   task_synchronizer_destroy_all(task);
 
    /*
-    * Destroy the IPC space, leaving just a reference for it.
+    * Destroy the contents of the IPC space, leaving just
+    * a reference for it.
    */
-#if 0
-   if (!task->kernel_loaded)
-       ipc_space_clean(task->itk_space);
-#endif
+   ipc_space_clean(task->itk_space);
 
    /*
    * Clean out the address space, as we are going to be
    * getting a new one.
    */
-   (void) vm_map_remove(task->map,
-                task->map->min_offset,
-                task->map->max_offset, VM_MAP_NO_FLAGS);
+   vm_map_remove(task->map, task->map->min_offset,
+             task->map->max_offset, VM_MAP_NO_FLAGS);
 
-   return KERN_SUCCESS;
+   return (KERN_SUCCESS);
 }
 
 /*
@@ -870,21 +777,22 @@ task_halt(
 */
 void
 task_hold_locked(
-   register task_t task)
+   register task_t     task)
 {
-   register thread_act_t   thr_act;
+   register thread_t   thread;
 
    assert(task->active);
 
-   task->suspend_count++;
+   if (task->suspend_count++ > 0)
+       return;
 
    /*
-    * Iterate through all the thread_act's and hold them.
+    * Iterate through all the threads and hold them.
    */
-   queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
-       act_lock_thread(thr_act);
-       thread_hold(thr_act);
-       act_unlock_thread(thr_act);
+   queue_iterate(&task->threads, thread, thread_t, task_threads) {
+       thread_mtx_lock(thread);
+       thread_hold(thread);
+       thread_mtx_unlock(thread);
    }
 }
 
/*
@@ -900,25 +808,29 @@ task_hold_locked(
 * CONDITIONS: the caller holds a reference on the task
 */
 kern_return_t
-task_hold(task_t task)
+task_hold(
+   register task_t     task)
 {
-   kern_return_t kret;
-
    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);
+
    task_lock(task);
+
    if (!task->active) {
        task_unlock(task);
+
        return (KERN_FAILURE);
    }
-   task_hold_locked(task);
-   task_unlock(task);
 
-   return(KERN_SUCCESS);
+   task_hold_locked(task);
+   task_unlock(task);
+
+   return (KERN_SUCCESS);
 }
 
 /*
- * Routine: task_wait_locked
+ * task_wait_locked:
+ *
 * Wait for all threads in task to stop.
 *
 * Conditions:
 * Called with task locked and a task reference held
 */
@@ -928,25 +840,21 @@ void
 task_wait_locked(
    register task_t     task)
 {
-   register thread_act_t   thr_act, cur_thr_act;
+   register thread_t   thread, self;
 
    assert(task->active);
    assert(task->suspend_count > 0);
 
-   cur_thr_act = current_act();
+   self = current_thread();
+
    /*
-    * Iterate through all the thread's and wait for them to
+    * Iterate through all the threads and wait for them to
    * stop. Do not wait for the current thread if it is within
    * the task.
    */
-   queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
-       if (thr_act != cur_thr_act) {
-           thread_shuttle_t thr_shuttle;
-
-           thr_shuttle = act_lock_thread(thr_act);
-           thread_wait(thr_shuttle);
-           act_unlock_thread(thr_act);
-       }
+   queue_iterate(&task->threads, thread, thread_t, task_threads) {
+       if (thread != self)
+           thread_wait(thread);
    }
 }
 
 /*
@@ -959,24 +867,20 @@
 */
 void
 task_release_locked(
-   register task_t task)
+   register task_t     task)
 {
-   register thread_act_t   thr_act;
+   register thread_t   thread;
 
    assert(task->active);
+   assert(task->suspend_count > 0);
 
-   task->suspend_count--;
-   assert(task->suspend_count >= 0);
+   if (--task->suspend_count > 0)
+       return;
 
-   /*
-    * Iterate through all the thread_act's and hold them.
-    * Do not hold the current thread_act if it is within the
-    * task.
-    */
-   queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
-       act_lock_thread(thr_act);
-       thread_release(thr_act);
-       act_unlock_thread(thr_act);
+   queue_iterate(&task->threads, thread, thread_t, task_threads) {
+       thread_mtx_lock(thread);
+       thread_release(thread);
+       thread_mtx_unlock(thread);
    }
 }
 
 /*
@@ -989,40 +893,41 @@
 * CONDITIONS: The caller holds a reference to the task
 */
 kern_return_t
-task_release(task_t task)
+task_release(
+   task_t      task)
 {
-   kern_return_t kret;
-
    if (task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);
+
    task_lock(task);
+
    if (!task->active) {
        task_unlock(task);
+
        return (KERN_FAILURE);
    }
-   task_release_locked(task);
-   task_unlock(task);
-   return(KERN_SUCCESS);
+
+   task_release_locked(task);
+   task_unlock(task);
+
+   return (KERN_SUCCESS);
 }
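Taken together, the primitives above compose into the canonical stop-inspect-resume sequence; this is essentially what task_suspend()/task_resume() below implement on top of the user_stop_count. A sketch built only from routines defined in this file (the wrapper name is hypothetical):

    static kern_return_t
    stop_inspect_resume(task_t task)
    {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);
            return (KERN_FAILURE);
        }
        task_hold_locked(task);         /* stop new user-level execution */
        task_wait_locked(task);         /* wait for the threads to stop */
        task_unlock(task);

        /* ... inspect or modify the stopped task here ... */

        return (task_release(task));    /* let the threads run again */
    }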
 
 kern_return_t
 task_threads(
-   task_t          task,
-   thread_act_array_t  *thr_act_list,
+   task_t              task,
+   thread_act_array_t  *threads_out,
    mach_msg_type_number_t  *count)
 {
-   unsigned int        actual; /* this many thr_acts */
-   thread_act_t        thr_act;
-   thread_act_t        *thr_acts;
-   thread_t        thread;
-   int         i, j;
-
-   vm_size_t size, size_needed;
-   vm_offset_t addr;
+   mach_msg_type_number_t  actual;
+   thread_t            *threads;
+   thread_t            thread;
+   vm_size_t           size, size_needed;
+   void                *addr;
+   unsigned int        i, j;
 
    if (task == TASK_NULL)
-       return KERN_INVALID_ARGUMENT;
+       return (KERN_INVALID_ARGUMENT);
 
    size = 0; addr = 0;
 
@@ -1030,15 +935,17 @@ task_threads(
        task_lock(task);
        if (!task->active) {
            task_unlock(task);
+
            if (size != 0)
                kfree(addr, size);
-           return KERN_FAILURE;
+
+           return (KERN_FAILURE);
        }
 
-       actual = task->thr_act_count;
+       actual = task->thread_count;
 
        /* do we have the memory we need? */
-       size_needed = actual * sizeof(mach_port_t);
+       size_needed = actual * sizeof (mach_port_t);
        if (size_needed <= size)
            break;
 
@@ -1053,72 +960,71 @@
        addr = kalloc(size);
        if (addr == 0)
-           return KERN_RESOURCE_SHORTAGE;
+           return (KERN_RESOURCE_SHORTAGE);
    }
 
    /* OK, have memory and the task is locked & active */
-   thr_acts = (thread_act_t *) addr;
-
-   for (i = j = 0, thr_act = (thread_act_t) queue_first(&task->thr_acts);
-        i < actual;
-        i++, thr_act = (thread_act_t) queue_next(&thr_act->thr_acts)) {
-       act_lock(thr_act);
-       if (thr_act->ref_count > 0) {
-           act_locked_act_reference(thr_act);
-           thr_acts[j++] = thr_act;
-       }
-       act_unlock(thr_act);
+   threads = (thread_t *)addr;
+
+   i = j = 0;
+
+   for (thread = (thread_t)queue_first(&task->threads); i < actual;
+               ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
+       thread_reference_internal(thread);
+       threads[j++] = thread;
    }
-   assert(queue_end(&task->thr_acts, (queue_entry_t) thr_act));
+
+   assert(queue_end(&task->threads, (queue_entry_t)thread));
 
    actual = j;
-   size_needed = actual * sizeof(mach_port_t);
+   size_needed = actual * sizeof (mach_port_t);
 
-   /* can unlock task now that we've got the thr_act refs */
+   /* can unlock task now that we've got the thread refs */
    task_unlock(task);
 
    if (actual == 0) {
-       /* no thr_acts, so return null pointer and deallocate memory */
+       /* no threads, so return null pointer and deallocate memory */
 
-       *thr_act_list = 0;
+       *threads_out = 0;
        *count = 0;
 
        if (size != 0)
            kfree(addr, size);
-   } else {
+   }
+   else {
        /* if we allocated too much, must copy */
 
        if (size_needed < size) {
-           vm_offset_t newaddr;
+           void *newaddr;
 
            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
-               for (i = 0; i < actual; i++)
-                   act_deallocate(thr_acts[i]);
+               for (i = 0; i < actual; ++i)
+                   thread_deallocate(threads[i]);
                kfree(addr, size);
-               return KERN_RESOURCE_SHORTAGE;
+               return (KERN_RESOURCE_SHORTAGE);
            }
 
-           bcopy((char *) addr, (char *) newaddr, size_needed);
+           bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
-           thr_acts = (thread_act_t *) newaddr;
+           threads = (thread_t *)newaddr;
        }
 
-       *thr_act_list = thr_acts;
+       *threads_out = threads;
        *count = actual;
 
        /* do the conversion that Mig should handle */
 
-       for (i = 0; i < actual; i++)
-           ((ipc_port_t *) thr_acts)[i] =
-               convert_act_to_port(thr_acts[i]);
+       for (i = 0; i < actual; ++i)
+           ((ipc_port_t *) threads)[i] = convert_thread_to_port(threads[i]);
    }
 
-   return KERN_SUCCESS;
+   return (KERN_SUCCESS);
 }
 
 /*
- * Routine: task_suspend
+ * task_suspend:
+ *
 * Implement a user-level suspension on a task.
 *
 * Conditions:
@@ -1128,20 +1034,24 @@ kern_return_t
 task_suspend(
    register task_t     task)
 {
-   if (task == TASK_NULL)
+   if (task == TASK_NULL || task == kernel_task)
        return (KERN_INVALID_ARGUMENT);
 
    task_lock(task);
+
    if (!task->active) {
        task_unlock(task);
+
        return (KERN_FAILURE);
    }
-   if ((task->user_stop_count)++ > 0) {
+
+   if (task->user_stop_count++ > 0) {
        /*
         * If the stop count was positive, the task is
         * already stopped and we can exit.
         */
        task_unlock(task);
+
        return (KERN_SUCCESS);
    }
 
@@ -1153,38 +1063,44 @@
    */
    task_hold_locked(task);
    task_wait_locked(task);
+
    task_unlock(task);
+
    return (KERN_SUCCESS);
 }
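A user-space sketch of calling the task_threads() implementation above. The thread array is returned as out-of-line memory, so the caller owns both a send right per thread and the buffer itself, and must release both; the wrapper name is hypothetical.

    #include <mach/mach.h>

    static kern_return_t
    count_task_threads(task_t task, mach_msg_type_number_t *nthreads)
    {
        thread_act_array_t acts;
        mach_msg_type_number_t count, i;
        kern_return_t kr;

        kr = task_threads(task, &acts, &count);
        if (kr != KERN_SUCCESS)
            return kr;

        /* Drop the send right on each thread port... */
        for (i = 0; i < count; i++)
            (void) mach_port_deallocate(mach_task_self(), acts[i]);

        /* ... and the out-of-line array itself. */
        (void) vm_deallocate(mach_task_self(), (vm_address_t)acts,
            count * sizeof (acts[0]));

        *nthreads = count;
        return KERN_SUCCESS;
    }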
 
 /*
- * Routine: task_resume
+ * task_resume:
 *     Release a kernel hold on a task.
 *
 * Conditions:
 *     The caller holds a reference to the task
 */
 kern_return_t
-task_resume(register task_t task)
+task_resume(
+   register task_t task)
 {
-   register boolean_t release;
+   register boolean_t  release = FALSE;
 
-   if (task == TASK_NULL)
-       return(KERN_INVALID_ARGUMENT);
+   if (task == TASK_NULL || task == kernel_task)
+       return (KERN_INVALID_ARGUMENT);
 
-   release = FALSE;
    task_lock(task);
+
    if (!task->active) {
        task_unlock(task);
-       return(KERN_FAILURE);
+
+       return (KERN_FAILURE);
    }
+
    if (task->user_stop_count > 0) {
-       if (--(task->user_stop_count) == 0)
-           release = TRUE;
+       if (--task->user_stop_count == 0)
+           release = TRUE;
    }
    else {
        task_unlock(task);
-       return(KERN_FAILURE);
+
+       return (KERN_FAILURE);
    }
 
    /*
@@ -1194,7 +1110,8 @@
    task_release_locked(task);
 
    task_unlock(task);
-   return(KERN_SUCCESS);
+
+   return (KERN_SUCCESS);
 }
 
 kern_return_t
@@ -1202,8 +1119,10 @@ host_security_set_task_token(
        host_security_t  host_security,
        task_t       task,
        security_token_t sec_token,
+       audit_token_t    audit_token,
        host_priv_t  host_priv)
 {
+   ipc_port_t   host_port;
    kern_return_t    kr;
 
    if (task == TASK_NULL)
@@ -1214,17 +1133,16 @@ host_security_set_task_token(
    task_lock(task);
    task->sec_token = sec_token;
+   task->audit_token = audit_token;
    task_unlock(task);
 
    if (host_priv != HOST_PRIV_NULL) {
-       kr = task_set_special_port(task,
-                  TASK_HOST_PORT,
-                  ipc_port_make_send(realhost.host_priv_self));
+       kr = host_get_host_priv_port(host_priv, &host_port);
    }
    else {
-       kr = task_set_special_port(task,
-                  TASK_HOST_PORT,
-                  ipc_port_make_send(realhost.host_self));
+       kr = host_get_host_port(host_priv_self(), &host_port);
    }
+   assert(kr == KERN_SUCCESS);
+   kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
 
    return(kr);
 }
 
@@ -1266,11 +1184,9 @@
 kern_return_t
 task_set_info(
    task_t      task,
    task_flavor_t   flavor,
-   task_info_t task_info_in,       /* pointer to IN array */
-   mach_msg_type_number_t  task_info_count)
+   __unused task_info_t    task_info_in,       /* pointer to IN array */
+   __unused mach_msg_type_number_t task_info_count)
 {
-   vm_map_t    map;
-
    if (task == TASK_NULL)
        return(KERN_INVALID_ARGUMENT);
 
@@ -1283,61 +1199,91 @@ task_set_info(
 
 kern_return_t
 task_info(
-   task_t          task,
-   task_flavor_t       flavor,
-   task_info_t     task_info_out,
+   task_t          task,
+   task_flavor_t       flavor,
+   task_info_t     task_info_out,
    mach_msg_type_number_t  *task_info_count)
 {
-   thread_t    thread;
-   vm_map_t    map;
-
    if (task == TASK_NULL)
-       return(KERN_INVALID_ARGUMENT);
+       return (KERN_INVALID_ARGUMENT);
 
    switch (flavor) {
 
-   case TASK_BASIC_INFO:
-   {
-       register task_basic_info_t  basic_info;
-
-       if (*task_info_count < TASK_BASIC_INFO_COUNT) {
-           return(KERN_INVALID_ARGUMENT);
-       }
+   case TASK_BASIC_INFO_32:
+   {
+       task_basic_info_32_t    basic_info;
+       vm_map_t        map;
 
-       basic_info = (task_basic_info_t) task_info_out;
+       if (*task_info_count < TASK_BASIC_INFO_32_COUNT)
+           return (KERN_INVALID_ARGUMENT);
 
-       map = (task == kernel_task) ? kernel_map : task->map;
+       basic_info = (task_basic_info_32_t)task_info_out;
 
-       basic_info->virtual_size = map->size;
+       map = (task == kernel_task)? kernel_map: task->map;
 
+       basic_info->virtual_size = CAST_DOWN(vm_offset_t,map->size);
        basic_info->resident_size = pmap_resident_count(map->pmap)
                           * PAGE_SIZE;
 
        task_lock(task);
-       basic_info->policy = task->policy;
+       basic_info->policy = ((task != kernel_task)?
+                         POLICY_TIMESHARE: POLICY_RR);
        basic_info->suspend_count = task->user_stop_count;
-       basic_info->user_time.seconds
-               = task->total_user_time.seconds;
-       basic_info->user_time.microseconds
-               = task->total_user_time.microseconds;
-       basic_info->system_time.seconds
-               = task->total_system_time.seconds;
-       basic_info->system_time.microseconds
-               = task->total_system_time.microseconds;
+
+       absolutetime_to_microtime(
+                   task->total_user_time,
+                   &basic_info->user_time.seconds,
+                   &basic_info->user_time.microseconds);
+       absolutetime_to_microtime(
+                   task->total_system_time,
+                   &basic_info->system_time.seconds,
+                   &basic_info->system_time.microseconds);
        task_unlock(task);
 
-       *task_info_count = TASK_BASIC_INFO_COUNT;
+       *task_info_count = TASK_BASIC_INFO_32_COUNT;
        break;
-   }
+   }
 
-   case TASK_THREAD_TIMES_INFO:
-   {
-       register task_thread_times_info_t times_info;
-       register thread_t   thread;
-       register thread_act_t   thr_act;
+   case TASK_BASIC_INFO_64:
+   {
+       task_basic_info_64_t    basic_info;
+       vm_map_t        map;
 
-       if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
+       if (*task_info_count < TASK_BASIC_INFO_64_COUNT)
+           return (KERN_INVALID_ARGUMENT);
+
+       basic_info = (task_basic_info_64_t)task_info_out;
+
+       map = (task == kernel_task)? kernel_map: task->map;
+       basic_info->virtual_size  = map->size;
+       basic_info->resident_size = (mach_vm_size_t)(pmap_resident_count(map->pmap)
+                          * PAGE_SIZE);
+
+       task_lock(task);
+       basic_info->policy = ((task != kernel_task)?
+                         POLICY_TIMESHARE: POLICY_RR);
+       basic_info->suspend_count = task->user_stop_count;
+
+       absolutetime_to_microtime(
+                   task->total_user_time,
+                   &basic_info->user_time.seconds,
+                   &basic_info->user_time.microseconds);
+       absolutetime_to_microtime(
+                   task->total_system_time,
+                   &basic_info->system_time.seconds,
+                   &basic_info->system_time.microseconds);
+       task_unlock(task);
+
+       *task_info_count = TASK_BASIC_INFO_64_COUNT;
+       break;
+   }
+
+   case TASK_THREAD_TIMES_INFO:
+   {
+       register task_thread_times_info_t   times_info;
+       register thread_t           thread;
+
+       if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT)
            return (KERN_INVALID_ARGUMENT);
-       }
 
        times_info = (task_thread_times_info_t) task_info_out;
        times_info->user_time.seconds = 0;
@@ -1346,99 +1292,105 @@ task_info(
        times_info->system_time.microseconds = 0;
 
        task_lock(task);
-       queue_iterate(&task->thr_acts, thr_act,
-                 thread_act_t, thr_acts)
-       {
-           time_value_t user_time, system_time;
-           spl_t    s;
-
-           thread = act_lock_thread(thr_act);
-
-           /* Skip empty threads and threads that have migrated
-            * into this task:
-            */
-           if (!thread || thr_act->pool_port) {
-               act_unlock_thread(thr_act);
-               continue;
-           }
-           assert(thread);  /* Must have thread, if no thread_pool*/
-           s = splsched();
-           thread_lock(thread);
 
-           thread_read_times(thread, &user_time, &system_time);
+       queue_iterate(&task->threads, thread, thread_t, task_threads) {
+           time_value_t    user_time, system_time;
 
-           thread_unlock(thread);
-           splx(s);
-           act_unlock_thread(thr_act);
+           thread_read_times(thread, &user_time, &system_time);
 
            time_value_add(&times_info->user_time, &user_time);
            time_value_add(&times_info->system_time, &system_time);
        }
+
        task_unlock(task);
 
        *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
        break;
-   }
+   }
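A user-space sketch against the TASK_BASIC_INFO_32 handler above (the generic TASK_BASIC_INFO flavor resolves to the 32-bit variant for 32-bit callers); the helper name is hypothetical.

    #include <mach/mach.h>
    #include <stdio.h>

    static void
    print_task_usage(task_t task)
    {
        struct task_basic_info info;
        mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;

        if (task_info(task, TASK_BASIC_INFO,
            (task_info_t)&info, &count) == KERN_SUCCESS) {
            printf("virtual %lu resident %lu user %d.%06ds\n",
                (unsigned long)info.virtual_size,
                (unsigned long)info.resident_size,
                info.user_time.seconds,
                info.user_time.microseconds);
        }
    }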
 
-   case TASK_SCHED_FIFO_INFO:
-   {
-       register policy_fifo_base_t fifo_base;
+   case TASK_ABSOLUTETIME_INFO:
+   {
+       task_absolutetime_info_t    info;
+       register thread_t       thread;
 
-       if (*task_info_count < POLICY_FIFO_BASE_COUNT)
-           return(KERN_INVALID_ARGUMENT);
+       if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT)
+           return (KERN_INVALID_ARGUMENT);
 
-       fifo_base = (policy_fifo_base_t) task_info_out;
+       info = (task_absolutetime_info_t)task_info_out;
+       info->threads_user = info->threads_system = 0;
 
        task_lock(task);
-       if (task->policy != POLICY_FIFO) {
-           task_unlock(task);
-           return(KERN_INVALID_POLICY);
+
+       info->total_user = task->total_user_time;
+       info->total_system = task->total_system_time;
+
+       queue_iterate(&task->threads, thread, thread_t, task_threads) {
+           uint64_t    tval;
+
+           tval = timer_grab(&thread->user_timer);
+           info->threads_user += tval;
+           info->total_user += tval;
+
+           tval = timer_grab(&thread->system_timer);
+           info->threads_system += tval;
+           info->total_system += tval;
        }
 
-       fifo_base->base_priority = task->priority;
        task_unlock(task);
 
-       *task_info_count = POLICY_FIFO_BASE_COUNT;
+       *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
        break;
-   }
+   }
+
+   /* OBSOLETE */
+   case TASK_SCHED_FIFO_INFO:
+   {
+
+       if (*task_info_count < POLICY_FIFO_BASE_COUNT)
+           return (KERN_INVALID_ARGUMENT);
+
+       return (KERN_INVALID_POLICY);
+   }
 
-   case TASK_SCHED_RR_INFO:
-   {
+   /* OBSOLETE */
+   case TASK_SCHED_RR_INFO:
+   {
        register policy_rr_base_t   rr_base;
 
        if (*task_info_count < POLICY_RR_BASE_COUNT)
-           return(KERN_INVALID_ARGUMENT);
+           return (KERN_INVALID_ARGUMENT);
 
        rr_base = (policy_rr_base_t) task_info_out;
 
        task_lock(task);
-       if (task->policy != POLICY_RR) {
+       if (task != kernel_task) {
            task_unlock(task);
-           return(KERN_INVALID_POLICY);
+
+           return (KERN_INVALID_POLICY);
        }
 
        rr_base->base_priority = task->priority;
        task_unlock(task);
 
-       rr_base->quantum = (min_quantum * tick) / 1000;
+       rr_base->quantum = std_quantum_us / 1000;
 
        *task_info_count = POLICY_RR_BASE_COUNT;
        break;
-   }
+   }
 
-   case TASK_SCHED_TIMESHARE_INFO:
-   {
+   /* OBSOLETE */
+   case TASK_SCHED_TIMESHARE_INFO:
+   {
        register policy_timeshare_base_t    ts_base;
 
        if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT)
-           return(KERN_INVALID_ARGUMENT);
+           return (KERN_INVALID_ARGUMENT);
 
        ts_base = (policy_timeshare_base_t) task_info_out;
 
        task_lock(task);
-       if (task->policy != POLICY_TIMESHARE) {
+       if (task == kernel_task) {
            task_unlock(task);
-           return(KERN_INVALID_POLICY);
+
+           return (KERN_INVALID_POLICY);
        }
 
        ts_base->base_priority = task->priority;
@@ -1446,15 +1398,14 @@
        *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
        break;
-   }
+   }
 
-   case TASK_SECURITY_TOKEN:
-   {
-       register security_token_t   *sec_token_p;
+   case TASK_SECURITY_TOKEN:
+   {
+       register security_token_t   *sec_token_p;
 
-       if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
-           return(KERN_INVALID_ARGUMENT);
-       }
+       if (*task_info_count < TASK_SECURITY_TOKEN_COUNT)
+           return (KERN_INVALID_ARGUMENT);
 
        sec_token_p = (security_token_t *) task_info_out;
 
@@ -1463,19 +1414,35 @@
        task_unlock(task);
 
        *task_info_count = TASK_SECURITY_TOKEN_COUNT;
-       break;
-   }
+       break;
+   }
 
-   case TASK_SCHED_INFO:
-       return(KERN_INVALID_ARGUMENT);
+   case TASK_AUDIT_TOKEN:
+   {
+       register audit_token_t  *audit_token_p;
 
-   case TASK_EVENTS_INFO:
-   {
+       if (*task_info_count < TASK_AUDIT_TOKEN_COUNT)
+           return (KERN_INVALID_ARGUMENT);
+
+       audit_token_p = (audit_token_t *) task_info_out;
+
+       task_lock(task);
+       *audit_token_p = task->audit_token;
+       task_unlock(task);
+
+       *task_info_count = TASK_AUDIT_TOKEN_COUNT;
+       break;
+   }
+
+   case TASK_SCHED_INFO:
+       return (KERN_INVALID_ARGUMENT);
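The TASK_AUDIT_TOKEN flavor added above is the supported way to read a task's audit token from user space (the token is commonly used to identify the sender of a Mach message). A hedged sketch; the wrapper name is hypothetical.

    #include <mach/mach.h>

    static kern_return_t
    fetch_audit_token(task_t task, audit_token_t *token)
    {
        mach_msg_type_number_t count = TASK_AUDIT_TOKEN_COUNT;

        return task_info(task, TASK_AUDIT_TOKEN,
            (task_info_t)token, &count);
    }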
+
+   case TASK_EVENTS_INFO:
+   {
        register task_events_info_t events_info;
 
-       if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
-           return(KERN_INVALID_ARGUMENT);
-       }
+       if (*task_info_count < TASK_EVENTS_INFO_COUNT)
+           return (KERN_INVALID_ARGUMENT);
 
        events_info = (task_events_info_t) task_info_out;
 
@@ -1492,13 +1459,13 @@
        *task_info_count = TASK_EVENTS_INFO_COUNT;
        break;
-   }
+   }
 
-   default:
+   default:
        return (KERN_INVALID_ARGUMENT);
    }
 
-   return(KERN_SUCCESS);
+   return (KERN_SUCCESS);
 }
 
 /*
@@ -1508,13 +1475,10 @@
 */
 kern_return_t
 task_assign(
-   task_t      task,
-   processor_set_t new_pset,
-   boolean_t   assign_threads)
+   __unused task_t     task,
+   __unused processor_set_t    new_pset,
+   __unused boolean_t  assign_threads)
 {
-#ifdef lint
-   task++; new_pset++; assign_threads++;
-#endif /* lint */
    return(KERN_FAILURE);
 }
 
@@ -1559,12 +1523,12 @@ task_get_assignment(
 */
 kern_return_t
 task_policy(
-   task_t          task,
-   policy_t        policy_id,
-   policy_base_t       base,
-   mach_msg_type_number_t  count,
-   boolean_t       set_limit,
-   boolean_t       change)
+   __unused task_t         task,
+   __unused policy_t       policy_id,
+   __unused policy_base_t      base,
+   __unused mach_msg_type_number_t count,
+   __unused boolean_t      set_limit,
+   __unused boolean_t      change)
 {
    return(KERN_FAILURE);
 }
@@ -1579,91 +1543,25 @@
 */
 kern_return_t
 task_set_policy(
-   task_t          task,
-   processor_set_t     pset,
-   policy_t        policy_id,
-   policy_base_t       base,
-   mach_msg_type_number_t  base_count,
-   policy_limit_t      limit,
-   mach_msg_type_number_t  limit_count,
-   boolean_t       change)
+   __unused task_t         task,
+   __unused processor_set_t    pset,
+   __unused policy_t       policy_id,
+   __unused policy_base_t      base,
+   __unused mach_msg_type_number_t base_count,
+   __unused policy_limit_t     limit,
+   __unused mach_msg_type_number_t limit_count,
+   __unused boolean_t      change)
 {
    return(KERN_FAILURE);
 }
 
-/*
- * task_collect_scan:
- *
- * Attempt to free resources owned by tasks.
- */
-
-void
-task_collect_scan(void)
-{
-   register task_t     task, prev_task;
-   processor_set_t     pset = &default_pset;
-
-   prev_task = TASK_NULL;
-
-   pset_lock(pset);
-   pset->ref_count++;
-   task = (task_t) queue_first(&pset->tasks);
-   while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
-       task_reference(task);
-       pset_unlock(pset);
-
-       pmap_collect(task->map->pmap);
-
-       if (prev_task != TASK_NULL)
-           task_deallocate(prev_task);
-       prev_task = task;
-
-       pset_lock(pset);
-       task = (task_t) queue_next(&task->pset_tasks);
-   }
-   pset_unlock(pset);
-
-   pset_deallocate(pset);
-
-   if (prev_task != TASK_NULL)
-       task_deallocate(prev_task);
-}
-
-boolean_t task_collect_allowed = FALSE;
-unsigned task_collect_last_tick = 0;
-unsigned task_collect_max_rate = 0;        /* in ticks */
-
-/*
- * consider_task_collect:
- *
- * Called by the pageout daemon when the system needs more free pages.
- */
-
-void
-consider_task_collect(void)
-{
-   /*
-    * By default, don't attempt task collection more frequently
-    * than once per second.
- */ - - if (task_collect_max_rate == 0) - task_collect_max_rate = (2 << SCHED_TICK_SHIFT); - - if (task_collect_allowed && - (sched_tick > (task_collect_last_tick + task_collect_max_rate))) { - task_collect_last_tick = sched_tick; - task_collect_scan(); - } -} - +#if FAST_TAS kern_return_t task_set_ras_pc( task_t task, vm_offset_t pc, vm_offset_t endpc) { -#if FAST_TAS extern int fast_tas_debug; if (fast_tas_debug) { @@ -1675,18 +1573,17 @@ task_set_ras_pc( task->fast_tas_end = endpc; task_unlock(task); return KERN_SUCCESS; - +} #else /* FAST_TAS */ -#ifdef lint - task++; - pc++; - endpc++; -#endif /* lint */ - +kern_return_t +task_set_ras_pc( + __unused task_t task, + __unused vm_offset_t pc, + __unused vm_offset_t endpc) +{ return KERN_FAILURE; - -#endif /* FAST_TAS */ } +#endif /* FAST_TAS */ void task_synchronizer_destroy_all(task_t task) @@ -1713,41 +1610,6 @@ task_synchronizer_destroy_all(task_t task) } } -void -task_subsystem_destroy_all(task_t task) -{ - subsystem_t subsystem; - - /* - * Destroy owned subsystems - */ - - while (!queue_empty(&task->subsystem_list)) { - subsystem = (subsystem_t) queue_first(&task->subsystem_list); - subsystem_deallocate(subsystem); - } -} - -/* - * task_set_port_space: - * - * Set port name space of task to specified size. - */ - -kern_return_t -task_set_port_space( - task_t task, - int table_entries) -{ - kern_return_t kr; - - is_write_lock(task->itk_space); - kr = ipc_entry_grow_table(task->itk_space, table_entries); - if (kr == KERN_SUCCESS) - is_write_unlock(task->itk_space); - return kr; -} - /* * We need to export some functions to other components that * are currently implemented in macros within the osfmk @@ -1756,13 +1618,24 @@ task_set_port_space( boolean_t is_kerneltask(task_t t) { if (t == kernel_task) - return(TRUE); - else - return((t->kernel_loaded)); + return (TRUE); + + return (FALSE); } #undef current_task -task_t current_task() +task_t current_task(void); +task_t current_task(void) { return (current_task_fast()); } + +#undef task_reference +void task_reference(task_t task); +void +task_reference( + task_t task) +{ + if (task != TASK_NULL) + task_reference_internal(task); +}