X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..ac5ea4a98a4e1d34a076095ea1eaa87e43d1f335:/osfmk/kern/task.c

diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c
index 0eaf47eaa..2621419f9 100644
--- a/osfmk/kern/task.c
+++ b/osfmk/kern/task.c
@@ -87,7 +87,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -98,8 +97,6 @@
 #include
 #include
 #include				/* for thread_wakeup */
-#include
-#include	/*** ??? fix so this can be removed ***/
 #include
 #include
 #include
@@ -122,6 +119,7 @@
 #include
 #include
 #include
+#include
 
 task_t	kernel_task;
 zone_t	task_zone;
@@ -139,8 +137,6 @@ void task_free(
 	task_t		task );
 void		task_synchronizer_destroy_all(
 			task_t		task);
-void		task_subsystem_destroy_all(
-			task_t		task);
 kern_return_t	task_set_ledger(
 	task_t		task,
@@ -177,7 +173,9 @@ task_init(void)
 }
 
 #if	MACH_HOST
-void
+
+#if 0
+static void
 task_freeze(
 	task_t		task)
 {
@@ -187,18 +185,23 @@ task_freeze(
 	 * wait for that to finish.
 	 */
 	while (task->may_assign == FALSE) {
+		wait_result_t res;
+
 		task->assign_active = TRUE;
-		thread_sleep_mutex((event_t) &task->assign_active,
-					&task->lock, THREAD_INTERRUPTIBLE);
-		task_lock(task);
+		res = thread_sleep_mutex((event_t) &task->assign_active,
+					 &task->lock, THREAD_UNINT);
+		assert(res == THREAD_AWAKENED);
 	}
 	task->may_assign = FALSE;
 	task_unlock(task);
-	return;
 }
+#else
+#define thread_freeze(thread)	assert(task->processor_set == &default_pset)
+#endif
 
-void
+#if 0
+static void
 task_unfreeze(
 	task_t		task)
 {
@@ -210,9 +213,12 @@ task_unfreeze(
 		thread_wakeup((event_t)&task->assign_active);
 	}
 	task_unlock(task);
-	return;
 }
+#else
+#define thread_unfreeze(thread)	assert(task->processor_set == &default_pset)
+#endif
+
 #endif	/* MACH_HOST */
 
 /*
@@ -347,14 +353,13 @@ task_create_local(
 			trunc_page(VM_MAX_ADDRESS), TRUE);
 
 	mutex_init(&new_task->lock, ETAP_THREAD_TASK_NEW);
-	queue_init(&new_task->subsystem_list);
 	queue_init(&new_task->thr_acts);
 	new_task->suspend_count = 0;
 	new_task->thr_act_count = 0;
 	new_task->res_act_count = 0;
 	new_task->active_act_count = 0;
 	new_task->user_stop_count = 0;
-	new_task->importance = 0;
+	new_task->role = TASK_UNSPECIFIED;
 	new_task->active = TRUE;
 	new_task->kernel_loaded = kernel_loaded;
 	new_task->user_data = 0;
@@ -366,6 +371,10 @@ task_create_local(
 	new_task->syscalls_mach = 0;
 	new_task->syscalls_unix=0;
 	new_task->csw=0;
+	new_task->dynamic_working_set = 0;
+
+	task_working_set_create(new_task, TWS_SMALL_HASH_LINE_COUNT,
+			0, TWS_HASH_STYLE_DEFAULT);
 
 #ifdef	MACH_BSD
 	new_task->bsd_info = 0;
@@ -412,11 +421,6 @@ task_create_local(
 		if (!pset->active)
 			pset = &default_pset;
 
-		new_task->policy = parent_task->policy;
-
-		new_task->priority = parent_task->priority;
-		new_task->max_priority = parent_task->max_priority;
-
 		new_task->sec_token = parent_task->sec_token;
 
 		shared_region_mapping_ref(parent_task->system_shared_region);
@@ -430,24 +434,20 @@ task_create_local(
 	else {
 		pset = &default_pset;
 
-		if (kernel_task == TASK_NULL) {
-			new_task->policy = POLICY_RR;
-
-			new_task->priority = MINPRI_KERNBAND;
-			new_task->max_priority = MAXPRI_KERNBAND;
-		}
-		else {
-			new_task->policy = POLICY_TIMESHARE;
-
-			new_task->priority = BASEPRI_DEFAULT;
-			new_task->max_priority = MAXPRI_HIGHBAND;
-		}
-
 		new_task->sec_token = KERNEL_SECURITY_TOKEN;
 		new_task->wired_ledger_port = ledger_copy(root_wired_ledger);
 		new_task->paged_ledger_port = ledger_copy(root_paged_ledger);
 	}
 
+	if (kernel_task == TASK_NULL) {
+		new_task->priority = MINPRI_KERNEL;
+		new_task->max_priority = MAXPRI_KERNEL;
+	}
+	else {
+		new_task->priority = BASEPRI_DEFAULT;
+		new_task->max_priority = MAXPRI_USER;
+	}
+
 	pset_lock(pset);
 	pset_add_task(pset, new_task);
 	pset_unlock(pset);
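The hunks above drop the inherited per-task scheduling policy fields in favor of fixed priority bands: the very first task created (which becomes kernel_task) gets the kernel band, and every later task gets the user band regardless of its parent. A minimal user-space sketch of that selection follows; the constants and struct are stand-ins that mirror the fields the diff touches, not the real xnu definitions.

#include <stdio.h>
#include <stdbool.h>

#define MINPRI_KERNEL   64      /* placeholder values for illustration only */
#define MAXPRI_KERNEL   95
#define BASEPRI_DEFAULT 31
#define MAXPRI_USER     63

struct task_sketch {
	int priority;           /* default base priority */
	int max_priority;       /* ceiling for this task */
};

/* The first task (before kernel_task exists) gets the kernel band;
 * every subsequent task gets the user band, independent of its parent. */
static void set_default_priorities(struct task_sketch *t, bool kernel_task_exists)
{
	if (!kernel_task_exists) {
		t->priority = MINPRI_KERNEL;
		t->max_priority = MAXPRI_KERNEL;
	} else {
		t->priority = BASEPRI_DEFAULT;
		t->max_priority = MAXPRI_USER;
	}
}

int main(void)
{
	struct task_sketch kt, ut;

	set_default_priorities(&kt, false);   /* bootstrapping kernel_task */
	set_default_priorities(&ut, true);    /* ordinary task */
	printf("kernel band: %d..%d, user band: %d..%d\n",
	       kt.priority, kt.max_priority, ut.priority, ut.max_priority);
	return 0;
}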
@@ -483,71 +483,54 @@ task_create_local(
 }
 
 /*
- *	task_free:
+ *	task_deallocate
 *
- *	Called by task_deallocate when the task's reference count drops to zero.
+ *	Drop a reference on a task
 *	Task is locked.
 */
 void
-task_free(
+task_deallocate(
 	task_t		task)
 {
 	processor_set_t pset;
+	int refs;
 
-#if	MACH_ASSERT
-	assert(task != 0);
-	if (watchacts & (WA_EXIT|WA_TASK))
-		printf("task_free(%x(%d)) map ref %d\n", task, task->ref_count,
-			task->map->ref_count);
-#endif	/* MACH_ASSERT */
+	if (task == TASK_NULL)
+		return;
+
+	task_lock(task);
+	refs = --task->ref_count;
+	task_unlock(task);
+
+	if (refs > 0)
+		return;
 
 #if	TASK_SWAPPER
 	/* task_terminate guarantees that this task is off the list */
 	assert((task->swap_state & TASK_SW_ELIGIBLE) == 0);
 #endif	/* TASK_SWAPPER */
 
+	if(task->dynamic_working_set)
+		tws_hash_destroy((tws_hash_t)task->dynamic_working_set);
+
 	eml_task_deallocate(task);
 
-	/*
-	 * Temporarily restore the reference we dropped above, then
-	 * freeze the task so that the task->processor_set field
-	 * cannot change. In the !MACH_HOST case, the logic can be
-	 * simplified, since the default_pset is the only pset.
-	 */
-	++task->ref_count;
-	task_unlock(task);
-#if	MACH_HOST
+	ipc_task_terminate(task);
+
+#if	MACH_HOST
 	task_freeze(task);
-#endif	/* MACH_HOST */
-
+#endif
+
 	pset = task->processor_set;
 	pset_lock(pset);
-	task_lock(task);
-	if (--task->ref_count > 0) {
-		/*
-		 * A new reference appeared (probably from the pset).
-		 * Back out. Must unfreeze inline since we'already
-		 * dropped our reference.
-		 */
-#if	MACH_HOST
-		assert(task->may_assign == FALSE);
-		task->may_assign = TRUE;
-		if (task->assign_active == TRUE) {
-			task->assign_active = FALSE;
-			thread_wakeup((event_t)&task->assign_active);
-		}
-#endif	/* MACH_HOST */
-		task_unlock(task);
-		pset_unlock(pset);
-		return;
-	}
 	pset_remove_task(pset,task);
-	task_unlock(task);
 	pset_unlock(pset);
 	pset_deallocate(pset);
-	ipc_task_terminate(task);
-
 	shared_region_mapping_dealloc(task->system_shared_region);
 
+#if	MACH_HOST
+	task_unfreeze(task);
+#endif
+
 	if (task->kernel_loaded)
 	    vm_map_remove(kernel_map, task->map->min_offset,
@@ -558,21 +541,6 @@ task_free(
 	zfree(task_zone, (vm_offset_t) task);
 }
 
-void
-task_deallocate(
-	task_t		task)
-{
-	if (task != TASK_NULL) {
-		int	c;
-
-		task_lock(task);
-		c = --task->ref_count;
-		if (c == 0)
-			task_free(task);	/* unlocks task */
-		else
-			task_unlock(task);
-	}
-}
 
 void
 task_reference(
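These two hunks fold the old task_free() into task_deallocate(): the reference is dropped while holding the task lock, the function returns early if references remain, and all teardown runs unlocked once the count reaches zero, replacing the old drop/re-acquire/back-out dance. A sketch of that pattern, with pthreads standing in for the kernel mutex package and obj_deallocate() as a hypothetical stand-in:

#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int ref_count;
	/* ... owned resources ... */
};

void obj_deallocate(struct obj *o)
{
	int refs;

	if (o == NULL)
		return;

	/* Take the lock only long enough to decrement the count. */
	pthread_mutex_lock(&o->lock);
	refs = --o->ref_count;
	pthread_mutex_unlock(&o->lock);

	if (refs > 0)
		return;

	/* Last reference: nothing can find the object any more (the
	 * terminate path removed it from all lookup structures), so
	 * teardown proceeds without holding the object lock. */
	pthread_mutex_destroy(&o->lock);
	free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	o->ref_count = 2;
	obj_deallocate(o);	/* 2 -> 1: returns early */
	obj_deallocate(o);	/* 1 -> 0: frees */
	return 0;
}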
@@ -623,6 +591,7 @@ task_terminate_internal(
 {
 	thread_act_t	thr_act, cur_thr_act;
 	task_t		cur_task;
+	boolean_t	interrupt_save;
 
 	assert(task != kernel_task);
@@ -676,6 +645,12 @@ task_terminate_internal(
 	if (cur_task != task)
 		task_unlock(cur_task);
 
+	/*
+	 *	Make sure the current thread does not get aborted out of
+	 *	the waits inside these operations.
+	 */
+	interrupt_save = thread_interrupt_level(THREAD_UNINT);
+
 	/*
 	 *	Indicate that we want all the threads to stop executing
 	 *	at user space by holding the task (we would have held
@@ -696,33 +671,26 @@ task_terminate_internal(
 	 *	clean up most of the thread resources.  Then it will be
 	 *	handed over to the reaper, who will finally remove the
 	 *	thread from the task list and free the structures.
-	 *
-	 *	We can't terminate the current activation yet, because
-	 *	it has to wait for the others in an interruptible state.
-	 *	We may also block interruptibly during the rest of the
-	 *	cleanup.  Wait until the very last to terminate ourself.
-	 *
-	 *	But if we have virtual machine state, we need to clean
-	 *	that up now, because it may be holding wirings the task's
-	 *	map that would get stuck in the vm_map_remove() below.
 	 */
 	queue_iterate(&task->thr_acts, thr_act, thread_act_t, thr_acts) {
-		if (thr_act != cur_thr_act)
 			thread_terminate_internal(thr_act);
-		else
-			act_virtual_machine_destroy(thr_act);
 	}
-	task_unlock(task);
 
 	/*
-	 *	Destroy all synchronizers owned by the task.
+	 *	Clean up any virtual machine state/resources associated
+	 *	with the current activation because it may hold wiring
+	 *	and other references on resources we will be trying to
+	 *	release below.
 	 */
-	task_synchronizer_destroy_all(task);
+	if (cur_thr_act->task == task)
+		act_virtual_machine_destroy(cur_thr_act);
+
+	task_unlock(task);
 
 	/*
-	 *	Deallocate all subsystems owned by the task.
+	 *	Destroy all synchronizers owned by the task.
 	 */
-	task_subsystem_destroy_all(task);
+	task_synchronizer_destroy_all(task);
 
 	/*
 	 *	Destroy the IPC space, leaving just a reference for it.
@@ -742,12 +710,24 @@ task_terminate_internal(
 			     task->map->min_offset,
 			     task->map->max_offset, VM_MAP_NO_FLAGS);
 
+	shared_region_mapping_dealloc(task->system_shared_region);
+
+	/*
+	 * Flush working set here to avoid I/O in reaper thread
+	 */
+	if(task->dynamic_working_set)
+		tws_hash_ws_flush((tws_hash_t)
+				task->dynamic_working_set);
+
+	/*
+	 *	We no longer need to guard against being aborted, so restore
+	 *	the previous interruptible state.
+	 */
+	thread_interrupt_level(interrupt_save);
+
 	/*
-	 *	Finally, mark ourself for termination and then
-	 *	deallocate the task's reference to itself.
+	 *	Get rid of the task active reference on itself.
 	 */
-	if (task == cur_task)
-		thread_terminate(cur_thr_act);
 	task_deallocate(task);
 
 	return(KERN_SUCCESS);
@@ -815,38 +795,35 @@ task_halt(
 		 *	clean up most of the thread resources.  Then it will be
 		 *	handed over to the reaper, who will finally remove the
 		 *	thread from the task list and free the structures.
-		 *
-		 *	If the current thread has any virtual machine state
-		 *	associated with it, clean that up now before we try
-		 *	to clean up the task VM and port spaces.
 		 */
 		queue_iterate(&task->thr_acts, thr_act, thread_act_t,thr_acts) {
 			if (thr_act != cur_thr_act)
 				thread_terminate_internal(thr_act);
-			else
-				act_virtual_machine_destroy(thr_act);
 		}
 		task_release_locked(task);
 	}
-	task_unlock(task);
 
 	/*
-	 *	Destroy all synchronizers owned by the task.
+	 *	If the current thread has any virtual machine state
+	 *	associated with it, we need to explicitly clean that
+	 *	up now (because we did not terminate the current act)
+	 *	before we try to clean up the task VM and port spaces.
 	 */
-	task_synchronizer_destroy_all(task);
+	act_virtual_machine_destroy(cur_thr_act);
+
+	task_unlock(task);
 
 	/*
-	 *	Deallocate all subsystems owned by the task.
+	 *	Destroy all synchronizers owned by the task.
 	 */
-	task_subsystem_destroy_all(task);
+	task_synchronizer_destroy_all(task);
 
 	/*
-	 *	Destroy the IPC space, leaving just a reference for it.
+	 *	Destroy the contents of the IPC space, leaving just
+	 *	a reference for it.
 	 */
-#if 0
 	if (!task->kernel_loaded)
 		ipc_space_clean(task->itk_space);
-#endif
 
 	/*
 	 *	Clean out the address space, as we are going to be
@@ -876,7 +853,8 @@ task_hold_locked(
 
 	assert(task->active);
 
-	task->suspend_count++;
+	if (task->suspend_count++ > 0)
+		return;
 
 	/*
 	 *	Iterate through all the thread_act's and hold them.
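With the task_hold_locked() hunk above (and its task_release_locked() counterpart in the next hunk), task suspension becomes a true nesting count: only the 0 -> 1 transition stops the threads and only the 1 -> 0 transition resumes them. A minimal sketch of those semantics; hold_all_threads()/release_all_threads() are hypothetical stand-ins for the per-thread loops in the real functions.

#include <assert.h>

static int suspend_count;

static void hold_all_threads(void)    { /* stop every thread in the task */ }
static void release_all_threads(void) { /* resume every thread in the task */ }

void task_hold_sketch(void)
{
	if (suspend_count++ > 0)
		return;		/* already held; just nest one level deeper */
	hold_all_threads();
}

void task_release_sketch(void)
{
	assert(suspend_count > 0);
	if (--suspend_count > 0)
		return;		/* still held by an outer caller */
	release_all_threads();
}

int main(void)
{
	task_hold_sketch();	/* 0 -> 1: stops threads */
	task_hold_sketch();	/* nested hold: no-op */
	task_release_sketch();	/* 2 -> 1: still held */
	task_release_sketch();	/* 1 -> 0: resumes threads */
	return 0;
}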
@@ -964,9 +942,10 @@ task_release_locked(
 	register thread_act_t	thr_act;
 
 	assert(task->active);
+	assert(task->suspend_count > 0);
 
-	task->suspend_count--;
-	assert(task->suspend_count >= 0);
+	if (--task->suspend_count > 0)
+		return;
 
 	/*
 	 *	Iterate through all the thread_act's and hold them.
@@ -1313,7 +1292,8 @@ task_info(
 				* PAGE_SIZE;
 
 		task_lock(task);
-		basic_info->policy = task->policy;
+		basic_info->policy = ((task != kernel_task)?
+						POLICY_TIMESHARE: POLICY_RR);
 		basic_info->suspend_count = task->user_stop_count;
 		basic_info->user_time.seconds
 				= task->total_user_time.seconds;
@@ -1354,14 +1334,11 @@ task_info(
 
 		    thread = act_lock_thread(thr_act);
 
-		    /* Skip empty threads and threads that have migrated
-		     * into this task:
+		    /* JMM - add logic to skip threads that have migrated
+		     * into this task?
 		     */
-		    if (!thread || thr_act->pool_port) {
-			    act_unlock_thread(thr_act);
-			    continue;
-		    }
-		    assert(thread);  /* Must have thread, if no thread_pool*/
+
+		    assert(thread);  /* Must have thread */
 		    s = splsched();
 		    thread_lock(thread);
@@ -1382,24 +1359,11 @@ task_info(
 
 	case TASK_SCHED_FIFO_INFO:
 	{
-		register policy_fifo_base_t	fifo_base;
 
 		if (*task_info_count < POLICY_FIFO_BASE_COUNT)
 			return(KERN_INVALID_ARGUMENT);
 
-		fifo_base = (policy_fifo_base_t) task_info_out;
-
-		task_lock(task);
-		if (task->policy != POLICY_FIFO) {
-			task_unlock(task);
-			return(KERN_INVALID_POLICY);
-		}
-
-		fifo_base->base_priority = task->priority;
-		task_unlock(task);
-
-		*task_info_count = POLICY_FIFO_BASE_COUNT;
-		break;
+		return(KERN_INVALID_POLICY);
 	}
 
 	case TASK_SCHED_RR_INFO:
@@ -1412,7 +1376,7 @@ task_info(
 		rr_base = (policy_rr_base_t) task_info_out;
 
 		task_lock(task);
-		if (task->policy != POLICY_RR) {
+		if (task != kernel_task) {
 			task_unlock(task);
 			return(KERN_INVALID_POLICY);
 		}
@@ -1420,7 +1384,7 @@ task_info(
 		rr_base->base_priority = task->priority;
 		task_unlock(task);
 
-		rr_base->quantum = (min_quantum * tick) / 1000;
+		rr_base->quantum = tick / 1000;
 
 		*task_info_count = POLICY_RR_BASE_COUNT;
 		break;
@@ -1436,7 +1400,7 @@ task_info(
 		ts_base = (policy_timeshare_base_t) task_info_out;
 
 		task_lock(task);
-		if (task->policy != POLICY_TIMESHARE) {
+		if (task == kernel_task) {
 			task_unlock(task);
 			return(KERN_INVALID_POLICY);
 		}
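Because tasks no longer carry a policy field, the task_info() hunks above derive the reported policy from task identity: kernel_task answers as round-robin, every other task as timeshare, and TASK_SCHED_FIFO_INFO now always returns KERN_INVALID_POLICY. A small sketch of that mapping, with placeholder constant values and a sentinel pointer standing in for kernel_task:

#include <stdio.h>

#define POLICY_TIMESHARE 1	/* placeholder values for illustration only */
#define POLICY_RR        2

struct task_sketch { int unused; };

static struct task_sketch kt;
static struct task_sketch *kernel_task = &kt;

/* The policy is no longer stored per task; it is implied by identity. */
int task_basic_policy(struct task_sketch *task)
{
	return (task != kernel_task) ? POLICY_TIMESHARE : POLICY_RR;
}

int main(void)
{
	struct task_sketch user = {0};

	printf("kernel=%d user=%d\n",
	       task_basic_policy(kernel_task), task_basic_policy(&user));
	return 0;
}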
@@ -1603,32 +1567,51 @@ task_collect_scan(void)
 	register task_t		task, prev_task;
 	processor_set_t		pset = &default_pset;
 
-	prev_task = TASK_NULL;
-
 	pset_lock(pset);
 	pset->ref_count++;
 	task = (task_t) queue_first(&pset->tasks);
 	while (!queue_end(&pset->tasks, (queue_entry_t) task)) {
-		task_reference(task);
-		pset_unlock(pset);
+		task_lock(task);
+		if (task->ref_count > 0) {
 
-		pmap_collect(task->map->pmap);
+			task_reference_locked(task);
+			task_unlock(task);
 
-		if (prev_task != TASK_NULL)
-			task_deallocate(prev_task);
-		prev_task = task;
+#if MACH_HOST
+			/*
+			 *	While we still have the pset locked, freeze the task in
+			 *	this pset.  That way, when we get back from collecting
+			 *	it, we can dereference the pset_tasks chain for the task
+			 *	and be assured that we are still in this chain.
+			 */
+			task_freeze(task);
+#endif
+
+			pset_unlock(pset);
+
+			pmap_collect(task->map->pmap);
 
-		pset_lock(pset);
-		task = (task_t) queue_next(&task->pset_tasks);
+			pset_lock(pset);
+			prev_task = task;
+			task = (task_t) queue_next(&task->pset_tasks);
+
+#if MACH_HOST
+			task_unfreeze(prev_task);
+#endif
+
+			task_deallocate(prev_task);
+		} else {
+			task_unlock(task);
+			task = (task_t) queue_next(&task->pset_tasks);
+		}
 	}
+
 	pset_unlock(pset);
 	pset_deallocate(pset);
-
-	if (prev_task != TASK_NULL)
-		task_deallocate(prev_task);
 }
 
+/* Also disabled in vm/vm_pageout.c */
 boolean_t task_collect_allowed = FALSE;
 unsigned task_collect_last_tick = 0;
 unsigned task_collect_max_rate = 0;		/* in ticks */
@@ -1648,7 +1631,7 @@ consider_task_collect(void)
 	 */
 
 	if (task_collect_max_rate == 0)
-		task_collect_max_rate = (2 << SCHED_TICK_SHIFT);
+		task_collect_max_rate = (1 << SCHED_TICK_SHIFT) + 1;
 
 	if (task_collect_allowed &&
 	    (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
@@ -1713,21 +1696,6 @@ task_synchronizer_destroy_all(task_t task)
 	}
 }
 
-void
-task_subsystem_destroy_all(task_t task)
-{
-	subsystem_t	subsystem;
-
-	/*
-	 *	Destroy owned subsystems
-	 */
-
-	while (!queue_empty(&task->subsystem_list)) {
-		subsystem = (subsystem_t) queue_first(&task->subsystem_list);
-		subsystem_deallocate(subsystem);
-	}
-}
-
 /*
 *	task_set_port_space:
 *
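The rewritten task_collect_scan() above only takes a reference on tasks whose ref_count is still positive, doing so under the task lock so a dying task is never resurrected, and (under MACH_HOST) freezing the task so the pset_tasks link stays valid across the unlocked pmap_collect(). A user-space sketch of that iteration discipline, with a singly linked list and pthreads standing in for the pset queue and kernel locks:

#include <pthread.h>

struct task_sketch {
	pthread_mutex_t lock;
	int ref_count;
	struct task_sketch *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void collect(struct task_sketch *t)
{
	(void)t;	/* expensive work, e.g. pmap_collect() in the real code */
}

static void deallocate(struct task_sketch *t)
{
	pthread_mutex_lock(&t->lock);
	t->ref_count--;	/* real code would tear down at zero */
	pthread_mutex_unlock(&t->lock);
}

void collect_scan_sketch(struct task_sketch *head)
{
	struct task_sketch *task, *prev;

	pthread_mutex_lock(&list_lock);
	for (task = head; task != NULL; ) {
		pthread_mutex_lock(&task->lock);
		if (task->ref_count == 0) {
			/* dying task: never resurrect it, just step past */
			pthread_mutex_unlock(&task->lock);
			task = task->next;
			continue;
		}
		task->ref_count++;	/* keep it alive while unlocked */
		pthread_mutex_unlock(&task->lock);
		pthread_mutex_unlock(&list_lock);

		collect(task);		/* expensive work, done unlocked */

		pthread_mutex_lock(&list_lock);
		prev = task;
		/* the diff's task_freeze() dance exists to keep this
		 * next link valid across the unlocked section */
		task = task->next;
		deallocate(prev);
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct task_sketch b = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };
	struct task_sketch a = { PTHREAD_MUTEX_INITIALIZER, 1, &b };

	collect_scan_sketch(&a);
	return 0;
}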