X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..143cc14e17b26a90f1f4060725df7ea635161581:/osfmk/i386/pcb.c

diff --git a/osfmk/i386/pcb.c b/osfmk/i386/pcb.c
index b75cf689c..c261f4214 100644
--- a/osfmk/i386/pcb.c
+++ b/osfmk/i386/pcb.c
@@ -58,7 +58,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
@@ -360,6 +359,8 @@ switch_context(
 	 * Load the rest of the user state for the new thread
 	 */
 	act_machine_switch_pcb(new_act);
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_SCHED) | DBG_FUNC_NONE,
+		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
 
 	return(Switch_context(old, continuation, new));
 }
@@ -1052,7 +1053,7 @@ thread_set_syscall_return(
 kern_return_t
 thread_machine_create(thread_t thread, thread_act_t thr_act, void (*start_pos)(thread_t))
 {
-	MachineThrAct_t mact = &thr_act->mact;
+	MachineThrAct_t	mact = &thr_act->mact;
 
 #if	MACH_ASSERT
 	if (watchacts & WA_PCB)
@@ -1060,23 +1061,21 @@ thread_machine_create(thread_t thread, thread_act_t thr_act, void (*start_pos)(t
 		thread, thr_act, start_pos);
 #endif	/* MACH_ASSERT */
 
-	assert(thread != NULL);
-	assert(thr_act != NULL);
-
-	/*
-	 * Allocate a kernel stack per shuttle
-	 */
-	thread->kernel_stack = (int)stack_alloc(thread,start_pos);
-	assert(thread->kernel_stack != 0);
+	assert(thread != NULL);
+	assert(thr_act != NULL);
 
-	/*
-	 * Point top of kernel stack to user`s registers.
-	 */
-	STACK_IEL(thread->kernel_stack)->saved_state = &mact->pcb->iss;
+	/*
+	 * Allocate a kernel stack per shuttle
+	 */
+	thread->kernel_stack = (int)stack_alloc(thread,start_pos);
+	thread->state &= ~TH_STACK_HANDOFF;
+	assert(thread->kernel_stack != 0);
 
 	/*
-	 * Utah code fiddles with pcb here - (we don't need to)
+	 * Point top of kernel stack to user`s registers.
 	 */
+	STACK_IEL(thread->kernel_stack)->saved_state = &mact->pcb->iss;
+
 	return(KERN_SUCCESS);
 }
@@ -1131,12 +1130,6 @@ void act_machine_init()
 
 	/* Good to verify this once */
 	assert( THREAD_MACHINE_STATE_MAX <= THREAD_STATE_MAX );
-
-	/*
-	 * If we start using kernel activations,
-	 * would normally create kernel_thread_pool here,
-	 * populating it from the act_zone
-	 */
 }
 
 kern_return_t
@@ -1206,67 +1199,6 @@ act_machine_return(int code)
 	assert( code == KERN_TERMINATED );
 	assert( thr_act );
 
-#ifdef CALLOUT_RPC_MODEL
-	/*
-	 * JMM - RPC is not going to be done with a callout/direct-
-	 * stack manipulation mechanism. Instead we will return/
-	 * unwind normally as if from a continuation.
-	 */
-	act_lock_thread(thr_act);
-
-	if (thr_act->thread->top_act != thr_act) {
-		/*
-		 * this is not the top activation;
-		 * if possible, we should clone the shuttle so that
-		 * both the root RPC-chain and the soon-to-be-orphaned
-		 * RPC-chain have shuttles
-		 *
-		 * JMM - Cloning shuttles isn't the right approach. We
-		 * need to alert the higher up activations to return our
-		 * shuttle (because scheduling attributes may TRUELY be
-		 * unique and not cloneable.
-		 */
-		act_unlock_thread(thr_act);
-		panic("act_machine_return: ORPHAN CASE NOT YET IMPLEMENTED");
-	}
-
-	if (thr_act->lower != THR_ACT_NULL) {
-		thread_t	cur_thread = current_thread();
-		thread_act_t	cur_act;
-		struct ipc_port	*iplock;
-
-		/* send it an appropriate return code */
-		thr_act->lower->alerts |= SERVER_TERMINATED;
-		install_special_handler(thr_act->lower);
-
-		/* Return to previous act with error code */
-		act_locked_act_reference(thr_act);	/* keep it around */
-		act_switch_swapcheck(cur_thread, (ipc_port_t)0);
-		(void) switch_act(THR_ACT_NULL);
-		/* assert(thr_act->ref_count == 0); */	/* XXX */
-		cur_act = cur_thread->top_act;
-		MACH_RPC_RET(cur_act) = KERN_RPC_SERVER_TERMINATED;
-
-		machine_kernel_stack_init(cur_thread, mach_rpc_return_error);
-		/*
-		 * The following unlocks must be done separately since fields
-		 * used by `act_unlock_thread()' have been cleared, meaning
-		 * that it would not release all of the appropriate locks.
-		 */
-		iplock = thr_act->pool_port;	/* remember for unlock call */
-		rpc_unlock(cur_thread);
-		if (iplock) ip_unlock(iplock);	/* must be done separately */
-		act_unlock(thr_act);
-		act_deallocate(thr_act);	/* free it */
-		Load_context(cur_thread);
-		/*NOTREACHED*/
-
-		panic("act_machine_return: TALKING ZOMBIE! (2)");
-	}
-	act_unlock_thread(thr_act);
-
-#endif /* CALLOUT_RPC_MODEL */
-
 	/* This is the only activation attached to the shuttle... */
 	/* terminate the entire thread (shuttle plus activation) */
 
@@ -1343,13 +1275,6 @@ dump_act(thread_act_t thr_act)
 		thr_act->thread, thr_act->thread ? thr_act->thread->ref_count:0,
 		thr_act->task, thr_act->task ? thr_act->task->ref_count : 0);
 
-	if (thr_act->pool_port) {
-		thread_pool_t actpp = &thr_act->pool_port->ip_thread_pool;
-		printf("\tpool(acts_p=%x, waiting=%d) pool_next %x\n",
-			actpp->thr_acts, actpp->waiting, thr_act->thread_pool_next);
-	}else
-		printf("\tno thread_pool\n");
-
 	printf("\talerts=%x mask=%x susp=%d user_stop=%d active=%x ast=%x\n",
 		thr_act->alerts, thr_act->alert_mask,
 		thr_act->suspend_count, thr_act->user_stop_count,
@@ -1418,7 +1343,6 @@ stack_attach(struct thread_shuttle *thread,
 	     void (*start_pos)(thread_t))
 {
 	struct i386_kernel_state *statep;
-	thread_act_t thr_act;
 
 	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_ATTACH),
 		     thread, thread->priority,
@@ -1432,8 +1356,8 @@ stack_attach(struct thread_shuttle *thread,
 	statep->k_eip = (unsigned long) Thread_continue;
 	statep->k_ebx = (unsigned long) start_pos;
 	statep->k_esp = (unsigned long) STACK_IEL(stack);
-
-	STACK_IEL(stack)->saved_state = &thr_act->mact.pcb->iss;
+	assert(thread->top_act);
+	STACK_IEL(stack)->saved_state = &thread->top_act->mact.pcb->iss;
 
 	return;
 }
@@ -1465,9 +1389,72 @@ stack_handoff(thread_t old,
 	if (old->top_act->task->map->pmap != new_pmap)
 		PMAP_ACTIVATE_MAP(new->top_act->task->map, cpu_number());
 
+	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_STACK_HANDOFF) | DBG_FUNC_NONE,
+		     (int)old, (int)new, old->sched_pri, new->sched_pri, 0);
+
 	thread_machine_set_current(new);
 
 	active_stacks[cpu_number()] = new->kernel_stack;
 
 	return;
 }
+
+struct i386_act_context {
+	struct i386_saved_state ss;
+	struct i386_float_state fs;
+};
+
+void *
+act_thread_csave(void)
+{
+struct i386_act_context *ic;
+kern_return_t kret;
+int val;
+
+	ic = (struct i386_act_context *)kalloc(sizeof(struct i386_act_context));
+
+	if (ic == (struct i386_act_context *)NULL)
+		return((void *)0);
+
+	val = i386_SAVED_STATE_COUNT;
+	kret = act_machine_get_state(current_act(), i386_SAVED_STATE, &ic->ss, &val);
+	if (kret != KERN_SUCCESS) {
+		kfree((vm_offset_t)ic,sizeof(struct i386_act_context));
+		return((void *)0);
+	}
+	val = i386_FLOAT_STATE_COUNT;
+	kret = act_machine_get_state(current_act(), i386_FLOAT_STATE, &ic->fs, &val);
+	if (kret != KERN_SUCCESS) {
+		kfree((vm_offset_t)ic,sizeof(struct i386_act_context));
+		return((void *)0);
+	}
+	return(ic);
+}
+void
+act_thread_catt(void *ctx)
+{
+struct i386_act_context *ic;
+kern_return_t kret;
+int val;
+
+	ic = (struct i386_act_context *)ctx;
+
+	if (ic == (struct i386_act_context *)NULL)
+		return;
+
+	kret = act_machine_set_state(current_act(), i386_SAVED_STATE, &ic->ss, i386_SAVED_STATE_COUNT);
+	if (kret != KERN_SUCCESS)
+		goto out;
+
+	kret = act_machine_set_state(current_act(), i386_FLOAT_STATE, &ic->fs, i386_FLOAT_STATE_COUNT);
+	if (kret != KERN_SUCCESS)
+		goto out;
+out:
+	kfree((vm_offset_t)ic,sizeof(struct i386_act_context));
+}
+
+void act_thread_cfree(void *ctx)
+{
+	kfree((vm_offset_t)ctx,sizeof(struct i386_act_context));
+}
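
The last hunk adds a small save/reapply/discard API for the current activation's i386 register and floating-point state (act_thread_csave, act_thread_catt, act_thread_cfree). The following is a minimal usage sketch and is not part of the diff: the helper function and its call site are hypothetical; only the three prototypes and the behavior visible in the hunk above are assumed.

/* Hypothetical illustration -- not part of the diff above. */
extern void	*act_thread_csave(void);	/* snapshot the current act's i386 saved state + FPU state */
extern void	act_thread_catt(void *ctx);	/* reapply a snapshot to the current act, then free it */
extern void	act_thread_cfree(void *ctx);	/* discard a snapshot without applying it */

static void
demo_act_context_roundtrip(int reapply)		/* hypothetical caller */
{
	void *ctx;

	ctx = act_thread_csave();		/* NULL if kalloc() or act_machine_get_state() failed */
	if (ctx == (void *)0)
		return;				/* nothing was saved, so nothing to free */

	if (reapply)
		act_thread_catt(ctx);		/* pushes i386_SAVED_STATE and i386_FLOAT_STATE back, then kfree()s ctx */
	else
		act_thread_cfree(ctx);		/* just kfree()s the i386_act_context */
}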