#undef thread_should_halt
#undef ipc_port_release
-#undef thread_ast_set
-#undef thread_ast_clear
-
-decl_simple_lock_data(extern,reaper_lock)
-extern queue_head_t reaper_queue;
/* BSD KERN COMPONENT INTERFACE */
-vm_address_t bsd_init_task = 0;
+task_t bsd_init_task = TASK_NULL;
char init_task_failure_data[1024];
+extern unsigned int not_in_kdp; /* Skip acquiring locks if we're in kdp */
thread_act_t get_firstthread(task_t);
vm_map_t get_task_map(task_t);
ipc_space_t get_task_ipcspace(task_t);
boolean_t is_kerneltask(task_t);
boolean_t is_thread_idle(thread_t);
-boolean_t is_thread_running(thread_t);
-thread_shuttle_t getshuttle_thread( thread_act_t);
-thread_act_t getact_thread( thread_shuttle_t);
vm_offset_t get_map_min( vm_map_t);
vm_offset_t get_map_max( vm_map_t);
int get_task_userstop(task_t);
int get_thread_userstop(thread_act_t);
-int inc_task_userstop(task_t);
-boolean_t thread_should_abort(thread_shuttle_t);
+boolean_t thread_should_abort(thread_t);
boolean_t current_thread_aborted(void);
void task_act_iterate_wth_args(task_t, void(*)(thread_act_t, void *), void *);
void ipc_port_release(ipc_port_t);
-void thread_ast_set(thread_act_t, ast_t);
-void thread_ast_clear(thread_act_t, ast_t);
boolean_t is_thread_active(thread_t);
-event_t get_thread_waitevent(thread_t);
kern_return_t get_thread_waitresult(thread_t);
vm_size_t get_vmmap_size(vm_map_t);
int get_vmmap_entries(vm_map_t);
int get_task_numacts(task_t);
thread_act_t get_firstthread(task_t task);
-kern_return_t get_signalact(task_t , thread_act_t *, thread_t *, int);
-
-kern_return_t bsd_refvm_object(vm_object_t object);
-
+kern_return_t get_signalact(task_t , thread_act_t *, int);
+void astbsd_on(void);
/*
*
{
thread_act_t thr_act;
- thr_act = (thread_act_t)queue_first(&task->thr_acts);
- if (thr_act == (thread_act_t)&task->thr_acts)
+ thr_act = (thread_act_t)queue_first(&task->threads);
+ if (queue_end(&task->threads, (queue_entry_t)thr_act))
thr_act = THR_ACT_NULL;
if (!task->active)
return(THR_ACT_NULL);
return(thr_act);
}
-kern_return_t get_signalact(task_t task,thread_act_t * thact, thread_t * thshut, int setast)
+kern_return_t get_signalact(task_t task,thread_act_t * thact, int setast)
{
thread_act_t inc;
}
thr_act = THR_ACT_NULL;
- for (inc = (thread_act_t)queue_first(&task->thr_acts);
- inc != (thread_act_t)&task->thr_acts;
+ for (inc = (thread_act_t)queue_first(&task->threads);
+ !queue_end(&task->threads, (queue_entry_t)inc);
inc = ninc) {
th = act_lock_thread(inc);
- if ((inc->active) && ((th->state & TH_ABORT) != TH_ABORT)) {
+ if ((inc->active) &&
+ ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
thr_act = inc;
break;
}
act_unlock_thread(inc);
- ninc = (thread_act_t)queue_next(&inc->thr_acts);
+ ninc = (thread_act_t)queue_next(&inc->task_threads);
}
out:
if (thact)
*thact = thr_act;
-
- if (thshut)
- *thshut = thr_act? thr_act->thread: THREAD_NULL ;
if (thr_act) {
- if (setast) {
- thread_ast_set(thr_act, AST_BSD);
- if (current_act() == thr_act)
- ast_on(AST_BSD);
- }
+ if (setast)
+ act_set_astbsd(thr_act);
+
act_unlock_thread(thr_act);
}
task_unlock(task);
return(KERN_FAILURE);
}
+
+kern_return_t check_actforsig(task_t task, thread_act_t thact, int setast)
+{
+
+ thread_act_t inc;
+ thread_act_t ninc;
+ thread_act_t thr_act;
+ thread_t th;
+ int found=0;
+
+ task_lock(task);
+ if (!task->active) {
+ task_unlock(task);
+ return(KERN_FAILURE);
+ }
+
+ thr_act = THR_ACT_NULL;
+ for (inc = (thread_act_t)queue_first(&task->threads);
+ !queue_end(&task->threads, (queue_entry_t)inc);
+ inc = ninc) {
+
+ if (inc != thact) {
+ ninc = (thread_act_t)queue_next(&inc->task_threads);
+ continue;
+ }
+ th = act_lock_thread(inc);
+ if ((inc->active) &&
+ ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) != TH_ABORT)) {
+ found = 1;
+ thr_act = inc;
+ break;
+ }
+ act_unlock_thread(inc);
+		/* only the requested activation is examined; stop scanning */
+ break;
+ }
+out:
+ if (found) {
+ if (setast)
+ act_set_astbsd(thr_act);
+
+ act_unlock_thread(thr_act);
+ }
+ task_unlock(task);
+
+ if (found)
+ return(KERN_SUCCESS);
+ else
+ return(KERN_FAILURE);
+}
+
/*
*
*/
int get_task_numacts(task_t t)
{
- return(t->thr_act_count);
+ return(t->thread_count);
+}
+
+/* does this machine need 64bit register set for signal handler */
+int is_64signalregset(void)
+{
+ task_t t = current_task();
+ if(t->taskFeatures[0] & tf64BitData)
+ return(1);
+ else
+ return(0);
}
/*
- * Reset the current task's map by taking a reference
- * on the new map. The old map reference is returned.
+ * The caller donates a reference to the new map; the old map
+ * reference is returned to the caller.
*/
vm_map_t
swap_task_map(task_t task,vm_map_t map)
{
+ thread_act_t act = current_act();
vm_map_t old_map;
- vm_map_reference(map);
+ if (task != act->task)
+ panic("swap_task_map");
+
task_lock(task);
old_map = task->map;
- task->map = map;
+ act->map = task->map = map;
task_unlock(task);
return old_map;
}
-/*
- * Reset the current act map.
- * The caller donates us a reference to the new map
- * and we donote our reference to the old map to him.
- */
vm_map_t
swap_act_map(thread_act_t thr_act,vm_map_t map)
{
- vm_map_t old_map;
-
- act_lock(thr_act);
- old_map = thr_act->map;
- thr_act->map = map;
- act_unlock(thr_act);
- return old_map;
+ panic("swap_act_map");
}
/*
/*
*
*/
-thread_shuttle_t
+thread_t
getshuttle_thread(
- thread_act_t th)
+ thread_t th)
{
-#ifdef DEBUG
- assert(th->thread);
-#endif
- return(th->thread);
+ return(th);
}
/*
*
*/
-thread_act_t
+thread_t
getact_thread(
- thread_shuttle_t th)
+ thread_t th)
{
-#ifdef DEBUG
- assert(th->top_act);
-#endif
- return(th->top_act);
+ return(th);
}
/*
int total_entries = 0;
vm_map_entry_t entry;
- vm_map_lock(map);
+ if (not_in_kdp)
+ vm_map_lock(map);
entry = vm_map_first_entry(map);
while((entry != vm_map_to_entry(map)) && (entry->vme_start < start)) {
entry = entry->vme_next;
}
entry = entry->vme_next;
}
- vm_map_unlock(map);
+ if (not_in_kdp)
+ vm_map_unlock(map);
return(total_entries);
}
int total_entries = 0;
vm_map_entry_t entry;
- vm_map_lock(map);
+ if (not_in_kdp)
+ vm_map_lock(map);
entry = vm_map_first_entry(map);
while(entry != vm_map_to_entry(map)) {
}
entry = entry->vme_next;
}
- vm_map_unlock(map);
+ if (not_in_kdp)
+ vm_map_unlock(map);
return(total_entries);
}
return(th->user_stop_count);
}
-/*
- *
- */
-int
-inc_task_userstop(
- task_t task)
-{
- int i=0;
- i = task->user_stop_count;
- task->user_stop_count++;
- return(i);
-}
-
-
/*
*
*/
boolean_t
thread_should_abort(
- thread_shuttle_t th)
+ thread_t th)
{
- return( (!th->top_act || !th->top_act->active ||
- th->state & TH_ABORT));
+ return(!th->top_act ||
+ (th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT);
}
/*
- *
+ * This routine is like thread_should_abort() above. It checks to
+ * see if the current thread is aborted. But unlike above, it also
+ * checks to see if thread is safely aborted. If so, it returns
+ * that fact, and clears the condition (safe aborts only should
+ * have a single effect, and a poll of the abort status
+ * qualifies).
*/
boolean_t
current_thread_aborted (
void)
{
thread_t th = current_thread();
-
- return(!th->top_act || (th->state & TH_ABORT));
+ spl_t s;
+
+ if (!th->top_act ||
+ ((th->state & (TH_ABORT|TH_ABORT_SAFELY)) == TH_ABORT &&
+ th->interrupt_level != THREAD_UNINT))
+ return (TRUE);
+ if (th->state & TH_ABORT_SAFELY) {
+ s = splsched();
+ thread_lock(th);
+ if (th->state & TH_ABORT_SAFELY)
+ th->state &= ~(TH_ABORT|TH_ABORT_SAFELY);
+ thread_unlock(th);
+ splx(s);
+ }
+ return FALSE;
}
/*
thread_act_t inc, ninc;
task_lock(task);
- for (inc = (thread_act_t)queue_first(&task->thr_acts);
- inc != (thread_act_t)&task->thr_acts;
+ for (inc = (thread_act_t)queue_first(&task->threads);
+ !queue_end(&task->threads, (queue_entry_t)inc);
inc = ninc) {
- ninc = (thread_act_t)queue_next(&inc->thr_acts);
+ ninc = (thread_act_t)queue_next(&inc->task_threads);
(void) (*func_callback)(inc, func_arg);
}
task_unlock(task);
ipc_object_release(&(port)->ip_object);
}
-void
-thread_ast_set(
- thread_act_t act,
- ast_t reason)
-{
- act->ast |= reason;
-}
-void
-thread_ast_clear(
- thread_act_t act,
- ast_t reason)
-{
- act->ast &= ~(reason);
-}
-
boolean_t
is_thread_active(
- thread_shuttle_t th)
+ thread_t th)
{
return(th->active);
}
-event_t
-get_thread_waitevent(
- thread_shuttle_t th)
-{
- return(th->wait_event);
-}
-
kern_return_t
get_thread_waitresult(
- thread_shuttle_t th)
+ thread_t th)
{
return(th->wait_result);
}
-kern_return_t
-bsd_refvm_object(vm_object_t object)
+void
+astbsd_on(void)
{
- vm_object_reference(object);
- return(KERN_SUCCESS);
-}
+ boolean_t reenable;
+ reenable = ml_set_interrupts_enabled(FALSE);
+ ast_on_fast(AST_BSD);
+ (void)ml_set_interrupts_enabled(reenable);
+}