X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..cc8bc92ae4a8e9f1a1ab61bf83d34ad8150b3405:/osfmk/kern/affinity.c?ds=inline

diff --git a/osfmk/kern/affinity.c b/osfmk/kern/affinity.c
index 1b319c753..bcce9af9b 100644
--- a/osfmk/kern/affinity.c
+++ b/osfmk/kern/affinity.c
@@ -59,8 +59,8 @@
 #endif
 
 struct affinity_space {
-	mutex_t			aspc_lock;
-	uint32_t		aspc_task_count;
+	lck_mtx_t		aspc_lock;
+	uint32_t		aspc_task_count;
 	queue_head_t		aspc_affinities;
 };
 typedef struct affinity_space	*affinity_space_t;
@@ -79,9 +79,18 @@ static affinity_set_t affinity_set_remove(affinity_set_t aset, thread_t thread);
  *	kern.affinity_sets_enabled - disables hinting if cleared
  *	kern.affinity_sets_mapping - controls cache distribution policy
  *	See bsd/kern_sysctl.c
+ *
+ * Affinity sets are not used on embedded, which typically only
+ * has a single pset, and last-processor affinity is
+ * more important than pset affinity.
  */
+#if CONFIG_EMBEDDED
+boolean_t	affinity_sets_enabled = FALSE;
+int		affinity_sets_mapping = 0;
+#else /* !CONFIG_EMBEDDED */
 boolean_t	affinity_sets_enabled = TRUE;
 int		affinity_sets_mapping = 1;
+#endif /* !CONFIG_EMBEDDED */
 
 boolean_t
 thread_affinity_is_supported(void)
@@ -149,7 +158,7 @@ thread_affinity_set(thread_t thread, uint32_t tag)
 		return KERN_TERMINATED;
 	}
 
-	mutex_lock(&aspc->aspc_lock);
+	lck_mtx_lock(&aspc->aspc_lock);
 	aset = thread->affinity_set;
 	if (aset != NULL) {
 		/*
@@ -179,7 +188,7 @@
 	} else {
 		aset = affinity_set_alloc();
 		if (aset == NULL) {
-			mutex_unlock(&aspc->aspc_lock);
+			lck_mtx_unlock(&aspc->aspc_lock);
 			thread_mtx_unlock(thread);
 			return KERN_RESOURCE_SHORTAGE;
 		}
@@ -192,7 +201,7 @@
 		affinity_set_add(aset, thread);
 	}
 
-	mutex_unlock(&aspc->aspc_lock);
+	lck_mtx_unlock(&aspc->aspc_lock);
 	thread_mtx_unlock(thread);
 
 	/*
@@ -225,10 +234,10 @@
 	 * Bump the task reference count on the shared namespace and
 	 * give it to the child.
 	 */
-	mutex_lock(&aspc->aspc_lock);
+	lck_mtx_lock(&aspc->aspc_lock);
 	aspc->aspc_task_count++;
 	child_task->affinity_space = aspc;
-	mutex_unlock(&aspc->aspc_lock);
+	lck_mtx_unlock(&aspc->aspc_lock);
 }
 
 /*
@@ -243,19 +252,21 @@
 	DBG("task_affinity_deallocate(%p) aspc %p task_count %d\n",
 		task, aspc, aspc->aspc_task_count);
 
-	mutex_lock(&aspc->aspc_lock);
+	lck_mtx_lock(&aspc->aspc_lock);
 	if (--(aspc->aspc_task_count) == 0) {
 		assert(queue_empty(&aspc->aspc_affinities));
-		mutex_unlock(&aspc->aspc_lock);
+		lck_mtx_unlock(&aspc->aspc_lock);
 		affinity_space_free(aspc);
 	} else {
-		mutex_unlock(&aspc->aspc_lock);
+		lck_mtx_unlock(&aspc->aspc_lock);
 	}
 }
 
 /*
  * task_affinity_info()
  *	Return affinity tag info (number, min, max) for the task.
+ *
+ *	Conditions: task is locked.
  */
 kern_return_t
 task_affinity_info(
@@ -274,10 +285,9 @@ task_affinity_info(
 	info->min = THREAD_AFFINITY_TAG_NULL;
 	info->max = THREAD_AFFINITY_TAG_NULL;
 
-	task_lock(task);
 	aspc = task->affinity_space;
 	if (aspc) {
-		mutex_lock(&aspc->aspc_lock);
+		lck_mtx_lock(&aspc->aspc_lock);
 		queue_iterate(&aspc->aspc_affinities,
 			      aset, affinity_set_t, aset_affinities) {
 			info->set_count++;
@@ -289,9 +299,8 @@ task_affinity_info(
 				info->max = aset->aset_tag;
 		}
 		info->task_count = aspc->aspc_task_count;
-		mutex_unlock(&aspc->aspc_lock);
+		lck_mtx_unlock(&aspc->aspc_lock);
 	}
-	task_unlock(task);
 
 	return KERN_SUCCESS;
 }
@@ -318,9 +327,9 @@ thread_affinity_dup(thread_t parent, thread_t child)
 	assert(aspc == parent->task->affinity_space);
 	assert(aspc == child->task->affinity_space);
 
-	mutex_lock(&aspc->aspc_lock);
+	lck_mtx_lock(&aspc->aspc_lock);
 	affinity_set_add(aset, child);
-	mutex_unlock(&aspc->aspc_lock);
+	lck_mtx_unlock(&aspc->aspc_lock);
 
 	thread_mtx_unlock(parent);
 }
@@ -339,11 +348,23 @@
 	DBG("thread_affinity_terminate(%p)\n", thread);
 
 	aspc = aset->aset_space;
-	mutex_lock(&aspc->aspc_lock);
+	lck_mtx_lock(&aspc->aspc_lock);
 	if (affinity_set_remove(aset, thread)) {
 		affinity_set_free(aset);
 	}
-	mutex_unlock(&aspc->aspc_lock);
+	lck_mtx_unlock(&aspc->aspc_lock);
+}
+
+/*
+ * thread_affinity_exec()
+ *	Called from execve() to cancel any current affinity - a new image implies
+ *	the calling thread terminates any expressed or inherited affinity.
+ */
+void
+thread_affinity_exec(thread_t thread)
+{
+	if (thread->affinity_set != AFFINITY_SET_NULL)
+		thread_affinity_terminate(thread);
 }
 
 /*
@@ -358,7 +379,7 @@ affinity_space_alloc(void)
 	if (aspc == NULL)
 		return NULL;
 
-	mutex_init(&aspc->aspc_lock, 0);
+	lck_mtx_init(&aspc->aspc_lock, &task_lck_grp, &task_lck_attr);
 	queue_init(&aspc->aspc_affinities);
 	aspc->aspc_task_count = 1;
 
@@ -374,6 +395,7 @@ affinity_space_free(affinity_space_t aspc)
 {
 	assert(queue_empty(&aspc->aspc_affinities));
 
+	lck_mtx_destroy(&aspc->aspc_lock, &task_lck_grp);
 	DBG("affinity_space_free(%p)\n", aspc);
 	kfree(aspc, sizeof(struct affinity_space));
 }
@@ -516,7 +538,10 @@ affinity_set_place(affinity_space_t aspc, affinity_set_t new_aset)
 	 */
 	queue_iterate(&aspc->aspc_affinities,
 		      aset, affinity_set_t, aset_affinities) {
-		set_occupancy[aset->aset_num]++;
+		if(aset->aset_num < num_cpu_asets)
+			set_occupancy[aset->aset_num]++;
+		else
+			panic("aset_num = %d in %s\n", aset->aset_num, __FUNCTION__);
 	}
 
 	/*
@@ -529,7 +554,7 @@
 	if (affinity_sets_mapping == 0)
 		i_least_occupied = 0;
 	else
-		i_least_occupied = ((unsigned int)aspc % 127) % num_cpu_asets;
+		i_least_occupied = (unsigned int)(((uintptr_t)aspc % 127) % num_cpu_asets);
 	for (i = 0; i < num_cpu_asets; i++) {
 		unsigned int j = (i_least_occupied + i) % num_cpu_asets;
 		if (set_occupancy[j] == 0) {
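Note (editor, not part of the diff): thread_affinity_set(), whose locking changes above, is the kernel-side handler for the THREAD_AFFINITY_POLICY Mach thread policy. A minimal user-space sketch of how a thread tags itself, assuming only the public <mach/thread_policy.h> interface; the helper name set_affinity_tag is hypothetical:

#include <mach/mach.h>
#include <mach/thread_policy.h>

/*
 * Tag the calling thread with an affinity hint. Threads sharing the
 * same non-zero tag may be co-located by the scheduler; a tag of
 * THREAD_AFFINITY_TAG_NULL (0) clears any existing affinity.
 */
static kern_return_t
set_affinity_tag(integer_t tag)
{
	thread_affinity_policy_data_t	policy = { .affinity_tag = tag };
	mach_port_t			self = mach_thread_self();
	kern_return_t			kr;

	kr = thread_policy_set(self, THREAD_AFFINITY_POLICY,
	    (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT);
	mach_port_deallocate(mach_task_self(), self);
	return kr;
}

On a CONFIG_EMBEDDED kernel the request still succeeds, but per the comment added above, affinity_sets_enabled is FALSE there, so the tag is recorded without influencing placement.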
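A second note on the lock conversion itself: the file-wide mutex_t to lck_mtx_t change moves this code to xnu's lock-group API. Unlike the old mutex_init(&m, 0), an lck_mtx_t is initialized against an lck_grp_t/lck_attr_t pair (the diff reuses the existing task_lck_grp and task_lck_attr) and must be torn down with lck_mtx_destroy() before its memory is freed, which is why affinity_space_free() gains that call. A minimal kernel-side sketch of the lifecycle, assuming a module-private group rather than task_lck_grp; the my_* names are hypothetical:

#include <kern/locks.h>

static lck_grp_t	*my_grp;
static lck_mtx_t	my_lock;

static void
my_locks_init(void)
{
	/* A group labels related locks for lock statistics and debugging. */
	my_grp = lck_grp_alloc_init("com.example.mymodule", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&my_lock, my_grp, LCK_ATTR_NULL);
}

static void
my_locks_fini(void)
{
	/* Destroy against the same group used at init, then free the group. */
	lck_mtx_destroy(&my_lock, my_grp);
	lck_grp_free(my_grp);
}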