/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
*/
-#include <mach/rpc.h>
+#include <mach/mach_types.h>
+#include <mach/shared_memory_server.h>
#include <vm/task_working_set.h>
+#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/sched.h>
extern unsigned sched_tick;
+extern zone_t lsf_zone;
+
+int startup_miss = 0;
+
+/* declarations for internal use only routines */
+
+tws_startup_t
+tws_create_startup_list(
+ tws_hash_t tws);
+
+unsigned int
+tws_startup_list_lookup(
+ tws_startup_t startup,
+ vm_offset_t addr);
+
+kern_return_t
+tws_internal_startup_send(
+ tws_hash_t tws);
+
+void
+tws_traverse_address_hash_list (
+ tws_hash_t tws,
+ unsigned int index,
+ vm_offset_t page_addr,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_map_t map,
+ tws_hash_ptr_t *target_ele,
+ tws_hash_ptr_t **previous_ptr,
+ tws_hash_ptr_t **free_list,
+ unsigned int exclusive_addr);
+
+void
+tws_traverse_object_hash_list (
+ tws_hash_t tws,
+ unsigned int index,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ unsigned int page_mask,
+ tws_hash_ptr_t *target_ele,
+ tws_hash_ptr_t **previous_ptr,
+ tws_hash_ptr_t **free_list);
+
+tws_hash_ptr_t
+new_addr_hash(
+ tws_hash_t tws,
+ unsigned int set,
+ unsigned int index);
+
+tws_hash_ptr_t
+new_obj_hash(
+ tws_hash_t tws,
+ unsigned int set,
+ unsigned int index);
+
+int
+tws_test_for_community(
+ tws_hash_t tws,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ unsigned int threshold,
+ unsigned int *page_mask);
/* Note: all of the routines below depend on the associated map lock for */
/* synchronization, the map lock will be on when the routines are called */
if(tws == (tws_hash_t)NULL)
return tws;
- if((tws->table[0] = (tws_hash_ele_t *)
- kalloc(sizeof(tws_hash_ele_t) * 2 * lines * rows))
+ if((tws->table[0] = (tws_hash_ptr_t *)
+ kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
== NULL) {
kfree((vm_offset_t)tws, sizeof(struct tws_hash));
return (tws_hash_t)NULL;
}
- if((tws->alt_table[0] = (tws_hash_ele_t *)
- kalloc(sizeof(tws_hash_ele_t) * 2 * lines * rows))
+ if((tws->table_ele[0] = (tws_hash_ptr_t)
+ kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
== NULL) {
-		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
-		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ele_t)
-			* 2 * lines * rows);
+		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
+			* lines * rows);
+		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
+ return (tws_hash_t)NULL;
+ }
+ if((tws->alt_ele[0] = (tws_hash_ptr_t)
+ kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
+ == NULL) {
+ kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
+ * lines * rows);
+ kfree((vm_offset_t)tws->table_ele[0],
+ sizeof(struct tws_hash_ptr)
+ * lines * rows);
+ kfree((vm_offset_t)tws, sizeof(struct tws_hash));
return (tws_hash_t)NULL;
}
if((tws->cache[0] = (struct tws_hash_line *)
kalloc(sizeof(struct tws_hash_line) * lines))
== NULL) {
+ kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
+ * lines * rows);
+ kfree((vm_offset_t)tws->table_ele[0],
+ sizeof(struct tws_hash_ptr)
+ * lines * rows);
+ kfree((vm_offset_t)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
+ * lines * rows);
kfree((vm_offset_t)tws, sizeof(struct tws_hash));
- kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ele_t)
- * 2 * lines * rows);
- kfree((vm_offset_t)tws->alt_table[0], sizeof(tws_hash_ele_t)
- * 2 * lines * rows);
return (tws_hash_t)NULL;
}
+ tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
+ tws->obj_free_count[0] = 0;
+ tws->addr_free_count[0] = 0;
/* most defaults are such that a bzero will initialize */
- bzero((char *)tws->table[0],sizeof(tws_hash_ele_t)
- * 2 * lines * rows);
- bzero((char *)tws->alt_table[0],sizeof(tws_hash_ele_t)
- * 2 * lines * rows);
+ bzero((char *)tws->table[0],sizeof(tws_hash_ptr_t)
+ * lines * rows);
+ bzero((char *)tws->table_ele[0],sizeof(struct tws_hash_ptr)
+ * lines * rows);
+ bzero((char *)tws->alt_ele[0],sizeof(struct tws_hash_ptr)
+ * lines * rows);
bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
* lines);
tws->current_line = 0;
tws->pageout_count = 0;
tws->line_count = 0;
+ tws->startup_cache = NULL;
+ tws->startup_name = NULL;
tws->number_of_lines = lines;
tws->number_of_elements = rows;
tws->expansion_count = 1;
boolean_t live)
{
struct tws_hash_ele *hash_ele;
+ struct tws_hash_ptr **trailer;
+ struct tws_hash_ptr **free_list;
+ tws_hash_ele_t addr_ele;
int index;
unsigned int i, j, k;
- int alt_index;
int dump_pmap;
int hash_loop;
}
}
hash_line->ele_count = 0;
- for (i=0; i<tws->number_of_elements; i++) {
- hash_loop = 0;
- hash_ele = &(hash_line->list[i]);
- if(hash_ele->object != 0) {
- vm_offset_t vaddr_off = 0;
- vm_object_offset_t local_off = 0;
- for (j = 0x1; j != 0; j = j<<1) {
- if(j & hash_ele->page_cache) {
- unsigned int alt_index;
- alt_index = alt_tws_hash(
- hash_ele->page_addr + vaddr_off,
- tws->number_of_elements,
- tws->number_of_lines);
- for(k = 0; k < tws->expansion_count; k++) {
- if(tws->alt_table[k][alt_index] == hash_ele) {
- tws->alt_table[k][alt_index] = 0;
- }
- }
- vaddr_off += PAGE_SIZE;
- }
- }
-
- if((hash_ele->map != NULL) && (live)) {
- vm_page_t p;
-
- for (j = 0x1; j != 0; j = j<<1) {
- if(j & hash_ele->page_cache) {
- p = vm_page_lookup(hash_ele->object,
+ for (i=0; i<tws->number_of_elements; i++) {
+ hash_loop = 0;
+ hash_ele = &(hash_line->list[i]);
+ if(hash_ele->object != 0) {
+
+ vm_object_offset_t local_off = 0;
+ tws_hash_ptr_t cache_ele;
+
+ index = alt_tws_hash(
+ hash_ele->page_addr & TWS_HASH_OFF_MASK,
+ tws->number_of_elements,
+ tws->number_of_lines);
+
+ tws_traverse_address_hash_list(tws, index,
+ hash_ele->page_addr, hash_ele->object,
+ hash_ele->offset, hash_ele->map,
+ &cache_ele, &trailer, &free_list, 0);
+ if(cache_ele != NULL) {
+ addr_ele = (tws_hash_ele_t)((unsigned int)
+ (cache_ele->element) & ~TWS_ADDR_HASH);
+ if(addr_ele != hash_ele)
+ panic("tws_hash_line_clear:"
+ " out of sync\n");
+ cache_ele->element = 0;
+ *trailer = cache_ele->next;
+ cache_ele->next = *free_list;
+ *free_list = cache_ele;
+ }
+
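+			/* the entry may also be chained in the bucket for a
+			   block base up to 31 pages (0x1f000) below this
+			   address, so probe that bucket as well */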
+ index = alt_tws_hash(
+ (hash_ele->page_addr - 0x1f000)
+ & TWS_HASH_OFF_MASK,
+ tws->number_of_elements,
+ tws->number_of_lines);
+
+ tws_traverse_address_hash_list(tws, index,
+ hash_ele->page_addr, hash_ele->object,
+ hash_ele->offset, hash_ele->map,
+ &cache_ele, &trailer, &free_list, 0);
+
+ if(cache_ele != NULL) {
+ addr_ele = (tws_hash_ele_t)((unsigned int)
+ (cache_ele->element) & ~TWS_ADDR_HASH);
+ if(addr_ele != hash_ele)
+ panic("tws_hash_line_clear: "
+ "out of sync\n");
+ cache_ele->element = 0;
+ *trailer = cache_ele->next;
+ cache_ele->next = *free_list;
+ *free_list = cache_ele;
+ }
+
+
+ if((hash_ele->map != NULL) && (live)) {
+ vm_page_t p;
+
+ for (j = 0x1; j != 0; j = j<<1) {
+ if(j & hash_ele->page_cache) {
+ p = vm_page_lookup(hash_ele->object,
hash_ele->offset + local_off);
- if((p != NULL) && (p->wire_count == 0)
- && (dump_pmap == 1)) {
- pmap_remove_some_phys((pmap_t)
- vm_map_pmap(hash_ele->map),
- p->phys_addr);
+ if((p != NULL) && (p->wire_count == 0)
+ && (dump_pmap == 1)) {
+ pmap_remove_some_phys((pmap_t)
+ vm_map_pmap(
+ hash_ele->map),
+ p->phys_addr);
+ }
+ }
+ local_off += PAGE_SIZE_64;
}
- }
- local_off += PAGE_SIZE_64;
}
- }
- if(tws->style == TWS_HASH_STYLE_SIGNAL) {
- vm_object_deallocate(hash_ele->object);
- vm_map_deallocate(hash_ele->map);
- }
+
+ if(tws->style == TWS_HASH_STYLE_SIGNAL) {
+ vm_object_deallocate(hash_ele->object);
+ vm_map_deallocate(hash_ele->map);
+ }
- index = do_tws_hash(hash_ele->object, hash_ele->offset,
- tws->number_of_elements, tws->number_of_lines);
+ index = do_tws_hash(hash_ele->object, hash_ele->offset,
+ tws->number_of_elements,
+ tws->number_of_lines);
- while (hash_loop < TWS_MAX_REHASH) {
- for(k = 0; k < tws->expansion_count; k++) {
- if((tws->table[k][index] != 0) &&
- (tws->table[k][index] == hash_ele)) {
- tws->table[k][index] = 0;
- break;
- }
- if(k < tws->expansion_count)
- break;
+ tws_traverse_object_hash_list(tws,
+ index, hash_ele->object, hash_ele->offset,
+ 0xFFFFFFFF, &cache_ele, &trailer, &free_list);
+ if((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
+ cache_ele->element = 0;
+ *trailer = cache_ele->next;
+ cache_ele->next = *free_list;
+ *free_list = cache_ele;
}
- index += 17;
- if(index >= (2 *
- tws->number_of_lines
- * tws->number_of_elements)) {
- index = index - (2 *
- tws->number_of_lines
- * tws->number_of_elements);
- }
- hash_loop++;
+ hash_ele->object = 0;
}
- hash_ele->object = 0;
- hash_ele->page_cache = 0;
-
-if(newtest != 0) {
- if (hash_loop == TWS_MAX_REHASH) {
- panic("tws_hash_line_clear: Cache and Hash out of sync\n");
- }
-}
- }
}
}
vm_object_t object,
tws_hash_line_t *line)
{
- struct tws_hash_ele *hash_ele;
int index;
- int k;
int loop;
+ int set;
+ int ele_line;
+ vm_offset_t pagenum;
+ tws_hash_ptr_t cache_ele;
+ tws_hash_ptr_t *trailer;
+ tws_hash_ptr_t *free_list;
/* don't cache private objects */
if(object->private)
tws->lookup_count++;
if(tws->lookup_count == 0)
tws->insert_count = 0;
- while (loop < TWS_MAX_REHASH) {
- for(k=0; k<tws->expansion_count; k++) {
- if((hash_ele = tws->table[k][index]) != 0) {
- if((hash_ele->offset == (offset & TWS_HASH_OFF_MASK))
- && (hash_ele->object == object)) {
- vm_offset_t pagenum;
-
- pagenum = (vm_offset_t)
- (offset & TWS_INDEX_MASK);
- pagenum = pagenum >> 12;
-
- if((1<<pagenum) & hash_ele->page_cache) {
- int set;
- int ele_line;
+ if(tws->startup_name != NULL) {
+ int age_of_cache;
+ age_of_cache = ((sched_tick
+ - tws->time_of_creation) >> SCHED_TICK_SHIFT);
+ if (age_of_cache > 35) {
+ tws_unlock(tws);
+ return KERN_OPERATION_TIMED_OUT;
+ }
+ }
- set = hash_ele->line/tws->number_of_lines;
- ele_line = hash_ele->line - set;
- *line = &tws->cache[k][ele_line];
- tws_unlock(tws);
- return KERN_SUCCESS;
+ if(tws->lookup_count > (4 * tws->expansion_count
+ * tws->number_of_elements * tws->number_of_lines) &&
+ (tws->lookup_count > (2 * tws->insert_count))) {
+ if(tws->startup_cache) {
+ int age_of_cache;
+ age_of_cache = ((sched_tick
+ - tws->time_of_creation) >> SCHED_TICK_SHIFT);
+ if (age_of_cache > 60) {
+ tws_unlock(tws);
+ return KERN_OPERATION_TIMED_OUT;
}
- }
- }
- }
- index += 17;
- if(index >= (2 * tws->number_of_lines * tws->number_of_elements)) {
- index = index -
- (2 * tws->number_of_lines * tws->number_of_elements);
- }
- loop++;
+ }
}
+
+ pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
+ pagenum = pagenum >> 12;
+ pagenum = 1 << pagenum; /* get the appropriate page in 32 page block */
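+	/* page_cache is a 32-bit bitmap: bit n covers the nth 4K page
+	   of the 32 page block starting at the masked offset */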
+ tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
+ &cache_ele, &trailer, &free_list);
+ if(cache_ele != NULL) {
+ set = cache_ele->element->line/tws->number_of_lines;
+		ele_line = cache_ele->element->line
+			- (set * tws->number_of_lines);
+ *line = &tws->cache[set][ele_line];
+ tws_unlock(tws);
+ return KERN_SUCCESS;
+ }
+
tws_unlock(tws);
return KERN_FAILURE;
}
kern_return_t
tws_expand_working_set(
vm_offset_t tws,
- int line_count)
+ int line_count,
+ boolean_t dump_data)
{
tws_hash_t new_tws;
tws_hash_t old_tws;
}
tws_lock(old_tws);
- for(i = 0; i<old_tws->number_of_lines; i++) {
- for(j = 0; j<old_tws->number_of_elements; j++) {
- for(k = 0; k<old_tws->expansion_count; k++) {
- tws_hash_ele_t entry;
- vm_object_offset_t paddr;
- unsigned int page_index;
- entry = &old_tws->cache[k][i].list[j];
- if(entry->object != 0) {
+ if(!dump_data) {
+ for(i = 0; i<old_tws->number_of_lines; i++) {
+ for(j = 0; j<old_tws->number_of_elements; j++) {
+ for(k = 0; k<old_tws->expansion_count; k++) {
+ tws_hash_ele_t entry;
+ vm_object_offset_t paddr;
+ unsigned int page_index;
+ entry = &old_tws->cache[k][i].list[j];
+ if(entry->object != 0) {
paddr = 0;
for(page_index = 1; page_index != 0;
 						page_index = page_index << 1) {
paddr+=PAGE_SIZE;
}
- }
- }
+ }
+ }
+ }
}
}
temp.lookup_count = new_tws->lookup_count;
temp.insert_count = new_tws->insert_count;
for(i = 0; i<new_tws->expansion_count; i++) {
+ temp.obj_free_count[i] = new_tws->obj_free_count[i];
+ temp.addr_free_count[i] = new_tws->addr_free_count[i];
+ temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
temp.table[i] = new_tws->table[i];
- temp.alt_table[i] = new_tws->alt_table[i];
+ temp.table_ele[i] = new_tws->table_ele[i];
+ temp.alt_ele[i] = new_tws->alt_ele[i];
temp.cache[i] = new_tws->cache[i];
}
new_tws->lookup_count = old_tws->lookup_count;
new_tws->insert_count = old_tws->insert_count;
for(i = 0; i<old_tws->expansion_count; i++) {
+ new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
+ new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
+ new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
new_tws->table[i] = old_tws->table[i];
- new_tws->alt_table[i] = old_tws->alt_table[i];
+ new_tws->table_ele[i] = old_tws->table_ele[i];
+ new_tws->alt_ele[i] = old_tws->alt_ele[i];
new_tws->cache[i] = old_tws->cache[i];
}
old_tws->lookup_count = temp.lookup_count;
old_tws->insert_count = temp.insert_count;
for(i = 0; i<temp.expansion_count; i++) {
+		old_tws->obj_free_count[i] = temp.obj_free_count[i];
+		old_tws->addr_free_count[i] = temp.addr_free_count[i];
+ old_tws->free_hash_ele[i] = NULL;
old_tws->table[i] = temp.table[i];
- old_tws->alt_table[i] = temp.alt_table[i];
+ old_tws->table_ele[i] = temp.table_ele[i];
+ old_tws->alt_ele[i] = temp.alt_ele[i];
old_tws->cache[i] = temp.cache[i];
}
return KERN_SUCCESS;
}
+tws_hash_t test_tws = 0;
+
kern_return_t
tws_insert(
tws_hash_t tws,
vm_map_t map)
{
queue_t bucket;
- struct tws_hash_ele *new_entry;
unsigned int index;
unsigned int alt_index;
+ unsigned int index_enum[2];
unsigned int ele_index;
- unsigned int page_index;
+ tws_hash_ptr_t cache_ele;
+ tws_hash_ptr_t obj_ele = NULL;
+ tws_hash_ptr_t addr_ele = NULL;
+ tws_hash_ptr_t *trailer;
+ tws_hash_ptr_t *free_list;
+ tws_hash_ele_t target_element = NULL;
int i,k;
- int alt_k;
- int alt_hash_count;
int current_line;
int set;
- int hash_loop;
+ int ctr;
+ unsigned int startup_cache_line;
+ vm_offset_t startup_page_addr;
+ int cache_full = 0;
+ int ask_for_startup_cache_release = 0;
+
if(!tws_lock_try(tws)) {
return KERN_FAILURE;
tws->insert_count++;
current_line = 0xFFFFFFFF;
+ startup_cache_line = 0;
+ startup_page_addr =
+ page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
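+	/* round the faulting address back to the base of its 32 page
+	   block, mirroring the masked offset */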
+ if(tws->startup_cache) {
+ int age_of_cache;
+ age_of_cache = ((sched_tick - tws->time_of_creation)
+ >> SCHED_TICK_SHIFT);
+ startup_cache_line = tws_startup_list_lookup(
+ tws->startup_cache, startup_page_addr);
+		if(tws == test_tws) {
+			printf("cache_lookup, result = 0x%x, addr = 0x%x, "
+				"object 0x%x, offset 0x%x%x\n",
+				startup_cache_line, startup_page_addr,
+				object, offset);
+		}
+ if(age_of_cache > 60) {
+ ask_for_startup_cache_release = 1;
+ }
+ }
+ if((tws->startup_name != NULL) && (tws->mod == 0)) {
+ /* Ensure as good a working set as possible */
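+		/* toss mappings outside the shared regions so later
+		   touches re-fault and can be captured for the
+		   startup file */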
+ pmap_remove(map->pmap, 0, GLOBAL_SHARED_TEXT_SEGMENT);
+ pmap_remove(map->pmap,
+ GLOBAL_SHARED_DATA_SEGMENT
+ + SHARED_DATA_REGION_SIZE, 0xFFFFF000);
+ }
+
 	/* This next bit of code, and the alternate hash, */
/* are all made necessary because of IPC COW */
- alt_index = alt_tws_hash(page_addr,
+ /* Note: the use of page_addr modified by delta from offset */
+ /* frame base means we may miss some previous entries. However */
+ /* we will not miss the present entry. This is most important */
+ /* in avoiding duplication of entries against long lived non-cow */
+ /* objects */
+ index_enum[0] = alt_tws_hash(
+ page_addr & TWS_HASH_OFF_MASK,
tws->number_of_elements, tws->number_of_lines);
- for(alt_k=0; alt_k<tws->expansion_count; alt_k++) {
- new_entry = tws->alt_table[alt_k][alt_index];
- if((new_entry == 0) || (new_entry->object == 0)) {
- tws->alt_table[alt_k][alt_index] = 0;
- continue;
- }
- if(!((new_entry->offset == (offset & TWS_HASH_OFF_MASK))
- && (new_entry->object == object))) {
-
- tws->alt_table[alt_k][alt_index] = 0;
-
- index = do_tws_hash(
- new_entry->object, new_entry->offset,
- tws->number_of_elements, tws->number_of_lines);
-
- hash_loop = 0;
- while (hash_loop < TWS_MAX_REHASH) {
-
- for(k=0; k<tws->expansion_count; k++) {
- if(tws->table[k][index] == new_entry) {
- break;
- }
- }
+ index_enum[1] = alt_tws_hash(
+ (page_addr - 0x1f000) & TWS_HASH_OFF_MASK,
+ tws->number_of_elements, tws->number_of_lines);
- if(k == tws->expansion_count) {
- index+=17;
- if(index >= (2 * tws->number_of_lines
- * tws->number_of_elements)) {
- index = index -
- (2 * tws->number_of_lines
- * tws->number_of_elements);
- }
- } else {
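+	/* ctr advances only at the bottom of the loop; after a stale
+	   entry is removed, the same bucket is probed again */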
+ for(ctr = 0; ctr < 2;) {
+ tws_hash_ele_t resident;
+ tws_traverse_address_hash_list(tws,
+ index_enum[ctr], page_addr, NULL,
+ 0, NULL,
+ &cache_ele, &trailer, &free_list, 1);
+ if(cache_ele != NULL) {
+ /* found one */
+ resident = (tws_hash_ele_t)((unsigned int)
+ cache_ele->element & ~TWS_ADDR_HASH);
+ if((object == resident->object) &&
+ resident->offset ==
+ (offset & TWS_HASH_OFF_MASK)) {
+ /* This is our object/offset */
+ resident->page_cache
+ |= startup_cache_line;
+ resident->page_cache |=
+ (1<<(((vm_offset_t)
+ (offset & TWS_INDEX_MASK))>>12));
+ tws_unlock(tws);
+ if(ask_for_startup_cache_release)
+ return KERN_OPERATION_TIMED_OUT;
+ return KERN_SUCCESS;
+ }
+ if((object->shadow ==
+ resident->object) &&
+ ((resident->offset
+ + object->shadow_offset)
+ == (offset & TWS_HASH_OFF_MASK))) {
+ /* if we just shadowed, inherit */
+ /* access pattern from parent */
+ startup_cache_line |=
+ resident->page_cache;
+					/* throw out old entry */
+ resident->page_cache = 0;
break;
- }
- hash_loop++;
-
+ } else {
+ resident->page_cache &=
+ ~(1<<(((vm_offset_t)(page_addr
+ - resident->page_addr))
+ >>12));
}
- if((k < tws->expansion_count) &&
- (tws->table[k][index] == new_entry)) {
- page_index = (offset & TWS_INDEX_MASK) >> 12;
- new_entry->page_cache &=
- ~((unsigned int)(1 << page_index));
-
- if(new_entry->page_cache == 0) {
-
+ /* Throw out old entry if there are no */
+ /* more pages in cache */
+ if(resident->page_cache == 0) {
+ /* delete addr hash entry */
+ cache_ele->element = 0;
+ *trailer = cache_ele->next;
+ cache_ele->next = *free_list;
+ *free_list = cache_ele;
+ /* go after object hash */
+ index = do_tws_hash(
+ resident->object,
+ resident->offset,
+ tws->number_of_elements,
+ tws->number_of_lines);
+ tws_traverse_object_hash_list(tws,
+ index, resident->object,
+ resident->offset,
+ 0xFFFFFFFF, &cache_ele,
+ &trailer, &free_list);
+ if(cache_ele != NULL) {
if(tws->style ==
- TWS_HASH_STYLE_SIGNAL) {
+ TWS_HASH_STYLE_SIGNAL) {
vm_object_deallocate(
- new_entry->object);
+ cache_ele->element->object);
vm_map_deallocate(
- new_entry->map);
+ cache_ele->element->map);
+ }
+ current_line =
+ cache_ele->element->line;
+ set = current_line
+ /tws->number_of_lines;
+ current_line -= set *
+ tws->number_of_lines;
+ if(cache_ele->element->object != 0) {
+ cache_ele->element->object = 0;
+ tws->cache[set]
+ [current_line].ele_count--;
}
- new_entry->object = 0;
- tws->table[k][index] = 0;
- current_line = new_entry->line;
- set = current_line/tws->number_of_lines;
- current_line = current_line -
- (set * tws->number_of_lines);
- tws->cache[set]
- [current_line].ele_count--;
+ cache_ele->element = 0;
+ *trailer = cache_ele->next;
+ cache_ele->next = *free_list;
+ *free_list = cache_ele;
}
-
}
- break;
+ continue;
}
+ ctr+=1;
}
+ /*
+ * We may or may not have a current line setting coming out of
+ * the code above. If we have a current line it means we can
+ * choose to back-fill the spot vacated by a previous entry.
+ * We have yet to do a definitive check using the original obj/off.
+ * We will do that now and override the current line if we
+ * find an element
+ */
+
index = do_tws_hash(object, offset,
tws->number_of_elements, tws->number_of_lines);
- alt_hash_count = 0;
- /* we will do MAX_REHASH hash attempts and then give up */
- while (alt_hash_count < TWS_MAX_REHASH) {
- for(k=0; k<tws->expansion_count; k++) {
- new_entry = tws->table[k][index];
- if(new_entry == NULL)
- continue;
- if((new_entry->object == object) &&
- (new_entry->offset ==
- (offset & TWS_HASH_OFF_MASK))) {
- new_entry->page_cache |=
- (1<<(((vm_offset_t)
- (offset & TWS_INDEX_MASK))>>12));
- tws->alt_table[k][alt_index] = new_entry;
- tws_unlock(tws);
- return KERN_SUCCESS;
- }
- }
-
- alt_hash_count += 1;
- index += 17;
- if(index >= (2 *
- tws->number_of_lines * tws->number_of_elements))
- index = index -
- (2 * tws->number_of_lines * tws->number_of_elements);
- }
- alt_hash_count = 0;
- index = do_tws_hash(object, offset,
- tws->number_of_elements, tws->number_of_lines);
- while (alt_hash_count < TWS_MAX_REHASH) {
- for(k=0; k<tws->expansion_count; k++) {
- new_entry = tws->table[k][index];
- if(new_entry == NULL)
- break;
- }
- if (k<tws->expansion_count)
- break;
+ alt_index = index_enum[0];
- alt_hash_count += 1;
- index += 17;
- if(index >= (2 *
- tws->number_of_lines * tws->number_of_elements))
- index = index -
- (2 * tws->number_of_lines * tws->number_of_elements);
+ tws_traverse_object_hash_list(tws, index, object, offset,
+ 0xFFFFFFFF, &cache_ele, &trailer, &free_list);
+ if(cache_ele != NULL) {
+ obj_ele = cache_ele;
+ current_line = cache_ele->element->line;
+ set = current_line/tws->number_of_lines;
+ current_line -= set * tws->number_of_lines;
+ target_element = cache_ele->element;
+
+ /* Now check to see if we have a hash addr for it */
+ tws_traverse_address_hash_list(tws,
+ alt_index, obj_ele->element->page_addr,
+ obj_ele->element->object,
+ obj_ele->element->offset,
+ obj_ele->element->map,
+ &cache_ele, &trailer, &free_list, 0);
+ if(cache_ele != NULL) {
+ addr_ele = cache_ele;
+ } else {
+ addr_ele = new_addr_hash(tws, set, alt_index);
+ /* if cannot allocate just do without */
+ /* we'll get it next time around */
+ }
}
- if(alt_hash_count == TWS_MAX_REHASH) {
- tws_unlock(tws);
- return KERN_FAILURE;
- }
+
if(tws->style == TWS_HASH_STYLE_SIGNAL) {
vm_object_reference(object);
}
if(current_line == 0xFFFFFFFF) {
-
current_line = tws->current_line;
set = current_line/tws->number_of_lines;
current_line = current_line - (set * tws->number_of_lines);
+#ifdef notdef
+ if(cache_full) {
+ tws->current_line = tws->number_of_lines - 1;
+ }
+#endif
if(tws->cache[set][current_line].ele_count
- >= tws->number_of_elements) {
+ >= tws->number_of_elements) {
current_line++;
tws->current_line++;
if(current_line == tws->number_of_lines) {
tws_unlock(tws);
return KERN_NO_SPACE;
}
- if((tws->table[set] = (tws_hash_ele_t *)
- kalloc(sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
+ if((tws->table[set] = (tws_hash_ptr_t *)
+ kalloc(sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
* tws->number_of_elements))
== NULL) {
set = 0;
- } else if((tws->alt_table[set] =
- (tws_hash_ele_t *)
- kalloc(sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
+ } else if((tws->table_ele[set] =
+ (tws_hash_ptr_t)
+ kalloc(sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
* tws->number_of_elements))
== NULL) {
kfree((vm_offset_t)tws->table[set],
- sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
+ sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ set = 0;
+ } else if((tws->alt_ele[set] =
+ (tws_hash_ptr_t)
+ kalloc(sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements))
+ == NULL) {
+				kfree((vm_offset_t)tws->table_ele[set],
+					sizeof(struct tws_hash_ptr)
+					* tws->number_of_lines
+					* tws->number_of_elements);
+				kfree((vm_offset_t)tws->table[set],
+					sizeof(tws_hash_ptr_t)
+					* tws->number_of_lines
* tws->number_of_elements);
tws->table[set] = NULL;
set = 0;
* tws->number_of_lines))
== NULL) {
kfree((vm_offset_t)tws->table[set],
- sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
- * tws->number_of_elements);
- kfree((vm_offset_t)tws->alt_table[set],
- sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
- * tws->number_of_elements);
+ sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree((vm_offset_t)tws->table_ele[set],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree((vm_offset_t)tws->alt_ele[set],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
tws->table[set] = NULL;
set = 0;
} else {
+ tws->free_hash_ele[set] =
+ (tws_hash_ptr_t)0;
+ tws->obj_free_count[set] = 0;
+ tws->addr_free_count[set] = 0;
bzero((char *)tws->table[set],
- sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
+ sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
* tws->number_of_elements);
- bzero((char *)tws->alt_table[set],
- sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
+ bzero((char *)tws->table_ele[set],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ bzero((char *)tws->alt_ele[set],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
* tws->number_of_elements);
bzero((char *)tws->cache[set],
sizeof(struct tws_hash_line)
* tws->number_of_lines);
}
} else {
+ int age_of_cache;
+ age_of_cache =
+ ((sched_tick -
+ tws->time_of_creation)
+ >> SCHED_TICK_SHIFT);
+
+ if((tws->startup_cache) &&
+ (age_of_cache > 60)) {
+ ask_for_startup_cache_release = 1;
+ }
+ if((tws->startup_name != NULL) &&
+ (age_of_cache > 15)) {
+ tws->current_line--;
+ tws_unlock(tws);
+ return KERN_OPERATION_TIMED_OUT;
+ }
+ if((tws->startup_name != NULL) &&
+ (age_of_cache < 15)) {
+ /* If we are creating a */
+ /* cache, don't lose the */
+ /* early info */
+ tws->current_line--;
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
tws->lookup_count = 0;
tws->insert_count = 0;
set = 0;
}
}
- ele_index = 0;
- for(i = 0; i<tws->number_of_elements; i++) {
- if(tws->cache[set][current_line].
- list[ele_index].object == 0) {
- break;
+
+ /* set object hash element */
+ if(obj_ele == NULL) {
+ obj_ele = new_obj_hash(tws, set, index);
+ if(obj_ele == NULL) {
+ tws->cache[set][current_line].ele_count
+ = tws->number_of_elements;
+ tws_unlock(tws);
+ return KERN_FAILURE;
}
- ele_index++;
- if(ele_index >= tws->number_of_elements)
- ele_index = 0;
-
}
- if(i == tws->number_of_elements)
- panic("tws_insert: no free elements");
+ /* set address hash element */
+ if(addr_ele == NULL) {
+ addr_ele = new_addr_hash(tws, set, alt_index);
+ }
+
+ if(target_element == NULL) {
+ ele_index = 0;
+ for(i = 0; i<tws->number_of_elements; i++) {
+ if(tws->cache[set][current_line].
+ list[ele_index].object == 0) {
+ break;
+ }
+ ele_index++;
+ if(ele_index >= tws->number_of_elements)
+ ele_index = 0;
+
+ }
+
+ if(i == tws->number_of_elements)
+ panic("tws_insert: no free elements");
+ target_element =
+ &(tws->cache[set][current_line].list[ele_index]);
- tws->cache[set][current_line].list[ele_index].object = object;
- tws->cache[set][current_line].list[ele_index].offset =
- offset & TWS_HASH_OFF_MASK;
- tws->cache[set][current_line].
- list[ele_index].page_addr = page_addr & TWS_HASH_OFF_MASK;
- tws->cache[set][current_line].list[ele_index].map = map;
- tws->cache[set][current_line].list[ele_index].line =
+ tws->cache[set][current_line].ele_count++;
+ }
+
+ obj_ele->element = target_element;
+ if(addr_ele) {
+ addr_ele->element = (tws_hash_ele_t)
+ (((unsigned int)target_element) | TWS_ADDR_HASH);
+ }
+ target_element->object = object;
+ target_element->offset = offset & TWS_HASH_OFF_MASK;
+ target_element->page_addr =
+ page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
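+	/* record page_addr at its block base so it stays aligned
+	   with the masked offset */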
+ target_element->map = map;
+ target_element->line =
current_line + (set * tws->number_of_lines);
- tws->cache[set][current_line].list[ele_index].page_cache =
+ if(startup_cache_line) {
+ target_element->page_cache = startup_cache_line;
+ }
+ target_element->page_cache |=
1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);
- tws->table[k][index] = &tws->cache[set][current_line].list[ele_index];
- for(alt_k=0; alt_k<tws->expansion_count; alt_k++) {
- if(tws->alt_table[alt_k][alt_index] == 0) {
- tws->alt_table[alt_k][alt_index] =
- &tws->cache[set][current_line].list[ele_index];
- break;
- }
- }
- tws->cache[set][current_line].ele_count++;
tws_unlock(tws);
+ if(ask_for_startup_cache_release)
+ return KERN_OPERATION_TIMED_OUT;
return KERN_SUCCESS;
}
task_t task;
vm_object_offset_t before = *start;
vm_object_offset_t after = *end;
+ vm_object_offset_t original_start = *start;
+ vm_object_offset_t original_end = *end;
vm_size_t length = (vm_size_t)(*end - *start);
vm_page_t m;
kern_return_t kret;
vm_object_offset_t object_size;
- int pre_heat_size;
- int age_of_cache;
+ int age_of_cache;
+ int pre_heat_size;
+ unsigned int ele_cache;
+	unsigned int end_cache = 0;
+	unsigned int start_cache = 0;
- if(object->private)
+ if((object->private) || !(object->pager))
return;
if (!object->internal) {
object->pager,
&object_size);
} else {
- object_size = 0xFFFFFFFFFFFFFFFF;
+ object_size = object->size;
}
- /*
- * determine age of cache in seconds
- */
- age_of_cache = ((sched_tick - tws->time_of_creation) >> SCHED_TICK_SHIFT);
-
- if (object->internal || age_of_cache > 15 || (age_of_cache > 5 && vm_page_free_count < (vm_page_free_target * 2 ))) {
- pre_heat_size = 0;
+
+ age_of_cache = ((sched_tick
+ - tws->time_of_creation) >> SCHED_TICK_SHIFT);
+
+ /* When pre-heat files are not available, resort to speculation */
+ /* based on size of file */
+
+ if(tws->startup_cache || object->internal || age_of_cache > 15 ||
+ (age_of_cache > 5 &&
+ vm_page_free_count < (vm_page_free_target * 2) )) {
+ pre_heat_size = 0;
} else {
- if (object_size > (vm_object_offset_t)(1024 * 1024))
- pre_heat_size = 8 * PAGE_SIZE;
- else if (object_size > (vm_object_offset_t)(128 * 1024))
- pre_heat_size = 4 * PAGE_SIZE;
- else
- pre_heat_size = 2 * PAGE_SIZE;
+ if (object_size > (vm_object_offset_t)(1024 * 1024))
+ pre_heat_size = 8 * PAGE_SIZE;
+ else if (object_size > (vm_object_offset_t)(128 * 1024))
+ pre_heat_size = 4 * PAGE_SIZE;
+ else
+ pre_heat_size = 2 * PAGE_SIZE;
+ }
+
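+	/* with a young startup cache, try to grow the cluster to whole
+	   32 page communities recorded at startup */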
+ if ((age_of_cache < 10) && (tws->startup_cache)) {
+ if ((max_length >= ((*end - *start)
+ + (32 * PAGE_SIZE))) &&
+ (tws_test_for_community(tws, object,
+ *start, 3, &ele_cache))) {
+ int expanded;
+ start_cache = ele_cache;
+ *start = *start & TWS_HASH_OFF_MASK;
+ *end = *start + (32 * PAGE_SIZE_64);
+ if(*end > object_size) {
+ *end = trunc_page(object_size);
+ max_length = 0;
+ if(before >= *end) {
+ *end = after;
+ } else {
+ end_cache = ele_cache;
+ }
+ } else {
+ end_cache = ele_cache;
+ }
+ while (max_length > ((*end - *start)
+ + (32 * PAGE_SIZE))) {
+ expanded = 0;
+ after = *end;
+ before = *start - PAGE_SIZE_64;
+ if((*end <= (object->size
+ + (32 * PAGE_SIZE_64))) &&
+ (tws_test_for_community(tws,
+ object, after,
+ 5, &ele_cache))) {
+ *end = after +
+ (32 * PAGE_SIZE_64);
+ if(*end > object_size) {
+ *end = trunc_page(object_size);
+ max_length = 0;
+ if(*start >= *end) {
+ *end = after;
+ }
+ }
+ end_cache = ele_cache;
+ expanded = 1;
+ }
+ if (max_length > ((*end - *start)
+ + (32 * PAGE_SIZE_64))) {
+ break;
+ }
+ if((*start >= (32 * PAGE_SIZE_64)) &&
+ (tws_test_for_community(tws, object,
+ before, 5, &ele_cache))) {
+ *start = before;
+ start_cache = ele_cache;
+ expanded = 1;
+ }
+ if(expanded == 0)
+ break;
+ }
+
+		if(start_cache != 0) {
+ unsigned int mask;
+
+ for (mask = 1; mask != 0; mask = mask << 1) {
+ if (*start == original_start)
+ break;
+ if (!(start_cache & mask))
+ *start += PAGE_SIZE_64;
+ else
+ break;
+ }
+ }
+		if(end_cache != 0) {
+ unsigned int mask;
+
+ for (mask = 0x80000000;
+ mask != 0; mask = mask >> 1) {
+ if (*end == original_end)
+ break;
+ if(!(end_cache & mask))
+ *end -= PAGE_SIZE_64;
+ else
+ break;
+ }
+ }
+
+ if (*start >= *end)
+ panic("bad clipping occurred\n");
+
+ return;
+ }
}
while ((length < max_length) &&
(object_size >=
- (object->paging_offset + after + PAGE_SIZE_64))) {
-
- if(length >= pre_heat_size)
- {
- if(tws_lookup(tws, after, object,
+ (after + PAGE_SIZE_64))) {
+ if(length >= pre_heat_size) {
+ if(tws_lookup(tws, after, object,
&line) != KERN_SUCCESS) {
vm_object_offset_t extend;
extend = after + PAGE_SIZE_64;
- if(tws_lookup(tws, extend, object,
+ if(tws_lookup(tws, extend, object,
&line) != KERN_SUCCESS) {
break;
}
- }
- }
- if (((object->existence_map != NULL)
- && (!LOOK_FOR(object, after))) ||
- (vm_page_lookup(object, after)
- != VM_PAGE_NULL)) {
+ }
+ }
+
+ if ((object->existence_map != NULL)
+ && (!LOOK_FOR(object, after))) {
break;
+ }
+
+ if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
+ /* we can bridge resident pages */
+ after += PAGE_SIZE_64;
+ length += PAGE_SIZE;
+ continue;
}
+
if (object->internal) {
/*
* need to acquire a real page in
break;
before -= PAGE_SIZE_64;
- if(length >= pre_heat_size)
- {
-
- if(tws_lookup(tws, before, object,
+ if(length >= pre_heat_size) {
+ if(tws_lookup(tws, before, object,
&line) != KERN_SUCCESS) {
vm_object_offset_t extend;
if (extend == 0)
break;
extend -= PAGE_SIZE_64;
- if(tws_lookup(tws, extend, object,
+ if(tws_lookup(tws, extend, object,
&line) != KERN_SUCCESS) {
break;
}
- }
- }
- if (((object->existence_map != NULL)
- && (!LOOK_FOR(object, before))) ||
- (vm_page_lookup(object, before)
- != VM_PAGE_NULL)) {
+ }
+ }
+ if ((object->existence_map != NULL)
+ && (!LOOK_FOR(object, before))) {
break;
}
+
+ if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
+ /* we can bridge resident pages */
+ *start -= PAGE_SIZE_64;
+ length += PAGE_SIZE;
+ continue;
+ }
+
if (object->internal) {
/*
* need to acquire a real page in
vm_map_unlock(map);
}
+/* tws locked on entry */
+
+tws_startup_t
+tws_create_startup_list(
+ tws_hash_t tws)
+{
+
+ tws_startup_t startup;
+ unsigned int i,j,k;
+ unsigned int total_elements;
+ unsigned int startup_size;
+ unsigned int sindex;
+ unsigned int hash_index;
+ tws_startup_ptr_t element;
+
+ total_elements = tws->expansion_count *
+ (tws->number_of_lines * tws->number_of_elements);
+
+ startup_size = sizeof(struct tws_startup)
+		+ (total_elements * sizeof(tws_startup_ptr_t))
+ + (total_elements * sizeof(struct tws_startup_ptr))
+ + (total_elements * sizeof(struct tws_startup_ele));
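+	/* one contiguous blob: header, then the bucket table, then the
+	   hash link elements, then the page_addr/page_cache array */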
+ startup = (tws_startup_t)(kalloc(startup_size));
+
+ if(startup == NULL)
+ return startup;
+
+ bzero((char *) startup, startup_size);
+
+ startup->table = (tws_startup_ptr_t *)
+ (((int)startup) + (sizeof(struct tws_startup)));
+ startup->ele = (struct tws_startup_ptr *)
+ (((vm_offset_t)startup->table) +
+ (total_elements * sizeof(tws_startup_ptr_t)));
+
+ startup->array = (struct tws_startup_ele *)
+ (((vm_offset_t)startup->ele) +
+ (total_elements * sizeof(struct tws_startup_ptr)));
+
+ startup->tws_hash_size = startup_size;
+	startup->ele_count = 0;
+	startup->array_size = total_elements;
+	/* burn the first hash element, else an offset of zero could not
+	   be told from an empty bucket */
+	startup->hash_count = 1;
+
+ sindex = 0;
+
+
+ for(i = 0; i<tws->number_of_lines; i++) {
+ for(j = 0; j<tws->number_of_elements; j++) {
+ for(k = 0; k<tws->expansion_count; k++) {
+ tws_hash_ele_t entry;
+ unsigned int hash_retry;
+ vm_offset_t addr;
+
+ entry = &tws->cache[k][i].list[j];
+ addr = entry->page_addr;
+ hash_retry = 0;
+ if(entry->object != 0) {
+ /* get a hash element */
+ hash_index = do_startup_hash(addr,
+ startup->array_size);
+
+ if(startup->hash_count < total_elements) {
+ element = &(startup->ele[startup->hash_count]);
+ startup->hash_count += 1;
+ } else {
+ /* exit we're out of elements */
+ break;
+ }
+ /* place the hash element */
+ element->next = startup->table[hash_index];
+ startup->table[hash_index] = (tws_startup_ptr_t)
+ ((int)element - (int)&startup->ele[0]);
+
+ /* set entry OFFSET in hash element */
+ element->element = (tws_startup_ele_t)
+ ((int)&startup->array[sindex] -
+ (int)&startup->array[0]);
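+				/* offsets, not pointers, are stored so the
+				   blob can be written to disk and relocated
+				   on read (see tws_read_startup_file) */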
+
+ startup->array[sindex].page_addr = entry->page_addr;
+ startup->array[sindex].page_cache = entry->page_cache;
+ startup->ele_count++;
+ sindex++;
+
+ }
+ }
+ }
+ }
+
+ return startup;
+}
+
+
+/*
+ * Returns an entire cache line. The line is deleted from the startup
+ * cache on return. The caller can check startup->ele_count for an empty
+ * list. Access synchronization is the responsibility of the caller.
+ */
+
+unsigned int
+tws_startup_list_lookup(
+ tws_startup_t startup,
+ vm_offset_t addr)
+{
+ unsigned int hash_index;
+ unsigned int page_cache_bits;
+ unsigned int startup_shift;
+ tws_startup_ele_t entry;
+ vm_offset_t next_addr;
+ tws_startup_ptr_t element;
+ tws_startup_ptr_t base_ele;
+ tws_startup_ptr_t *previous_ptr;
+
+ page_cache_bits = 0;
+
+ hash_index = do_startup_hash(addr, startup->array_size);
+
+	if((((unsigned int)&(startup->table[hash_index]))
+		- ((unsigned int)startup)) >= startup->tws_hash_size) {
+		return page_cache_bits;
+	}
+ element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
+ (int)&startup->ele[0]);
+ base_ele = element;
+ previous_ptr = &(startup->table[hash_index]);
+ while(element > &startup->ele[0]) {
+ if (((int)element + sizeof(struct tws_startup_ptr))
+ > ((int)startup + startup->tws_hash_size)) {
+ return page_cache_bits;
+ }
+ entry = (tws_startup_ele_t)
+ ((int)element->element
+ + (int)&startup->array[0]);
+ if((((int)entry + sizeof(struct tws_startup_ele))
+ > ((int)startup + startup->tws_hash_size))
+ || ((int)entry < (int)startup)) {
+ return page_cache_bits;
+ }
+ if ((addr >= entry->page_addr) &&
+ (addr <= (entry->page_addr + 0x1F000))) {
+ startup_shift = (addr - entry->page_addr)>>12;
+ page_cache_bits |= entry->page_cache >> startup_shift;
+ /* don't dump the pages, unless the addresses */
+ /* line up perfectly. The cache may be used */
+ /* by other mappings */
+ entry->page_cache &= (1 << startup_shift) - 1;
+ if(addr == entry->page_addr) {
+ if(base_ele == element) {
+ base_ele = (tws_startup_ptr_t)
+ ((int)element->next
+ + (int)&startup->ele[0]);
+ startup->table[hash_index] = element->next;
+ element = base_ele;
+ } else {
+ *previous_ptr = element->next;
+ element = (tws_startup_ptr_t)
+ ((int)*previous_ptr
+ + (int)&startup->ele[0]);
+ }
+ entry->page_addr = 0;
+ startup->ele_count--;
+ continue;
+ }
+ }
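+		/* also catch entries whose block base lies in the 31 pages
+		   above addr; their low pages overlap this window */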
+ next_addr = addr + 0x1F000;
+ if ((next_addr >= entry->page_addr) &&
+ (next_addr <= (entry->page_addr + 0x1F000))) {
+ startup_shift = (next_addr - entry->page_addr)>>12;
+ page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
+ entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
+ if(entry->page_cache == 0) {
+ if(base_ele == element) {
+ base_ele = (tws_startup_ptr_t)
+ ((int)element->next
+ + (int)&startup->ele[0]);
+ startup->table[hash_index] = element->next;
+ element = base_ele;
+ } else {
+ *previous_ptr = element->next;
+ element = (tws_startup_ptr_t)
+ ((int)*previous_ptr
+ + (int)&startup->ele[0]);
+ }
+ entry->page_addr = 0;
+ startup->ele_count--;
+ continue;
+ }
+ }
+ previous_ptr = &(element->next);
+ element = (tws_startup_ptr_t)
+ ((int) element->next + (int) &startup->ele[0]);
+ }
+
+ return page_cache_bits;
+}
+
+kern_return_t
+tws_send_startup_info(
+ task_t task)
+{
+
+ tws_hash_t tws;
+
+ task_lock(task);
+ tws = (tws_hash_t)task->dynamic_working_set;
+ task_unlock(task);
+ if(tws == NULL) {
+ return KERN_FAILURE;
+ }
+ return tws_internal_startup_send(tws);
+}
+
+
+kern_return_t
+tws_internal_startup_send(
+ tws_hash_t tws)
+{
+
+ tws_startup_t scache;
+
+ if(tws == NULL) {
+ return KERN_FAILURE;
+ }
+ tws_lock(tws);
+ /* used to signal write or release depending on state of tws */
+ if(tws->startup_cache) {
+ vm_offset_t startup_buf;
+ vm_size_t size;
+ startup_buf = (vm_offset_t)tws->startup_cache;
+ size = tws->startup_cache->tws_hash_size;
+ tws->startup_cache = 0;
+ tws_unlock(tws);
+ kmem_free(kernel_map, startup_buf, size);
+ return KERN_SUCCESS;
+ }
+ if(tws->startup_name == NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+ scache = tws_create_startup_list(tws);
+	if(scache == NULL) {
+		tws_unlock(tws);
+		return KERN_FAILURE;
+	}
+ bsd_write_page_cache_file(tws->uid, tws->startup_name,
+ scache, scache->tws_hash_size,
+ tws->mod, tws->fid);
+ kfree((vm_offset_t)scache, scache->tws_hash_size);
+ kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
+ tws->startup_name = NULL;
+ tws_unlock(tws);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+tws_handle_startup_file(
+ task_t task,
+ unsigned int uid,
+ char *app_name,
+ vm_offset_t app_vp,
+ boolean_t *new_info)
+{
+ tws_startup_t startup;
+ vm_offset_t cache_size;
+ kern_return_t error;
+ int fid;
+ int mod;
+
+ *new_info = FALSE;
+ /* don't pre-heat kernel task */
+ if(task == kernel_task)
+ return KERN_SUCCESS;
+ error = bsd_read_page_cache_file(uid, &fid,
+ &mod, app_name,
+ app_vp, &startup,
+ &cache_size);
+ if(error) {
+ return KERN_FAILURE;
+ }
+ if(startup == NULL) {
+ /* Entry for app does not exist, make */
+ /* one */
+ /* we will want our own copy of the shared */
+ /* regions to pick up a true picture of all */
+ /* the pages we will touch. */
+ if((lsf_zone->count * lsf_zone->elem_size)
+ > (lsf_zone->max_size >> 1)) {
+ /* We don't want to run out of shared memory */
+ /* map entries by starting too many private versions */
+ /* of the shared library structures */
+ return KERN_SUCCESS;
+ }
+ *new_info = TRUE;
+ error = tws_write_startup_file(task,
+ fid, mod, app_name, uid);
+ if(error)
+ return error;
+ /* use the mod in the write case as an init */
+ /* flag */
+ mod = 0;
+
+ } else {
+ error = tws_read_startup_file(task,
+ (tws_startup_t)startup,
+ cache_size);
+ if(error) {
+ kmem_free(kernel_map,
+ (vm_offset_t)startup, cache_size);
+ return error;
+ }
+ }
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+tws_write_startup_file(
+ task_t task,
+ int fid,
+ int mod,
+ char *name,
+ unsigned int uid)
+{
+ tws_hash_t tws;
+ unsigned int string_length;
+
+ string_length = strlen(name);
+
+ task_lock(task);
+ tws = (tws_hash_t)task->dynamic_working_set;
+
+ task_unlock(task);
+	if(tws == NULL) {
+		/* create a dynamic working set of normal size */
+		task_working_set_create(task, 0,
+				0, TWS_HASH_STYLE_DEFAULT);
+		task_lock(task);
+		tws = (tws_hash_t)task->dynamic_working_set;
+		task_unlock(task);
+		if(tws == NULL)
+			return KERN_FAILURE;
+	}
+ tws_lock(tws);
+
+ if(tws->startup_name != NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+
+ tws->startup_name = (char *)
+ kalloc((string_length + 1) * (sizeof(char)));
+ if(tws->startup_name == NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+
+ bcopy(name, (char *)tws->startup_name, string_length + 1);
+ tws->startup_name_length = (string_length + 1) * sizeof(char);
+ tws->uid = uid;
+ tws->fid = fid;
+ tws->mod = mod;
+
+ tws_unlock(tws);
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+tws_read_startup_file(
+ task_t task,
+ tws_startup_t startup,
+ vm_offset_t cache_size)
+{
+ tws_hash_t tws;
+ int error;
+ int lines;
+ int old_exp_count;
+
+ task_lock(task);
+ tws = (tws_hash_t)task->dynamic_working_set;
+
+ if(cache_size < sizeof(struct tws_hash)) {
+ task_unlock(task);
+ kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
+ return(KERN_SUCCESS);
+ }
+
+ /* create a dynamic working set to match file size */
+ lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
+ /* we now need to divide out element size and word size */
+ /* all fields are 4 bytes. There are 8 bytes in each hash element */
+ /* entry, 4 bytes in each table ptr location and 8 bytes in each */
+ /* page_cache entry, making a total of 20 bytes for each entry */
+ lines = (lines/(20));
+ if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
+ lines = TWS_SMALL_HASH_LINE_COUNT;
+ task_unlock(task);
+ kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
+ return(KERN_SUCCESS);
+ } else {
+ old_exp_count = lines/TWS_HASH_LINE_COUNT;
+ if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
+ lines = (old_exp_count + 1)
+ * TWS_HASH_LINE_COUNT;
+ }
+ if(tws == NULL) {
+ task_working_set_create(task, lines,
+ 0, TWS_HASH_STYLE_DEFAULT);
+ task_unlock(task);
+ } else {
+ task_unlock(task);
+ tws_expand_working_set(
+ (vm_offset_t)tws, lines, TRUE);
+ }
+ }
+
+
+ tws_lock(tws);
+
+ if(tws->startup_cache != NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+
+
+ /* now need to fix up internal table pointers */
+ startup->table = (tws_startup_ptr_t *)
+ (((int)startup) + (sizeof(struct tws_startup)));
+ startup->ele = (struct tws_startup_ptr *)
+ (((vm_offset_t)startup->table) +
+ (startup->array_size * sizeof(tws_startup_ptr_t)));
+ startup->array = (struct tws_startup_ele *)
+ (((vm_offset_t)startup->ele) +
+ (startup->array_size * sizeof(struct tws_startup_ptr)));
+ /* the allocation size and file size should be the same */
+	/* just in case they're not, make sure we dealloc correctly */
+ startup->tws_hash_size = cache_size;
+
+
+ tws->startup_cache = startup;
+ tws_unlock(tws);
+ return KERN_SUCCESS;
+}
void
int i,k;
vm_size_t cache_size;
+ if(tws->startup_cache != NULL) {
+ kmem_free(kernel_map,
+ (vm_offset_t)tws->startup_cache,
+ tws->startup_cache->tws_hash_size);
+ tws->startup_cache = NULL;
+ }
+ if(tws->startup_name != NULL) {
+ tws_internal_startup_send(tws);
+ }
for (i=0; i<tws->number_of_lines; i++) {
for(k=0; k<tws->expansion_count; k++) {
/* clear the object refs */
i = 0;
while (i < tws->expansion_count) {
- kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
+ kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree((vm_offset_t)tws->table_ele[i],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
* tws->number_of_elements);
- kfree((vm_offset_t)tws->alt_table[i], sizeof(tws_hash_ele_t)
- * 2 * tws->number_of_lines
+ kfree((vm_offset_t)tws->alt_ele[i],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
* tws->number_of_elements);
kfree((vm_offset_t)tws->cache[i], sizeof(struct tws_hash_line)
* tws->number_of_lines);
i++;
}
+ if(tws->startup_name != NULL) {
+ kfree((vm_offset_t)tws->startup_name,
+ tws->startup_name_length);
+ }
kfree((vm_offset_t)tws, sizeof(struct tws_hash));
}
task_unlock(task);
return KERN_SUCCESS;
}
+
+
+/* Internal use only routines */
+
+
+/*
+ * internal sub-function for address space lookup
+ * returns the target element and the address of the
+ * previous pointer.  The previous pointer is the address
+ * of the pointer pointing to the target element.
+ * TWS must be locked
+ */
+
+void
+tws_traverse_address_hash_list (
+ tws_hash_t tws,
+ unsigned int index,
+ vm_offset_t page_addr,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ vm_map_t map,
+ tws_hash_ptr_t *target_ele,
+ tws_hash_ptr_t **previous_ptr,
+ tws_hash_ptr_t **free_list,
+ unsigned int exclusive_addr)
+{
+ int k;
+ tws_hash_ptr_t cache_ele;
+ tws_hash_ptr_t base_ele;
+
+ *target_ele = NULL;
+ *previous_ptr = NULL;
+
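+	/* address and object entries share table[k]; address entries are
+	   tagged with the low TWS_ADDR_HASH bit of the element pointer */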
+ for(k=0; k<tws->expansion_count; k++) {
+ tws_hash_ele_t ele;
+ cache_ele = tws->table[k][index];
+ base_ele = cache_ele;
+ *previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
+ while(cache_ele != NULL) {
+ if(((unsigned int)
+ cache_ele->element & TWS_ADDR_HASH) == 0) {
+ *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
+ cache_ele = cache_ele->next;
+ continue;
+ }
+ ele = (tws_hash_ele_t)((unsigned int)
+ cache_ele->element & ~TWS_ADDR_HASH);
+ if ((ele == 0) || (ele->object == 0)) {
+ /* A little clean-up of empty elements */
+ cache_ele->element = 0;
+ if(base_ele == cache_ele) {
+ base_ele = cache_ele->next;
+ tws->table[k][index] = cache_ele->next;
+ cache_ele->next = tws->free_hash_ele[k];
+ tws->free_hash_ele[k] = cache_ele;
+ cache_ele = base_ele;
+ } else {
+ **previous_ptr = cache_ele->next;
+ cache_ele->next = tws->free_hash_ele[k];
+ tws->free_hash_ele[k] = cache_ele;
+ cache_ele = **previous_ptr;
+ }
+ continue;
+ }
+
+ if ((ele->page_addr <= page_addr)
+ && (page_addr <= (ele->page_addr +
+ (vm_offset_t)TWS_INDEX_MASK))
+ && ((object == NULL)
+ || ((object == ele->object)
+ && (offset == ele->offset)
+ && (map == ele->map)))) {
+ if(exclusive_addr) {
+ int delta;
+ delta = ((page_addr - ele->page_addr)
+ >> 12);
+ if((1 << delta) & ele->page_cache) {
+ /* We've found a match */
+ *target_ele = cache_ele;
+ *free_list =
+ (tws_hash_ptr_t *)
+ &(tws->free_hash_ele[k]);
+ return;
+ }
+ } else {
+ /* We've found a match */
+ *target_ele = cache_ele;
+ *free_list = (tws_hash_ptr_t *)
+ &(tws->free_hash_ele[k]);
+ return;
+ }
+ }
+ *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
+ cache_ele = cache_ele->next;
+ }
+ }
+}
+
+
+/*
+ * internal sub-function for object space lookup
+ * returns the target element and the address of the
+ * previous pointer.  The previous pointer is the address
+ * of the pointer pointing to the target element.
+ * TWS must be locked
+ */
+
+
+void
+tws_traverse_object_hash_list (
+ tws_hash_t tws,
+ unsigned int index,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ unsigned int page_mask,
+ tws_hash_ptr_t *target_ele,
+ tws_hash_ptr_t **previous_ptr,
+ tws_hash_ptr_t **free_list)
+{
+ int k;
+ tws_hash_ptr_t cache_ele;
+ tws_hash_ptr_t base_ele;
+
+ *target_ele = NULL;
+ *previous_ptr = NULL;
+
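+	/* entries tagged TWS_ADDR_HASH belong to the address hash and
+	   are skipped here */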
+ for(k=0; k<tws->expansion_count; k++) {
+ cache_ele = tws->table[k][index];
+ base_ele = cache_ele;
+ *previous_ptr = &(tws->table[k][index]);
+ while(cache_ele != NULL) {
+ if((((unsigned int)cache_ele->element)
+ & TWS_ADDR_HASH) != 0) {
+ *previous_ptr = &(cache_ele->next);
+ cache_ele = cache_ele->next;
+ continue;
+ }
+ if ((cache_ele->element == 0) ||
+ (cache_ele->element->object == 0)) {
+ /* A little clean-up of empty elements */
+ cache_ele->element = 0;
+ if(base_ele == cache_ele) {
+ base_ele = cache_ele->next;
+ tws->table[k][index] = cache_ele->next;
+ cache_ele->next = tws->free_hash_ele[k];
+ tws->free_hash_ele[k] = cache_ele;
+ cache_ele = tws->table[k][index];
+ } else {
+ **previous_ptr = cache_ele->next;
+ cache_ele->next = tws->free_hash_ele[k];
+ tws->free_hash_ele[k] = cache_ele;
+ cache_ele = **previous_ptr;
+ }
+ continue;
+ }
+ if ((cache_ele->element->object == object)
+ && (cache_ele->element->offset ==
+ (offset - (offset & ~TWS_HASH_OFF_MASK)))) {
+ if((cache_ele->element->page_cache & page_mask)
+ || (page_mask == 0xFFFFFFFF)) {
+ /* We've found a match */
+ *target_ele = cache_ele;
+ *free_list = &(tws->free_hash_ele[k]);
+ return;
+ }
+ }
+ *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
+ cache_ele = cache_ele->next;
+ }
+ }
+}
+
+
+/*
+ * For a given object/offset, discover whether the indexed 32 page frame
+ * containing the object/offset exists and if there are at least threshold
+ * pages present. Returns true if population meets threshold.
+ */
+int
+tws_test_for_community(
+ tws_hash_t tws,
+ vm_object_t object,
+ vm_object_offset_t offset,
+ unsigned int threshold,
+ unsigned int *page_mask)
+{
+ int index;
+ tws_hash_ptr_t cache_ele;
+ tws_hash_ptr_t *trailer;
+ tws_hash_ptr_t *free_list;
+ int community = 0;
+
+ index = do_tws_hash(object, offset,
+ tws->number_of_elements, tws->number_of_lines);
+ tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
+ &cache_ele, &trailer, &free_list);
+
+ if(cache_ele != NULL) {
+ int i;
+ int ctr;
+ ctr = 0;
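+		/* count bits set in the 32 page bitmap until the
+		   threshold is reached */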
+ for(i=1; i!=0; i=i<<1) {
+ if(i & cache_ele->element->page_cache)
+ ctr++;
+ if(ctr == threshold) {
+ community = 1;
+ *page_mask = cache_ele->element->page_cache;
+ break;
+ }
+ }
+ }
+
+ return community;
+
+}
+
+
+/*
+ * Gets new hash element for object hash from free pools
+ * TWS must be locked
+ */
+
+tws_hash_ptr_t
+new_obj_hash(
+ tws_hash_t tws,
+ unsigned int set,
+ unsigned int index)
+{
+ tws_hash_ptr_t element;
+
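+	/* carve from the preallocated per-set array first, then fall
+	   back to the recycled free list */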
+	if(tws->obj_free_count[set]
+		< tws->number_of_lines * tws->number_of_elements) {
+ element = &(tws->table_ele[set][tws->obj_free_count[set]]);
+ tws->obj_free_count[set]+=1;
+ } else if(tws->free_hash_ele[set] == NULL) {
+ return NULL;
+ } else {
+ element = tws->free_hash_ele[set];
+ if(element == NULL)
+ return element;
+ tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
+ }
+ element->element = 0;
+ element->next = tws->table[set][index];
+ tws->table[set][index] = element;
+ return element;
+}
+
+/*
+ * Gets new hash element for addr hash from free pools
+ * TWS must be locked
+ */
+
+tws_hash_ptr_t
+new_addr_hash(
+ tws_hash_t tws,
+ unsigned int set,
+ unsigned int index)
+{
+ tws_hash_ptr_t element;
+
+ if(tws->addr_free_count[set]
+ < tws->number_of_lines * tws->number_of_elements) {
+ element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
+ tws->addr_free_count[set]+=1;
+ } else if(tws->free_hash_ele[set] == NULL) {
+ return NULL;
+ } else {
+ element = tws->free_hash_ele[set];
+ if(element == NULL)
+ return element;
+ tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
+ }
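+	/* pre-tag as an address hash entry; the caller stores the
+	   tagged element pointer later */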
+ element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
+ element->next = tws->table[set][index];
+ tws->table[set][index] = element;
+ return element;
+}