+/* tws locked on entry */
+
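+/*
+ * Builds a single contiguous, position-independent snapshot of the
+ * working set: a tws_startup header, then the hash table, then the
+ * hash links, then the element array.  Internal references are kept
+ * as byte offsets rather than pointers, so the blob can be written
+ * to disk and remapped later (see tws_read_startup_file).
+ */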
+tws_startup_t
+tws_create_startup_list(
+ tws_hash_t tws)
+{
+
+ tws_startup_t startup;
+ unsigned int i,j,k;
+ unsigned int total_elements;
+ unsigned int startup_size;
+ unsigned int sindex;
+ unsigned int hash_index;
+ tws_startup_ptr_t element;
+
+ total_elements = tws->expansion_count *
+ (tws->number_of_lines * tws->number_of_elements);
+
+ startup_size = sizeof(struct tws_startup)
+ + (total_elements * sizeof(tws_startup_ptr_t *))
+ + (total_elements * sizeof(struct tws_startup_ptr))
+ + (total_elements * sizeof(struct tws_startup_ele));
+ startup = (tws_startup_t)(kalloc(startup_size));
+
+ if(startup == NULL)
+ return startup;
+
+ bzero((char *) startup, startup_size);
+
+ startup->table = (tws_startup_ptr_t *)
+ (((int)startup) + (sizeof(struct tws_startup)));
+ startup->ele = (struct tws_startup_ptr *)
+ (((vm_offset_t)startup->table) +
+ (total_elements * sizeof(tws_startup_ptr_t)));
+
+ startup->array = (struct tws_startup_ele *)
+ (((vm_offset_t)startup->ele) +
+ (total_elements * sizeof(struct tws_startup_ptr)));
+
+ startup->tws_hash_size = startup_size;
+ startup->ele_count = 0;
+ startup->array_size = total_elements;
+ /* burn the first hash element: stored links are offsets, and an */
+ /* offset of zero must mean "no entry", so index 0 is never used */
+ startup->hash_count = 1;
+
+ sindex = 0;
+
+ for(i = 0; i<tws->number_of_lines; i++) {
+ for(j = 0; j<tws->number_of_elements; j++) {
+ for(k = 0; k<tws->expansion_count; k++) {
+ tws_hash_ele_t entry;
+ unsigned int hash_retry;
+ vm_offset_t addr;
+
+ entry = &tws->cache[k][i].list[j];
+ addr = entry->page_addr;
+ hash_retry = 0;
+ if(entry->object != 0) {
+ /* get a hash element */
+ hash_index = do_startup_hash(addr,
+ startup->array_size);
+
+ if(startup->hash_count < total_elements) {
+ element = &(startup->ele[startup->hash_count]);
+ startup->hash_count += 1;
+ } else {
+ /* out of elements. Note: this only exits */
+ /* the innermost loop, but once hash_count */
+ /* reaches total_elements no later entry */
+ /* can be placed, so nothing more is added */
+ break;
+ }
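+ /* both the chain link and the element reference below are */
+ /* stored as byte offsets from the base of their respective */
+ /* arrays, keeping the blob valid across a write/read cycle */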
+ /* place the hash element */
+ element->next = startup->table[hash_index];
+ startup->table[hash_index] = (tws_startup_ptr_t)
+ ((int)element - (int)&startup->ele[0]);
+
+ /* set entry OFFSET in hash element */
+ element->element = (tws_startup_ele_t)
+ ((int)&startup->array[sindex] -
+ (int)&startup->array[0]);
+
+ startup->array[sindex].page_addr = entry->page_addr;
+ startup->array[sindex].page_cache = entry->page_cache;
+ startup->ele_count++;
+ sindex++;
+
+ }
+ }
+ }
+ }
+
+ return startup;
+}
+
+
+/*
+ * Returns the page_cache bits for the cache line containing addr.
+ * Matching entries are deleted from the startup cache on return.
+ * The caller can check startup->ele_count for an empty list.
+ * Access synchronization is the responsibility of the caller.
+ */
+
+unsigned int
+tws_startup_list_lookup(
+ tws_startup_t startup,
+ vm_offset_t addr)
+{
+ unsigned int hash_index;
+ unsigned int page_cache_bits;
+ unsigned int startup_shift;
+ tws_startup_ele_t entry;
+ vm_offset_t next_addr;
+ tws_startup_ptr_t element;
+ tws_startup_ptr_t base_ele;
+ tws_startup_ptr_t *previous_ptr;
+
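+ /* each entry's page_cache is a 32-bit bitmap covering the 32 */
+ /* pages starting at its page_addr; 0x1F000 (31 pages) is the */
+ /* offset of the last page in such a window */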
+ page_cache_bits = 0;
+
+ hash_index = do_startup_hash(addr, startup->array_size);
+
+ /* sanity check: the table slot must lie inside the blob */
+ if(((unsigned int)&(startup->table[hash_index])) >=
+ ((unsigned int)startup + startup->tws_hash_size)) {
+ return page_cache_bits;
+ }
+ element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
+ (int)&startup->ele[0]);
+ base_ele = element;
+ previous_ptr = &(startup->table[hash_index]);
+ while(element > &startup->ele[0]) {
+ if (((int)element + sizeof(struct tws_startup_ptr))
+ > ((int)startup + startup->tws_hash_size)) {
+ return page_cache_bits;
+ }
+ entry = (tws_startup_ele_t)
+ ((int)element->element
+ + (int)&startup->array[0]);
+ if((((int)entry + sizeof(struct tws_startup_ele))
+ > ((int)startup + startup->tws_hash_size))
+ || ((int)entry < (int)startup)) {
+ return page_cache_bits;
+ }
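+ /* leading case: addr falls inside the 32-page window that */
+ /* starts at entry->page_addr */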
+ if ((addr >= entry->page_addr) &&
+ (addr <= (entry->page_addr + 0x1F000))) {
+ startup_shift = (addr - entry->page_addr)>>12;
+ page_cache_bits |= entry->page_cache >> startup_shift;
+ /* don't dump the pages, unless the addresses */
+ /* line up perfectly. The cache may be used */
+ /* by other mappings */
+ entry->page_cache &= (1 << startup_shift) - 1;
+ if(addr == entry->page_addr) {
+ if(base_ele == element) {
+ base_ele = (tws_startup_ptr_t)
+ ((int)element->next
+ + (int)&startup->ele[0]);
+ startup->table[hash_index] = element->next;
+ element = base_ele;
+ } else {
+ *previous_ptr = element->next;
+ element = (tws_startup_ptr_t)
+ ((int)*previous_ptr
+ + (int)&startup->ele[0]);
+ }
+ entry->page_addr = 0;
+ startup->ele_count--;
+ continue;
+ }
+ }
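+ /* trailing case: the entry's window starts at or above addr */
+ /* but still overlaps the 32 pages beginning at addr */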
+ next_addr = addr + 0x1F000;
+ if ((next_addr >= entry->page_addr) &&
+ (next_addr <= (entry->page_addr + 0x1F000))) {
+ startup_shift = (next_addr - entry->page_addr)>>12;
+ page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
+ entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
+ if(entry->page_cache == 0) {
+ if(base_ele == element) {
+ base_ele = (tws_startup_ptr_t)
+ ((int)element->next
+ + (int)&startup->ele[0]);
+ startup->table[hash_index] = element->next;
+ element = base_ele;
+ } else {
+ *previous_ptr = element->next;
+ element = (tws_startup_ptr_t)
+ ((int)*previous_ptr
+ + (int)&startup->ele[0]);
+ }
+ entry->page_addr = 0;
+ startup->ele_count--;
+ continue;
+ }
+ }
+ previous_ptr = &(element->next);
+ element = (tws_startup_ptr_t)
+ ((int) element->next + (int) &startup->ele[0]);
+ }
+
+ return page_cache_bits;
+}
+
+kern_return_t
+tws_send_startup_info(
+ task_t task)
+{
+
+ tws_hash_t tws;
+
+ task_lock(task);
+ tws = (tws_hash_t)task->dynamic_working_set;
+ task_unlock(task);
+ if(tws == NULL) {
+ return KERN_FAILURE;
+ }
+ return tws_internal_startup_send(tws);
+}
+
+
+kern_return_t
+tws_internal_startup_send(
+ tws_hash_t tws)
+{
+
+ tws_startup_t scache;
+
+ if(tws == NULL) {
+ return KERN_FAILURE;
+ }
+ tws_lock(tws);
+ /* depending on the state of tws, this either releases an */
+ /* attached startup cache or writes a fresh one to disk */
+ if(tws->startup_cache) {
+ vm_offset_t startup_buf;
+ vm_size_t size;
+ startup_buf = (vm_offset_t)tws->startup_cache;
+ size = tws->startup_cache->tws_hash_size;
+ tws->startup_cache = 0;
+ tws_unlock(tws);
+ kmem_free(kernel_map, startup_buf, size);
+ return KERN_SUCCESS;
+ }
+ if(tws->startup_name == NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+ scache = tws_create_startup_list(tws);
+ if(scache == NULL) {
+ /* don't leak the tws lock on the failure path */
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+ bsd_write_page_cache_file(tws->uid, tws->startup_name,
+ scache, scache->tws_hash_size,
+ tws->mod, tws->fid);
+ kfree((vm_offset_t)scache, scache->tws_hash_size);
+ kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
+ tws->startup_name = NULL;
+ tws_unlock(tws);
+ return KERN_SUCCESS;
+}
+
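+/*
+ * Reads the app's page cache file if one exists and primes the
+ * task's working set from it; otherwise records the name, fid and
+ * mod so that a cache file can be written out later via
+ * tws_internal_startup_send.
+ */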
+kern_return_t
+tws_handle_startup_file(
+ task_t task,
+ unsigned int uid,
+ char *app_name,
+ vm_offset_t app_vp,
+ boolean_t *new_info)
+
+{
+ tws_startup_t startup;
+ vm_offset_t cache_size;
+ kern_return_t error;
+ int fid;
+ int mod;
+
+ *new_info = FALSE;
+ /* don't pre-heat kernel task */
+ if(task == kernel_task)
+ return KERN_SUCCESS;
+ error = bsd_read_page_cache_file(uid, &fid,
+ &mod, app_name,
+ app_vp, &startup,
+ &cache_size);
+ if(error) {
+ return KERN_FAILURE;
+ }
+ if(startup == NULL) {
+ /* No entry for this app exists; make one. We will want */
+ /* our own copy of the shared regions to pick up a true */
+ /* picture of all the pages we will touch. */
+ if((lsf_zone->count * lsf_zone->elem_size)
+ > (lsf_zone->max_size >> 1)) {
+ /* We don't want to run out of shared memory */
+ /* map entries by starting too many private versions */
+ /* of the shared library structures */
+ return KERN_SUCCESS;
+ }
+ *new_info = TRUE;
+ error = tws_write_startup_file(task,
+ fid, mod, app_name, uid);
+ if(error)
+ return error;
+ /* in the write case, mod serves as an init flag */
+ mod = 0;
+
+ } else {
+ error = tws_read_startup_file(task,
+ (tws_startup_t)startup,
+ cache_size);
+ if(error) {
+ kmem_free(kernel_map,
+ (vm_offset_t)startup, cache_size);
+ return error;
+ }
+ }
+ return KERN_SUCCESS;
+}
+
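+/*
+ * Does not write the cache file itself; it stores the file name,
+ * uid, fid and mod date in the working set so that
+ * tws_internal_startup_send can write the cache out later.
+ */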
+kern_return_t
+tws_write_startup_file(
+ task_t task,
+ int fid,
+ int mod,
+ char *name,
+ unsigned int uid)
+{
+ tws_hash_t tws;
+ unsigned int string_length;
+
+ string_length = strlen(name);
+
+ task_lock(task);
+ tws = (tws_hash_t)task->dynamic_working_set;
+ task_unlock(task);
+ if(tws == NULL) {
+ /* create a dynamic working set of normal size */
+ task_working_set_create(task, 0,
+ 0, TWS_HASH_STYLE_DEFAULT);
+ /* re-fetch the working set just created; tws is still */
+ /* NULL here and the tws_lock below would dereference it */
+ task_lock(task);
+ tws = (tws_hash_t)task->dynamic_working_set;
+ task_unlock(task);
+ if(tws == NULL)
+ return KERN_FAILURE;
+ }
+ tws_lock(tws);
+
+ if(tws->startup_name != NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+
+ tws->startup_name = (char *)
+ kalloc((string_length + 1) * (sizeof(char)));
+ if(tws->startup_name == NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+
+ bcopy(name, (char *)tws->startup_name, string_length + 1);
+ tws->startup_name_length = (string_length + 1) * sizeof(char);
+ tws->uid = uid;
+ tws->fid = fid;
+ tws->mod = mod;
+
+ tws_unlock(tws);
+ return KERN_SUCCESS;
+}
+
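+/*
+ * Sizes (or expands) the task's working set to match the cached
+ * file, fixes up the blob's internal table pointers and attaches
+ * the blob as the task's startup cache.
+ */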
+kern_return_t
+tws_read_startup_file(
+ task_t task,
+ tws_startup_t startup,
+ vm_offset_t cache_size)
+{
+ tws_hash_t tws;
+ int error;
+ int lines;
+ int old_exp_count;
+
+ task_lock(task);
+ tws = (tws_hash_t)task->dynamic_working_set;
+
+ if(cache_size < sizeof(struct tws_hash)) {
+ task_unlock(task);
+ kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
+ return(KERN_SUCCESS);
+ }
+
+ /* create a dynamic working set to match the file size */
+ lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
+ /* now divide out the per-entry cost: all fields are 4 bytes, */
+ /* with 8 bytes per hash element, 4 bytes per table ptr slot */
+ /* and 8 bytes per page_cache entry, i.e. 20 bytes per entry */
+ lines = lines / 20;
+ if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
+ lines = TWS_SMALL_HASH_LINE_COUNT;
+ task_unlock(task);
+ kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
+ return(KERN_SUCCESS);
+ } else {
+ old_exp_count = lines/TWS_HASH_LINE_COUNT;
+ if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
+ lines = (old_exp_count + 1)
+ * TWS_HASH_LINE_COUNT;
+ }
+ if(tws == NULL) {
+ task_working_set_create(task, lines,
+ 0, TWS_HASH_STYLE_DEFAULT);
+ /* pick up the working set just created while the */
+ /* task lock is still held; tws would otherwise */
+ /* still be NULL at the tws_lock below */
+ tws = (tws_hash_t)task->dynamic_working_set;
+ task_unlock(task);
+ /* on failure the caller frees the startup blob */
+ if(tws == NULL)
+ return KERN_FAILURE;
+ } else {
+ task_unlock(task);
+ tws_expand_working_set(
+ (vm_offset_t)tws, lines, TRUE);
+ }
+ }
+
+ tws_lock(tws);
+
+ if(tws->startup_cache != NULL) {
+ tws_unlock(tws);
+ return KERN_FAILURE;
+ }
+
+ /* now need to fix up internal table pointers */
+ startup->table = (tws_startup_ptr_t *)
+ (((int)startup) + (sizeof(struct tws_startup)));
+ startup->ele = (struct tws_startup_ptr *)
+ (((vm_offset_t)startup->table) +
+ (startup->array_size * sizeof(tws_startup_ptr_t)));
+ startup->array = (struct tws_startup_ele *)
+ (((vm_offset_t)startup->ele) +
+ (startup->array_size * sizeof(struct tws_startup_ptr)));
+ /* the allocation size and the file size should be the same; */
+ /* just in case they're not, make sure we dealloc correctly */
+ startup->tws_hash_size = cache_size;
+
+ tws->startup_cache = startup;
+ tws_unlock(tws);
+ return KERN_SUCCESS;
+}