/*
 * NOTE(review): the lines that preceded this comment were navigation
 * chrome from a git-blame web view (git.saurik.com, apple/xnu,
 * xnu-344.21.73) and were not part of the source file; removed so the
 * translation unit remains valid C.
 */
1int startup_miss = 0;
2/*
3 * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
8 *
9 * This file contains Original Code and/or Modifications of Original Code
10 * as defined in and that are subject to the Apple Public Source License
11 * Version 2.0 (the 'License'). You may not use this file except in
12 * compliance with the License. Please obtain a copy of the License at
13 * http://www.opensource.apple.com/apsl/ and read it before using this
14 * file.
15 *
16 * The Original Code and all software distributed under the License are
17 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
18 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
19 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
21 * Please see the License for the specific language governing rights and
22 * limitations under the License.
23 *
24 * @APPLE_LICENSE_HEADER_END@
25 */
26/*
27 */
28/*
29 * File: vm/task_working_set.c
30 * Author: Chris Youngworth
31 * Date: 2001
32 *
33 * Working set detection and maintainence module
34 */
35
36
37#include <mach/mach_types.h>
38#include <mach/shared_memory_server.h>
39#include <vm/task_working_set.h>
40#include <vm/vm_kern.h>
41#include <vm/vm_map.h>
42#include <vm/vm_page.h>
43#include <vm/vm_pageout.h>
44#include <kern/sched.h>
45
46extern unsigned sched_tick;
47extern zone_t lsf_zone;
48
49/* declarations for internal use only routines */
50
51tws_startup_t
52tws_create_startup_list(
53 tws_hash_t tws);
54
55unsigned int
56tws_startup_list_lookup(
57 tws_startup_t startup,
58 vm_offset_t addr);
59
60kern_return_t
61tws_internal_startup_send(
62 tws_hash_t tws);
63
64void
65tws_traverse_address_hash_list (
66 tws_hash_t tws,
67 unsigned int index,
68 vm_offset_t page_addr,
69 vm_object_t object,
70 vm_object_offset_t offset,
71 vm_map_t map,
72 tws_hash_ptr_t *target_ele,
73 tws_hash_ptr_t **previous_ptr,
74 tws_hash_ptr_t **free_list,
75 unsigned int exclusive_addr);
76
77void
78tws_traverse_object_hash_list (
79 tws_hash_t tws,
80 unsigned int index,
81 vm_object_t object,
82 vm_object_offset_t offset,
83 unsigned int page_mask,
84 tws_hash_ptr_t *target_ele,
85 tws_hash_ptr_t **previous_ptr,
86 tws_hash_ptr_t **free_list);
87
88tws_hash_ptr_t
89new_addr_hash(
90 tws_hash_t tws,
91 unsigned int set,
92 unsigned int index);
93
94tws_hash_ptr_t
95new_obj_hash(
96 tws_hash_t tws,
97 unsigned int set,
98 unsigned int index);
99
100int tws_test_for_community(
101 tws_hash_t tws,
102 vm_object_t object,
103 vm_object_offset_t offset,
104 unsigned int threshold,
105 unsigned int *page_mask);
106
107/* Note: all of the routines below depend on the associated map lock for */
108/* synchronization, the map lock will be on when the routines are called */
109/* and on when they return */
110
111tws_hash_t
112tws_hash_create(
113 unsigned int lines,
114 unsigned int rows,
115 unsigned int style)
116{
117 tws_hash_t tws;
118 int i,j;
119
120
121 if ((style != TWS_HASH_STYLE_BASIC) &&
122 (style != TWS_HASH_STYLE_BASIC)) {
123 return((tws_hash_t)NULL);
124 }
125
126
127 tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
128 if(tws == (tws_hash_t)NULL)
129 return tws;
130
131 if((tws->table[0] = (tws_hash_ptr_t *)
132 kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
133 == NULL) {
134 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
135 return (tws_hash_t)NULL;
136 }
137 if((tws->table_ele[0] = (tws_hash_ptr_t)
138 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
139 == NULL) {
140 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ele_t)
141 * lines * rows);
142 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
143 return (tws_hash_t)NULL;
144 }
145 if((tws->alt_ele[0] = (tws_hash_ptr_t)
146 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
147 == NULL) {
148 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
149 * lines * rows);
150 kfree((vm_offset_t)tws->table_ele[0],
151 sizeof(struct tws_hash_ptr)
152 * lines * rows);
153 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
154 return (tws_hash_t)NULL;
155 }
156 if((tws->cache[0] = (struct tws_hash_line *)
157 kalloc(sizeof(struct tws_hash_line) * lines))
158 == NULL) {
159 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
160 * lines * rows);
161 kfree((vm_offset_t)tws->table_ele[0],
162 sizeof(struct tws_hash_ptr)
163 * lines * rows);
164 kfree((vm_offset_t)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
165 * lines * rows);
166 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
167 return (tws_hash_t)NULL;
168 }
169 tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
170 tws->obj_free_count[0] = 0;
171 tws->addr_free_count[0] = 0;
172
173 /* most defaults are such that a bzero will initialize */
174 bzero((char *)tws->table[0],sizeof(tws_hash_ptr_t)
175 * lines * rows);
176 bzero((char *)tws->table_ele[0],sizeof(struct tws_hash_ptr)
177 * lines * rows);
178 bzero((char *)tws->alt_ele[0],sizeof(struct tws_hash_ptr)
179 * lines * rows);
180 bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
181 * lines);
182
183 mutex_init(&tws->lock, ETAP_VM_MAP);
184 tws->style = style;
185 tws->current_line = 0;
186 tws->pageout_count = 0;
187 tws->line_count = 0;
188 tws->startup_cache = NULL;
189 tws->startup_name = NULL;
190 tws->number_of_lines = lines;
191 tws->number_of_elements = rows;
192 tws->expansion_count = 1;
193 tws->lookup_count = 0;
194 tws->insert_count = 0;
195 tws->time_of_creation = sched_tick;
196
197 return tws;
198}
199
int newtest = 0;
/*
 *	tws_hash_line_clear:
 *
 *	Evict every element of one working-set cache line.  For each live
 *	element the two possible address-hash entries (primary, and the
 *	0x1f000-shifted alternate used for IPC-COW aliases) and the object
 *	hash entry are unlinked and pushed on their free lists.  If "live"
 *	and the eviction budget allows (dump_pmap), the resident pages are
 *	also removed from the current map's pmap.
 *
 *	Caller holds the tws lock (per the module-wide locking note).
 */
void
tws_hash_line_clear(
	tws_hash_t	tws,
	tws_hash_line_t hash_line,
	boolean_t live)
{
	struct tws_hash_ele	*hash_ele;
	struct tws_hash_ptr	**trailer;
	struct tws_hash_ptr	**free_list;
	tws_hash_ele_t		addr_ele;
	int			index;
	unsigned int		i, j, k;
	int			dump_pmap;
	int			hash_loop;	/* NOTE(review): set below but never read */


	/*
	 * Rate-limit pmap teardown: allow it for the first
	 * number_of_lines clears, then only once per pageout-scan
	 * event (vm_pageout_scan_event_counter tick).
	 */
	if(tws->line_count < tws->number_of_lines) {
		tws->line_count++;
		dump_pmap = 1;
	} else {
		if(tws->pageout_count != vm_pageout_scan_event_counter) {
			tws->pageout_count =
				vm_pageout_scan_event_counter;
			tws->line_count = 0;
			dump_pmap = 1;
		} else {
			dump_pmap = 0;
		}
	}
	hash_line->ele_count = 0;

	for (i=0; i<tws->number_of_elements; i++) {
		hash_loop = 0;
		hash_ele = &(hash_line->list[i]);
		if(hash_ele->object != 0) {

			vm_object_offset_t local_off = 0;
			tws_hash_ptr_t	cache_ele;

			/* unlink the primary address-hash entry */
			index = alt_tws_hash(
				hash_ele->page_addr & TWS_HASH_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);
			if(cache_ele != NULL) {
				/* mask off the tag bit to recover the element */
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if(addr_ele != hash_ele)
					panic("tws_hash_line_clear:"
						" out of sync\n");
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}

			/*
			 * Also unlink the alternate entry hashed from the
			 * 0x1f000-shifted address (IPC-COW alias; see the
			 * matching logic in tws_insert).
			 */
			index = alt_tws_hash(
				(hash_ele->page_addr - 0x1f000)
						& TWS_HASH_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);

			if(cache_ele != NULL) {
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if(addr_ele != hash_ele)
					panic("tws_hash_line_clear: "
						"out of sync\n");
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}


			/*
			 * Drop pmap mappings for every page recorded in
			 * the 32-bit page_cache mask (one bit per page of
			 * the 32-page block), unless wired or rate-limited.
			 */
			if((hash_ele->map != NULL) && (live)) {
				vm_page_t	p;

				for (j = 0x1; j != 0; j = j<<1) {
					if(j & hash_ele->page_cache) {
						p = vm_page_lookup(hash_ele->object,
							hash_ele->offset + local_off);
						if((p != NULL) && (p->wire_count == 0)
							&& (dump_pmap == 1)) {
							pmap_remove_some_phys((pmap_t)
								vm_map_pmap(
									current_map()),
								p->phys_page);
						}
					}
					local_off += PAGE_SIZE_64;
				}
			}

			/* SIGNAL style holds refs on object/map; release them */
			if(tws->style == TWS_HASH_STYLE_SIGNAL) {
				vm_object_deallocate(hash_ele->object);
				vm_map_deallocate(hash_ele->map);
			}

			/* finally, unlink the object-hash entry */
			index = do_tws_hash(hash_ele->object, hash_ele->offset,
					tws->number_of_elements,
					tws->number_of_lines);

			tws_traverse_object_hash_list(tws,
				index, hash_ele->object, hash_ele->offset,
				0xFFFFFFFF, &cache_ele, &trailer, &free_list);
			if((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}
			hash_ele->object = 0;
		}
	}
}
326
327kern_return_t
328tws_internal_lookup(
329 tws_hash_t tws,
330 vm_object_offset_t offset,
331 vm_object_t object,
332 tws_hash_line_t *line)
333{
334 int index;
335 int loop;
336 int set;
337 int ele_line;
338 vm_offset_t pagenum;
339 tws_hash_ptr_t cache_ele;
340 tws_hash_ptr_t *trailer;
341 tws_hash_ptr_t *free_list;
342
343 /* don't cache private objects */
344 if(object->private)
345 return KERN_SUCCESS;
346
347 index = do_tws_hash(object, offset,
348 tws->number_of_elements, tws->number_of_lines);
349 loop = 0;
350
351 tws->lookup_count++;
352 if(tws->lookup_count == 0)
353 tws->insert_count = 0;
354 if(tws->startup_name != NULL) {
355 int age_of_cache;
356 age_of_cache = ((sched_tick
357 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
358 if (age_of_cache > 35) {
359 return KERN_OPERATION_TIMED_OUT;
360 }
361 }
362
363 if(tws->lookup_count > (4 * tws->expansion_count
364 * tws->number_of_elements * tws->number_of_lines) &&
365 (tws->lookup_count > (2 * tws->insert_count))) {
366 if(tws->startup_cache) {
367 int age_of_cache;
368 age_of_cache = ((sched_tick
369 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
370 if (age_of_cache > 60) {
371 return KERN_OPERATION_TIMED_OUT;
372 }
373 }
374 }
375
376 pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
377 pagenum = pagenum >> 12;
378 pagenum = 1 << pagenum; /* get the appropriate page in 32 page block */
379 tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
380 &cache_ele, &trailer, &free_list);
381 if(cache_ele != NULL) {
382 set = cache_ele->element->line/tws->number_of_lines;
383 ele_line = cache_ele->element->line - set;
384 *line = &tws->cache[set][ele_line];
385 return KERN_SUCCESS;
386 }
387
388 return KERN_FAILURE;
389
390
391}
392
393kern_return_t
394tws_lookup(
395 tws_hash_t tws,
396 vm_object_offset_t offset,
397 vm_object_t object,
398 tws_hash_line_t *line)
399{
400 kern_return_t kr;
401
402 if(!tws_lock_try(tws)) {
403 return KERN_FAILURE;
404 }
405 kr = tws_internal_lookup(tws,
406 offset, object, line);
407 tws_unlock(tws);
408 return kr;
409}
410
411kern_return_t
412tws_expand_working_set(
413 vm_offset_t tws,
414 int line_count,
415 boolean_t dump_data)
416{
417 tws_hash_t new_tws;
418 tws_hash_t old_tws;
419 unsigned int i,j,k;
420 struct tws_hash temp;
421
422 old_tws = (tws_hash_t)tws;
423
424 /* Note we do an elaborate dance to preserve the header that */
425 /* task is pointing to. In this way we can avoid taking a task */
426 /* lock every time we want to access the tws */
427
428 if (old_tws->number_of_lines >= line_count) {
429 return KERN_FAILURE;
430 }
431 if((new_tws = tws_hash_create(line_count,
432 old_tws->number_of_elements, old_tws->style)) == 0) {
433 return(KERN_NO_SPACE);
434 }
435 tws_lock(old_tws);
436
437 if(!dump_data) {
438 for(i = 0; i<old_tws->number_of_lines; i++) {
439 for(j = 0; j<old_tws->number_of_elements; j++) {
440 for(k = 0; k<old_tws->expansion_count; k++) {
441 tws_hash_ele_t entry;
442 vm_object_offset_t paddr;
443 unsigned int page_index;
444 entry = &old_tws->cache[k][i].list[j];
445 if(entry->object != 0) {
446 paddr = 0;
447 for(page_index = 1; page_index != 0;
448 page_index = page_index << 1); {
449 if (entry->page_cache & page_index) {
450 tws_insert(new_tws,
451 entry->offset+paddr,
452 entry->object,
453 entry->page_addr+paddr,
454 entry->map);
455 }
456 paddr+=PAGE_SIZE;
457 }
458
459 }
460 }
461 }
462 }
463 }
464
465 temp.style = new_tws->style;
466 temp.current_line = new_tws->current_line;
467 temp.pageout_count = new_tws->pageout_count;
468 temp.line_count = new_tws->line_count;
469 temp.number_of_lines = new_tws->number_of_lines;
470 temp.number_of_elements = new_tws->number_of_elements;
471 temp.expansion_count = new_tws->expansion_count;
472 temp.lookup_count = new_tws->lookup_count;
473 temp.insert_count = new_tws->insert_count;
474 for(i = 0; i<new_tws->expansion_count; i++) {
475 temp.obj_free_count[i] = new_tws->obj_free_count[i];
476 temp.addr_free_count[i] = new_tws->addr_free_count[i];
477 temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
478 temp.table[i] = new_tws->table[i];
479 temp.table_ele[i] = new_tws->table_ele[i];
480 temp.alt_ele[i] = new_tws->alt_ele[i];
481 temp.cache[i] = new_tws->cache[i];
482 }
483
484 new_tws->style = old_tws->style;
485 new_tws->current_line = old_tws->current_line;
486 new_tws->pageout_count = old_tws->pageout_count;
487 new_tws->line_count = old_tws->line_count;
488 new_tws->number_of_lines = old_tws->number_of_lines;
489 new_tws->number_of_elements = old_tws->number_of_elements;
490 new_tws->expansion_count = old_tws->expansion_count;
491 new_tws->lookup_count = old_tws->lookup_count;
492 new_tws->insert_count = old_tws->insert_count;
493 for(i = 0; i<old_tws->expansion_count; i++) {
494 new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
495 new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
496 new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
497 new_tws->table[i] = old_tws->table[i];
498 new_tws->table_ele[i] = old_tws->table_ele[i];
499 new_tws->alt_ele[i] = old_tws->alt_ele[i];
500 new_tws->cache[i] = old_tws->cache[i];
501 }
502
503 old_tws->style = temp.style;
504 old_tws->current_line = temp.current_line;
505 old_tws->pageout_count = temp.pageout_count;
506 old_tws->line_count = temp.line_count;
507 old_tws->number_of_lines = temp.number_of_lines;
508 old_tws->number_of_elements = temp.number_of_elements;
509 old_tws->expansion_count = temp.expansion_count;
510 old_tws->lookup_count = temp.lookup_count;
511 old_tws->insert_count = temp.insert_count;
512 for(i = 0; i<temp.expansion_count; i++) {
513 old_tws->obj_free_count[i] = temp.obj_free_count[i];;
514 old_tws->addr_free_count[i] = temp.addr_free_count[i];;
515 old_tws->free_hash_ele[i] = NULL;
516 old_tws->table[i] = temp.table[i];
517 old_tws->table_ele[i] = temp.table_ele[i];
518 old_tws->alt_ele[i] = temp.alt_ele[i];
519 old_tws->cache[i] = temp.cache[i];
520 }
521
522 tws_hash_destroy(new_tws);
523 tws_unlock(old_tws);
524 return KERN_SUCCESS;
525}
526
tws_hash_t test_tws = 0;	/* debug: when set to a live tws, trace its lookups */

/*
 *	tws_insert:
 *
 *	Record that object/offset was touched at page_addr in map.  The
 *	entry is folded into an existing element for the same 32-page
 *	block when one exists (including shadow-chain inheritance for
 *	freshly COW'd objects); otherwise a slot is taken on the current
 *	line, clearing or expanding lines as needed.
 *
 *	Returns KERN_SUCCESS on insert, KERN_FAILURE if the lock is
 *	contended or no slot can be had, KERN_NO_SPACE when the caller
 *	should grow the hash via tws_expand_working_set, and
 *	KERN_OPERATION_TIMED_OUT when the startup cache should be
 *	released by the caller.
 *
 *	Map lock held on entry and exit (module-wide convention above);
 *	the tws lock is taken here with a try-lock.
 */
kern_return_t
tws_insert(
	tws_hash_t		tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	vm_offset_t		page_addr,
	vm_map_t		map)
{
	queue_t 	bucket;		/* NOTE(review): unused */
	unsigned int	index;
	unsigned int	alt_index;
	unsigned int	index_enum[2];
	unsigned int	ele_index;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	obj_ele = NULL;
	tws_hash_ptr_t	addr_ele = NULL;
	tws_hash_ptr_t	*trailer;
	tws_hash_ptr_t	*free_list;
	tws_hash_ele_t	target_element = NULL;
	int		i,k;		/* NOTE(review): k is unused */
	int		current_line;
	int		set;
	int		ctr;
	unsigned int	startup_cache_line;
	vm_offset_t	startup_page_addr;
	int		cache_full = 0;	/* only read under "#ifdef notdef" */
	int		ask_for_startup_cache_release = 0;


	if(!tws_lock_try(tws)) {
		return KERN_FAILURE;
	}
	tws->insert_count++;
	current_line = 0xFFFFFFFF;	/* sentinel: no line chosen yet */

	/*
	 * Consult the startup cache (if any) for the page-cache bits
	 * recorded for this block's base address in a previous run.
	 */
	startup_cache_line = 0;
	startup_page_addr =
		page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	if(tws->startup_cache) {
		int age_of_cache;
		age_of_cache = ((sched_tick - tws->time_of_creation)
						>> SCHED_TICK_SHIFT);
		startup_cache_line = tws_startup_list_lookup(
					tws->startup_cache, startup_page_addr);
if(tws == test_tws) {
printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", startup_cache_line, startup_page_addr, object, offset);
}
		if(age_of_cache > 60) {
			/* startup cache has outlived its window */
			ask_for_startup_cache_release = 1;
		}
	}
	if((tws->startup_name != NULL) && (tws->mod == 0)) {
		/* Ensure as good a working set as possible */
		pmap_remove(map->pmap, 0, GLOBAL_SHARED_TEXT_SEGMENT);
		pmap_remove(map->pmap,
			GLOBAL_SHARED_DATA_SEGMENT
				+ SHARED_DATA_REGION_SIZE, 0xFFFFFFFFFFFFF000);
	}

	/* This next bit of code, the and alternate hash */
	/* are all made necessary because of IPC COW     */

	/* Note: the use of page_addr modified by delta from offset */
	/* frame base means we may miss some previous entries.  However */
	/* we will not miss the present entry.  This is most important */
	/* in avoiding duplication of entries against long lived non-cow */
	/* objects */
	index_enum[0] = alt_tws_hash(
			page_addr & TWS_HASH_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	/* alternate hash: same block shifted by 0x1f000 (IPC-COW alias) */
	index_enum[1] = alt_tws_hash(
			(page_addr - 0x1f000) & TWS_HASH_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	/*
	 * Probe both address hashes.  "continue" (without bumping ctr)
	 * re-probes the same chain after an entry is unlinked.
	 */
	for(ctr = 0; ctr < 2;) {
		tws_hash_ele_t	resident;
		tws_traverse_address_hash_list(tws,
				index_enum[ctr], page_addr, NULL,
				0, NULL,
				&cache_ele, &trailer, &free_list, 1);
		if(cache_ele != NULL) {
			/* found one */
			resident = (tws_hash_ele_t)((unsigned int)
					cache_ele->element & ~TWS_ADDR_HASH);
			if((object == resident->object) &&
					resident->offset ==
					(offset & TWS_HASH_OFF_MASK)) {
				/* This is our object/offset */
				resident->page_cache
						|= startup_cache_line;
				resident->page_cache |=
				   (1<<(((vm_offset_t)
				   (offset & TWS_INDEX_MASK))>>12));
				tws_unlock(tws);
				if(ask_for_startup_cache_release)
					return KERN_OPERATION_TIMED_OUT;
				return KERN_SUCCESS;
			}
			if((object->shadow ==
					resident->object) &&
					((resident->offset
					+ object->shadow_offset)
					== (offset & TWS_HASH_OFF_MASK))) {
				/* if we just shadowed, inherit */
				/* access pattern from  parent */
				startup_cache_line |=
				    resident->page_cache;
				/* thow out old entry */
				resident->page_cache = 0;
				break;
			} else {
				/* different object now lives at this addr;
				 * forget the stale page bit */
				resident->page_cache &=
					~(1<<(((vm_offset_t)(page_addr
					- resident->page_addr))
					>>12));
			}
			/* Throw out old entry if there are no */
			/* more pages in cache */
			if(resident->page_cache == 0) {
				/* delete addr hash entry */
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
				/* go after object hash */
				index = do_tws_hash(
						resident->object,
						resident->offset,
						tws->number_of_elements,
						tws->number_of_lines);
				tws_traverse_object_hash_list(tws,
						index, resident->object,
						resident->offset,
						0xFFFFFFFF, &cache_ele,
						&trailer, &free_list);
				if(cache_ele != NULL) {
					if(tws->style ==
						TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(
						   cache_ele->element->object);
						vm_map_deallocate(
						   cache_ele->element->map);
					}
					/* remember the vacated line so we
					 * can back-fill it below */
					current_line =
						cache_ele->element->line;
					set = current_line
						/tws->number_of_lines;
					current_line -= set *
						tws->number_of_lines;
					if(cache_ele->element->object != 0) {
						cache_ele->element->object = 0;
						tws->cache[set]
							[current_line].ele_count--;
					}
					cache_ele->element = 0;
					*trailer = cache_ele->next;
					cache_ele->next = *free_list;
					*free_list = cache_ele;
				}
			}
			continue;
		}
		ctr+=1;
	}

	/*
	 * We may or may not have a current line setting coming out of
	 * the code above.  If we have a current line it means we can
	 * choose to back-fill the spot vacated by a previous entry.
	 * We have yet to do a definitive check using the original obj/off
	 * We will do that now and override the current line if we
	 * find an element
	 */

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);

	alt_index = index_enum[0];

	tws_traverse_object_hash_list(tws, index, object, offset,
			0xFFFFFFFF, &cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		obj_ele = cache_ele;
		current_line = cache_ele->element->line;
		set = current_line/tws->number_of_lines;
		current_line -= set * tws->number_of_lines;
		target_element = cache_ele->element;

		/* Now check to see if we have a hash addr for it */
		tws_traverse_address_hash_list(tws,
				alt_index, obj_ele->element->page_addr,
				obj_ele->element->object,
				obj_ele->element->offset,
				obj_ele->element->map,
				&cache_ele, &trailer, &free_list, 0);
		if(cache_ele != NULL) {
			addr_ele = cache_ele;
		} else {
			addr_ele = new_addr_hash(tws, set, alt_index);
			/* if cannot allocate just do without */
			/* we'll get it next time around */
		}
	}



	/* SIGNAL style keeps references on the object and map */
	if(tws->style == TWS_HASH_STYLE_SIGNAL) {
		vm_object_reference(object);
		vm_map_reference(map);
	}

	/*
	 * No existing element and no vacated slot: take the current
	 * line, advancing (and possibly clearing or expanding) when
	 * the line is full.
	 */
	if(current_line == 0xFFFFFFFF) {
		current_line = tws->current_line;
		set = current_line/tws->number_of_lines;
		current_line = current_line - (set * tws->number_of_lines);

#ifdef notdef
		if(cache_full) {
			tws->current_line = tws->number_of_lines - 1;
		}
#endif
		if(tws->cache[set][current_line].ele_count
				>= tws->number_of_elements) {
			current_line++;
			tws->current_line++;
			if(current_line == tws->number_of_lines) {
				set++;
				current_line = 0;
				/* ran off the last expansion set */
				if (set == tws->expansion_count) {
				   if((tws->lookup_count <
					(2 * tws->insert_count)) &&
					(set<TWS_HASH_EXPANSION_MAX)) {
					tws->lookup_count = 0;
					tws->insert_count = 0;
					if(tws->number_of_lines
						< TWS_HASH_LINE_COUNT) {
						/* caller should grow the
						 * hash instead */
						tws->current_line--;
						tws_unlock(tws);
						return KERN_NO_SPACE;
					}
					/* allocate a new expansion set;
					 * on any failure fall back to
					 * recycling set 0 */
					if((tws->table[set] = (tws_hash_ptr_t *)
						kalloc(sizeof(tws_hash_ptr_t)
						* tws->number_of_lines
						* tws->number_of_elements))
								== NULL) {
						set = 0;
					} else if((tws->table_ele[set] =
						(tws_hash_ptr_t)
						kalloc(sizeof(struct tws_hash_ptr)
						* tws->number_of_lines
						* tws->number_of_elements))
								== NULL) {
						kfree((vm_offset_t)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						set = 0;
					} else if((tws->alt_ele[set] =
						(tws_hash_ptr_t)
						kalloc(sizeof(struct tws_hash_ptr)
						* tws->number_of_lines
						* tws->number_of_elements))
								== NULL) {
						kfree((vm_offset_t)tws->table_ele[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						kfree((vm_offset_t)tws->table[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						tws->table[set] = NULL;
						set = 0;

					} else if((tws->cache[set] =
						(struct tws_hash_line *)
						kalloc(sizeof
						(struct tws_hash_line)
						* tws->number_of_lines))
								== NULL) {
						kfree((vm_offset_t)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						kfree((vm_offset_t)tws->table_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						kfree((vm_offset_t)tws->alt_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						tws->table[set] = NULL;
						set = 0;

					} else {
						/* fresh set: zero it out */
						tws->free_hash_ele[set] =
							(tws_hash_ptr_t)0;
						tws->obj_free_count[set] = 0;
						tws->addr_free_count[set] = 0;
						bzero((char *)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->table_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->alt_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->cache[set],
							sizeof(struct tws_hash_line)
							* tws->number_of_lines);
					}
				   } else {
					/* not expanding: wrap to set 0,
					 * unless a startup cache/name says
					 * we should stop instead */
					int age_of_cache;
					age_of_cache =
						((sched_tick -
						tws->time_of_creation)
							>> SCHED_TICK_SHIFT);

					if((tws->startup_cache) &&
					(age_of_cache > 60)) {
					   ask_for_startup_cache_release = 1;
					}
					if((tws->startup_name != NULL) &&
					(age_of_cache > 15)) {
						tws->current_line--;
						tws_unlock(tws);
						return KERN_OPERATION_TIMED_OUT;
					}
					if((tws->startup_name != NULL) &&
					(age_of_cache < 15)) {
						/* If we are creating a */
						/* cache, don't lose the */
						/* early info */
						tws->current_line--;
						tws_unlock(tws);
						return KERN_FAILURE;
					}
					tws->lookup_count = 0;
					tws->insert_count = 0;
					set = 0;
				   }
				}
				tws->current_line = set * tws->number_of_lines;
			}
			if(set < tws->expansion_count) {
			   /* recycle an existing line */
			   tws_hash_line_clear(tws,
					&(tws->cache[set][current_line]), TRUE);
			   if(tws->cache[set][current_line].ele_count
						>= tws->number_of_elements) {
				if(tws->style == TWS_HASH_STYLE_SIGNAL) {
					vm_object_deallocate(object);
					vm_map_deallocate(map);
				}
				tws_unlock(tws);
				return KERN_FAILURE;
			   }
			} else {
				tws->expansion_count++;
			}
		}
	}


	/* set object hash element */
	if(obj_ele == NULL) {
		obj_ele = new_obj_hash(tws, set, index);
		if(obj_ele == NULL) {
			/* out of elements: poison the line so the next
			 * insert clears it, then fail */
			tws->cache[set][current_line].ele_count
				= tws->number_of_elements;
			tws_unlock(tws);
			return KERN_FAILURE;
		}
	}

	/* set address hash element */
	if(addr_ele == NULL) {
		addr_ele = new_addr_hash(tws, set, alt_index);
	}

	/* claim a free slot on the line (round-robin scan) */
	if(target_element == NULL) {
		ele_index = 0;
		for(i = 0; i<tws->number_of_elements; i++) {
			if(tws->cache[set][current_line].
				list[ele_index].object == 0) {
				break;
			}
			ele_index++;
			if(ele_index >= tws->number_of_elements)
				ele_index = 0;

		}

		if(i == tws->number_of_elements)
			panic("tws_insert: no free elements");

		target_element =
			&(tws->cache[set][current_line].list[ele_index]);

		tws->cache[set][current_line].ele_count++;
	}

	/* wire up both hash entries and fill in the element */
	obj_ele->element = target_element;
	if(addr_ele) {
		/* tag bit distinguishes addr-hash from obj-hash pointers */
		addr_ele->element = (tws_hash_ele_t)
			(((unsigned int)target_element) | TWS_ADDR_HASH);
	}
	target_element->object = object;
	target_element->offset = offset & TWS_HASH_OFF_MASK;
	target_element->page_addr =
			page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	target_element->map = map;
	target_element->line =
			current_line + (set * tws->number_of_lines);
	if(startup_cache_line) {
		target_element->page_cache = startup_cache_line;
	}
	target_element->page_cache |=
			1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);


	tws_unlock(tws);
	if(ask_for_startup_cache_release)
		return KERN_OPERATION_TIMED_OUT;
	return KERN_SUCCESS;
}
960
961/*
962 * tws_build_cluster
963 * lengthen the cluster of pages by the number of pages encountered in the
964 * working set up to the limit requested by the caller. The object needs
965 * to be locked on entry. The map does not because the tws_lookup function
966 * is used only to find if their is an entry in the cache. No transient
967 * data from the cache is de-referenced.
968 *
969 */
970#if MACH_PAGEMAP
971/*
972 * MACH page map - an optional optimization where a bit map is maintained
973 * by the VM subsystem for internal objects to indicate which pages of
974 * the object currently reside on backing store. This existence map
975 * duplicates information maintained by the vnode pager. It is
976 * created at the time of the first pageout against the object, i.e.
977 * at the same time pager for the object is created. The optimization
978 * is designed to eliminate pager interaction overhead, if it is
979 * 'known' that the page does not exist on backing store.
980 *
981 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
982 * either marked as paged out in the existence map for the object or no
983 * existence map exists for the object. LOOK_FOR() is one of the
984 * criteria in the decision to invoke the pager. It is also used as one
985 * of the criteria to terminate the scan for adjacent pages in a clustered
986 * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for
987 * permanent objects. Note also that if the pager for an internal object
988 * has not been created, the pager is not invoked regardless of the value
989 * of LOOK_FOR() and that clustered pagein scans are only done on an object
990 * for which a pager has been created.
991 *
992 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
993 * is marked as paged out in the existence map for the object. PAGED_OUT()
994 * PAGED_OUT() is used to determine if a page has already been pushed
995 * into a copy object in order to avoid a redundant page out operation.
996 */
997#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
998 != VM_EXTERNAL_STATE_ABSENT)
999#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
1000 == VM_EXTERNAL_STATE_EXISTS)
1001#else /* MACH_PAGEMAP */
1002/*
1003 * If the MACH page map optimization is not enabled,
1004 * LOOK_FOR() always evaluates to TRUE. The pager will always be
1005 * invoked to resolve missing pages in an object, assuming the pager
1006 * has been created for the object. In a clustered page operation, the
1007 * absence of a page on backing backing store cannot be used to terminate
1008 * a scan for adjacent pages since that information is available only in
1009 * the pager. Hence pages that may not be paged out are potentially
1010 * included in a clustered request. The vnode pager is coded to deal
1011 * with any combination of absent/present pages in a clustered
1012 * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager
1013 * will always be invoked to push a dirty page into a copy object assuming
1014 * a pager has been created. If the page has already been pushed, the
1015 * pager will ingore the new request.
1016 */
1017#define LOOK_FOR(o, f) TRUE
1018#define PAGED_OUT(o, f) FALSE
1019#endif /* MACH_PAGEMAP */
1020
/*
 * tws_build_cluster:
 *	Given a fault range [*start, *end) in `object`, grow the range into
 *	a pagein cluster of at most `max_length` bytes.  Two strategies are
 *	visible below: if a startup (pre-heat) cache exists and is young,
 *	expand around 32-page "communities" recorded in the working set;
 *	otherwise speculate outward page-by-page, bridging resident pages
 *	and reserving absent placeholder pages as it goes.
 *	*start and *end are updated in place.  Returns with tws unlocked.
 */
void
tws_build_cluster(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	*start,
	vm_object_offset_t	*end,
	vm_size_t		max_length)
{
	tws_hash_line_t		line;
	task_t			task;	/* NOTE(review): unused local */
	vm_object_offset_t	before = *start;
	vm_object_offset_t	after = *end;
	vm_object_offset_t	original_start = *start;
	vm_object_offset_t	original_end = *end;
	vm_size_t		length = (vm_size_t)(*end - *start);
	vm_page_t		m;
	kern_return_t		kret;
	vm_object_offset_t	object_size;
	int			age_of_cache;
	int			pre_heat_size;
	unsigned int		ele_cache;
	unsigned int		end_cache = 0;
	unsigned int		start_cache = 0;

	/* no clustering for private objects or objects without a pager */
	if((object->private) || !(object->pager))
		return;

	if (!object->internal) {
		/* external (vnode-backed): ask the pager for the real size */
		kret = vnode_pager_get_object_size(
			object->pager,
			&object_size);
	} else {
		object_size = object->size;
	}

	/* best-effort: bail rather than block on the working-set lock */
	if((!tws) || (!tws_lock_try(tws))) {
		return;
	}

	age_of_cache = ((sched_tick
			- tws->time_of_creation) >> SCHED_TICK_SHIFT);

	/* When pre-heat files are not available, resort to speculation */
	/* based on size of file */

	if(tws->startup_cache || object->internal || age_of_cache > 15 ||
		(age_of_cache > 5 &&
			vm_page_free_count < (vm_page_free_target * 2) )) {
		pre_heat_size = 0;
	} else {
		/* larger files earn more speculative read-ahead */
		if (object_size > (vm_object_offset_t)(1024 * 1024))
			pre_heat_size = 8 * PAGE_SIZE;
		else if (object_size > (vm_object_offset_t)(128 * 1024))
			pre_heat_size = 4 * PAGE_SIZE;
		else
			pre_heat_size = 2 * PAGE_SIZE;
	}

	/*
	 * Startup-cache path: if the recorded 32-page frame around *start
	 * has at least 3 populated pages, snap the range to the frame and
	 * try to extend it a frame at a time in both directions.
	 */
	if ((age_of_cache < 10) && (tws->startup_cache)) {
		if ((max_length >= ((*end - *start)
					+ (32 * PAGE_SIZE))) &&
				(tws_test_for_community(tws, object,
						*start, 3, &ele_cache))) {
			int expanded;
			start_cache = ele_cache;
			*start = *start & TWS_HASH_OFF_MASK;
			*end = *start + (32 * PAGE_SIZE_64);
			if(*end > object_size) {
				*end = trunc_page_64(object_size);
				max_length = 0;
				if(before >= *end) {
					*end = after;
				} else {
					end_cache = ele_cache;
				}
			} else {
				end_cache = ele_cache;
			}
			/* grow while a full extra frame still fits in max_length */
			while (max_length > ((*end - *start)
						+ (32 * PAGE_SIZE))) {
				expanded = 0;
				after = *end;
				before = *start - PAGE_SIZE_64;
				if((*end <= (object->size
						+ (32 * PAGE_SIZE_64))) &&
					(tws_test_for_community(tws,
							object, after,
							5, &ele_cache))) {
					*end = after +
							(32 * PAGE_SIZE_64);
					if(*end > object_size) {
						*end = trunc_page_64(object_size);
						max_length = 0;
						if(*start >= *end) {
							*end = after;
						}
					}
					end_cache = ele_cache;
					expanded = 1;
				}
				if (max_length > ((*end - *start)
						+ (32 * PAGE_SIZE_64))) {
					/*
					 * NOTE(review): this breaks while room
					 * apparently remains; looks inverted
					 * relative to the loop condition --
					 * preserved as-is, verify upstream.
					 */
					break;
				}
				if((*start >= (32 * PAGE_SIZE_64)) &&
					(tws_test_for_community(tws, object,
							before, 5, &ele_cache))) {
					*start = before;
					start_cache = ele_cache;
					expanded = 1;
				}
				if(expanded == 0)
					break;
			}

			/* clip leading pages absent from the start frame's bitmap */
			if(start_cache != 0) {
				unsigned int mask;

				for (mask = 1; mask != 0; mask = mask << 1) {
					if (*start == original_start)
						break;
					if (!(start_cache & mask))
						*start += PAGE_SIZE_64;
					else
						break;
				}
			}
			/* clip trailing pages absent from the end frame's bitmap */
			if(end_cache != 0) {
				unsigned int mask;

				for (mask = 0x80000000;
						mask != 0; mask = mask >> 1) {
					if (*end == original_end)
						break;
					if(!(end_cache & mask))
						*end -= PAGE_SIZE_64;
					else
						break;
				}
			}

			if (*start >= *end)
				panic("bad clipping occurred\n");

			tws_unlock(tws);
			return;
		}
	}

	/*
	 * Speculative path, forward direction: extend `after` page by page
	 * until max_length, end of object, or a stopping condition.
	 */
	while ((length < max_length) &&
		(object_size >=
			(after + PAGE_SIZE_64))) {
		if(length >= pre_heat_size) {
			/* beyond pure speculation: require the page (or its
			 * successor) to be known to the working set */
			if(tws_internal_lookup(tws, after, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t extend;

				extend = after + PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}

		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, after))) {
			break;
		}

		if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
			/* we can bridge resident pages */
			after += PAGE_SIZE_64;
			length += PAGE_SIZE;
			continue;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager. If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		/* mark as an absent placeholder awaiting the pager's data */
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, after);
		object->absent_count++;
		after += PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	*end = after;
	/*
	 * Speculative path, backward direction: same logic walking `before`
	 * down toward offset 0, moving *start as placeholders are inserted.
	 */
	while (length < max_length) {
		if (before == 0)
			break;
		before -= PAGE_SIZE_64;

		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, before, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t extend;

				extend = before;
				if (extend == 0)
					break;
				extend -= PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}
		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, before))) {
			break;
		}

		if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
			/* we can bridge resident pages */
			*start -= PAGE_SIZE_64;
			length += PAGE_SIZE;
			continue;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager. If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, before);
		object->absent_count++;
		*start -= PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	tws_unlock(tws);
}
1289
1290tws_line_signal(
1291 tws_hash_t tws,
1292 vm_map_t map,
1293 tws_hash_line_t hash_line,
1294 vm_offset_t target_page)
1295{
1296 unsigned int i,j;
1297 vm_object_t object;
1298 vm_object_offset_t offset;
1299 vm_object_offset_t before;
1300 vm_object_offset_t after;
1301 struct tws_hash_ele *element;
1302 vm_page_t m,p;
1303 kern_return_t rc;
1304
1305 if(tws->style != TWS_HASH_STYLE_SIGNAL)
1306 return;
1307
1308 vm_map_lock(map);
1309 for (i=0; i<tws->number_of_elements; i++) {
1310
1311 vm_object_offset_t local_off = 0;
1312
1313 if(hash_line->list[i].object == 0)
1314 continue;
1315
1316 element = &hash_line->list[i];
1317
1318 if (element->page_addr == target_page)
1319 continue;
1320
1321 j = 1;
1322 while (j != 0) {
1323 if(j & element->page_cache)
1324 break;
1325 j << 1;
1326 local_off += PAGE_SIZE_64;
1327 }
1328 object = element->object;
1329 offset = element->offset + local_off;
1330
1331 /* first try a fast test to speed up no-op signal */
1332 if (((p = vm_page_lookup(object, offset)) != NULL)
1333 || (object->pager == NULL)
1334 || (object->shadow_severed)) {
1335 continue;
1336 }
1337
1338 if((!object->alive) ||
1339 (!object->pager_created) || (!object->pager_ready))
1340 continue;
1341
1342 if (object->internal) {
1343 if (object->existence_map == NULL) {
1344 if (object->shadow)
1345 continue;
1346 } else {
1347 if(!LOOK_FOR(object, offset))
1348 continue;
1349 }
1350 }
1351
1352 vm_object_reference(object);
1353 vm_map_unlock(map);
1354
1355 if(object->internal) {
1356 m = vm_page_grab();
1357 } else {
1358 m = vm_page_grab_fictitious();
1359 }
1360
1361 if(m == NULL) {
1362 vm_object_deallocate(object);
1363 vm_map_lock(map);
1364 continue;
1365 }
1366
1367 vm_object_lock(object);
1368 if (((p = vm_page_lookup(object, offset)) != NULL)
1369 || (object->pager == NULL)
1370 || (object->shadow_severed)) {
1371 VM_PAGE_FREE(m);
1372 vm_object_unlock(object);
1373 vm_object_deallocate(object);
1374 vm_map_lock(map);
1375 continue;
1376 }
1377
1378 vm_page_insert(m, object, offset);
1379
1380 if (object->absent_count > vm_object_absent_max) {
1381 VM_PAGE_FREE(m);
1382 vm_object_unlock(object);
1383 vm_object_deallocate(object);
1384 vm_map_lock(map);
1385 break;
1386 }
1387 m->list_req_pending = TRUE;
1388 m->absent = TRUE;
1389 m->unusual = TRUE;
1390 object->absent_count++;
1391
1392 before = offset;
1393 after = offset + PAGE_SIZE_64;
1394 tws_build_cluster(tws, object, &before, &after, 0x16000);
1395 vm_object_unlock(object);
1396
1397 rc = memory_object_data_request(object->pager,
1398 before + object->paging_offset,
1399 (vm_size_t)(after - before), VM_PROT_READ);
1400 if (rc != KERN_SUCCESS) {
1401 offset = before;
1402 vm_object_lock(object);
1403 while (offset < after) {
1404 m = vm_page_lookup(object, offset);
1405 if(m && m->absent && m->busy)
1406 VM_PAGE_FREE(m);
1407 offset += PAGE_SIZE;
1408 }
1409 vm_object_unlock(object);
1410 vm_object_deallocate(object);
1411 } else {
1412 vm_object_deallocate(object);
1413 }
1414 vm_map_lock(map);
1415 continue;
1416 }
1417 vm_map_unlock(map);
1418}
1419
1420/* tws locked on entry */
1421
/*
 * tws_create_startup_list:
 *	Snapshot the working set into a single relocatable kalloc'd buffer
 *	(header, then hash table, then hash elements, then page records)
 *	suitable for writing to the per-user page-cache file.  Both
 *	`next`/`element` links and table entries are stored as byte
 *	OFFSETS from their arrays' bases -- not pointers -- so the buffer
 *	can be re-based when read back (see tws_read_startup_file).
 *	Returns NULL on allocation failure.  TWS must be locked on entry.
 */
tws_startup_t
tws_create_startup_list(
	tws_hash_t	tws)
{

	tws_startup_t		startup;
	unsigned int		i,j,k;
	unsigned int		total_elements;
	unsigned int		startup_size;
	unsigned int		sindex;
	unsigned int		hash_index;
	tws_startup_ptr_t	element;

	total_elements = tws->expansion_count *
			(tws->number_of_lines * tws->number_of_elements);

	/* one contiguous allocation: header + table + ele + array */
	startup_size = sizeof(struct tws_startup)
			+ (total_elements * sizeof(tws_startup_ptr_t *))
			+ (total_elements * sizeof(struct tws_startup_ptr))
			+ (total_elements * sizeof(struct tws_startup_ele));
	startup = (tws_startup_t)(kalloc(startup_size));

	if(startup == NULL)
		return startup;

	bzero((char *) startup, startup_size);

	/* carve the three internal arrays out of the single buffer */
	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
			(total_elements * sizeof(tws_startup_ptr_t)));

	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
			(total_elements * sizeof(struct tws_startup_ptr)));

	startup->tws_hash_size = startup_size;
	startup->ele_count = 0;  /* burn first hash ele, else we can't tell from zero */
	startup->array_size = total_elements;
	startup->hash_count = 1;	/* ele[0] reserved -- offset 0 means "end of chain" */

	sindex = 0;


	/* walk every element of every line in every expansion set */
	for(i = 0; i<tws->number_of_lines; i++) {
	   for(j = 0; j<tws->number_of_elements; j++) {
	      for(k = 0; k<tws->expansion_count; k++) {
		tws_hash_ele_t entry;
		unsigned int hash_retry;	/* NOTE(review): set but never read */
		vm_offset_t addr;

		entry = &tws->cache[k][i].list[j];
		addr = entry->page_addr;
		hash_retry = 0;
		if(entry->object != 0) {
			/* get a hash element */
			hash_index = do_startup_hash(addr,
						startup->array_size);

			if(startup->hash_count < total_elements) {
				element = &(startup->ele[startup->hash_count]);
				startup->hash_count += 1;
			} else {
				/* exit we're out of elements */
				break;
			}
			/* place the hash element */
			/* chain-link and table entry are byte offsets from ele[0] */
			element->next = startup->table[hash_index];
			startup->table[hash_index] = (tws_startup_ptr_t)
				((int)element - (int)&startup->ele[0]);

			/* set entry OFFSET in hash element */
			element->element = (tws_startup_ele_t)
				((int)&startup->array[sindex] -
				(int)&startup->array[0]);

			startup->array[sindex].page_addr = entry->page_addr;
			startup->array[sindex].page_cache = entry->page_cache;
			startup->ele_count++;
			sindex++;

		}
	      }
	   }
	}

	return startup;
}
1511
1512
1513/*
1514 * Returns an entire cache line. The line is deleted from the startup
1515 * cache on return. The caller can check startup->ele_count for an empty
1516 * list. Access synchronization is the responsibility of the caller.
1517 */
1518
/*
 * tws_startup_list_lookup:
 *	Look up `addr` (and the 32-page window ending at addr + 0x1F000)
 *	in the startup cache and return the union of the matching
 *	page_cache bitmaps, aligned to `addr`.  Matched bits are cleared
 *	from the cache, and elements whose address matches exactly (or
 *	whose bitmap empties) are unlinked from their hash chain.
 *	All chain links are byte offsets from ele[0]/array[0] (see
 *	tws_create_startup_list); every decoded pointer is bounds-checked
 *	against tws_hash_size since the buffer came from a file.
 *	Access synchronization is the caller's responsibility.
 */
unsigned int
tws_startup_list_lookup(
	tws_startup_t	startup,
	vm_offset_t	addr)
{
	unsigned int	hash_index;
	unsigned int	page_cache_bits;
	unsigned int	startup_shift;
	tws_startup_ele_t	entry;
	vm_offset_t	next_addr;
	tws_startup_ptr_t	element;
	tws_startup_ptr_t	base_ele;
	tws_startup_ptr_t	*previous_ptr;

	page_cache_bits = 0;

	hash_index = do_startup_hash(addr, startup->array_size);

	/* sanity: table slot must lie inside the file-sized buffer */
	if(((unsigned int)&(startup->table[hash_index])) >= startup->tws_hash_size) {
		return page_cache_bits = 0;
	}
	/* decode stored byte offset into a real element pointer */
	element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
			(int)&startup->ele[0]);
	base_ele = element;
	previous_ptr = &(startup->table[hash_index]);
	/* offset 0 decodes to &ele[0] itself, which terminates the chain */
	while(element > &startup->ele[0]) {
		if (((int)element + sizeof(struct tws_startup_ptr))
			> ((int)startup + startup->tws_hash_size)) {
			return page_cache_bits;
		}
		entry = (tws_startup_ele_t)
			((int)element->element
				+ (int)&startup->array[0]);
		if((((int)entry + sizeof(struct tws_startup_ele))
			> ((int)startup + startup->tws_hash_size))
			|| ((int)entry < (int)startup)) {
			return page_cache_bits;
		}
		/* case 1: addr falls inside this entry's 32-page window */
		if ((addr >= entry->page_addr) &&
			(addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache >> startup_shift;
			/* don't dump the pages, unless the addresses */
			/* line up perfectly.  The cache may be used */
			/* by other mappings */
			entry->page_cache &= (1 << startup_shift) - 1;
			if(addr == entry->page_addr) {
				/* exact match: unlink the element from its chain */
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		/* case 2: the window starting at addr overlaps this entry
		 * from below (entry's window begins inside [addr, addr+0x1F000]) */
		next_addr = addr + 0x1F000;
		if ((next_addr >= entry->page_addr) &&
			(next_addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (next_addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
			entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
			if(entry->page_cache == 0) {
				/* bitmap emptied: unlink the element */
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		previous_ptr = &(element->next);
		element = (tws_startup_ptr_t)
			((int) element->next + (int) &startup->ele[0]);
	}

	return page_cache_bits;
}
1614
1615kern_return_t
1616tws_send_startup_info(
1617 task_t task)
1618{
1619
1620 tws_hash_t tws;
1621 tws_startup_t scache;
1622
1623 task_lock(task);
1624 tws = (tws_hash_t)task->dynamic_working_set;
1625 task_unlock(task);
1626 if(tws == NULL) {
1627 return KERN_FAILURE;
1628 }
1629 return tws_internal_startup_send(tws);
1630}
1631
1632
1633kern_return_t
1634tws_internal_startup_send(
1635 tws_hash_t tws)
1636{
1637
1638 tws_startup_t scache;
1639
1640 if(tws == NULL) {
1641 return KERN_FAILURE;
1642 }
1643 tws_lock(tws);
1644 /* used to signal write or release depending on state of tws */
1645 if(tws->startup_cache) {
1646 vm_offset_t startup_buf;
1647 vm_size_t size;
1648 startup_buf = (vm_offset_t)tws->startup_cache;
1649 size = tws->startup_cache->tws_hash_size;
1650 tws->startup_cache = 0;
1651 tws_unlock(tws);
1652 kmem_free(kernel_map, startup_buf, size);
1653 return KERN_SUCCESS;
1654 }
1655 if(tws->startup_name == NULL) {
1656 tws_unlock(tws);
1657 return KERN_FAILURE;
1658 }
1659 scache = tws_create_startup_list(tws);
1660 if(scache == NULL)
1661 return KERN_FAILURE;
1662 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1663 scache, scache->tws_hash_size,
1664 tws->mod, tws->fid);
1665 kfree((vm_offset_t)scache, scache->tws_hash_size);
1666 kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
1667 tws->startup_name = NULL;
1668 tws_unlock(tws);
1669 return KERN_SUCCESS;
1670}
1671
1672kern_return_t
1673tws_handle_startup_file(
1674 task_t task,
1675 unsigned int uid,
1676 char *app_name,
1677 vm_offset_t app_vp,
1678 boolean_t *new_info)
1679
1680{
1681 tws_startup_t startup;
1682 vm_offset_t cache_size;
1683 kern_return_t error;
1684 int fid;
1685 int mod;
1686
1687 *new_info = FALSE;
1688 /* don't pre-heat kernel task */
1689 if(task == kernel_task)
1690 return KERN_SUCCESS;
1691 error = bsd_read_page_cache_file(uid, &fid,
1692 &mod, app_name,
1693 app_vp, &startup,
1694 &cache_size);
1695 if(error) {
1696 return KERN_FAILURE;
1697 }
1698 if(startup == NULL) {
1699 /* Entry for app does not exist, make */
1700 /* one */
1701 /* we will want our own copy of the shared */
1702 /* regions to pick up a true picture of all */
1703 /* the pages we will touch. */
1704 if((lsf_zone->count * lsf_zone->elem_size)
1705 > (lsf_zone->max_size >> 1)) {
1706 /* We don't want to run out of shared memory */
1707 /* map entries by starting too many private versions */
1708 /* of the shared library structures */
1709 return KERN_SUCCESS;
1710 }
1711 *new_info = TRUE;
1712 error = tws_write_startup_file(task,
1713 fid, mod, app_name, uid);
1714 if(error)
1715 return error;
1716 /* use the mod in the write case as an init */
1717 /* flag */
1718 mod = 0;
1719
1720 } else {
1721 error = tws_read_startup_file(task,
1722 (tws_startup_t)startup,
1723 cache_size);
1724 if(error) {
1725 kmem_free(kernel_map,
1726 (vm_offset_t)startup, cache_size);
1727 return error;
1728 }
1729 }
1730 return KERN_SUCCESS;
1731}
1732
1733kern_return_t
1734tws_write_startup_file(
1735 task_t task,
1736 int fid,
1737 int mod,
1738 char *name,
1739 unsigned int uid)
1740{
1741 tws_hash_t tws;
1742 unsigned int string_length;
1743
1744 string_length = strlen(name);
1745
1746 task_lock(task);
1747 tws = (tws_hash_t)task->dynamic_working_set;
1748
1749 task_unlock(task);
1750 if(tws == NULL) {
1751 /* create a dynamic working set of normal size */
1752 task_working_set_create(task, 0,
1753 0, TWS_HASH_STYLE_DEFAULT);
1754 }
1755 tws_lock(tws);
1756
1757 if(tws->startup_name != NULL) {
1758 tws_unlock(tws);
1759 return KERN_FAILURE;
1760 }
1761
1762 tws->startup_name = (char *)
1763 kalloc((string_length + 1) * (sizeof(char)));
1764 if(tws->startup_name == NULL) {
1765 tws_unlock(tws);
1766 return KERN_FAILURE;
1767 }
1768
1769 bcopy(name, (char *)tws->startup_name, string_length + 1);
1770 tws->startup_name_length = (string_length + 1) * sizeof(char);
1771 tws->uid = uid;
1772 tws->fid = fid;
1773 tws->mod = mod;
1774
1775 tws_unlock(tws);
1776 return KERN_SUCCESS;
1777}
1778
1779kern_return_t
1780tws_read_startup_file(
1781 task_t task,
1782 tws_startup_t startup,
1783 vm_offset_t cache_size)
1784{
1785 tws_hash_t tws;
1786 int error;
1787 int lines;
1788 int old_exp_count;
1789
1790 task_lock(task);
1791 tws = (tws_hash_t)task->dynamic_working_set;
1792
1793 if(cache_size < sizeof(struct tws_hash)) {
1794 task_unlock(task);
1795 kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
1796 return(KERN_SUCCESS);
1797 }
1798
1799 /* create a dynamic working set to match file size */
1800 lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
1801 /* we now need to divide out element size and word size */
1802 /* all fields are 4 bytes. There are 8 bytes in each hash element */
1803 /* entry, 4 bytes in each table ptr location and 8 bytes in each */
1804 /* page_cache entry, making a total of 20 bytes for each entry */
1805 lines = (lines/(20));
1806 if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
1807 lines = TWS_SMALL_HASH_LINE_COUNT;
1808 task_unlock(task);
1809 kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
1810 return(KERN_SUCCESS);
1811 } else {
1812 old_exp_count = lines/TWS_HASH_LINE_COUNT;
1813 if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
1814 lines = (old_exp_count + 1)
1815 * TWS_HASH_LINE_COUNT;
1816 }
1817 if(tws == NULL) {
1818 task_working_set_create(task, lines,
1819 0, TWS_HASH_STYLE_DEFAULT);
1820 task_unlock(task);
1821 } else {
1822 task_unlock(task);
1823 tws_expand_working_set(
1824 (vm_offset_t)tws, lines, TRUE);
1825 }
1826 }
1827
1828
1829 tws_lock(tws);
1830
1831 if(tws->startup_cache != NULL) {
1832 tws_unlock(tws);
1833 return KERN_FAILURE;
1834 }
1835
1836
1837 /* now need to fix up internal table pointers */
1838 startup->table = (tws_startup_ptr_t *)
1839 (((int)startup) + (sizeof(struct tws_startup)));
1840 startup->ele = (struct tws_startup_ptr *)
1841 (((vm_offset_t)startup->table) +
1842 (startup->array_size * sizeof(tws_startup_ptr_t)));
1843 startup->array = (struct tws_startup_ele *)
1844 (((vm_offset_t)startup->ele) +
1845 (startup->array_size * sizeof(struct tws_startup_ptr)));
1846 /* the allocation size and file size should be the same */
1847 /* just in case their not, make sure we dealloc correctly */
1848 startup->tws_hash_size = cache_size;
1849
1850
1851 tws->startup_cache = startup;
1852 tws_unlock(tws);
1853 return KERN_SUCCESS;
1854}
1855
1856
1857void
1858tws_hash_ws_flush(tws_hash_t tws) {
1859 tws_startup_t scache;
1860 if(tws == NULL) {
1861 return;
1862 }
1863 tws_lock(tws);
1864 if(tws->startup_name != NULL) {
1865 scache = tws_create_startup_list(tws);
1866 if(scache == NULL) {
1867 /* dump the name cache, we'll */
1868 /* get it next time */
1869 kfree((vm_offset_t)
1870 tws->startup_name,
1871 tws->startup_name_length);
1872 tws->startup_name = NULL;
1873 tws_unlock(tws);
1874 return;
1875 }
1876 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1877 scache, scache->tws_hash_size,
1878 tws->mod, tws->fid);
1879 kfree((vm_offset_t)scache,
1880 scache->tws_hash_size);
1881 kfree((vm_offset_t)
1882 tws->startup_name,
1883 tws->startup_name_length);
1884 tws->startup_name = NULL;
1885 }
1886 tws_unlock(tws);
1887 return;
1888}
1889
1890void
1891tws_hash_destroy(tws_hash_t tws)
1892{
1893 int i,k;
1894 vm_size_t cache_size;
1895
1896 if(tws->startup_cache != NULL) {
1897 kmem_free(kernel_map,
1898 (vm_offset_t)tws->startup_cache,
1899 tws->startup_cache->tws_hash_size);
1900 tws->startup_cache = NULL;
1901 }
1902 if(tws->startup_name != NULL) {
1903 tws_internal_startup_send(tws);
1904 }
1905 for (i=0; i<tws->number_of_lines; i++) {
1906 for(k=0; k<tws->expansion_count; k++) {
1907 /* clear the object refs */
1908 tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
1909 }
1910 }
1911 i = 0;
1912 while (i < tws->expansion_count) {
1913
1914 kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ptr_t)
1915 * tws->number_of_lines
1916 * tws->number_of_elements);
1917 kfree((vm_offset_t)tws->table_ele[i],
1918 sizeof(struct tws_hash_ptr)
1919 * tws->number_of_lines
1920 * tws->number_of_elements);
1921 kfree((vm_offset_t)tws->alt_ele[i],
1922 sizeof(struct tws_hash_ptr)
1923 * tws->number_of_lines
1924 * tws->number_of_elements);
1925 kfree((vm_offset_t)tws->cache[i], sizeof(struct tws_hash_line)
1926 * tws->number_of_lines);
1927 i++;
1928 }
1929 if(tws->startup_name != NULL) {
1930 kfree((vm_offset_t)tws->startup_name,
1931 tws->startup_name_length);
1932 }
1933 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
1934}
1935
1936void
1937tws_hash_clear(tws_hash_t tws)
1938{
1939 int i, k;
1940
1941 for (i=0; i<tws->number_of_lines; i++) {
1942 for(k=0; k<tws->expansion_count; k++) {
1943 /* clear the object refs */
1944 tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
1945 }
1946 }
1947}
1948
1949kern_return_t
1950task_working_set_create(
1951 task_t task,
1952 unsigned int lines,
1953 unsigned int rows,
1954 unsigned int style)
1955{
1956
1957 if (lines == 0) {
1958 lines = TWS_HASH_LINE_COUNT;
1959 }
1960 if (rows == 0) {
1961 rows = TWS_ARRAY_SIZE;
1962 }
1963 if (style == TWS_HASH_STYLE_DEFAULT) {
1964 style = TWS_HASH_STYLE_BASIC;
1965 }
1966 task_lock(task);
1967 if(task->dynamic_working_set != 0) {
1968 task_unlock(task);
1969 return(KERN_FAILURE);
1970 } else if((task->dynamic_working_set
1971 = (vm_offset_t) tws_hash_create(lines, rows, style)) == 0) {
1972 task_unlock(task);
1973 return(KERN_NO_SPACE);
1974 }
1975 task_unlock(task);
1976 return KERN_SUCCESS;
1977}
1978
1979
1980/* Internal use only routines */
1981
1982
1983/*
1984 * internal sub-function for address space lookup
1985 * returns the target element and the address of the
1986 * previous pointer The previous pointer is the address
1987 * of the pointer pointing to the target element.
1988 * TWS must be locked
1989 */
1990
/*
 * tws_traverse_address_hash_list:
 *	Walk the address-hash chains at `index` across all expansion sets
 *	looking for an element covering `page_addr` (and, if `object` is
 *	non-NULL, matching object/offset/map exactly).  On a hit,
 *	*target_ele is the matching chain node and *free_list the free
 *	list it would be recycled to; otherwise *target_ele is NULL.
 *	If `exclusive_addr` is set, a hit additionally requires the
 *	specific page bit for page_addr to be set in page_cache.
 *	Empty elements encountered en route are unlinked and recycled.
 *	TWS must be locked.
 */
void
tws_traverse_address_hash_list (
	tws_hash_t 		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr)
{
	int k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		tws_hash_ele_t ele;
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
		while(cache_ele != NULL) {
			/* this chain mixes both hash kinds; the TWS_ADDR_HASH
			 * tag bit in `element` marks address-hash nodes */
			if(((unsigned int)
				cache_ele->element & TWS_ADDR_HASH) == 0) {
				*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			/* strip the tag bit to recover the payload pointer */
			ele = (tws_hash_ele_t)((unsigned int)
				cache_ele->element & ~TWS_ADDR_HASH);
			if ((ele == 0) || (ele->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					/* unlinking the chain head */
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = base_ele;
				} else {
					/* unlink via the remembered previous link */
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}

			/* range test: element covers TWS_INDEX_MASK bytes */
			if ((ele->page_addr <= page_addr)
				&& (page_addr <= (ele->page_addr +
						(vm_offset_t)TWS_INDEX_MASK))
				&& ((object == NULL)
					|| ((object == ele->object)
					&& (offset == ele->offset)
					&& (map == ele->map)))) {
				if(exclusive_addr) {
					int delta;
					delta = ((page_addr - ele->page_addr)
						>> 12);
					if((1 << delta) & ele->page_cache) {
						/* We've found a match */
						*target_ele = cache_ele;
						*free_list =
							(tws_hash_ptr_t *)
							&(tws->free_hash_ele[k]);
						return;
					}
				} else {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = (tws_hash_ptr_t *)
						&(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
2075
2076
2077/*
2078 * internal sub-function for object space lookup
2079 * returns the target element and the address of the
2080 * previous pointer The previous pointer is the address
2081 * of the pointer pointing to the target element.
2082 * TWS must be locked
2083 */
2084
2085
/*
 * tws_traverse_object_hash_list:
 *	Walk the object-hash chains at `index` across all expansion sets
 *	looking for an element matching (object, line-aligned offset)
 *	whose page_cache intersects `page_mask` (0xFFFFFFFF matches any
 *	populated element).  On a hit, *target_ele is the matching chain
 *	node and *free_list the free list it would be recycled to;
 *	otherwise *target_ele stays NULL.  Empty elements encountered en
 *	route are unlinked and recycled.  TWS must be locked.
 */
void
tws_traverse_object_hash_list (
	tws_hash_t		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		page_mask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list)
{
	int k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = &(tws->table[k][index]);
		while(cache_ele != NULL) {
			/* skip address-hash nodes (tagged with TWS_ADDR_HASH) */
			if((((unsigned int)cache_ele->element)
				& TWS_ADDR_HASH) != 0) {
				*previous_ptr = &(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			if ((cache_ele->element == 0) ||
				(cache_ele->element->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					/* unlinking the chain head */
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = tws->table[k][index];
				} else {
					/* unlink via the remembered previous link */
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}
			/* compare against the element's line-aligned offset */
			if ((cache_ele->element->object == object)
				&& (cache_ele->element->offset ==
				(offset - (offset & ~TWS_HASH_OFF_MASK)))) {
				if((cache_ele->element->page_cache & page_mask)
					|| (page_mask == 0xFFFFFFFF)) {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = &(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
2149
2150
2151/*
2152 * For a given object/offset, discover whether the indexed 32 page frame
2153 * containing the object/offset exists and if their are at least threshold
2154 * pages present. Returns true if population meets threshold.
2155 */
2156int
2157tws_test_for_community(
2158 tws_hash_t tws,
2159 vm_object_t object,
2160 vm_object_offset_t offset,
2161 unsigned int threshold,
2162 unsigned int *page_mask)
2163{
2164 int index;
2165 tws_hash_ptr_t cache_ele;
2166 tws_hash_ptr_t *trailer;
2167 tws_hash_ptr_t *free_list;
2168 int community = 0;
2169
2170 index = do_tws_hash(object, offset,
2171 tws->number_of_elements, tws->number_of_lines);
2172 tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
2173 &cache_ele, &trailer, &free_list);
2174
2175 if(cache_ele != NULL) {
2176 int i;
2177 int ctr;
2178 ctr = 0;
2179 for(i=1; i!=0; i=i<<1) {
2180 if(i & cache_ele->element->page_cache)
2181 ctr++;
2182 if(ctr == threshold) {
2183 community = 1;
2184 *page_mask = cache_ele->element->page_cache;
2185 break;
2186 }
2187 }
2188 }
2189
2190 return community;
2191
2192}
2193
2194
2195/*
2196 * Gets new hash element for object hash from free pools
2197 * TWS must be locked
2198 */
2199
2200tws_hash_ptr_t
2201new_obj_hash(
2202 tws_hash_t tws,
2203 unsigned int set,
2204 unsigned int index)
2205{
2206 tws_hash_ptr_t element;
2207
2208 if(tws->obj_free_count[set] < tws->number_of_lines * tws->number_of_elements) {
2209 element = &(tws->table_ele[set][tws->obj_free_count[set]]);
2210 tws->obj_free_count[set]+=1;
2211 } else if(tws->free_hash_ele[set] == NULL) {
2212 return NULL;
2213 } else {
2214 element = tws->free_hash_ele[set];
2215 if(element == NULL)
2216 return element;
2217 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2218 }
2219 element->element = 0;
2220 element->next = tws->table[set][index];
2221 tws->table[set][index] = element;
2222 return element;
2223}
2224
2225/*
2226 * Gets new hash element for addr hash from free pools
2227 * TWS must be locked
2228 */
2229
2230tws_hash_ptr_t
2231new_addr_hash(
2232 tws_hash_t tws,
2233 unsigned int set,
2234 unsigned int index)
2235{
2236 tws_hash_ptr_t element;
2237
2238 if(tws->addr_free_count[set]
2239 < tws->number_of_lines * tws->number_of_elements) {
2240 element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
2241 tws->addr_free_count[set]+=1;
2242 } else if(tws->free_hash_ele[set] == NULL) {
2243 return NULL;
2244 } else {
2245 element = tws->free_hash_ele[set];
2246 if(element == NULL)
2247 return element;
2248 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2249 }
2250 element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
2251 element->next = tws->table[set][index];
2252 tws->table[set][index] = element;
2253 return element;
2254}