1 int startup_miss = 0;
2 /*
3 * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * The contents of this file constitute Original Code as defined in and
8 * are subject to the Apple Public Source License Version 1.1 (the
9 * "License"). You may not use this file except in compliance with the
10 * License. Please obtain a copy of the License at
11 * http://www.apple.com/publicsource and read it before using this file.
12 *
13 * This Original Code and all software distributed under the License are
14 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
18 * License for the specific language governing rights and limitations
19 * under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 */
25 /*
26 * File: vm/task_working_set.c
27 * Author: Chris Youngworth
28 * Date: 2001
29 *
30  * Working set detection and maintenance module
31 */
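/*
 * Annotation (inferred from the code below; not in the original file):
 * a tws_hash caches recently touched object/offset pairs in "lines" of
 * number_of_elements entries, replicated expansion_count times as the
 * set grows.  Each tws_hash_ele covers a 32-page window of an object
 * (offset masked with TWS_HASH_OFF_MASK) and records the pages touched
 * in that window as bits of the 32-bit page_cache mask, one bit per 4K
 * page.  Entries are reachable through two chained hash tables: an
 * object/offset hash (do_tws_hash) and an address hash (alt_tws_hash)
 * whose element pointers carry the TWS_ADDR_HASH tag.
 */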
32
33
34 #include <mach/mach_types.h>
35 #include <mach/shared_memory_server.h>
36 #include <vm/task_working_set.h>
37 #include <vm/vm_kern.h>
38 #include <vm/vm_map.h>
39 #include <vm/vm_page.h>
40 #include <vm/vm_pageout.h>
41 #include <kern/sched.h>
42
43 extern unsigned sched_tick;
44 extern zone_t lsf_zone;
45
46 /* declarations for internal use only routines */
47
48 tws_startup_t
49 tws_create_startup_list(
50 tws_hash_t tws);
51
52 unsigned int
53 tws_startup_list_lookup(
54 tws_startup_t startup,
55 vm_offset_t addr);
56
57 kern_return_t
58 tws_internal_startup_send(
59 tws_hash_t tws);
60
61 void
62 tws_traverse_address_hash_list (
63 tws_hash_t tws,
64 unsigned int index,
65 vm_offset_t page_addr,
66 vm_object_t object,
67 vm_object_offset_t offset,
68 vm_map_t map,
69 tws_hash_ptr_t *target_ele,
70 tws_hash_ptr_t **previous_ptr,
71 tws_hash_ptr_t **free_list,
72 unsigned int exclusive_addr);
73
74 void
75 tws_traverse_object_hash_list (
76 tws_hash_t tws,
77 unsigned int index,
78 vm_object_t object,
79 vm_object_offset_t offset,
80 unsigned int page_mask,
81 tws_hash_ptr_t *target_ele,
82 tws_hash_ptr_t **previous_ptr,
83 tws_hash_ptr_t **free_list);
84
85 tws_hash_ptr_t
86 new_addr_hash(
87 tws_hash_t tws,
88 unsigned int set,
89 unsigned int index);
90
91 tws_hash_ptr_t
92 new_obj_hash(
93 tws_hash_t tws,
94 unsigned int set,
95 unsigned int index);
96
97 int tws_test_for_community(
98 tws_hash_t tws,
99 vm_object_t object,
100 vm_object_offset_t offset,
101 unsigned int threshold,
102 unsigned int *page_mask);
103
104 /* Note: all of the routines below depend on the associated map lock for */
105 /* synchronization, the map lock will be on when the routines are called */
106 /* and on when they return */
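/*
 * Illustrative only (annotation, not from the original source): a
 * fault path holding the map lock might drive these routines roughly
 * as follows.
 *
 *	tws_hash_line_t line;
 *	if (tws_lookup(tws, offset, object, &line) != KERN_SUCCESS)
 *		(void) tws_insert(tws, offset, object, page_addr, map);
 *	tws_build_cluster(tws, object, &start, &end, max_length);
 */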
107
108 tws_hash_t
109 tws_hash_create(
110 unsigned int lines,
111 unsigned int rows,
112 unsigned int style)
113 {
114 tws_hash_t tws;
115 int i,j;
116
117
118 if ((style != TWS_HASH_STYLE_BASIC) &&
119 (style != TWS_HASH_STYLE_SIGNAL)) { /* was a duplicated BASIC test; SIGNAL presumed intended */
120 return((tws_hash_t)NULL);
121 }
122
123
124 tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
125 if(tws == (tws_hash_t)NULL)
126 return tws;
127
128 if((tws->table[0] = (tws_hash_ptr_t *)
129 kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
130 == NULL) {
131 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
132 return (tws_hash_t)NULL;
133 }
134 if((tws->table_ele[0] = (tws_hash_ptr_t)
135 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
136 == NULL) {
137 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
138 * lines * rows);
139 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
140 return (tws_hash_t)NULL;
141 }
142 if((tws->alt_ele[0] = (tws_hash_ptr_t)
143 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
144 == NULL) {
145 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
146 * lines * rows);
147 kfree((vm_offset_t)tws->table_ele[0],
148 sizeof(struct tws_hash_ptr)
149 * lines * rows);
150 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
151 return (tws_hash_t)NULL;
152 }
153 if((tws->cache[0] = (struct tws_hash_line *)
154 kalloc(sizeof(struct tws_hash_line) * lines))
155 == NULL) {
156 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
157 * lines * rows);
158 kfree((vm_offset_t)tws->table_ele[0],
159 sizeof(struct tws_hash_ptr)
160 * lines * rows);
161 kfree((vm_offset_t)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
162 * lines * rows);
163 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
164 return (tws_hash_t)NULL;
165 }
166 tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
167 tws->obj_free_count[0] = 0;
168 tws->addr_free_count[0] = 0;
169
170 /* most defaults are such that a bzero will initialize */
171 bzero((char *)tws->table[0],sizeof(tws_hash_ptr_t)
172 * lines * rows);
173 bzero((char *)tws->table_ele[0],sizeof(struct tws_hash_ptr)
174 * lines * rows);
175 bzero((char *)tws->alt_ele[0],sizeof(struct tws_hash_ptr)
176 * lines * rows);
177 bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
178 * lines);
179
180 mutex_init(&tws->lock, ETAP_VM_MAP);
181 tws->style = style;
182 tws->current_line = 0;
183 tws->pageout_count = 0;
184 tws->line_count = 0;
185 tws->startup_cache = NULL;
186 tws->startup_name = NULL;
187 tws->number_of_lines = lines;
188 tws->number_of_elements = rows;
189 tws->expansion_count = 1;
190 tws->lookup_count = 0;
191 tws->insert_count = 0;
192 tws->time_of_creation = sched_tick;
193
194 return tws;
195 }
196
197 int newtest = 0;
198 void
199 tws_hash_line_clear(
200 tws_hash_t tws,
201 tws_hash_line_t hash_line,
202 boolean_t live)
203 {
204 struct tws_hash_ele *hash_ele;
205 struct tws_hash_ptr **trailer;
206 struct tws_hash_ptr **free_list;
207 tws_hash_ele_t addr_ele;
208 int index;
209 unsigned int i, j, k;
210 int dump_pmap;
211 int hash_loop;
212
213
214 if(tws->line_count < tws->number_of_lines) {
215 tws->line_count++;
216 dump_pmap = 1;
217 } else {
218 if(tws->pageout_count != vm_pageout_scan_event_counter) {
219 tws->pageout_count =
220 vm_pageout_scan_event_counter;
221 tws->line_count = 0;
222 dump_pmap = 1;
223 } else {
224 dump_pmap = 0;
225 }
226 }
227 hash_line->ele_count = 0;
228
229 for (i=0; i<tws->number_of_elements; i++) {
230 hash_loop = 0;
231 hash_ele = &(hash_line->list[i]);
232 if(hash_ele->object != 0) {
233
234 vm_object_offset_t local_off = 0;
235 tws_hash_ptr_t cache_ele;
236
237 index = alt_tws_hash(
238 hash_ele->page_addr & TWS_HASH_OFF_MASK,
239 tws->number_of_elements,
240 tws->number_of_lines);
241
242 tws_traverse_address_hash_list(tws, index,
243 hash_ele->page_addr, hash_ele->object,
244 hash_ele->offset, hash_ele->map,
245 &cache_ele, &trailer, &free_list, 0);
246 if(cache_ele != NULL) {
247 addr_ele = (tws_hash_ele_t)((unsigned int)
248 (cache_ele->element) & ~TWS_ADDR_HASH);
249 if(addr_ele != hash_ele)
250 panic("tws_hash_line_clear:"
251 " out of sync\n");
252 cache_ele->element = 0;
253 *trailer = cache_ele->next;
254 cache_ele->next = *free_list;
255 *free_list = cache_ele;
256 }
257
258 index = alt_tws_hash(
259 (hash_ele->page_addr - 0x1f000)
260 & TWS_HASH_OFF_MASK,
261 tws->number_of_elements,
262 tws->number_of_lines);
263
264 tws_traverse_address_hash_list(tws, index,
265 hash_ele->page_addr, hash_ele->object,
266 hash_ele->offset, hash_ele->map,
267 &cache_ele, &trailer, &free_list, 0);
268
269 if(cache_ele != NULL) {
270 addr_ele = (tws_hash_ele_t)((unsigned int)
271 (cache_ele->element) & ~TWS_ADDR_HASH);
272 if(addr_ele != hash_ele)
273 panic("tws_hash_line_clear: "
274 "out of sync\n");
275 cache_ele->element = 0;
276 *trailer = cache_ele->next;
277 cache_ele->next = *free_list;
278 *free_list = cache_ele;
279 }
280
281
282 if((hash_ele->map != NULL) && (live)) {
283 vm_page_t p;
284
285 for (j = 0x1; j != 0; j = j<<1) {
286 if(j & hash_ele->page_cache) {
287 p = vm_page_lookup(hash_ele->object,
288 hash_ele->offset + local_off);
289 if((p != NULL) && (p->wire_count == 0)
290 && (dump_pmap == 1)) {
291 pmap_remove_some_phys((pmap_t)
292 vm_map_pmap(
293 hash_ele->map),
294 p->phys_addr);
295 }
296 }
297 local_off += PAGE_SIZE_64;
298 }
299 }
300
301 if(tws->style == TWS_HASH_STYLE_SIGNAL) {
302 vm_object_deallocate(hash_ele->object);
303 vm_map_deallocate(hash_ele->map);
304 }
305
306 index = do_tws_hash(hash_ele->object, hash_ele->offset,
307 tws->number_of_elements,
308 tws->number_of_lines);
309
310 tws_traverse_object_hash_list(tws,
311 index, hash_ele->object, hash_ele->offset,
312 0xFFFFFFFF, &cache_ele, &trailer, &free_list);
313 if((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
314 cache_ele->element = 0;
315 *trailer = cache_ele->next;
316 cache_ele->next = *free_list;
317 *free_list = cache_ele;
318 }
319 hash_ele->object = 0;
320 }
321 }
322 }
323
324 kern_return_t
325 tws_lookup(
326 tws_hash_t tws,
327 vm_object_offset_t offset,
328 vm_object_t object,
329 tws_hash_line_t *line)
330 {
331 int index;
332 int loop;
333 int set;
334 int ele_line;
335 vm_offset_t pagenum;
336 tws_hash_ptr_t cache_ele;
337 tws_hash_ptr_t *trailer;
338 tws_hash_ptr_t *free_list;
339
340 /* don't cache private objects */
341 if(object->private)
342 return KERN_SUCCESS;
343
344 if(!tws_lock_try(tws)) {
345 return KERN_FAILURE;
346 }
347
348 index = do_tws_hash(object, offset,
349 tws->number_of_elements, tws->number_of_lines);
350 loop = 0;
351
352 tws->lookup_count++;
353 if(tws->lookup_count == 0)
354 tws->insert_count = 0;
355 if(tws->startup_name != NULL) {
356 int age_of_cache;
357 age_of_cache = ((sched_tick
358 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
359 if (age_of_cache > 35) {
360 tws_unlock(tws);
361 return KERN_OPERATION_TIMED_OUT;
362 }
363 }
364
365 if(tws->lookup_count > (4 * tws->expansion_count
366 * tws->number_of_elements * tws->number_of_lines) &&
367 (tws->lookup_count > (2 * tws->insert_count))) {
368 if(tws->startup_cache) {
369 int age_of_cache;
370 age_of_cache = ((sched_tick
371 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
372 if (age_of_cache > 60) {
373 tws_unlock(tws);
374 return KERN_OPERATION_TIMED_OUT;
375 }
376 }
377 }
378
379 pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
380 pagenum = pagenum >> 12;
381 pagenum = 1 << pagenum; /* get the appropriate page in 32 page block */
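/* annotation: e.g. an offset 0x7000 into the 32-page window gives
   pagenum = 7, so the mask below is 1<<7 = 0x80; the traversal only
   matches entries whose page_cache has that page's bit set */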
382 tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
383 &cache_ele, &trailer, &free_list);
384 if(cache_ele != NULL) {
385 set = cache_ele->element->line/tws->number_of_lines;
386 ele_line = cache_ele->element->line - (set * tws->number_of_lines);
387 *line = &tws->cache[set][ele_line];
388 tws_unlock(tws);
389 return KERN_SUCCESS;
390 }
391
392 tws_unlock(tws);
393 return KERN_FAILURE;
394
395
396 }
397
398 kern_return_t
399 tws_expand_working_set(
400 vm_offset_t tws,
401 int line_count,
402 boolean_t dump_data)
403 {
404 tws_hash_t new_tws;
405 tws_hash_t old_tws;
406 unsigned int i,j,k;
407 struct tws_hash temp;
408
409 old_tws = (tws_hash_t)tws;
410
411 /* Note we do an elaborate dance to preserve the header that */
412 /* task is pointing to. In this way we can avoid taking a task */
413 /* lock every time we want to access the tws */
414
415 if (old_tws->number_of_lines >= line_count) {
416 return KERN_FAILURE;
417 }
418 if((new_tws = tws_hash_create(line_count,
419 old_tws->number_of_elements, old_tws->style)) == 0) {
420 return(KERN_NO_SPACE);
421 }
422 tws_lock(old_tws);
423
424 if(!dump_data) {
425 for(i = 0; i<old_tws->number_of_lines; i++) {
426 for(j = 0; j<old_tws->number_of_elements; j++) {
427 for(k = 0; k<old_tws->expansion_count; k++) {
428 tws_hash_ele_t entry;
429 vm_object_offset_t paddr;
430 unsigned int page_index;
431 entry = &old_tws->cache[k][i].list[j];
432 if(entry->object != 0) {
433 paddr = 0;
434 for(page_index = 1; page_index != 0;
435 page_index = page_index << 1) {
436 if (entry->page_cache & page_index) {
437 tws_insert(new_tws,
438 entry->offset+paddr,
439 entry->object,
440 entry->page_addr+paddr,
441 entry->map);
442 }
443 paddr+=PAGE_SIZE;
444 }
445
446 }
447 }
448 }
449 }
450 }
451
452 temp.style = new_tws->style;
453 temp.current_line = new_tws->current_line;
454 temp.pageout_count = new_tws->pageout_count;
455 temp.line_count = new_tws->line_count;
456 temp.number_of_lines = new_tws->number_of_lines;
457 temp.number_of_elements = new_tws->number_of_elements;
458 temp.expansion_count = new_tws->expansion_count;
459 temp.lookup_count = new_tws->lookup_count;
460 temp.insert_count = new_tws->insert_count;
461 for(i = 0; i<new_tws->expansion_count; i++) {
462 temp.obj_free_count[i] = new_tws->obj_free_count[i];
463 temp.addr_free_count[i] = new_tws->addr_free_count[i];
464 temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
465 temp.table[i] = new_tws->table[i];
466 temp.table_ele[i] = new_tws->table_ele[i];
467 temp.alt_ele[i] = new_tws->alt_ele[i];
468 temp.cache[i] = new_tws->cache[i];
469 }
470
471 new_tws->style = old_tws->style;
472 new_tws->current_line = old_tws->current_line;
473 new_tws->pageout_count = old_tws->pageout_count;
474 new_tws->line_count = old_tws->line_count;
475 new_tws->number_of_lines = old_tws->number_of_lines;
476 new_tws->number_of_elements = old_tws->number_of_elements;
477 new_tws->expansion_count = old_tws->expansion_count;
478 new_tws->lookup_count = old_tws->lookup_count;
479 new_tws->insert_count = old_tws->insert_count;
480 for(i = 0; i<old_tws->expansion_count; i++) {
481 new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
482 new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
483 new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
484 new_tws->table[i] = old_tws->table[i];
485 new_tws->table_ele[i] = old_tws->table_ele[i];
486 new_tws->alt_ele[i] = old_tws->alt_ele[i];
487 new_tws->cache[i] = old_tws->cache[i];
488 }
489
490 old_tws->style = temp.style;
491 old_tws->current_line = temp.current_line;
492 old_tws->pageout_count = temp.pageout_count;
493 old_tws->line_count = temp.line_count;
494 old_tws->number_of_lines = temp.number_of_lines;
495 old_tws->number_of_elements = temp.number_of_elements;
496 old_tws->expansion_count = temp.expansion_count;
497 old_tws->lookup_count = temp.lookup_count;
498 old_tws->insert_count = temp.insert_count;
499 for(i = 0; i<temp.expansion_count; i++) {
500 old_tws->obj_free_count[i] = temp.obj_free_count[i];
501 old_tws->addr_free_count[i] = temp.addr_free_count[i];
502 old_tws->free_hash_ele[i] = NULL;
503 old_tws->table[i] = temp.table[i];
504 old_tws->table_ele[i] = temp.table_ele[i];
505 old_tws->alt_ele[i] = temp.alt_ele[i];
506 old_tws->cache[i] = temp.cache[i];
507 }
508
509 tws_hash_destroy(new_tws);
510 tws_unlock(old_tws);
511 return KERN_SUCCESS;
512 }
513
514 tws_hash_t test_tws = 0;
515
516 kern_return_t
517 tws_insert(
518 tws_hash_t tws,
519 vm_object_offset_t offset,
520 vm_object_t object,
521 vm_offset_t page_addr,
522 vm_map_t map)
523 {
524 queue_t bucket;
525 unsigned int index;
526 unsigned int alt_index;
527 unsigned int index_enum[2];
528 unsigned int ele_index;
529 tws_hash_ptr_t cache_ele;
530 tws_hash_ptr_t obj_ele = NULL;
531 tws_hash_ptr_t addr_ele = NULL;
532 tws_hash_ptr_t *trailer;
533 tws_hash_ptr_t *free_list;
534 tws_hash_ele_t target_element = NULL;
535 int i,k;
536 int current_line;
537 int set;
538 int ctr;
539 unsigned int startup_cache_line;
540 vm_offset_t startup_page_addr;
541 int cache_full = 0;
542 int ask_for_startup_cache_release = 0;
543
544
545 if(!tws_lock_try(tws)) {
546 return KERN_FAILURE;
547 }
548 tws->insert_count++;
549 current_line = 0xFFFFFFFF;
550
551 startup_cache_line = 0;
552 startup_page_addr =
553 page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
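/* annotation: (offset - (offset & TWS_HASH_OFF_MASK)) is the byte
   offset into the 32-page window, so startup_page_addr is the address
   that maps the base of the window; startup-cache lookups are keyed
   on these window-aligned addresses */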
554 if(tws->startup_cache) {
555 int age_of_cache;
556 age_of_cache = ((sched_tick - tws->time_of_creation)
557 >> SCHED_TICK_SHIFT);
558 startup_cache_line = tws_startup_list_lookup(
559 tws->startup_cache, startup_page_addr);
560 if(tws == test_tws) {
561 printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", startup_cache_line, startup_page_addr, object, offset);
562 }
563 if(age_of_cache > 60) {
564 ask_for_startup_cache_release = 1;
565 }
566 }
567 if((tws->startup_name != NULL) && (tws->mod == 0)) {
568 /* Ensure as good a working set as possible */
569 pmap_remove(map->pmap, 0, GLOBAL_SHARED_TEXT_SEGMENT);
570 pmap_remove(map->pmap,
571 GLOBAL_SHARED_DATA_SEGMENT
572 + SHARED_DATA_REGION_SIZE, 0xFFFFF000);
573 }
574
575 /* This next bit of code, the hash and the alternate hash, */
576 /* are all made necessary because of IPC COW */
577
578 /* Note: the use of page_addr modified by delta from offset */
579 /* frame base means we may miss some previous entries. However */
580 /* we will not miss the present entry. This is most important */
581 /* in avoiding duplication of entries against long lived non-cow */
582 /* objects */
583 index_enum[0] = alt_tws_hash(
584 page_addr & TWS_HASH_OFF_MASK,
585 tws->number_of_elements, tws->number_of_lines);
586
587 index_enum[1] = alt_tws_hash(
588 (page_addr - 0x1f000) & TWS_HASH_OFF_MASK,
589 tws->number_of_elements, tws->number_of_lines);
590
591 for(ctr = 0; ctr < 2;) {
592 tws_hash_ele_t resident;
593 tws_traverse_address_hash_list(tws,
594 index_enum[ctr], page_addr, NULL,
595 0, NULL,
596 &cache_ele, &trailer, &free_list, 1);
597 if(cache_ele != NULL) {
598 /* found one */
599 resident = (tws_hash_ele_t)((unsigned int)
600 cache_ele->element & ~TWS_ADDR_HASH);
601 if((object == resident->object) &&
602 resident->offset ==
603 (offset & TWS_HASH_OFF_MASK)) {
604 /* This is our object/offset */
605 resident->page_cache
606 |= startup_cache_line;
607 resident->page_cache |=
608 (1<<(((vm_offset_t)
609 (offset & TWS_INDEX_MASK))>>12));
610 tws_unlock(tws);
611 if(ask_for_startup_cache_release)
612 return KERN_OPERATION_TIMED_OUT;
613 return KERN_SUCCESS;
614 }
615 if((object->shadow ==
616 resident->object) &&
617 ((resident->offset
618 + object->shadow_offset)
619 == (offset & TWS_HASH_OFF_MASK))) {
620 /* if we just shadowed, inherit */
621 /* access pattern from parent */
622 startup_cache_line |=
623 resident->page_cache;
624 /* throw out old entry */
625 resident->page_cache = 0;
626 break;
627 } else {
628 resident->page_cache &=
629 ~(1<<(((vm_offset_t)(page_addr
630 - resident->page_addr))
631 >>12));
632 }
633 /* Throw out old entry if there are no */
634 /* more pages in cache */
635 if(resident->page_cache == 0) {
636 /* delete addr hash entry */
637 cache_ele->element = 0;
638 *trailer = cache_ele->next;
639 cache_ele->next = *free_list;
640 *free_list = cache_ele;
641 /* go after object hash */
642 index = do_tws_hash(
643 resident->object,
644 resident->offset,
645 tws->number_of_elements,
646 tws->number_of_lines);
647 tws_traverse_object_hash_list(tws,
648 index, resident->object,
649 resident->offset,
650 0xFFFFFFFF, &cache_ele,
651 &trailer, &free_list);
652 if(cache_ele != NULL) {
653 if(tws->style ==
654 TWS_HASH_STYLE_SIGNAL) {
655 vm_object_deallocate(
656 cache_ele->element->object);
657 vm_map_deallocate(
658 cache_ele->element->map);
659 }
660 current_line =
661 cache_ele->element->line;
662 set = current_line
663 /tws->number_of_lines;
664 current_line -= set *
665 tws->number_of_lines;
666 if(cache_ele->element->object != 0) {
667 cache_ele->element->object = 0;
668 tws->cache[set]
669 [current_line].ele_count--;
670 }
671 cache_ele->element = 0;
672 *trailer = cache_ele->next;
673 cache_ele->next = *free_list;
674 *free_list = cache_ele;
675 }
676 }
677 continue;
678 }
679 ctr+=1;
680 }
681
682 /*
683 * We may or may not have a current line setting coming out of
684 * the code above. If we have a current line it means we can
685 * choose to back-fill the spot vacated by a previous entry.
686  * We have yet to do a definitive check using the original obj/off.
687 * We will do that now and override the current line if we
688 * find an element
689 */
690
691 index = do_tws_hash(object, offset,
692 tws->number_of_elements, tws->number_of_lines);
693
694 alt_index = index_enum[0];
695
696 tws_traverse_object_hash_list(tws, index, object, offset,
697 0xFFFFFFFF, &cache_ele, &trailer, &free_list);
698 if(cache_ele != NULL) {
699 obj_ele = cache_ele;
700 current_line = cache_ele->element->line;
701 set = current_line/tws->number_of_lines;
702 current_line -= set * tws->number_of_lines;
703 target_element = cache_ele->element;
704
705 /* Now check to see if we have a hash addr for it */
706 tws_traverse_address_hash_list(tws,
707 alt_index, obj_ele->element->page_addr,
708 obj_ele->element->object,
709 obj_ele->element->offset,
710 obj_ele->element->map,
711 &cache_ele, &trailer, &free_list, 0);
712 if(cache_ele != NULL) {
713 addr_ele = cache_ele;
714 } else {
715 addr_ele = new_addr_hash(tws, set, alt_index);
716 /* if cannot allocate just do without */
717 /* we'll get it next time around */
718 }
719 }
720
721
722
723 if(tws->style == TWS_HASH_STYLE_SIGNAL) {
724 vm_object_reference(object);
725 vm_map_reference(map);
726 }
727
728 if(current_line == 0xFFFFFFFF) {
729 current_line = tws->current_line;
730 set = current_line/tws->number_of_lines;
731 current_line = current_line - (set * tws->number_of_lines);
732
733 #ifdef notdef
734 if(cache_full) {
735 tws->current_line = tws->number_of_lines - 1;
736 }
737 #endif
738 if(tws->cache[set][current_line].ele_count
739 >= tws->number_of_elements) {
740 current_line++;
741 tws->current_line++;
742 if(current_line == tws->number_of_lines) {
743 set++;
744 current_line = 0;
745 if (set == tws->expansion_count) {
746 if((tws->lookup_count <
747 (2 * tws->insert_count)) &&
748 (set<TWS_HASH_EXPANSION_MAX)) {
749 tws->lookup_count = 0;
750 tws->insert_count = 0;
751 if(tws->number_of_lines
752 < TWS_HASH_LINE_COUNT) {
753 tws->current_line--;
754 tws_unlock(tws);
755 return KERN_NO_SPACE;
756 }
757 if((tws->table[set] = (tws_hash_ptr_t *)
758 kalloc(sizeof(tws_hash_ptr_t)
759 * tws->number_of_lines
760 * tws->number_of_elements))
761 == NULL) {
762 set = 0;
763 } else if((tws->table_ele[set] =
764 (tws_hash_ptr_t)
765 kalloc(sizeof(struct tws_hash_ptr)
766 * tws->number_of_lines
767 * tws->number_of_elements))
768 == NULL) {
769 kfree((vm_offset_t)tws->table[set],
770 sizeof(tws_hash_ptr_t)
771 * tws->number_of_lines
772 * tws->number_of_elements);
773 set = 0;
774 } else if((tws->alt_ele[set] =
775 (tws_hash_ptr_t)
776 kalloc(sizeof(struct tws_hash_ptr)
777 * tws->number_of_lines
778 * tws->number_of_elements))
779 == NULL) {
780 kfree((vm_offset_t)tws->table_ele[set],
781 sizeof(struct tws_hash_ptr)
782 * tws->number_of_lines
783 * tws->number_of_elements);
784 kfree((vm_offset_t)tws->table[set],
785 sizeof(tws_hash_ptr_t)
786 * tws->number_of_lines
787 * tws->number_of_elements);
788 tws->table[set] = NULL;
789 set = 0;
790
791 } else if((tws->cache[set] =
792 (struct tws_hash_line *)
793 kalloc(sizeof
794 (struct tws_hash_line)
795 * tws->number_of_lines))
796 == NULL) {
797 kfree((vm_offset_t)tws->table[set],
798 sizeof(tws_hash_ptr_t)
799 * tws->number_of_lines
800 * tws->number_of_elements);
801 kfree((vm_offset_t)tws->table_ele[set],
802 sizeof(struct tws_hash_ptr)
803 * tws->number_of_lines
804 * tws->number_of_elements);
805 kfree((vm_offset_t)tws->alt_ele[set],
806 sizeof(struct tws_hash_ptr)
807 * tws->number_of_lines
808 * tws->number_of_elements);
809 tws->table[set] = NULL;
810 set = 0;
811
812 } else {
813 tws->free_hash_ele[set] =
814 (tws_hash_ptr_t)0;
815 tws->obj_free_count[set] = 0;
816 tws->addr_free_count[set] = 0;
817 bzero((char *)tws->table[set],
818 sizeof(tws_hash_ptr_t)
819 * tws->number_of_lines
820 * tws->number_of_elements);
821 bzero((char *)tws->table_ele[set],
822 sizeof(struct tws_hash_ptr)
823 * tws->number_of_lines
824 * tws->number_of_elements);
825 bzero((char *)tws->alt_ele[set],
826 sizeof(struct tws_hash_ptr)
827 * tws->number_of_lines
828 * tws->number_of_elements);
829 bzero((char *)tws->cache[set],
830 sizeof(struct tws_hash_line)
831 * tws->number_of_lines);
832 }
833 } else {
834 int age_of_cache;
835 age_of_cache =
836 ((sched_tick -
837 tws->time_of_creation)
838 >> SCHED_TICK_SHIFT);
839
840 if((tws->startup_cache) &&
841 (age_of_cache > 60)) {
842 ask_for_startup_cache_release = 1;
843 }
844 if((tws->startup_name != NULL) &&
845 (age_of_cache > 15)) {
846 tws->current_line--;
847 tws_unlock(tws);
848 return KERN_OPERATION_TIMED_OUT;
849 }
850 if((tws->startup_name != NULL) &&
851 (age_of_cache < 15)) {
852 /* If we are creating a */
853 /* cache, don't lose the */
854 /* early info */
855 tws->current_line--;
856 tws_unlock(tws);
857 return KERN_FAILURE;
858 }
859 tws->lookup_count = 0;
860 tws->insert_count = 0;
861 set = 0;
862 }
863 }
864 tws->current_line = set * tws->number_of_lines;
865 }
866 if(set < tws->expansion_count) {
867 tws_hash_line_clear(tws,
868 &(tws->cache[set][current_line]), TRUE);
869 if(tws->cache[set][current_line].ele_count
870 >= tws->number_of_elements) {
871 if(tws->style == TWS_HASH_STYLE_SIGNAL) {
872 vm_object_deallocate(object);
873 vm_map_deallocate(map);
874 }
875 tws_unlock(tws);
876 return KERN_FAILURE;
877 }
878 } else {
879 tws->expansion_count++;
880 }
881 }
882 }
883
884
885 /* set object hash element */
886 if(obj_ele == NULL) {
887 obj_ele = new_obj_hash(tws, set, index);
888 if(obj_ele == NULL) {
889 tws->cache[set][current_line].ele_count
890 = tws->number_of_elements;
891 tws_unlock(tws);
892 return KERN_FAILURE;
893 }
894 }
895
896 /* set address hash element */
897 if(addr_ele == NULL) {
898 addr_ele = new_addr_hash(tws, set, alt_index);
899 }
900
901 if(target_element == NULL) {
902 ele_index = 0;
903 for(i = 0; i<tws->number_of_elements; i++) {
904 if(tws->cache[set][current_line].
905 list[ele_index].object == 0) {
906 break;
907 }
908 ele_index++;
909 if(ele_index >= tws->number_of_elements)
910 ele_index = 0;
911
912 }
913
914 if(i == tws->number_of_elements)
915 panic("tws_insert: no free elements");
916
917 target_element =
918 &(tws->cache[set][current_line].list[ele_index]);
919
920 tws->cache[set][current_line].ele_count++;
921 }
922
923 obj_ele->element = target_element;
924 if(addr_ele) {
925 addr_ele->element = (tws_hash_ele_t)
926 (((unsigned int)target_element) | TWS_ADDR_HASH);
927 }
928 target_element->object = object;
929 target_element->offset = offset & TWS_HASH_OFF_MASK;
930 target_element->page_addr =
931 page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
932 target_element->map = map;
933 target_element->line =
934 current_line + (set * tws->number_of_lines);
935 if(startup_cache_line) {
936 target_element->page_cache = startup_cache_line;
937 }
938 target_element->page_cache |=
939 1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);
940
941
942 tws_unlock(tws);
943 if(ask_for_startup_cache_release)
944 return KERN_OPERATION_TIMED_OUT;
945 return KERN_SUCCESS;
946 }
947
948 /*
949 * tws_build_cluster
950 * lengthen the cluster of pages by the number of pages encountered in the
951 * working set up to the limit requested by the caller. The object needs
952 * to be locked on entry. The map does not because the tws_lookup function
953  * is used only to find if there is an entry in the cache. No transient
954 * data from the cache is de-referenced.
955 *
956 */
957 #if MACH_PAGEMAP
958 /*
959 * MACH page map - an optional optimization where a bit map is maintained
960 * by the VM subsystem for internal objects to indicate which pages of
961 * the object currently reside on backing store. This existence map
962 * duplicates information maintained by the vnode pager. It is
963 * created at the time of the first pageout against the object, i.e.
964 * at the same time pager for the object is created. The optimization
965 * is designed to eliminate pager interaction overhead, if it is
966 * 'known' that the page does not exist on backing store.
967 *
968 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
969 * either marked as paged out in the existence map for the object or no
970 * existence map exists for the object. LOOK_FOR() is one of the
971 * criteria in the decision to invoke the pager. It is also used as one
972 * of the criteria to terminate the scan for adjacent pages in a clustered
973 * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for
974 * permanent objects. Note also that if the pager for an internal object
975 * has not been created, the pager is not invoked regardless of the value
976 * of LOOK_FOR() and that clustered pagein scans are only done on an object
977 * for which a pager has been created.
978 *
979 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
980  * is marked as paged out in the existence map for the object.
981  * PAGED_OUT() is used to determine if a page has already been pushed
982 * into a copy object in order to avoid a redundant page out operation.
983 */
984 #define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
985 != VM_EXTERNAL_STATE_ABSENT)
986 #define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
987 == VM_EXTERNAL_STATE_EXISTS)
988 #else /* MACH_PAGEMAP */
989 /*
990 * If the MACH page map optimization is not enabled,
991 * LOOK_FOR() always evaluates to TRUE. The pager will always be
992 * invoked to resolve missing pages in an object, assuming the pager
993 * has been created for the object. In a clustered page operation, the
994  * absence of a page on backing store cannot be used to terminate
995 * a scan for adjacent pages since that information is available only in
996 * the pager. Hence pages that may not be paged out are potentially
997 * included in a clustered request. The vnode pager is coded to deal
998 * with any combination of absent/present pages in a clustered
999 * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager
1000 * will always be invoked to push a dirty page into a copy object assuming
1001 * a pager has been created. If the page has already been pushed, the
1002  * pager will ignore the new request.
1003 */
1004 #define LOOK_FOR(o, f) TRUE
1005 #define PAGED_OUT(o, f) FALSE
1006 #endif /* MACH_PAGEMAP */
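/*
 * Annotation: the cluster scans below use LOOK_FOR() to stop at the
 * first page the existence map says was never paged out, e.g.
 *
 *	if ((object->existence_map != NULL) && !LOOK_FOR(object, after))
 *		break;
 */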
1007
1008 void
1009 tws_build_cluster(
1010 tws_hash_t tws,
1011 vm_object_t object,
1012 vm_object_offset_t *start,
1013 vm_object_offset_t *end,
1014 vm_size_t max_length)
1015 {
1016 tws_hash_line_t line;
1017 task_t task;
1018 vm_object_offset_t before = *start;
1019 vm_object_offset_t after = *end;
1020 vm_object_offset_t original_start = *start;
1021 vm_object_offset_t original_end = *end;
1022 vm_size_t length = (vm_size_t)(*end - *start);
1023 vm_page_t m;
1024 kern_return_t kret;
1025 vm_object_offset_t object_size;
1026 int age_of_cache;
1027 int pre_heat_size;
1028 unsigned int ele_cache;
1029 unsigned int end_cache = 0;
1030 unsigned int start_cache = 0;
1031
1032 if((object->private) || !(object->pager))
1033 return;
1034
1035 if (!object->internal) {
1036 kret = vnode_pager_get_object_size(
1037 object->pager,
1038 &object_size);
1039 } else {
1040 object_size = object->size;
1041 }
1042
1043 age_of_cache = ((sched_tick
1044 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
1045
1046 /* When pre-heat files are not available, resort to speculation */
1047 /* based on size of file */
1048
1049 if(tws->startup_cache || object->internal || age_of_cache > 15 ||
1050 (age_of_cache > 5 &&
1051 vm_page_free_count < (vm_page_free_target * 2) )) {
1052 pre_heat_size = 0;
1053 } else {
1054 if (object_size > (vm_object_offset_t)(1024 * 1024))
1055 pre_heat_size = 8 * PAGE_SIZE;
1056 else if (object_size > (vm_object_offset_t)(128 * 1024))
1057 pre_heat_size = 4 * PAGE_SIZE;
1058 else
1059 pre_heat_size = 2 * PAGE_SIZE;
1060 }
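/* annotation: with no startup file, read-ahead speculation scales
   with object size: over 1MB gets 8 pages of pre-heat, over 128K
   gets 4 pages, anything smaller gets 2 pages */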
1061
1062 if ((age_of_cache < 10) && (tws->startup_cache)) {
1063 if ((max_length >= ((*end - *start)
1064 + (32 * PAGE_SIZE))) &&
1065 (tws_test_for_community(tws, object,
1066 *start, 3, &ele_cache))) {
1067 int expanded;
1068 start_cache = ele_cache;
1069 *start = *start & TWS_HASH_OFF_MASK;
1070 *end = *start + (32 * PAGE_SIZE_64);
1071 if(*end > object_size) {
1072 *end = trunc_page(object_size);
1073 max_length = 0;
1074 if(before >= *end) {
1075 *end = after;
1076 } else {
1077 end_cache = ele_cache;
1078 }
1079 } else {
1080 end_cache = ele_cache;
1081 }
1082 while (max_length > ((*end - *start)
1083 + (32 * PAGE_SIZE))) {
1084 expanded = 0;
1085 after = *end;
1086 before = *start - PAGE_SIZE_64;
1087 if((*end <= (object->size
1088 + (32 * PAGE_SIZE_64))) &&
1089 (tws_test_for_community(tws,
1090 object, after,
1091 5, &ele_cache))) {
1092 *end = after +
1093 (32 * PAGE_SIZE_64);
1094 if(*end > object_size) {
1095 *end = trunc_page(object_size);
1096 max_length = 0;
1097 if(*start >= *end) {
1098 *end = after;
1099 }
1100 }
1101 end_cache = ele_cache;
1102 expanded = 1;
1103 }
1104 if (max_length < ((*end - *start)
1105 + (32 * PAGE_SIZE_64))) { /* was '>', which exited while room still remained */
1106 break;
1107 }
1108 if((*start >= (32 * PAGE_SIZE_64)) &&
1109 (tws_test_for_community(tws, object,
1110 before, 5, &ele_cache))) {
1111 *start = before;
1112 start_cache = ele_cache;
1113 expanded = 1;
1114 }
1115 if(expanded == 0)
1116 break;
1117 }
1118
1119 if(start_cache != 0) {
1120 unsigned int mask;
1121
1122 for (mask = 1; mask != 0; mask = mask << 1) {
1123 if (*start == original_start)
1124 break;
1125 if (!(start_cache & mask))
1126 *start += PAGE_SIZE_64;
1127 else
1128 break;
1129 }
1130 }
1131 if(end_cache != 0) {
1132 unsigned int mask;
1133
1134 for (mask = 0x80000000;
1135 mask != 0; mask = mask >> 1) {
1136 if (*end == original_end)
1137 break;
1138 if(!(end_cache & mask))
1139 *end -= PAGE_SIZE_64;
1140 else
1141 break;
1142 }
1143 }
1144
1145 if (*start >= *end)
1146 panic("bad clipping occurred\n");
1147
1148 return;
1149 }
1150 }
1151
1152 while ((length < max_length) &&
1153 (object_size >=
1154 (after + PAGE_SIZE_64))) {
1155 if(length >= pre_heat_size) {
1156 if(tws_lookup(tws, after, object,
1157 &line) != KERN_SUCCESS) {
1158 vm_object_offset_t extend;
1159
1160 extend = after + PAGE_SIZE_64;
1161 if(tws_lookup(tws, extend, object,
1162 &line) != KERN_SUCCESS) {
1163 break;
1164 }
1165 }
1166 }
1167
1168 if ((object->existence_map != NULL)
1169 && (!LOOK_FOR(object, after))) {
1170 break;
1171 }
1172
1173 if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
1174 /* we can bridge resident pages */
1175 after += PAGE_SIZE_64;
1176 length += PAGE_SIZE;
1177 continue;
1178 }
1179
1180 if (object->internal) {
1181 /*
1182 * need to acquire a real page in
1183 * advance because this acts as
1184 * a throttling mechanism for
1185 * data_requests to the default
1186 * pager. If this fails, give up
1187 * trying to find any more pages
1188 * in the cluster and send off the
1189 * request for what we already have.
1190 */
1191 if ((m = vm_page_grab()) == VM_PAGE_NULL) {
1192 break;
1193 }
1194 } else if ((m = vm_page_grab_fictitious())
1195 == VM_PAGE_NULL) {
1196 break;
1197 }
1198 m->absent = TRUE;
1199 m->unusual = TRUE;
1200 m->clustered = TRUE;
1201 m->list_req_pending = TRUE;
1202
1203 vm_page_insert(m, object, after);
1204 object->absent_count++;
1205 after += PAGE_SIZE_64;
1206 length += PAGE_SIZE;
1207 }
1208 *end = after;
1209 while (length < max_length) {
1210 if (before == 0)
1211 break;
1212 before -= PAGE_SIZE_64;
1213
1214 if(length >= pre_heat_size) {
1215 if(tws_lookup(tws, before, object,
1216 &line) != KERN_SUCCESS) {
1217 vm_object_offset_t extend;
1218
1219 extend = before;
1220 if (extend == 0)
1221 break;
1222 extend -= PAGE_SIZE_64;
1223 if(tws_lookup(tws, extend, object,
1224 &line) != KERN_SUCCESS) {
1225 break;
1226 }
1227 }
1228 }
1229 if ((object->existence_map != NULL)
1230 && (!LOOK_FOR(object, before))) {
1231 break;
1232 }
1233
1234 if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
1235 /* we can bridge resident pages */
1236 *start -= PAGE_SIZE_64;
1237 length += PAGE_SIZE;
1238 continue;
1239 }
1240
1241 if (object->internal) {
1242 /*
1243 * need to acquire a real page in
1244 * advance because this acts as
1245 * a throttling mechanism for
1246 * data_requests to the default
1247 * pager. If this fails, give up
1248 * trying to find any more pages
1249 * in the cluster and send off the
1250 * request for what we already have.
1251 */
1252 if ((m = vm_page_grab()) == VM_PAGE_NULL) {
1253 break;
1254 }
1255 } else if ((m = vm_page_grab_fictitious())
1256 == VM_PAGE_NULL) {
1257 break;
1258 }
1259 m->absent = TRUE;
1260 m->unusual = TRUE;
1261 m->clustered = TRUE;
1262 m->list_req_pending = TRUE;
1263
1264 vm_page_insert(m, object, before);
1265 object->absent_count++;
1266 *start -= PAGE_SIZE_64;
1267 length += PAGE_SIZE;
1268 }
1269 }
1270 void
1271 tws_line_signal(
1272 tws_hash_t tws,
1273 vm_map_t map,
1274 tws_hash_line_t hash_line,
1275 vm_offset_t target_page)
1276 {
1277 unsigned int i,j;
1278 vm_object_t object;
1279 vm_object_offset_t offset;
1280 vm_object_offset_t before;
1281 vm_object_offset_t after;
1282 struct tws_hash_ele *element;
1283 vm_page_t m,p;
1284 kern_return_t rc;
1285
1286 if(tws->style != TWS_HASH_STYLE_SIGNAL)
1287 return;
1288
1289 vm_map_lock(map);
1290 for (i=0; i<tws->number_of_elements; i++) {
1291
1292 vm_object_offset_t local_off = 0;
1293
1294 if(hash_line->list[i].object == 0)
1295 continue;
1296
1297 element = &hash_line->list[i];
1298
1299 if (element->page_addr == target_page)
1300 continue;
1301
1302 j = 1;
1303 while (j != 0) {
1304 if(j & element->page_cache)
1305 break;
1306 j = j << 1;
1307 local_off += PAGE_SIZE_64;
1308 }
1309 object = element->object;
1310 offset = element->offset + local_off;
1311
1312 /* first try a fast test to speed up no-op signal */
1313 if (((p = vm_page_lookup(object, offset)) != NULL)
1314 || (object->pager == NULL)
1315 || (object->shadow_severed)) {
1316 continue;
1317 }
1318
1319 if((!object->alive) ||
1320 (!object->pager_created) || (!object->pager_ready))
1321 continue;
1322
1323 if (object->internal) {
1324 if (object->existence_map == NULL) {
1325 if (object->shadow)
1326 continue;
1327 } else {
1328 if(!LOOK_FOR(object, offset))
1329 continue;
1330 }
1331 }
1332
1333 vm_object_reference(object);
1334 vm_map_unlock(map);
1335
1336 if(object->internal) {
1337 m = vm_page_grab();
1338 } else {
1339 m = vm_page_grab_fictitious();
1340 }
1341
1342 if(m == NULL) {
1343 vm_object_deallocate(object);
1344 vm_map_lock(map);
1345 continue;
1346 }
1347
1348 vm_object_lock(object);
1349 if (((p = vm_page_lookup(object, offset)) != NULL)
1350 || (object->pager == NULL)
1351 || (object->shadow_severed)) {
1352 VM_PAGE_FREE(m);
1353 vm_object_unlock(object);
1354 vm_object_deallocate(object);
1355 vm_map_lock(map);
1356 continue;
1357 }
1358
1359 vm_page_insert(m, object, offset);
1360
1361 if (object->absent_count > vm_object_absent_max) {
1362 VM_PAGE_FREE(m);
1363 vm_object_unlock(object);
1364 vm_object_deallocate(object);
1365 vm_map_lock(map);
1366 break;
1367 }
1368 m->list_req_pending = TRUE;
1369 m->absent = TRUE;
1370 m->unusual = TRUE;
1371 object->absent_count++;
1372
1373 before = offset;
1374 after = offset + PAGE_SIZE_64;
1375 tws_build_cluster(tws, object, &before, &after, 0x16000);
1376 vm_object_unlock(object);
1377
1378 rc = memory_object_data_request(object->pager,
1379 before + object->paging_offset,
1380 (vm_size_t)(after - before), VM_PROT_READ);
1381 if (rc != KERN_SUCCESS) {
1382 offset = before;
1383 vm_object_lock(object);
1384 while (offset < after) {
1385 m = vm_page_lookup(object, offset);
1386 if(m && m->absent && m->busy)
1387 VM_PAGE_FREE(m);
1388 offset += PAGE_SIZE;
1389 }
1390 vm_object_unlock(object);
1391 vm_object_deallocate(object);
1392 } else {
1393 vm_object_deallocate(object);
1394 }
1395 vm_map_lock(map);
1396 continue;
1397 }
1398 vm_map_unlock(map);
1399 }
1400
1401 /* tws locked on entry */
1402
1403 tws_startup_t
1404 tws_create_startup_list(
1405 tws_hash_t tws)
1406 {
1407
1408 tws_startup_t startup;
1409 unsigned int i,j,k;
1410 unsigned int total_elements;
1411 unsigned int startup_size;
1412 unsigned int sindex;
1413 unsigned int hash_index;
1414 tws_startup_ptr_t element;
1415
1416 total_elements = tws->expansion_count *
1417 (tws->number_of_lines * tws->number_of_elements);
1418
1419 startup_size = sizeof(struct tws_startup)
1420 + (total_elements * sizeof(tws_startup_ptr_t *))
1421 + (total_elements * sizeof(struct tws_startup_ptr))
1422 + (total_elements * sizeof(struct tws_startup_ele));
1423 startup = (tws_startup_t)(kalloc(startup_size));
1424
1425 if(startup == NULL)
1426 return startup;
1427
1428 bzero((char *) startup, startup_size);
1429
1430 startup->table = (tws_startup_ptr_t *)
1431 (((int)startup) + (sizeof(struct tws_startup)));
1432 startup->ele = (struct tws_startup_ptr *)
1433 (((vm_offset_t)startup->table) +
1434 (total_elements * sizeof(tws_startup_ptr_t)));
1435
1436 startup->array = (struct tws_startup_ele *)
1437 (((vm_offset_t)startup->ele) +
1438 (total_elements * sizeof(struct tws_startup_ptr)));
1439
1440 startup->tws_hash_size = startup_size;
1441 startup->ele_count = 0; /* burn first hash ele, else we can't tell from zero */
1442 startup->array_size = total_elements;
1443 startup->hash_count = 1;
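/* annotation: table, ele and array share the single kalloc'd block
   sized above; the "pointers" stored in them are byte offsets from
   the base of each sub-array, so the image can be written to disk
   and re-based on read-in (see tws_read_startup_file).  Hash element
   0 is burned because an offset of zero would be indistinguishable
   from an empty table slot. */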
1444
1445 sindex = 0;
1446
1447
1448 for(i = 0; i<tws->number_of_lines; i++) {
1449 for(j = 0; j<tws->number_of_elements; j++) {
1450 for(k = 0; k<tws->expansion_count; k++) {
1451 tws_hash_ele_t entry;
1452 unsigned int hash_retry;
1453 vm_offset_t addr;
1454
1455 entry = &tws->cache[k][i].list[j];
1456 addr = entry->page_addr;
1457 hash_retry = 0;
1458 if(entry->object != 0) {
1459 /* get a hash element */
1460 hash_index = do_startup_hash(addr,
1461 startup->array_size);
1462
1463 if(startup->hash_count < total_elements) {
1464 element = &(startup->ele[startup->hash_count]);
1465 startup->hash_count += 1;
1466 } else {
1467 /* exit, we're out of elements */
1468 break;
1469 }
1470 /* place the hash element */
1471 element->next = startup->table[hash_index];
1472 startup->table[hash_index] = (tws_startup_ptr_t)
1473 ((int)element - (int)&startup->ele[0]);
1474
1475 /* set entry OFFSET in hash element */
1476 element->element = (tws_startup_ele_t)
1477 ((int)&startup->array[sindex] -
1478 (int)&startup->array[0]);
1479
1480 startup->array[sindex].page_addr = entry->page_addr;
1481 startup->array[sindex].page_cache = entry->page_cache;
1482 startup->ele_count++;
1483 sindex++;
1484
1485 }
1486 }
1487 }
1488 }
1489
1490 return startup;
1491 }
1492
1493
1494 /*
1495 * Returns an entire cache line. The line is deleted from the startup
1496 * cache on return. The caller can check startup->ele_count for an empty
1497 * list. Access synchronization is the responsibility of the caller.
1498 */
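/*
 * Worked example (annotation): for an entry with page_addr 0x1000 and
 * page_cache 0x0000000F, a lookup at addr 0x3000 computes
 * startup_shift = 2 and returns 0x3 (the pages at and above addr);
 * the low bits 0x3 stay in the entry for other mappings, since the
 * addresses did not line up exactly.
 */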
1499
1500 unsigned int
1501 tws_startup_list_lookup(
1502 tws_startup_t startup,
1503 vm_offset_t addr)
1504 {
1505 unsigned int hash_index;
1506 unsigned int page_cache_bits;
1507 unsigned int startup_shift;
1508 tws_startup_ele_t entry;
1509 vm_offset_t next_addr;
1510 tws_startup_ptr_t element;
1511 tws_startup_ptr_t base_ele;
1512 tws_startup_ptr_t *previous_ptr;
1513
1514 page_cache_bits = 0;
1515
1516 hash_index = do_startup_hash(addr, startup->array_size);
1517
1518 if(((unsigned int)&(startup->table[hash_index])) >= ((unsigned int)startup + startup->tws_hash_size)) { /* bound relative to the image base, as in the checks below */
1519 return page_cache_bits = 0;
1520 }
1521 element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
1522 (int)&startup->ele[0]);
1523 base_ele = element;
1524 previous_ptr = &(startup->table[hash_index]);
1525 while(element > &startup->ele[0]) {
1526 if (((int)element + sizeof(struct tws_startup_ptr))
1527 > ((int)startup + startup->tws_hash_size)) {
1528 return page_cache_bits;
1529 }
1530 entry = (tws_startup_ele_t)
1531 ((int)element->element
1532 + (int)&startup->array[0]);
1533 if((((int)entry + sizeof(struct tws_startup_ele))
1534 > ((int)startup + startup->tws_hash_size))
1535 || ((int)entry < (int)startup)) {
1536 return page_cache_bits;
1537 }
1538 if ((addr >= entry->page_addr) &&
1539 (addr <= (entry->page_addr + 0x1F000))) {
1540 startup_shift = (addr - entry->page_addr)>>12;
1541 page_cache_bits |= entry->page_cache >> startup_shift;
1542 /* don't dump the pages, unless the addresses */
1543 /* line up perfectly. The cache may be used */
1544 /* by other mappings */
1545 entry->page_cache &= (1 << startup_shift) - 1;
1546 if(addr == entry->page_addr) {
1547 if(base_ele == element) {
1548 base_ele = (tws_startup_ptr_t)
1549 ((int)element->next
1550 + (int)&startup->ele[0]);
1551 startup->table[hash_index] = element->next;
1552 element = base_ele;
1553 } else {
1554 *previous_ptr = element->next;
1555 element = (tws_startup_ptr_t)
1556 ((int)*previous_ptr
1557 + (int)&startup->ele[0]);
1558 }
1559 entry->page_addr = 0;
1560 startup->ele_count--;
1561 continue;
1562 }
1563 }
1564 next_addr = addr + 0x1F000;
1565 if ((next_addr >= entry->page_addr) &&
1566 (next_addr <= (entry->page_addr + 0x1F000))) {
1567 startup_shift = (next_addr - entry->page_addr)>>12;
1568 page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
1569 entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
1570 if(entry->page_cache == 0) {
1571 if(base_ele == element) {
1572 base_ele = (tws_startup_ptr_t)
1573 ((int)element->next
1574 + (int)&startup->ele[0]);
1575 startup->table[hash_index] = element->next;
1576 element = base_ele;
1577 } else {
1578 *previous_ptr = element->next;
1579 element = (tws_startup_ptr_t)
1580 ((int)*previous_ptr
1581 + (int)&startup->ele[0]);
1582 }
1583 entry->page_addr = 0;
1584 startup->ele_count--;
1585 continue;
1586 }
1587 }
1588 previous_ptr = &(element->next);
1589 element = (tws_startup_ptr_t)
1590 ((int) element->next + (int) &startup->ele[0]);
1591 }
1592
1593 return page_cache_bits;
1594 }
1595
1596 kern_return_t
1597 tws_send_startup_info(
1598 task_t task)
1599 {
1600
1601 tws_hash_t tws;
1602 tws_startup_t scache;
1603
1604 task_lock(task);
1605 tws = (tws_hash_t)task->dynamic_working_set;
1606 task_unlock(task);
1607 if(tws == NULL) {
1608 return KERN_FAILURE;
1609 }
1610 return tws_internal_startup_send(tws);
1611 }
1612
1613
1614 kern_return_t
1615 tws_internal_startup_send(
1616 tws_hash_t tws)
1617 {
1618
1619 tws_startup_t scache;
1620
1621 if(tws == NULL) {
1622 return KERN_FAILURE;
1623 }
1624 tws_lock(tws);
1625 /* used to signal write or release depending on state of tws */
1626 if(tws->startup_cache) {
1627 vm_offset_t startup_buf;
1628 vm_size_t size;
1629 startup_buf = (vm_offset_t)tws->startup_cache;
1630 size = tws->startup_cache->tws_hash_size;
1631 tws->startup_cache = 0;
1632 tws_unlock(tws);
1633 kmem_free(kernel_map, startup_buf, size);
1634 return KERN_SUCCESS;
1635 }
1636 if(tws->startup_name == NULL) {
1637 tws_unlock(tws);
1638 return KERN_FAILURE;
1639 }
1640 scache = tws_create_startup_list(tws);
1641 if(scache == NULL) {
1642 tws_unlock(tws); return KERN_FAILURE; } /* don't leak the tws lock */
1643 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1644 scache, scache->tws_hash_size,
1645 tws->mod, tws->fid);
1646 kfree((vm_offset_t)scache, scache->tws_hash_size);
1647 kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
1648 tws->startup_name = NULL;
1649 tws_unlock(tws);
1650 return KERN_SUCCESS;
1651 }
1652
1653 kern_return_t
1654 tws_handle_startup_file(
1655 task_t task,
1656 unsigned int uid,
1657 char *app_name,
1658 vm_offset_t app_vp,
1659 boolean_t *new_info)
1660
1661 {
1662 tws_startup_t startup;
1663 vm_offset_t cache_size;
1664 kern_return_t error;
1665 int fid;
1666 int mod;
1667
1668 *new_info = FALSE;
1669 /* don't pre-heat kernel task */
1670 if(task == kernel_task)
1671 return KERN_SUCCESS;
1672 error = bsd_read_page_cache_file(uid, &fid,
1673 &mod, app_name,
1674 app_vp, &startup,
1675 &cache_size);
1676 if(error) {
1677 return KERN_FAILURE;
1678 }
1679 if(startup == NULL) {
1680 /* Entry for app does not exist, make */
1681 /* one */
1682 /* we will want our own copy of the shared */
1683 /* regions to pick up a true picture of all */
1684 /* the pages we will touch. */
1685 if((lsf_zone->count * lsf_zone->elem_size)
1686 > (lsf_zone->max_size >> 1)) {
1687 /* We don't want to run out of shared memory */
1688 /* map entries by starting too many private versions */
1689 /* of the shared library structures */
1690 return KERN_SUCCESS;
1691 }
1692 *new_info = TRUE;
1693 error = tws_write_startup_file(task,
1694 fid, mod, app_name, uid);
1695 if(error)
1696 return error;
1697 /* use the mod in the write case as an init */
1698 /* flag */
1699 mod = 0;
1700
1701 } else {
1702 error = tws_read_startup_file(task,
1703 (tws_startup_t)startup,
1704 cache_size);
1705 if(error) {
1706 kmem_free(kernel_map,
1707 (vm_offset_t)startup, cache_size);
1708 return error;
1709 }
1710 }
1711 return KERN_SUCCESS;
1712 }
1713
1714 kern_return_t
1715 tws_write_startup_file(
1716 task_t task,
1717 int fid,
1718 int mod,
1719 char *name,
1720 unsigned int uid)
1721 {
1722 tws_hash_t tws;
1723 unsigned int string_length;
1724
1725 string_length = strlen(name);
1726
1727 task_lock(task);
1728 tws = (tws_hash_t)task->dynamic_working_set;
1729
1730 task_unlock(task);
1731 if(tws == NULL) {
1732 /* create a dynamic working set of normal size, then re-fetch */
1733 /* it; tws is NULL here yet is locked and used below */
1734 task_working_set_create(task, 0, 0, TWS_HASH_STYLE_DEFAULT);
1735 task_lock(task); tws = (tws_hash_t)task->dynamic_working_set; task_unlock(task); if(tws == NULL) return KERN_FAILURE; }
1736 tws_lock(tws);
1737
1738 if(tws->startup_name != NULL) {
1739 tws_unlock(tws);
1740 return KERN_FAILURE;
1741 }
1742
1743 tws->startup_name = (char *)
1744 kalloc((string_length + 1) * (sizeof(char)));
1745 if(tws->startup_name == NULL) {
1746 tws_unlock(tws);
1747 return KERN_FAILURE;
1748 }
1749
1750 bcopy(name, (char *)tws->startup_name, string_length + 1);
1751 tws->startup_name_length = (string_length + 1) * sizeof(char);
1752 tws->uid = uid;
1753 tws->fid = fid;
1754 tws->mod = mod;
1755
1756 tws_unlock(tws);
1757 return KERN_SUCCESS;
1758 }
1759
1760 kern_return_t
1761 tws_read_startup_file(
1762 task_t task,
1763 tws_startup_t startup,
1764 vm_offset_t cache_size)
1765 {
1766 tws_hash_t tws;
1767 int error;
1768 int lines;
1769 int old_exp_count;
1770
1771 task_lock(task);
1772 tws = (tws_hash_t)task->dynamic_working_set;
1773
1774 if(cache_size < sizeof(struct tws_hash)) {
1775 task_unlock(task);
1776 kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
1777 return(KERN_SUCCESS);
1778 }
1779
1780 /* create a dynamic working set to match file size */
1781 lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
1782 /* we now need to divide out element size and word size */
1783 /* all fields are 4 bytes. There are 8 bytes in each hash element */
1784 /* entry, 4 bytes in each table ptr location and 8 bytes in each */
1785 /* page_cache entry, making a total of 20 bytes for each entry */
1786 lines = (lines/(20));
1787 if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
1788 lines = TWS_SMALL_HASH_LINE_COUNT;
1789 task_unlock(task);
1790 kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
1791 return(KERN_SUCCESS);
1792 } else {
1793 old_exp_count = lines/TWS_HASH_LINE_COUNT;
1794 if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
1795 lines = (old_exp_count + 1)
1796 * TWS_HASH_LINE_COUNT;
1797 }
1798 if(tws == NULL) {
1799 task_working_set_create(task, lines,
1800 0, TWS_HASH_STYLE_DEFAULT);
1801 task_unlock(task);
1802 } else {
1803 task_unlock(task);
1804 tws_expand_working_set(
1805 (vm_offset_t)tws, lines, TRUE);
1806 }
1807 }
1808
1809
1810 tws_lock(tws);
1811
1812 if(tws->startup_cache != NULL) {
1813 tws_unlock(tws);
1814 return KERN_FAILURE;
1815 }
1816
1817
1818 /* now need to fix up internal table pointers */
1819 startup->table = (tws_startup_ptr_t *)
1820 (((int)startup) + (sizeof(struct tws_startup)));
1821 startup->ele = (struct tws_startup_ptr *)
1822 (((vm_offset_t)startup->table) +
1823 (startup->array_size * sizeof(tws_startup_ptr_t)));
1824 startup->array = (struct tws_startup_ele *)
1825 (((vm_offset_t)startup->ele) +
1826 (startup->array_size * sizeof(struct tws_startup_ptr)));
1827 /* the allocation size and file size should be the same */
1828 /* just in case they're not, make sure we dealloc correctly  */
1829 startup->tws_hash_size = cache_size;
1830
1831
1832 tws->startup_cache = startup;
1833 tws_unlock(tws);
1834 return KERN_SUCCESS;
1835 }
1836
1837
1838 void
1839 tws_hash_destroy(tws_hash_t tws)
1840 {
1841 int i,k;
1842 vm_size_t cache_size;
1843
1844 if(tws->startup_cache != NULL) {
1845 kmem_free(kernel_map,
1846 (vm_offset_t)tws->startup_cache,
1847 tws->startup_cache->tws_hash_size);
1848 tws->startup_cache = NULL;
1849 }
1850 if(tws->startup_name != NULL) {
1851 tws_internal_startup_send(tws);
1852 }
1853 for (i=0; i<tws->number_of_lines; i++) {
1854 for(k=0; k<tws->expansion_count; k++) {
1855 /* clear the object refs */
1856 tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
1857 }
1858 }
1859 i = 0;
1860 while (i < tws->expansion_count) {
1861
1862 kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ptr_t)
1863 * tws->number_of_lines
1864 * tws->number_of_elements);
1865 kfree((vm_offset_t)tws->table_ele[i],
1866 sizeof(struct tws_hash_ptr)
1867 * tws->number_of_lines
1868 * tws->number_of_elements);
1869 kfree((vm_offset_t)tws->alt_ele[i],
1870 sizeof(struct tws_hash_ptr)
1871 * tws->number_of_lines
1872 * tws->number_of_elements);
1873 kfree((vm_offset_t)tws->cache[i], sizeof(struct tws_hash_line)
1874 * tws->number_of_lines);
1875 i++;
1876 }
1877 if(tws->startup_name != NULL) {
1878 kfree((vm_offset_t)tws->startup_name,
1879 tws->startup_name_length);
1880 }
1881 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
1882 }
1883
1884 void
1885 tws_hash_clear(tws_hash_t tws)
1886 {
1887 int i, k;
1888
1889 for (i=0; i<tws->number_of_lines; i++) {
1890 for(k=0; k<tws->expansion_count; k++) {
1891 /* clear the object refs */
1892 tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
1893 }
1894 }
1895 }
1896
1897 kern_return_t
1898 task_working_set_create(
1899 task_t task,
1900 unsigned int lines,
1901 unsigned int rows,
1902 unsigned int style)
1903 {
1904
1905 if (lines == 0) {
1906 lines = TWS_HASH_LINE_COUNT;
1907 }
1908 if (rows == 0) {
1909 rows = TWS_ARRAY_SIZE;
1910 }
1911 if (style == TWS_HASH_STYLE_DEFAULT) {
1912 style = TWS_HASH_STYLE_BASIC;
1913 }
1914 task_lock(task);
1915 if(task->dynamic_working_set != 0) {
1916 task_unlock(task);
1917 return(KERN_FAILURE);
1918 } else if((task->dynamic_working_set
1919 = (vm_offset_t) tws_hash_create(lines, rows, style)) == 0) {
1920 task_unlock(task);
1921 return(KERN_NO_SPACE);
1922 }
1923 task_unlock(task);
1924 return KERN_SUCCESS;
1925 }
1926
1927
1928 /* Internal use only routines */
1929
1930
1931 /*
1932 * internal sub-function for address space lookup
1933 * returns the target element and the address of the
1934  * previous pointer. The previous pointer is the address
1935 * of the pointer pointing to the target element.
1936 * TWS must be locked
1937 */
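/* annotation: address-hash entries live in the same buckets as
   object-hash entries and are told apart by the TWS_ADDR_HASH bit
   or'd into the element pointer, which is masked off before the
   element is dereferenced below */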
1938
1939 void
1940 tws_traverse_address_hash_list (
1941 tws_hash_t tws,
1942 unsigned int index,
1943 vm_offset_t page_addr,
1944 vm_object_t object,
1945 vm_object_offset_t offset,
1946 vm_map_t map,
1947 tws_hash_ptr_t *target_ele,
1948 tws_hash_ptr_t **previous_ptr,
1949 tws_hash_ptr_t **free_list,
1950 unsigned int exclusive_addr)
1951 {
1952 int k;
1953 tws_hash_ptr_t cache_ele;
1954 tws_hash_ptr_t base_ele;
1955
1956 *target_ele = NULL;
1957 *previous_ptr = NULL;
1958
1959 for(k=0; k<tws->expansion_count; k++) {
1960 tws_hash_ele_t ele;
1961 cache_ele = tws->table[k][index];
1962 base_ele = cache_ele;
1963 *previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
1964 while(cache_ele != NULL) {
1965 if(((unsigned int)
1966 cache_ele->element & TWS_ADDR_HASH) == 0) {
1967 *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
1968 cache_ele = cache_ele->next;
1969 continue;
1970 }
1971 ele = (tws_hash_ele_t)((unsigned int)
1972 cache_ele->element & ~TWS_ADDR_HASH);
1973 if ((ele == 0) || (ele->object == 0)) {
1974 /* A little clean-up of empty elements */
1975 cache_ele->element = 0;
1976 if(base_ele == cache_ele) {
1977 base_ele = cache_ele->next;
1978 tws->table[k][index] = cache_ele->next;
1979 cache_ele->next = tws->free_hash_ele[k];
1980 tws->free_hash_ele[k] = cache_ele;
1981 cache_ele = base_ele;
1982 } else {
1983 **previous_ptr = cache_ele->next;
1984 cache_ele->next = tws->free_hash_ele[k];
1985 tws->free_hash_ele[k] = cache_ele;
1986 cache_ele = **previous_ptr;
1987 }
1988 continue;
1989 }
1990
1991 if ((ele->page_addr <= page_addr)
1992 && (page_addr <= (ele->page_addr +
1993 (vm_offset_t)TWS_INDEX_MASK))
1994 && ((object == NULL)
1995 || ((object == ele->object)
1996 && (offset == ele->offset)
1997 && (map == ele->map)))) {
1998 if(exclusive_addr) {
1999 int delta;
2000 delta = ((page_addr - ele->page_addr)
2001 >> 12);
2002 if((1 << delta) & ele->page_cache) {
2003 /* We've found a match */
2004 *target_ele = cache_ele;
2005 *free_list =
2006 (tws_hash_ptr_t *)
2007 &(tws->free_hash_ele[k]);
2008 return;
2009 }
2010 } else {
2011 /* We've found a match */
2012 *target_ele = cache_ele;
2013 *free_list = (tws_hash_ptr_t *)
2014 &(tws->free_hash_ele[k]);
2015 return;
2016 }
2017 }
2018 *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
2019 cache_ele = cache_ele->next;
2020 }
2021 }
2022 }
2023
2024
2025 /*
2026 * internal sub-function for object space lookup
2027 * returns the target element and the address of the
2028  * previous pointer. The previous pointer is the address
2029 * of the pointer pointing to the target element.
2030 * TWS must be locked
2031 */
2032
2033
2034 void
2035 tws_traverse_object_hash_list (
2036 tws_hash_t tws,
2037 unsigned int index,
2038 vm_object_t object,
2039 vm_object_offset_t offset,
2040 unsigned int page_mask,
2041 tws_hash_ptr_t *target_ele,
2042 tws_hash_ptr_t **previous_ptr,
2043 tws_hash_ptr_t **free_list)
2044 {
2045 int k;
2046 tws_hash_ptr_t cache_ele;
2047 tws_hash_ptr_t base_ele;
2048
2049 *target_ele = NULL;
2050 *previous_ptr = NULL;
2051
2052 for(k=0; k<tws->expansion_count; k++) {
2053 cache_ele = tws->table[k][index];
2054 base_ele = cache_ele;
2055 *previous_ptr = &(tws->table[k][index]);
2056 while(cache_ele != NULL) {
2057 if((((unsigned int)cache_ele->element)
2058 & TWS_ADDR_HASH) != 0) {
2059 *previous_ptr = &(cache_ele->next);
2060 cache_ele = cache_ele->next;
2061 continue;
2062 }
2063 if ((cache_ele->element == 0) ||
2064 (cache_ele->element->object == 0)) {
2065 /* A little clean-up of empty elements */
2066 cache_ele->element = 0;
2067 if(base_ele == cache_ele) {
2068 base_ele = cache_ele->next;
2069 tws->table[k][index] = cache_ele->next;
2070 cache_ele->next = tws->free_hash_ele[k];
2071 tws->free_hash_ele[k] = cache_ele;
2072 cache_ele = tws->table[k][index];
2073 } else {
2074 **previous_ptr = cache_ele->next;
2075 cache_ele->next = tws->free_hash_ele[k];
2076 tws->free_hash_ele[k] = cache_ele;
2077 cache_ele = **previous_ptr;
2078 }
2079 continue;
2080 }
2081 if ((cache_ele->element->object == object)
2082 && (cache_ele->element->offset ==
2083 (offset - (offset & ~TWS_HASH_OFF_MASK)))) {
2084 if((cache_ele->element->page_cache & page_mask)
2085 || (page_mask == 0xFFFFFFFF)) {
2086 /* We've found a match */
2087 *target_ele = cache_ele;
2088 *free_list = &(tws->free_hash_ele[k]);
2089 return;
2090 }
2091 }
2092 *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
2093 cache_ele = cache_ele->next;
2094 }
2095 }
2096 }
2097
2098
2099 /*
2100 * For a given object/offset, discover whether the indexed 32 page frame
2101  * containing the object/offset exists and if there are at least threshold
2102 * pages present. Returns true if population meets threshold.
2103 */
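/* annotation: tws_build_cluster calls this with thresholds of 3 and
   5 to judge whether a neighbouring 32-page window is populated
   enough to be worth pre-faulting */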
2104 int
2105 tws_test_for_community(
2106 tws_hash_t tws,
2107 vm_object_t object,
2108 vm_object_offset_t offset,
2109 unsigned int threshold,
2110 unsigned int *page_mask)
2111 {
2112 int index;
2113 tws_hash_ptr_t cache_ele;
2114 tws_hash_ptr_t *trailer;
2115 tws_hash_ptr_t *free_list;
2116 int community = 0;
2117
2118 index = do_tws_hash(object, offset,
2119 tws->number_of_elements, tws->number_of_lines);
2120 tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
2121 &cache_ele, &trailer, &free_list);
2122
2123 if(cache_ele != NULL) {
2124 int i;
2125 int ctr;
2126 ctr = 0;
2127 for(i=1; i!=0; i=i<<1) {
2128 if(i & cache_ele->element->page_cache)
2129 ctr++;
2130 if(ctr == threshold) {
2131 community = 1;
2132 *page_mask = cache_ele->element->page_cache;
2133 break;
2134 }
2135 }
2136 }
2137
2138 return community;
2139
2140 }
2141
2142
2143 /*
2144 * Gets new hash element for object hash from free pools
2145 * TWS must be locked
2146 */
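/* annotation: this allocator (and new_addr_hash below) draws first
   from the never-yet-used tail of the preallocated per-set element
   arrays, tracked by the free counts, and only then recycles entries
   from the free_hash_ele list */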
2147
2148 tws_hash_ptr_t
2149 new_obj_hash(
2150 tws_hash_t tws,
2151 unsigned int set,
2152 unsigned int index)
2153 {
2154 tws_hash_ptr_t element;
2155
2156 if(tws->obj_free_count[set] < tws->number_of_lines * tws->number_of_elements) {
2157 element = &(tws->table_ele[set][tws->obj_free_count[set]]);
2158 tws->obj_free_count[set]+=1;
2159 } else if(tws->free_hash_ele[set] == NULL) {
2160 return NULL;
2161 } else {
2162 element = tws->free_hash_ele[set];
2163 if(element == NULL)
2164 return element;
2165 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2166 }
2167 element->element = 0;
2168 element->next = tws->table[set][index];
2169 tws->table[set][index] = element;
2170 return element;
2171 }
2172
2173 /*
2174 * Gets new hash element for addr hash from free pools
2175 * TWS must be locked
2176 */
2177
2178 tws_hash_ptr_t
2179 new_addr_hash(
2180 tws_hash_t tws,
2181 unsigned int set,
2182 unsigned int index)
2183 {
2184 tws_hash_ptr_t element;
2185
2186 if(tws->addr_free_count[set]
2187 < tws->number_of_lines * tws->number_of_elements) {
2188 element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
2189 tws->addr_free_count[set]+=1;
2190 } else if(tws->free_hash_ele[set] == NULL) {
2191 return NULL;
2192 } else {
2193 element = tws->free_hash_ele[set];
2194 if(element == NULL)
2195 return element;
2196 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2197 }
2198 element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
2199 element->next = tws->table[set][index];
2200 tws->table[set][index] = element;
2201 return element;
2202 }