1 int startup_miss = 0;
2 /*
3 * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * The contents of this file constitute Original Code as defined in and
8 * are subject to the Apple Public Source License Version 1.1 (the
9 * "License"). You may not use this file except in compliance with the
10 * License. Please obtain a copy of the License at
11 * http://www.apple.com/publicsource and read it before using this file.
12 *
13 * This Original Code and all software distributed under the License are
14 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
18 * License for the specific language governing rights and limitations
19 * under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 */
25 /*
26 * File: vm/task_working_set.c
27 * Author: Chris Youngworth
28 * Date: 2001
29 *
30 * Working set detection and maintenance module
31 */
32
33
34 #include <mach/mach_types.h>
35 #include <mach/shared_memory_server.h>
36 #include <vm/task_working_set.h>
37 #include <vm/vm_kern.h>
38 #include <vm/vm_map.h>
39 #include <vm/vm_page.h>
40 #include <vm/vm_pageout.h>
41 #include <kern/sched.h>
42
43 extern unsigned sched_tick;
44 extern zone_t lsf_zone;
45
46 /* declarations for internal use only routines */
47
48 tws_startup_t
49 tws_create_startup_list(
50 tws_hash_t tws);
51
52 unsigned int
53 tws_startup_list_lookup(
54 tws_startup_t startup,
55 vm_offset_t addr);
56
57 kern_return_t
58 tws_internal_startup_send(
59 tws_hash_t tws);
60
61 void
62 tws_traverse_address_hash_list (
63 tws_hash_t tws,
64 unsigned int index,
65 vm_offset_t page_addr,
66 vm_object_t object,
67 vm_object_offset_t offset,
68 vm_map_t map,
69 tws_hash_ptr_t *target_ele,
70 tws_hash_ptr_t **previous_ptr,
71 tws_hash_ptr_t **free_list,
72 unsigned int exclusive_addr);
73
74 void
75 tws_traverse_object_hash_list (
76 tws_hash_t tws,
77 unsigned int index,
78 vm_object_t object,
79 vm_object_offset_t offset,
80 unsigned int page_mask,
81 tws_hash_ptr_t *target_ele,
82 tws_hash_ptr_t **previous_ptr,
83 tws_hash_ptr_t **free_list);
84
85 tws_hash_ptr_t
86 new_addr_hash(
87 tws_hash_t tws,
88 unsigned int set,
89 unsigned int index);
90
91 tws_hash_ptr_t
92 new_obj_hash(
93 tws_hash_t tws,
94 unsigned int set,
95 unsigned int index);
96
97 int tws_test_for_community(
98 tws_hash_t tws,
99 vm_object_t object,
100 vm_object_offset_t offset,
101 unsigned int threshold,
102 unsigned int *page_mask);
103
104 /* Note: all of the routines below depend on the associated map lock for */
105 /* synchronization; the map lock is held when the routines are called */
106 /* and still held when they return */
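
/*
 * Illustrative call pattern (an editorial sketch, not code from this
 * file; "map", "object" and "offset" stand in for the caller's real
 * state, and error handling is elided):
 *
 *	tws_hash_t	tws;
 *	tws_hash_line_t	line;
 *
 *	tws = tws_hash_create(TWS_HASH_LINE_COUNT, TWS_ARRAY_SIZE,
 *			TWS_HASH_STYLE_BASIC);
 *	if (tws != NULL) {
 *		vm_map_lock(map);
 *		if (tws_lookup(tws, offset, object, &line) == KERN_SUCCESS) {
 *			(use the cache line)
 *		}
 *		vm_map_unlock(map);
 *	}
 */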
107
108 tws_hash_t
109 tws_hash_create(
110 unsigned int lines,
111 unsigned int rows,
112 unsigned int style)
113 {
114 tws_hash_t tws;
115 int i,j;
116
117
118 if ((style != TWS_HASH_STYLE_BASIC) &&
119 (style != TWS_HASH_STYLE_SIGNAL)) {
120 return((tws_hash_t)NULL);
121 }
122
123
124 tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
125 if(tws == (tws_hash_t)NULL)
126 return tws;
127
128 if((tws->table[0] = (tws_hash_ptr_t *)
129 kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
130 == NULL) {
131 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
132 return (tws_hash_t)NULL;
133 }
134 if((tws->table_ele[0] = (tws_hash_ptr_t)
135 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
136 == NULL) {
137 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
138 * lines * rows);
139 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
140 return (tws_hash_t)NULL;
141 }
142 if((tws->alt_ele[0] = (tws_hash_ptr_t)
143 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
144 == NULL) {
145 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
146 * lines * rows);
147 kfree((vm_offset_t)tws->table_ele[0],
148 sizeof(struct tws_hash_ptr)
149 * lines * rows);
150 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
151 return (tws_hash_t)NULL;
152 }
153 if((tws->cache[0] = (struct tws_hash_line *)
154 kalloc(sizeof(struct tws_hash_line) * lines))
155 == NULL) {
156 kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
157 * lines * rows);
158 kfree((vm_offset_t)tws->table_ele[0],
159 sizeof(struct tws_hash_ptr)
160 * lines * rows);
161 kfree((vm_offset_t)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
162 * lines * rows);
163 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
164 return (tws_hash_t)NULL;
165 }
166 tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
167 tws->obj_free_count[0] = 0;
168 tws->addr_free_count[0] = 0;
169
170 /* most defaults are such that a bzero will initialize */
171 bzero((char *)tws->table[0],sizeof(tws_hash_ptr_t)
172 * lines * rows);
173 bzero((char *)tws->table_ele[0],sizeof(struct tws_hash_ptr)
174 * lines * rows);
175 bzero((char *)tws->alt_ele[0],sizeof(struct tws_hash_ptr)
176 * lines * rows);
177 bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
178 * lines);
179
180 mutex_init(&tws->lock, ETAP_VM_MAP);
181 tws->style = style;
182 tws->current_line = 0;
183 tws->pageout_count = 0;
184 tws->line_count = 0;
185 tws->startup_cache = NULL;
186 tws->startup_name = NULL;
187 tws->number_of_lines = lines;
188 tws->number_of_elements = rows;
189 tws->expansion_count = 1;
190 tws->lookup_count = 0;
191 tws->insert_count = 0;
192 tws->time_of_creation = sched_tick;
193
194 return tws;
195 }
196
197 int newtest = 0;
198 void
199 tws_hash_line_clear(
200 tws_hash_t tws,
201 tws_hash_line_t hash_line,
202 boolean_t live)
203 {
204 struct tws_hash_ele *hash_ele;
205 struct tws_hash_ptr **trailer;
206 struct tws_hash_ptr **free_list;
207 tws_hash_ele_t addr_ele;
208 int index;
209 unsigned int i, j, k;
210 int dump_pmap;
211 int hash_loop;
212
213
214 if(tws->line_count < tws->number_of_lines) {
215 tws->line_count++;
216 dump_pmap = 1;
217 } else {
218 if(tws->pageout_count != vm_pageout_scan_event_counter) {
219 tws->pageout_count =
220 vm_pageout_scan_event_counter;
221 tws->line_count = 0;
222 dump_pmap = 1;
223 } else {
224 dump_pmap = 0;
225 }
226 }
227 hash_line->ele_count = 0;
228
229 for (i=0; i<tws->number_of_elements; i++) {
230 hash_loop = 0;
231 hash_ele = &(hash_line->list[i]);
232 if(hash_ele->object != 0) {
233
234 vm_object_offset_t local_off = 0;
235 tws_hash_ptr_t cache_ele;
236
237 index = alt_tws_hash(
238 hash_ele->page_addr & TWS_HASH_OFF_MASK,
239 tws->number_of_elements,
240 tws->number_of_lines);
241
242 tws_traverse_address_hash_list(tws, index,
243 hash_ele->page_addr, hash_ele->object,
244 hash_ele->offset, hash_ele->map,
245 &cache_ele, &trailer, &free_list, 0);
246 if(cache_ele != NULL) {
247 addr_ele = (tws_hash_ele_t)((unsigned int)
248 (cache_ele->element) & ~TWS_ADDR_HASH);
249 if(addr_ele != hash_ele)
250 panic("tws_hash_line_clear:"
251 " out of sync\n");
252 cache_ele->element = 0;
253 *trailer = cache_ele->next;
254 cache_ele->next = *free_list;
255 *free_list = cache_ele;
256 }
257
258 index = alt_tws_hash(
259 (hash_ele->page_addr - 0x1f000)
260 & TWS_HASH_OFF_MASK,
261 tws->number_of_elements,
262 tws->number_of_lines);
263
264 tws_traverse_address_hash_list(tws, index,
265 hash_ele->page_addr, hash_ele->object,
266 hash_ele->offset, hash_ele->map,
267 &cache_ele, &trailer, &free_list, 0);
268
269 if(cache_ele != NULL) {
270 addr_ele = (tws_hash_ele_t)((unsigned int)
271 (cache_ele->element) & ~TWS_ADDR_HASH);
272 if(addr_ele != hash_ele)
273 panic("tws_hash_line_clear: "
274 "out of sync\n");
275 cache_ele->element = 0;
276 *trailer = cache_ele->next;
277 cache_ele->next = *free_list;
278 *free_list = cache_ele;
279 }
280
281
282 if((hash_ele->map != NULL) && (live)) {
283 vm_page_t p;
284
285 for (j = 0x1; j != 0; j = j<<1) {
286 if(j & hash_ele->page_cache) {
287 p = vm_page_lookup(hash_ele->object,
288 hash_ele->offset + local_off);
289 if((p != NULL) && (p->wire_count == 0)
290 && (dump_pmap == 1)) {
291 pmap_remove_some_phys((pmap_t)
292 vm_map_pmap(
293 hash_ele->map),
294 p->phys_addr);
295 }
296 }
297 local_off += PAGE_SIZE_64;
298 }
299 }
300
301 if(tws->style == TWS_HASH_STYLE_SIGNAL) {
302 vm_object_deallocate(hash_ele->object);
303 vm_map_deallocate(hash_ele->map);
304 }
305
306 index = do_tws_hash(hash_ele->object, hash_ele->offset,
307 tws->number_of_elements,
308 tws->number_of_lines);
309
310 tws_traverse_object_hash_list(tws,
311 index, hash_ele->object, hash_ele->offset,
312 0xFFFFFFFF, &cache_ele, &trailer, &free_list);
313 if((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
314 cache_ele->element = 0;
315 *trailer = cache_ele->next;
316 cache_ele->next = *free_list;
317 *free_list = cache_ele;
318 }
319 hash_ele->object = 0;
320 }
321 }
322 }
323
324 kern_return_t
325 tws_internal_lookup(
326 tws_hash_t tws,
327 vm_object_offset_t offset,
328 vm_object_t object,
329 tws_hash_line_t *line)
330 {
331 int index;
332 int loop;
333 int set;
334 int ele_line;
335 vm_offset_t pagenum;
336 tws_hash_ptr_t cache_ele;
337 tws_hash_ptr_t *trailer;
338 tws_hash_ptr_t *free_list;
339
340 /* don't cache private objects */
341 if(object->private)
342 return KERN_SUCCESS;
343
344 index = do_tws_hash(object, offset,
345 tws->number_of_elements, tws->number_of_lines);
346 loop = 0;
347
348 tws->lookup_count++;
349 if(tws->lookup_count == 0)
350 tws->insert_count = 0;
351 if(tws->startup_name != NULL) {
352 int age_of_cache;
353 age_of_cache = ((sched_tick
354 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
355 if (age_of_cache > 35) {
356 return KERN_OPERATION_TIMED_OUT;
357 }
358 }
359
360 if(tws->lookup_count > (4 * tws->expansion_count
361 * tws->number_of_elements * tws->number_of_lines) &&
362 (tws->lookup_count > (2 * tws->insert_count))) {
363 if(tws->startup_cache) {
364 int age_of_cache;
365 age_of_cache = ((sched_tick
366 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
367 if (age_of_cache > 60) {
368 return KERN_OPERATION_TIMED_OUT;
369 }
370 }
371 }
372
373 pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
374 pagenum = pagenum >> 12;
375 pagenum = 1 << pagenum; /* get the appropriate page in 32 page block */
376 tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
377 &cache_ele, &trailer, &free_list);
378 if(cache_ele != NULL) {
379 set = cache_ele->element->line/tws->number_of_lines;
380 ele_line = cache_ele->element->line - (set * tws->number_of_lines);
381 *line = &tws->cache[set][ele_line];
382 return KERN_SUCCESS;
383 }
384
385 return KERN_FAILURE;
386
387
388 }
389
390 kern_return_t
391 tws_lookup(
392 tws_hash_t tws,
393 vm_object_offset_t offset,
394 vm_object_t object,
395 tws_hash_line_t *line)
396 {
397 kern_return_t kr;
398
399 if(!tws_lock_try(tws)) {
400 return KERN_FAILURE;
401 }
402 kr = tws_internal_lookup(tws,
403 offset, object, line);
404 tws_unlock(tws);
405 return kr;
406 }
407
408 kern_return_t
409 tws_expand_working_set(
410 vm_offset_t tws,
411 int line_count,
412 boolean_t dump_data)
413 {
414 tws_hash_t new_tws;
415 tws_hash_t old_tws;
416 unsigned int i,j,k;
417 struct tws_hash temp;
418
419 old_tws = (tws_hash_t)tws;
420
421 /* Note we do an elaborate dance to preserve the header that */
422 /* task is pointing to. In this way we can avoid taking a task */
423 /* lock every time we want to access the tws */
424
425 if (old_tws->number_of_lines >= line_count) {
426 return KERN_FAILURE;
427 }
428 if((new_tws = tws_hash_create(line_count,
429 old_tws->number_of_elements, old_tws->style)) == 0) {
430 return(KERN_NO_SPACE);
431 }
432 tws_lock(old_tws);
433
434 if(!dump_data) {
435 for(i = 0; i<old_tws->number_of_lines; i++) {
436 for(j = 0; j<old_tws->number_of_elements; j++) {
437 for(k = 0; k<old_tws->expansion_count; k++) {
438 tws_hash_ele_t entry;
439 vm_object_offset_t paddr;
440 unsigned int page_index;
441 entry = &old_tws->cache[k][i].list[j];
442 if(entry->object != 0) {
443 paddr = 0;
444 for(page_index = 1; page_index != 0;
445 page_index = page_index << 1) {
446 if (entry->page_cache & page_index) {
447 tws_insert(new_tws,
448 entry->offset+paddr,
449 entry->object,
450 entry->page_addr+paddr,
451 entry->map);
452 }
453 paddr+=PAGE_SIZE;
454 }
455
456 }
457 }
458 }
459 }
460 }
461
462 temp.style = new_tws->style;
463 temp.current_line = new_tws->current_line;
464 temp.pageout_count = new_tws->pageout_count;
465 temp.line_count = new_tws->line_count;
466 temp.number_of_lines = new_tws->number_of_lines;
467 temp.number_of_elements = new_tws->number_of_elements;
468 temp.expansion_count = new_tws->expansion_count;
469 temp.lookup_count = new_tws->lookup_count;
470 temp.insert_count = new_tws->insert_count;
471 for(i = 0; i<new_tws->expansion_count; i++) {
472 temp.obj_free_count[i] = new_tws->obj_free_count[i];
473 temp.addr_free_count[i] = new_tws->addr_free_count[i];
474 temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
475 temp.table[i] = new_tws->table[i];
476 temp.table_ele[i] = new_tws->table_ele[i];
477 temp.alt_ele[i] = new_tws->alt_ele[i];
478 temp.cache[i] = new_tws->cache[i];
479 }
480
481 new_tws->style = old_tws->style;
482 new_tws->current_line = old_tws->current_line;
483 new_tws->pageout_count = old_tws->pageout_count;
484 new_tws->line_count = old_tws->line_count;
485 new_tws->number_of_lines = old_tws->number_of_lines;
486 new_tws->number_of_elements = old_tws->number_of_elements;
487 new_tws->expansion_count = old_tws->expansion_count;
488 new_tws->lookup_count = old_tws->lookup_count;
489 new_tws->insert_count = old_tws->insert_count;
490 for(i = 0; i<old_tws->expansion_count; i++) {
491 new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
492 new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
493 new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
494 new_tws->table[i] = old_tws->table[i];
495 new_tws->table_ele[i] = old_tws->table_ele[i];
496 new_tws->alt_ele[i] = old_tws->alt_ele[i];
497 new_tws->cache[i] = old_tws->cache[i];
498 }
499
500 old_tws->style = temp.style;
501 old_tws->current_line = temp.current_line;
502 old_tws->pageout_count = temp.pageout_count;
503 old_tws->line_count = temp.line_count;
504 old_tws->number_of_lines = temp.number_of_lines;
505 old_tws->number_of_elements = temp.number_of_elements;
506 old_tws->expansion_count = temp.expansion_count;
507 old_tws->lookup_count = temp.lookup_count;
508 old_tws->insert_count = temp.insert_count;
509 for(i = 0; i<temp.expansion_count; i++) {
510 old_tws->obj_free_count[i] = temp.obj_free_count[i];
511 old_tws->addr_free_count[i] = temp.addr_free_count[i];
512 old_tws->free_hash_ele[i] = NULL;
513 old_tws->table[i] = temp.table[i];
514 old_tws->table_ele[i] = temp.table_ele[i];
515 old_tws->alt_ele[i] = temp.alt_ele[i];
516 old_tws->cache[i] = temp.cache[i];
517 }
518
519 tws_hash_destroy(new_tws);
520 tws_unlock(old_tws);
521 return KERN_SUCCESS;
522 }
523
524 tws_hash_t test_tws = 0;
525
526 kern_return_t
527 tws_insert(
528 tws_hash_t tws,
529 vm_object_offset_t offset,
530 vm_object_t object,
531 vm_offset_t page_addr,
532 vm_map_t map)
533 {
534 queue_t bucket;
535 unsigned int index;
536 unsigned int alt_index;
537 unsigned int index_enum[2];
538 unsigned int ele_index;
539 tws_hash_ptr_t cache_ele;
540 tws_hash_ptr_t obj_ele = NULL;
541 tws_hash_ptr_t addr_ele = NULL;
542 tws_hash_ptr_t *trailer;
543 tws_hash_ptr_t *free_list;
544 tws_hash_ele_t target_element = NULL;
545 int i,k;
546 int current_line;
547 int set;
548 int ctr;
549 unsigned int startup_cache_line;
550 vm_offset_t startup_page_addr;
551 int cache_full = 0;
552 int ask_for_startup_cache_release = 0;
553
554
555 if(!tws_lock_try(tws)) {
556 return KERN_FAILURE;
557 }
558 tws->insert_count++;
559 current_line = 0xFFFFFFFF;
560
561 startup_cache_line = 0;
562 startup_page_addr =
563 page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
564 if(tws->startup_cache) {
565 int age_of_cache;
566 age_of_cache = ((sched_tick - tws->time_of_creation)
567 >> SCHED_TICK_SHIFT);
568 startup_cache_line = tws_startup_list_lookup(
569 tws->startup_cache, startup_page_addr);
570 if(tws == test_tws) {
571 printf("cache_lookup, result = 0x%x, addr = 0x%x, "
"object 0x%x, offset 0x%x%x\n",
startup_cache_line, startup_page_addr,
object, offset);
572 }
573 if(age_of_cache > 60) {
574 ask_for_startup_cache_release = 1;
575 }
576 }
577 if((tws->startup_name != NULL) && (tws->mod == 0)) {
578 /* Ensure as good a working set as possible */
579 pmap_remove(map->pmap, 0, GLOBAL_SHARED_TEXT_SEGMENT);
580 pmap_remove(map->pmap,
581 GLOBAL_SHARED_DATA_SEGMENT
582 + SHARED_DATA_REGION_SIZE, 0xFFFFF000);
583 }
584
585 /* This next bit of code, and the alternate hash, */
586 /* are made necessary because of IPC COW */
587
588 /* Note: the use of page_addr modified by delta from offset */
589 /* frame base means we may miss some previous entries. However */
590 /* we will not miss the present entry. This is most important */
591 /* in avoiding duplication of entries against long lived non-cow */
592 /* objects */
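
/*
 * A worked sketch (hypothetical numbers): an element's page_addr is
 * the base of its 32-page frame, which can lie anywhere in the 31
 * pages below the address being probed. With page_addr = 0x41000 the
 * two probes hash 0x41000 & TWS_HASH_OFF_MASK = 0x40000 and
 * (0x41000 - 0x1f000) & TWS_HASH_OFF_MASK = 0x20000, the only two
 * aligned buckets such a frame base can fall into.
 */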
593 index_enum[0] = alt_tws_hash(
594 page_addr & TWS_HASH_OFF_MASK,
595 tws->number_of_elements, tws->number_of_lines);
596
597 index_enum[1] = alt_tws_hash(
598 (page_addr - 0x1f000) & TWS_HASH_OFF_MASK,
599 tws->number_of_elements, tws->number_of_lines);
600
601 for(ctr = 0; ctr < 2;) {
602 tws_hash_ele_t resident;
603 tws_traverse_address_hash_list(tws,
604 index_enum[ctr], page_addr, NULL,
605 0, NULL,
606 &cache_ele, &trailer, &free_list, 1);
607 if(cache_ele != NULL) {
608 /* found one */
609 resident = (tws_hash_ele_t)((unsigned int)
610 cache_ele->element & ~TWS_ADDR_HASH);
611 if((object == resident->object) &&
612 resident->offset ==
613 (offset & TWS_HASH_OFF_MASK)) {
614 /* This is our object/offset */
615 resident->page_cache
616 |= startup_cache_line;
617 resident->page_cache |=
618 (1<<(((vm_offset_t)
619 (offset & TWS_INDEX_MASK))>>12));
620 tws_unlock(tws);
621 if(ask_for_startup_cache_release)
622 return KERN_OPERATION_TIMED_OUT;
623 return KERN_SUCCESS;
624 }
625 if((object->shadow ==
626 resident->object) &&
627 ((resident->offset
628 + object->shadow_offset)
629 == (offset & TWS_HASH_OFF_MASK))) {
630 /* if we just shadowed, inherit */
631 /* access pattern from parent */
632 startup_cache_line |=
633 resident->page_cache;
634 /* throw out old entry */
635 resident->page_cache = 0;
636 break;
637 } else {
638 resident->page_cache &=
639 ~(1<<(((vm_offset_t)(page_addr
640 - resident->page_addr))
641 >>12));
642 }
643 /* Throw out old entry if there are no */
644 /* more pages in cache */
645 if(resident->page_cache == 0) {
646 /* delete addr hash entry */
647 cache_ele->element = 0;
648 *trailer = cache_ele->next;
649 cache_ele->next = *free_list;
650 *free_list = cache_ele;
651 /* go after object hash */
652 index = do_tws_hash(
653 resident->object,
654 resident->offset,
655 tws->number_of_elements,
656 tws->number_of_lines);
657 tws_traverse_object_hash_list(tws,
658 index, resident->object,
659 resident->offset,
660 0xFFFFFFFF, &cache_ele,
661 &trailer, &free_list);
662 if(cache_ele != NULL) {
663 if(tws->style ==
664 TWS_HASH_STYLE_SIGNAL) {
665 vm_object_deallocate(
666 cache_ele->element->object);
667 vm_map_deallocate(
668 cache_ele->element->map);
669 }
670 current_line =
671 cache_ele->element->line;
672 set = current_line
673 /tws->number_of_lines;
674 current_line -= set *
675 tws->number_of_lines;
676 if(cache_ele->element->object != 0) {
677 cache_ele->element->object = 0;
678 tws->cache[set]
679 [current_line].ele_count--;
680 }
681 cache_ele->element = 0;
682 *trailer = cache_ele->next;
683 cache_ele->next = *free_list;
684 *free_list = cache_ele;
685 }
686 }
687 continue;
688 }
689 ctr+=1;
690 }
691
692 /*
693 * We may or may not have a current line setting coming out of
694 * the code above. If we have a current line it means we can
695 * choose to back-fill the spot vacated by a previous entry.
696 * We have yet to do a definitive check using the original obj/off
697 * We will do that now and override the current line if we
698 * find an element
699 */
700
701 index = do_tws_hash(object, offset,
702 tws->number_of_elements, tws->number_of_lines);
703
704 alt_index = index_enum[0];
705
706 tws_traverse_object_hash_list(tws, index, object, offset,
707 0xFFFFFFFF, &cache_ele, &trailer, &free_list);
708 if(cache_ele != NULL) {
709 obj_ele = cache_ele;
710 current_line = cache_ele->element->line;
711 set = current_line/tws->number_of_lines;
712 current_line -= set * tws->number_of_lines;
713 target_element = cache_ele->element;
714
715 /* Now check to see if we have a hash addr for it */
716 tws_traverse_address_hash_list(tws,
717 alt_index, obj_ele->element->page_addr,
718 obj_ele->element->object,
719 obj_ele->element->offset,
720 obj_ele->element->map,
721 &cache_ele, &trailer, &free_list, 0);
722 if(cache_ele != NULL) {
723 addr_ele = cache_ele;
724 } else {
725 addr_ele = new_addr_hash(tws, set, alt_index);
726 /* if cannot allocate just do without */
727 /* we'll get it next time around */
728 }
729 }
730
731
732
733 if(tws->style == TWS_HASH_STYLE_SIGNAL) {
734 vm_object_reference(object);
735 vm_map_reference(map);
736 }
737
738 if(current_line == 0xFFFFFFFF) {
739 current_line = tws->current_line;
740 set = current_line/tws->number_of_lines;
741 current_line = current_line - (set * tws->number_of_lines);
742
743 #ifdef notdef
744 if(cache_full) {
745 tws->current_line = tws->number_of_lines - 1;
746 }
747 #endif
748 if(tws->cache[set][current_line].ele_count
749 >= tws->number_of_elements) {
750 current_line++;
751 tws->current_line++;
752 if(current_line == tws->number_of_lines) {
753 set++;
754 current_line = 0;
755 if (set == tws->expansion_count) {
756 if((tws->lookup_count <
757 (2 * tws->insert_count)) &&
758 (set<TWS_HASH_EXPANSION_MAX)) {
759 tws->lookup_count = 0;
760 tws->insert_count = 0;
761 if(tws->number_of_lines
762 < TWS_HASH_LINE_COUNT) {
763 tws->current_line--;
764 tws_unlock(tws);
765 return KERN_NO_SPACE;
766 }
767 if((tws->table[set] = (tws_hash_ptr_t *)
768 kalloc(sizeof(tws_hash_ptr_t)
769 * tws->number_of_lines
770 * tws->number_of_elements))
771 == NULL) {
772 set = 0;
773 } else if((tws->table_ele[set] =
774 (tws_hash_ptr_t)
775 kalloc(sizeof(struct tws_hash_ptr)
776 * tws->number_of_lines
777 * tws->number_of_elements))
778 == NULL) {
779 kfree((vm_offset_t)tws->table[set],
780 sizeof(tws_hash_ptr_t)
781 * tws->number_of_lines
782 * tws->number_of_elements);
783 set = 0;
784 } else if((tws->alt_ele[set] =
785 (tws_hash_ptr_t)
786 kalloc(sizeof(struct tws_hash_ptr)
787 * tws->number_of_lines
788 * tws->number_of_elements))
789 == NULL) {
790 kfree((vm_offset_t)tws->table_ele[set],
791 sizeof(struct tws_hash_ptr)
792 * tws->number_of_lines
793 * tws->number_of_elements);
794 kfree((vm_offset_t)tws->table[set],
795 sizeof(tws_hash_ptr_t)
796 * tws->number_of_lines
797 * tws->number_of_elements);
798 tws->table[set] = NULL;
799 set = 0;
800
801 } else if((tws->cache[set] =
802 (struct tws_hash_line *)
803 kalloc(sizeof
804 (struct tws_hash_line)
805 * tws->number_of_lines))
806 == NULL) {
807 kfree((vm_offset_t)tws->table[set],
808 sizeof(tws_hash_ptr_t)
809 * tws->number_of_lines
810 * tws->number_of_elements);
811 kfree((vm_offset_t)tws->table_ele[set],
812 sizeof(struct tws_hash_ptr)
813 * tws->number_of_lines
814 * tws->number_of_elements);
815 kfree((vm_offset_t)tws->alt_ele[set],
816 sizeof(struct tws_hash_ptr)
817 * tws->number_of_lines
818 * tws->number_of_elements);
819 tws->table[set] = NULL;
820 set = 0;
821
822 } else {
823 tws->free_hash_ele[set] =
824 (tws_hash_ptr_t)0;
825 tws->obj_free_count[set] = 0;
826 tws->addr_free_count[set] = 0;
827 bzero((char *)tws->table[set],
828 sizeof(tws_hash_ptr_t)
829 * tws->number_of_lines
830 * tws->number_of_elements);
831 bzero((char *)tws->table_ele[set],
832 sizeof(struct tws_hash_ptr)
833 * tws->number_of_lines
834 * tws->number_of_elements);
835 bzero((char *)tws->alt_ele[set],
836 sizeof(struct tws_hash_ptr)
837 * tws->number_of_lines
838 * tws->number_of_elements);
839 bzero((char *)tws->cache[set],
840 sizeof(struct tws_hash_line)
841 * tws->number_of_lines);
842 }
843 } else {
844 int age_of_cache;
845 age_of_cache =
846 ((sched_tick -
847 tws->time_of_creation)
848 >> SCHED_TICK_SHIFT);
849
850 if((tws->startup_cache) &&
851 (age_of_cache > 60)) {
852 ask_for_startup_cache_release = 1;
853 }
854 if((tws->startup_name != NULL) &&
855 (age_of_cache > 15)) {
856 tws->current_line--;
857 tws_unlock(tws);
858 return KERN_OPERATION_TIMED_OUT;
859 }
860 if((tws->startup_name != NULL) &&
861 (age_of_cache < 15)) {
862 /* If we are creating a */
863 /* cache, don't lose the */
864 /* early info */
865 tws->current_line--;
866 tws_unlock(tws);
867 return KERN_FAILURE;
868 }
869 tws->lookup_count = 0;
870 tws->insert_count = 0;
871 set = 0;
872 }
873 }
874 tws->current_line = set * tws->number_of_lines;
875 }
876 if(set < tws->expansion_count) {
877 tws_hash_line_clear(tws,
878 &(tws->cache[set][current_line]), TRUE);
879 if(tws->cache[set][current_line].ele_count
880 >= tws->number_of_elements) {
881 if(tws->style == TWS_HASH_STYLE_SIGNAL) {
882 vm_object_deallocate(object);
883 vm_map_deallocate(map);
884 }
885 tws_unlock(tws);
886 return KERN_FAILURE;
887 }
888 } else {
889 tws->expansion_count++;
890 }
891 }
892 }
893
894
895 /* set object hash element */
896 if(obj_ele == NULL) {
897 obj_ele = new_obj_hash(tws, set, index);
898 if(obj_ele == NULL) {
899 tws->cache[set][current_line].ele_count
900 = tws->number_of_elements;
901 tws_unlock(tws);
902 return KERN_FAILURE;
903 }
904 }
905
906 /* set address hash element */
907 if(addr_ele == NULL) {
908 addr_ele = new_addr_hash(tws, set, alt_index);
909 }
910
911 if(target_element == NULL) {
912 ele_index = 0;
913 for(i = 0; i<tws->number_of_elements; i++) {
914 if(tws->cache[set][current_line].
915 list[ele_index].object == 0) {
916 break;
917 }
918 ele_index++;
919 if(ele_index >= tws->number_of_elements)
920 ele_index = 0;
921
922 }
923
924 if(i == tws->number_of_elements)
925 panic("tws_insert: no free elements");
926
927 target_element =
928 &(tws->cache[set][current_line].list[ele_index]);
929
930 tws->cache[set][current_line].ele_count++;
931 }
932
933 obj_ele->element = target_element;
934 if(addr_ele) {
935 addr_ele->element = (tws_hash_ele_t)
936 (((unsigned int)target_element) | TWS_ADDR_HASH);
937 }
938 target_element->object = object;
939 target_element->offset = offset & TWS_HASH_OFF_MASK;
940 target_element->page_addr =
941 page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
942 target_element->map = map;
943 target_element->line =
944 current_line + (set * tws->number_of_lines);
945 if(startup_cache_line) {
946 target_element->page_cache = startup_cache_line;
947 }
948 target_element->page_cache |=
949 1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);
950
951
952 tws_unlock(tws);
953 if(ask_for_startup_cache_release)
954 return KERN_OPERATION_TIMED_OUT;
955 return KERN_SUCCESS;
956 }
957
958 /*
959 * tws_build_cluster
960 * Lengthen the cluster of pages by the number of pages encountered in the
961 * working set, up to the limit requested by the caller. The object needs
962 * to be locked on entry. The map does not, because the tws_lookup function
963 * is used only to find out whether there is an entry in the cache. No
964 * transient data from the cache is dereferenced.
965 */
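
/*
 * Usage sketch (illustrative only; it mirrors the real call made in
 * tws_line_signal below, where "object" is locked and the window is
 * capped at 0x16000 bytes):
 *
 *	before = offset;
 *	after = offset + PAGE_SIZE_64;
 *	tws_build_cluster(tws, object, &before, &after, 0x16000);
 *
 * On return [before, after) bounds the cluster to hand to the pager.
 */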
967 #if MACH_PAGEMAP
968 /*
969 * MACH page map - an optional optimization where a bit map is maintained
970 * by the VM subsystem for internal objects to indicate which pages of
971 * the object currently reside on backing store. This existence map
972 * duplicates information maintained by the vnode pager. It is
973 * created at the time of the first pageout against the object, i.e.
974 * at the same time pager for the object is created. The optimization
975 * is designed to eliminate pager interaction overhead, if it is
976 * 'known' that the page does not exist on backing store.
977 *
978 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
979 * either marked as paged out in the existence map for the object or no
980 * existence map exists for the object. LOOK_FOR() is one of the
981 * criteria in the decision to invoke the pager. It is also used as one
982 * of the criteria to terminate the scan for adjacent pages in a clustered
983 * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for
984 * permanent objects. Note also that if the pager for an internal object
985 * has not been created, the pager is not invoked regardless of the value
986 * of LOOK_FOR() and that clustered pagein scans are only done on an object
987 * for which a pager has been created.
988 *
989 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
990 * is marked as paged out in the existence map for the object. PAGED_OUT()
991 * PAGED_OUT() is used to determine if a page has already been pushed
992 * into a copy object in order to avoid a redundant page out operation.
993 */
994 #define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
995 != VM_EXTERNAL_STATE_ABSENT)
996 #define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
997 == VM_EXTERNAL_STATE_EXISTS)
998 #else /* MACH_PAGEMAP */
999 /*
1000 * If the MACH page map optimization is not enabled,
1001 * LOOK_FOR() always evaluates to TRUE. The pager will always be
1002 * invoked to resolve missing pages in an object, assuming the pager
1003 * has been created for the object. In a clustered page operation, the
1004 * absence of a page on backing store cannot be used to terminate
1005 * a scan for adjacent pages since that information is available only in
1006 * the pager. Hence pages that may not be paged out are potentially
1007 * included in a clustered request. The vnode pager is coded to deal
1008 * with any combination of absent/present pages in a clustered
1009 * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager
1010 * will always be invoked to push a dirty page into a copy object assuming
1011 * a pager has been created. If the page has already been pushed, the
1012 * pager will ignore the new request.
1013 */
1014 #define LOOK_FOR(o, f) TRUE
1015 #define PAGED_OUT(o, f) FALSE
1016 #endif /* MACH_PAGEMAP */
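
/*
 * A minimal sketch of how these predicates gate the scans below
 * (this is the pattern tws_build_cluster uses verbatim): a clustered
 * scan walking forward stops at the first offset whose page is known
 * to be absent from backing store.
 *
 *	while (length < max_length) {
 *		if ((object->existence_map != NULL)
 *				&& (!LOOK_FOR(object, after)))
 *			break;		(stop extending the cluster)
 *		...
 *	}
 */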
1017
1018 void
1019 tws_build_cluster(
1020 tws_hash_t tws,
1021 vm_object_t object,
1022 vm_object_offset_t *start,
1023 vm_object_offset_t *end,
1024 vm_size_t max_length)
1025 {
1026 tws_hash_line_t line;
1027 task_t task;
1028 vm_object_offset_t before = *start;
1029 vm_object_offset_t after = *end;
1030 vm_object_offset_t original_start = *start;
1031 vm_object_offset_t original_end = *end;
1032 vm_size_t length = (vm_size_t)(*end - *start);
1033 vm_page_t m;
1034 kern_return_t kret;
1035 vm_object_offset_t object_size;
1036 int age_of_cache;
1037 int pre_heat_size;
1038 unsigned int ele_cache;
1039 unsigned int end_cache = 0;
1040 unsigned int start_cache = 0;
1041
1042 if((object->private) || !(object->pager))
1043 return;
1044
1045 if (!object->internal) {
1046 kret = vnode_pager_get_object_size(
1047 object->pager,
1048 &object_size);
1049 } else {
1050 object_size = object->size;
1051 }
1052
1053 if((!tws) || (!tws_lock_try(tws))) {
1054 return;
1055 }
1056
1057 age_of_cache = ((sched_tick
1058 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
1059
1060 /* When pre-heat files are not available, resort to speculation */
1061 /* based on size of file */
1062
1063 if(tws->startup_cache || object->internal || age_of_cache > 15 ||
1064 (age_of_cache > 5 &&
1065 vm_page_free_count < (vm_page_free_target * 2) )) {
1066 pre_heat_size = 0;
1067 } else {
1068 if (object_size > (vm_object_offset_t)(1024 * 1024))
1069 pre_heat_size = 8 * PAGE_SIZE;
1070 else if (object_size > (vm_object_offset_t)(128 * 1024))
1071 pre_heat_size = 4 * PAGE_SIZE;
1072 else
1073 pre_heat_size = 2 * PAGE_SIZE;
1074 }
1075
1076 if ((age_of_cache < 10) && (tws->startup_cache)) {
1077 if ((max_length >= ((*end - *start)
1078 + (32 * PAGE_SIZE))) &&
1079 (tws_test_for_community(tws, object,
1080 *start, 3, &ele_cache))) {
1081 int expanded;
1082 start_cache = ele_cache;
1083 *start = *start & TWS_HASH_OFF_MASK;
1084 *end = *start + (32 * PAGE_SIZE_64);
1085 if(*end > object_size) {
1086 *end = trunc_page(object_size);
1087 max_length = 0;
1088 if(before >= *end) {
1089 *end = after;
1090 } else {
1091 end_cache = ele_cache;
1092 }
1093 } else {
1094 end_cache = ele_cache;
1095 }
1096 while (max_length > ((*end - *start)
1097 + (32 * PAGE_SIZE))) {
1098 expanded = 0;
1099 after = *end;
1100 before = *start - PAGE_SIZE_64;
1101 if((*end <= (object->size
1102 + (32 * PAGE_SIZE_64))) &&
1103 (tws_test_for_community(tws,
1104 object, after,
1105 5, &ele_cache))) {
1106 *end = after +
1107 (32 * PAGE_SIZE_64);
1108 if(*end > object_size) {
1109 *end = trunc_page(object_size);
1110 max_length = 0;
1111 if(*start >= *end) {
1112 *end = after;
1113 }
1114 }
1115 end_cache = ele_cache;
1116 expanded = 1;
1117 }
1118 if (max_length < ((*end - *start)
1119 + (32 * PAGE_SIZE_64))) {
1120 break;
1121 }
1122 if((*start >= (32 * PAGE_SIZE_64)) &&
1123 (tws_test_for_community(tws, object,
1124 before, 5, &ele_cache))) {
1125 *start = before;
1126 start_cache = ele_cache;
1127 expanded = 1;
1128 }
1129 if(expanded == 0)
1130 break;
1131 }
1132
1133 if(start_cache != 0) {
1134 unsigned int mask;
1135
1136 for (mask = 1; mask != 0; mask = mask << 1) {
1137 if (*start == original_start)
1138 break;
1139 if (!(start_cache & mask))
1140 *start += PAGE_SIZE_64;
1141 else
1142 break;
1143 }
1144 }
1145 if(end_cache != 0) {
1146 unsigned int mask;
1147
1148 for (mask = 0x80000000;
1149 mask != 0; mask = mask >> 1) {
1150 if (*end == original_end)
1151 break;
1152 if(!(end_cache & mask))
1153 *end -= PAGE_SIZE_64;
1154 else
1155 break;
1156 }
1157 }
1158
1159 if (*start >= *end)
1160 panic("bad clipping occurred\n");
1161
1162 tws_unlock(tws);
1163 return;
1164 }
1165 }
1166
1167 while ((length < max_length) &&
1168 (object_size >=
1169 (after + PAGE_SIZE_64))) {
1170 if(length >= pre_heat_size) {
1171 if(tws_internal_lookup(tws, after, object,
1172 &line) != KERN_SUCCESS) {
1173 vm_object_offset_t extend;
1174
1175 extend = after + PAGE_SIZE_64;
1176 if(tws_internal_lookup(tws, extend, object,
1177 &line) != KERN_SUCCESS) {
1178 break;
1179 }
1180 }
1181 }
1182
1183 if ((object->existence_map != NULL)
1184 && (!LOOK_FOR(object, after))) {
1185 break;
1186 }
1187
1188 if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
1189 /* we can bridge resident pages */
1190 after += PAGE_SIZE_64;
1191 length += PAGE_SIZE;
1192 continue;
1193 }
1194
1195 if (object->internal) {
1196 /*
1197 * need to acquire a real page in
1198 * advance because this acts as
1199 * a throttling mechanism for
1200 * data_requests to the default
1201 * pager. If this fails, give up
1202 * trying to find any more pages
1203 * in the cluster and send off the
1204 * request for what we already have.
1205 */
1206 if ((m = vm_page_grab()) == VM_PAGE_NULL) {
1207 break;
1208 }
1209 } else if ((m = vm_page_grab_fictitious())
1210 == VM_PAGE_NULL) {
1211 break;
1212 }
1213 m->absent = TRUE;
1214 m->unusual = TRUE;
1215 m->clustered = TRUE;
1216 m->list_req_pending = TRUE;
1217
1218 vm_page_insert(m, object, after);
1219 object->absent_count++;
1220 after += PAGE_SIZE_64;
1221 length += PAGE_SIZE;
1222 }
1223 *end = after;
1224 while (length < max_length) {
1225 if (before == 0)
1226 break;
1227 before -= PAGE_SIZE_64;
1228
1229 if(length >= pre_heat_size) {
1230 if(tws_internal_lookup(tws, before, object,
1231 &line) != KERN_SUCCESS) {
1232 vm_object_offset_t extend;
1233
1234 extend = before;
1235 if (extend == 0)
1236 break;
1237 extend -= PAGE_SIZE_64;
1238 if(tws_internal_lookup(tws, extend, object,
1239 &line) != KERN_SUCCESS) {
1240 break;
1241 }
1242 }
1243 }
1244 if ((object->existence_map != NULL)
1245 && (!LOOK_FOR(object, before))) {
1246 break;
1247 }
1248
1249 if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
1250 /* we can bridge resident pages */
1251 *start -= PAGE_SIZE_64;
1252 length += PAGE_SIZE;
1253 continue;
1254 }
1255
1256 if (object->internal) {
1257 /*
1258 * need to acquire a real page in
1259 * advance because this acts as
1260 * a throttling mechanism for
1261 * data_requests to the default
1262 * pager. If this fails, give up
1263 * trying to find any more pages
1264 * in the cluster and send off the
1265 * request for what we already have.
1266 */
1267 if ((m = vm_page_grab()) == VM_PAGE_NULL) {
1268 break;
1269 }
1270 } else if ((m = vm_page_grab_fictitious())
1271 == VM_PAGE_NULL) {
1272 break;
1273 }
1274 m->absent = TRUE;
1275 m->unusual = TRUE;
1276 m->clustered = TRUE;
1277 m->list_req_pending = TRUE;
1278
1279 vm_page_insert(m, object, before);
1280 object->absent_count++;
1281 *start -= PAGE_SIZE_64;
1282 length += PAGE_SIZE;
1283 }
1284 tws_unlock(tws);
1285 }
1286
1287 void
tws_line_signal(
1288 tws_hash_t tws,
1289 vm_map_t map,
1290 tws_hash_line_t hash_line,
1291 vm_offset_t target_page)
1292 {
1293 unsigned int i,j;
1294 vm_object_t object;
1295 vm_object_offset_t offset;
1296 vm_object_offset_t before;
1297 vm_object_offset_t after;
1298 struct tws_hash_ele *element;
1299 vm_page_t m,p;
1300 kern_return_t rc;
1301
1302 if(tws->style != TWS_HASH_STYLE_SIGNAL)
1303 return;
1304
1305 vm_map_lock(map);
1306 for (i=0; i<tws->number_of_elements; i++) {
1307
1308 vm_object_offset_t local_off = 0;
1309
1310 if(hash_line->list[i].object == 0)
1311 continue;
1312
1313 element = &hash_line->list[i];
1314
1315 if (element->page_addr == target_page)
1316 continue;
1317
1318 j = 1;
1319 while (j != 0) {
1320 if(j & element->page_cache)
1321 break;
1322 j = j << 1;
1323 local_off += PAGE_SIZE_64;
1324 }
1325 object = element->object;
1326 offset = element->offset + local_off;
1327
1328 /* first try a fast test to speed up no-op signal */
1329 if (((p = vm_page_lookup(object, offset)) != NULL)
1330 || (object->pager == NULL)
1331 || (object->shadow_severed)) {
1332 continue;
1333 }
1334
1335 if((!object->alive) ||
1336 (!object->pager_created) || (!object->pager_ready))
1337 continue;
1338
1339 if (object->internal) {
1340 if (object->existence_map == NULL) {
1341 if (object->shadow)
1342 continue;
1343 } else {
1344 if(!LOOK_FOR(object, offset))
1345 continue;
1346 }
1347 }
1348
1349 vm_object_reference(object);
1350 vm_map_unlock(map);
1351
1352 if(object->internal) {
1353 m = vm_page_grab();
1354 } else {
1355 m = vm_page_grab_fictitious();
1356 }
1357
1358 if(m == NULL) {
1359 vm_object_deallocate(object);
1360 vm_map_lock(map);
1361 continue;
1362 }
1363
1364 vm_object_lock(object);
1365 if (((p = vm_page_lookup(object, offset)) != NULL)
1366 || (object->pager == NULL)
1367 || (object->shadow_severed)) {
1368 VM_PAGE_FREE(m);
1369 vm_object_unlock(object);
1370 vm_object_deallocate(object);
1371 vm_map_lock(map);
1372 continue;
1373 }
1374
1375 vm_page_insert(m, object, offset);
1376
1377 if (object->absent_count > vm_object_absent_max) {
1378 VM_PAGE_FREE(m);
1379 vm_object_unlock(object);
1380 vm_object_deallocate(object);
1381 vm_map_lock(map);
1382 break;
1383 }
1384 m->list_req_pending = TRUE;
1385 m->absent = TRUE;
1386 m->unusual = TRUE;
1387 object->absent_count++;
1388
1389 before = offset;
1390 after = offset + PAGE_SIZE_64;
1391 tws_build_cluster(tws, object, &before, &after, 0x16000);
1392 vm_object_unlock(object);
1393
1394 rc = memory_object_data_request(object->pager,
1395 before + object->paging_offset,
1396 (vm_size_t)(after - before), VM_PROT_READ);
1397 if (rc != KERN_SUCCESS) {
1398 offset = before;
1399 vm_object_lock(object);
1400 while (offset < after) {
1401 m = vm_page_lookup(object, offset);
1402 if(m && m->absent && m->busy)
1403 VM_PAGE_FREE(m);
1404 offset += PAGE_SIZE;
1405 }
1406 vm_object_unlock(object);
1407 vm_object_deallocate(object);
1408 } else {
1409 vm_object_deallocate(object);
1410 }
1411 vm_map_lock(map);
1412 continue;
1413 }
1414 vm_map_unlock(map);
1415 }
1416
1417 /* tws locked on entry */
1418
1419 tws_startup_t
1420 tws_create_startup_list(
1421 tws_hash_t tws)
1422 {
1423
1424 tws_startup_t startup;
1425 unsigned int i,j,k;
1426 unsigned int total_elements;
1427 unsigned int startup_size;
1428 unsigned int sindex;
1429 unsigned int hash_index;
1430 tws_startup_ptr_t element;
1431
1432 total_elements = tws->expansion_count *
1433 (tws->number_of_lines * tws->number_of_elements);
1434
1435 startup_size = sizeof(struct tws_startup)
1436 + (total_elements * sizeof(tws_startup_ptr_t *))
1437 + (total_elements * sizeof(struct tws_startup_ptr))
1438 + (total_elements * sizeof(struct tws_startup_ele));
1439 startup = (tws_startup_t)(kalloc(startup_size));
1440
1441 if(startup == NULL)
1442 return startup;
1443
1444 bzero((char *) startup, startup_size);
1445
1446 startup->table = (tws_startup_ptr_t *)
1447 (((int)startup) + (sizeof(struct tws_startup)));
1448 startup->ele = (struct tws_startup_ptr *)
1449 (((vm_offset_t)startup->table) +
1450 (total_elements * sizeof(tws_startup_ptr_t)));
1451
1452 startup->array = (struct tws_startup_ele *)
1453 (((vm_offset_t)startup->ele) +
1454 (total_elements * sizeof(struct tws_startup_ptr)));
1455
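/*
 * Resulting single-allocation layout (a sketch; everything lives in
 * the one kalloc'ed block, at increasing offsets from "startup"):
 *
 *	struct tws_startup		header: sizes and counts
 *	table[total_elements]		hash buckets, byte offsets into ele[]
 *	ele[total_elements]		chain entries (next + element offset)
 *	array[total_elements]		page_addr / page_cache payload
 *
 * Byte offsets rather than pointers are stored in table[] and ele[]
 * so the image can be written to the page cache file and re-based on
 * read-back (see tws_read_startup_file below).
 */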
1456 startup->tws_hash_size = startup_size;
1457 startup->ele_count = 0; /* burn first hash ele, else we can't tell from zero */
1458 startup->array_size = total_elements;
1459 startup->hash_count = 1;
1460
1461 sindex = 0;
1462
1463
1464 for(i = 0; i<tws->number_of_lines; i++) {
1465 for(j = 0; j<tws->number_of_elements; j++) {
1466 for(k = 0; k<tws->expansion_count; k++) {
1467 tws_hash_ele_t entry;
1468 unsigned int hash_retry;
1469 vm_offset_t addr;
1470
1471 entry = &tws->cache[k][i].list[j];
1472 addr = entry->page_addr;
1473 hash_retry = 0;
1474 if(entry->object != 0) {
1475 /* get a hash element */
1476 hash_index = do_startup_hash(addr,
1477 startup->array_size);
1478
1479 if(startup->hash_count < total_elements) {
1480 element = &(startup->ele[startup->hash_count]);
1481 startup->hash_count += 1;
1482 } else {
1483 /* exit we're out of elements */
1484 break;
1485 }
1486 /* place the hash element */
1487 element->next = startup->table[hash_index];
1488 startup->table[hash_index] = (tws_startup_ptr_t)
1489 ((int)element - (int)&startup->ele[0]);
1490
1491 /* set entry OFFSET in hash element */
1492 element->element = (tws_startup_ele_t)
1493 ((int)&startup->array[sindex] -
1494 (int)&startup->array[0]);
1495
1496 startup->array[sindex].page_addr = entry->page_addr;
1497 startup->array[sindex].page_cache = entry->page_cache;
1498 startup->ele_count++;
1499 sindex++;
1500
1501 }
1502 }
1503 }
1504 }
1505
1506 return startup;
1507 }
1508
1509
1510 /*
1511 * Returns the page_cache bits covering an entire cache line. The
1512 * consumed entries are deleted from the startup cache on return. The
1513 * caller can check startup->ele_count for an empty list. Access
 * synchronization is the responsibility of the caller.
1514 */
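
/*
 * Usage sketch (this mirrors the call made from tws_insert above;
 * the working-set lock is already held there):
 *
 *	startup_cache_line = tws_startup_list_lookup(
 *			tws->startup_cache, startup_page_addr);
 *
 * The returned bits are OR'ed into the matching element's page_cache
 * to pre-heat the 32-page frame containing startup_page_addr.
 */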
1515
1516 unsigned int
1517 tws_startup_list_lookup(
1518 tws_startup_t startup,
1519 vm_offset_t addr)
1520 {
1521 unsigned int hash_index;
1522 unsigned int page_cache_bits;
1523 unsigned int startup_shift;
1524 tws_startup_ele_t entry;
1525 vm_offset_t next_addr;
1526 tws_startup_ptr_t element;
1527 tws_startup_ptr_t base_ele;
1528 tws_startup_ptr_t *previous_ptr;
1529
1530 page_cache_bits = 0;
1531
1532 hash_index = do_startup_hash(addr, startup->array_size);
1533
1534 if(((unsigned int)&(startup->table[hash_index])) >= startup->tws_hash_size) {
1535 return page_cache_bits = 0;
1536 }
1537 element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
1538 (int)&startup->ele[0]);
1539 base_ele = element;
1540 previous_ptr = &(startup->table[hash_index]);
1541 while(element > &startup->ele[0]) {
1542 if (((int)element + sizeof(struct tws_startup_ptr))
1543 > ((int)startup + startup->tws_hash_size)) {
1544 return page_cache_bits;
1545 }
1546 entry = (tws_startup_ele_t)
1547 ((int)element->element
1548 + (int)&startup->array[0]);
1549 if((((int)entry + sizeof(struct tws_startup_ele))
1550 > ((int)startup + startup->tws_hash_size))
1551 || ((int)entry < (int)startup)) {
1552 return page_cache_bits;
1553 }
1554 if ((addr >= entry->page_addr) &&
1555 (addr <= (entry->page_addr + 0x1F000))) {
1556 startup_shift = (addr - entry->page_addr)>>12;
1557 page_cache_bits |= entry->page_cache >> startup_shift;
1558 /* don't dump the pages, unless the addresses */
1559 /* line up perfectly. The cache may be used */
1560 /* by other mappings */
1561 entry->page_cache &= (1 << startup_shift) - 1;
1562 if(addr == entry->page_addr) {
1563 if(base_ele == element) {
1564 base_ele = (tws_startup_ptr_t)
1565 ((int)element->next
1566 + (int)&startup->ele[0]);
1567 startup->table[hash_index] = element->next;
1568 element = base_ele;
1569 } else {
1570 *previous_ptr = element->next;
1571 element = (tws_startup_ptr_t)
1572 ((int)*previous_ptr
1573 + (int)&startup->ele[0]);
1574 }
1575 entry->page_addr = 0;
1576 startup->ele_count--;
1577 continue;
1578 }
1579 }
1580 next_addr = addr + 0x1F000;
1581 if ((next_addr >= entry->page_addr) &&
1582 (next_addr <= (entry->page_addr + 0x1F000))) {
1583 startup_shift = (next_addr - entry->page_addr)>>12;
1584 page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
1585 entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
1586 if(entry->page_cache == 0) {
1587 if(base_ele == element) {
1588 base_ele = (tws_startup_ptr_t)
1589 ((int)element->next
1590 + (int)&startup->ele[0]);
1591 startup->table[hash_index] = element->next;
1592 element = base_ele;
1593 } else {
1594 *previous_ptr = element->next;
1595 element = (tws_startup_ptr_t)
1596 ((int)*previous_ptr
1597 + (int)&startup->ele[0]);
1598 }
1599 entry->page_addr = 0;
1600 startup->ele_count--;
1601 continue;
1602 }
1603 }
1604 previous_ptr = &(element->next);
1605 element = (tws_startup_ptr_t)
1606 ((int) element->next + (int) &startup->ele[0]);
1607 }
1608
1609 return page_cache_bits;
1610 }
1611
1612 kern_return_t
1613 tws_send_startup_info(
1614 task_t task)
1615 {
1616
1617 tws_hash_t tws;
1619
1620 task_lock(task);
1621 tws = (tws_hash_t)task->dynamic_working_set;
1622 task_unlock(task);
1623 if(tws == NULL) {
1624 return KERN_FAILURE;
1625 }
1626 return tws_internal_startup_send(tws);
1627 }
1628
1629
1630 kern_return_t
1631 tws_internal_startup_send(
1632 tws_hash_t tws)
1633 {
1634
1635 tws_startup_t scache;
1636
1637 if(tws == NULL) {
1638 return KERN_FAILURE;
1639 }
1640 tws_lock(tws);
1641 /* used to signal write or release depending on state of tws */
1642 if(tws->startup_cache) {
1643 vm_offset_t startup_buf;
1644 vm_size_t size;
1645 startup_buf = (vm_offset_t)tws->startup_cache;
1646 size = tws->startup_cache->tws_hash_size;
1647 tws->startup_cache = 0;
1648 tws_unlock(tws);
1649 kmem_free(kernel_map, startup_buf, size);
1650 return KERN_SUCCESS;
1651 }
1652 if(tws->startup_name == NULL) {
1653 tws_unlock(tws);
1654 return KERN_FAILURE;
1655 }
1656 scache = tws_create_startup_list(tws);
1657 if(scache == NULL) {
1658 tws_unlock(tws);
return KERN_FAILURE;
}
1659 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1660 scache, scache->tws_hash_size,
1661 tws->mod, tws->fid);
1662 kfree((vm_offset_t)scache, scache->tws_hash_size);
1663 kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
1664 tws->startup_name = NULL;
1665 tws_unlock(tws);
1666 return KERN_SUCCESS;
1667 }
1668
1669 kern_return_t
1670 tws_handle_startup_file(
1671 task_t task,
1672 unsigned int uid,
1673 char *app_name,
1674 vm_offset_t app_vp,
1675 boolean_t *new_info)
1676
1677 {
1678 tws_startup_t startup;
1679 vm_offset_t cache_size;
1680 kern_return_t error;
1681 int fid;
1682 int mod;
1683
1684 *new_info = FALSE;
1685 /* don't pre-heat kernel task */
1686 if(task == kernel_task)
1687 return KERN_SUCCESS;
1688 error = bsd_read_page_cache_file(uid, &fid,
1689 &mod, app_name,
1690 app_vp, &startup,
1691 &cache_size);
1692 if(error) {
1693 return KERN_FAILURE;
1694 }
1695 if(startup == NULL) {
1696 /* Entry for app does not exist, make */
1697 /* one */
1698 /* we will want our own copy of the shared */
1699 /* regions to pick up a true picture of all */
1700 /* the pages we will touch. */
1701 if((lsf_zone->count * lsf_zone->elem_size)
1702 > (lsf_zone->max_size >> 1)) {
1703 /* We don't want to run out of shared memory */
1704 /* map entries by starting too many private versions */
1705 /* of the shared library structures */
1706 return KERN_SUCCESS;
1707 }
1708 *new_info = TRUE;
1709 error = tws_write_startup_file(task,
1710 fid, mod, app_name, uid);
1711 if(error)
1712 return error;
1713 /* use the mod in the write case as an init */
1714 /* flag */
1715 mod = 0;
1716
1717 } else {
1718 error = tws_read_startup_file(task,
1719 (tws_startup_t)startup,
1720 cache_size);
1721 if(error) {
1722 kmem_free(kernel_map,
1723 (vm_offset_t)startup, cache_size);
1724 return error;
1725 }
1726 }
1727 return KERN_SUCCESS;
1728 }
1729
1730 kern_return_t
1731 tws_write_startup_file(
1732 task_t task,
1733 int fid,
1734 int mod,
1735 char *name,
1736 unsigned int uid)
1737 {
1738 tws_hash_t tws;
1739 unsigned int string_length;
1740
1741 string_length = strlen(name);
1742
1743 task_lock(task);
1744 tws = (tws_hash_t)task->dynamic_working_set;
1745
1746 task_unlock(task);
1747 if(tws == NULL) {
1748 /* create a dynamic working set of normal size */
1749 task_working_set_create(task, 0,
1750 0, TWS_HASH_STYLE_DEFAULT);
/* re-fetch the working set just created so the */
/* lock below is not taken on a NULL pointer */
task_lock(task);
tws = (tws_hash_t)task->dynamic_working_set;
task_unlock(task);
1751 }
1752 tws_lock(tws);
1753
1754 if(tws->startup_name != NULL) {
1755 tws_unlock(tws);
1756 return KERN_FAILURE;
1757 }
1758
1759 tws->startup_name = (char *)
1760 kalloc((string_length + 1) * (sizeof(char)));
1761 if(tws->startup_name == NULL) {
1762 tws_unlock(tws);
1763 return KERN_FAILURE;
1764 }
1765
1766 bcopy(name, (char *)tws->startup_name, string_length + 1);
1767 tws->startup_name_length = (string_length + 1) * sizeof(char);
1768 tws->uid = uid;
1769 tws->fid = fid;
1770 tws->mod = mod;
1771
1772 tws_unlock(tws);
1773 return KERN_SUCCESS;
1774 }
1775
1776 kern_return_t
1777 tws_read_startup_file(
1778 task_t task,
1779 tws_startup_t startup,
1780 vm_offset_t cache_size)
1781 {
1782 tws_hash_t tws;
1783 int error;
1784 int lines;
1785 int old_exp_count;
1786
1787 task_lock(task);
1788 tws = (tws_hash_t)task->dynamic_working_set;
1789
1790 if(cache_size < sizeof(struct tws_hash)) {
1791 task_unlock(task);
1792 kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
1793 return(KERN_SUCCESS);
1794 }
1795
1796 /* create a dynamic working set to match file size */
1797 lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
1798 /* we now need to divide out element size and word size */
1799 /* all fields are 4 bytes. There are 8 bytes in each hash element */
1800 /* entry, 4 bytes in each table ptr location and 8 bytes in each */
1801 /* page_cache entry, making a total of 20 bytes for each entry */
1802 lines = (lines/(20));
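
/*
 * A sketch of the arithmetic above: each cached entry costs
 * 8 (hash element) + 4 (table pointer) + 8 (page_cache) = 20 bytes,
 * so
 *
 *	lines = ((cache_size - sizeof(struct tws_hash))
 *			/ TWS_ARRAY_SIZE) / 20
 *
 * recovers the line count the cache file was written with.
 */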
1803 if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
1804 lines = TWS_SMALL_HASH_LINE_COUNT;
1805 task_unlock(task);
1806 kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
1807 return(KERN_SUCCESS);
1808 } else {
1809 old_exp_count = lines/TWS_HASH_LINE_COUNT;
1810 if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
1811 lines = (old_exp_count + 1)
1812 * TWS_HASH_LINE_COUNT;
1813 }
1814 if(tws == NULL) {
task_unlock(task);
1815 task_working_set_create(task, lines,
1816 0, TWS_HASH_STYLE_DEFAULT);
/* pick up the working set just created so the */
/* lock below is taken on a valid pointer */
task_lock(task);
tws = (tws_hash_t)task->dynamic_working_set;
1817 task_unlock(task);
1818 } else {
1819 task_unlock(task);
1820 tws_expand_working_set(
1821 (vm_offset_t)tws, lines, TRUE);
1822 }
1823 }
1824
1825
1826 tws_lock(tws);
1827
1828 if(tws->startup_cache != NULL) {
1829 tws_unlock(tws);
1830 return KERN_FAILURE;
1831 }
1832
1833
1834 /* now need to fix up internal table pointers */
1835 startup->table = (tws_startup_ptr_t *)
1836 (((int)startup) + (sizeof(struct tws_startup)));
1837 startup->ele = (struct tws_startup_ptr *)
1838 (((vm_offset_t)startup->table) +
1839 (startup->array_size * sizeof(tws_startup_ptr_t)));
1840 startup->array = (struct tws_startup_ele *)
1841 (((vm_offset_t)startup->ele) +
1842 (startup->array_size * sizeof(struct tws_startup_ptr)));
1843 /* the allocation size and file size should be the same */
1844 /* just in case they're not, make sure we dealloc correctly */
1845 startup->tws_hash_size = cache_size;
1846
1847
1848 tws->startup_cache = startup;
1849 tws_unlock(tws);
1850 return KERN_SUCCESS;
1851 }
1852
1853
1854 void
1855 tws_hash_ws_flush(tws_hash_t tws)
{
1856 tws_startup_t scache;
1857 if(tws == NULL) {
1858 return;
1859 }
1860 tws_lock(tws);
1861 if(tws->startup_name != NULL) {
1862 scache = tws_create_startup_list(tws);
1863 if(scache == NULL) {
1864 /* dump the name cache, we'll */
1865 /* get it next time */
1866 kfree((vm_offset_t)
1867 tws->startup_name,
1868 tws->startup_name_length);
1869 tws->startup_name = NULL;
1870 tws_unlock(tws);
1871 return;
1872 }
1873 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1874 scache, scache->tws_hash_size,
1875 tws->mod, tws->fid);
1876 kfree((vm_offset_t)scache,
1877 scache->tws_hash_size);
1878 kfree((vm_offset_t)
1879 tws->startup_name,
1880 tws->startup_name_length);
1881 tws->startup_name = NULL;
1882 }
1883 tws_unlock(tws);
1884 return;
1885 }
1886
1887 void
1888 tws_hash_destroy(tws_hash_t tws)
1889 {
1890 int i,k;
1891 vm_size_t cache_size;
1892
1893 if(tws->startup_cache != NULL) {
1894 kmem_free(kernel_map,
1895 (vm_offset_t)tws->startup_cache,
1896 tws->startup_cache->tws_hash_size);
1897 tws->startup_cache = NULL;
1898 }
1899 if(tws->startup_name != NULL) {
1900 tws_internal_startup_send(tws);
1901 }
1902 for (i=0; i<tws->number_of_lines; i++) {
1903 for(k=0; k<tws->expansion_count; k++) {
1904 /* clear the object refs */
1905 tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
1906 }
1907 }
1908 i = 0;
1909 while (i < tws->expansion_count) {
1910
1911 kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ptr_t)
1912 * tws->number_of_lines
1913 * tws->number_of_elements);
1914 kfree((vm_offset_t)tws->table_ele[i],
1915 sizeof(struct tws_hash_ptr)
1916 * tws->number_of_lines
1917 * tws->number_of_elements);
1918 kfree((vm_offset_t)tws->alt_ele[i],
1919 sizeof(struct tws_hash_ptr)
1920 * tws->number_of_lines
1921 * tws->number_of_elements);
1922 kfree((vm_offset_t)tws->cache[i], sizeof(struct tws_hash_line)
1923 * tws->number_of_lines);
1924 i++;
1925 }
1926 if(tws->startup_name != NULL) {
1927 kfree((vm_offset_t)tws->startup_name,
1928 tws->startup_name_length);
1929 }
1930 kfree((vm_offset_t)tws, sizeof(struct tws_hash));
1931 }
1932
1933 void
1934 tws_hash_clear(tws_hash_t tws)
1935 {
1936 int i, k;
1937
1938 for (i=0; i<tws->number_of_lines; i++) {
1939 for(k=0; k<tws->expansion_count; k++) {
1940 /* clear the object refs */
1941 tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
1942 }
1943 }
1944 }
1945
1946 kern_return_t
1947 task_working_set_create(
1948 task_t task,
1949 unsigned int lines,
1950 unsigned int rows,
1951 unsigned int style)
1952 {
1953
1954 if (lines == 0) {
1955 lines = TWS_HASH_LINE_COUNT;
1956 }
1957 if (rows == 0) {
1958 rows = TWS_ARRAY_SIZE;
1959 }
1960 if (style == TWS_HASH_STYLE_DEFAULT) {
1961 style = TWS_HASH_STYLE_BASIC;
1962 }
1963 task_lock(task);
1964 if(task->dynamic_working_set != 0) {
1965 task_unlock(task);
1966 return(KERN_FAILURE);
1967 } else if((task->dynamic_working_set
1968 = (vm_offset_t) tws_hash_create(lines, rows, style)) == 0) {
1969 task_unlock(task);
1970 return(KERN_NO_SPACE);
1971 }
1972 task_unlock(task);
1973 return KERN_SUCCESS;
1974 }
1975
1976
1977 /* Internal use only routines */
1978
1979
1980 /*
1981 * internal sub-function for address space lookup
1982 * returns the target element and the address of the
1983 * previous pointer. The previous pointer is the address
1984 * of the pointer pointing to the target element.
1985 * TWS must be locked
1986 */
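
/*
 * A sketch of the unlink idiom callers apply to the returned
 * out-parameters (this is the pattern used in tws_hash_line_clear
 * and tws_insert above):
 *
 *	tws_traverse_address_hash_list(tws, index, page_addr, object,
 *		offset, map, &cache_ele, &trailer, &free_list, 0);
 *	if (cache_ele != NULL) {
 *		cache_ele->element = 0;
 *		*trailer = cache_ele->next;	(unlink from the bucket)
 *		cache_ele->next = *free_list;	(push onto the free pool)
 *		*free_list = cache_ele;
 *	}
 */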
1987
1988 void
1989 tws_traverse_address_hash_list (
1990 tws_hash_t tws,
1991 unsigned int index,
1992 vm_offset_t page_addr,
1993 vm_object_t object,
1994 vm_object_offset_t offset,
1995 vm_map_t map,
1996 tws_hash_ptr_t *target_ele,
1997 tws_hash_ptr_t **previous_ptr,
1998 tws_hash_ptr_t **free_list,
1999 unsigned int exclusive_addr)
2000 {
2001 int k;
2002 tws_hash_ptr_t cache_ele;
2003 tws_hash_ptr_t base_ele;
2004
2005 *target_ele = NULL;
2006 *previous_ptr = NULL;
2007
2008 for(k=0; k<tws->expansion_count; k++) {
2009 tws_hash_ele_t ele;
2010 cache_ele = tws->table[k][index];
2011 base_ele = cache_ele;
2012 *previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
2013 while(cache_ele != NULL) {
2014 if(((unsigned int)
2015 cache_ele->element & TWS_ADDR_HASH) == 0) {
2016 *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
2017 cache_ele = cache_ele->next;
2018 continue;
2019 }
2020 ele = (tws_hash_ele_t)((unsigned int)
2021 cache_ele->element & ~TWS_ADDR_HASH);
2022 if ((ele == 0) || (ele->object == 0)) {
2023 /* A little clean-up of empty elements */
2024 cache_ele->element = 0;
2025 if(base_ele == cache_ele) {
2026 base_ele = cache_ele->next;
2027 tws->table[k][index] = cache_ele->next;
2028 cache_ele->next = tws->free_hash_ele[k];
2029 tws->free_hash_ele[k] = cache_ele;
2030 cache_ele = base_ele;
2031 } else {
2032 **previous_ptr = cache_ele->next;
2033 cache_ele->next = tws->free_hash_ele[k];
2034 tws->free_hash_ele[k] = cache_ele;
2035 cache_ele = **previous_ptr;
2036 }
2037 continue;
2038 }
2039
2040 if ((ele->page_addr <= page_addr)
2041 && (page_addr <= (ele->page_addr +
2042 (vm_offset_t)TWS_INDEX_MASK))
2043 && ((object == NULL)
2044 || ((object == ele->object)
2045 && (offset == ele->offset)
2046 && (map == ele->map)))) {
2047 if(exclusive_addr) {
2048 int delta;
2049 delta = ((page_addr - ele->page_addr)
2050 >> 12);
2051 if((1 << delta) & ele->page_cache) {
2052 /* We've found a match */
2053 *target_ele = cache_ele;
2054 *free_list =
2055 (tws_hash_ptr_t *)
2056 &(tws->free_hash_ele[k]);
2057 return;
2058 }
2059 } else {
2060 /* We've found a match */
2061 *target_ele = cache_ele;
2062 *free_list = (tws_hash_ptr_t *)
2063 &(tws->free_hash_ele[k]);
2064 return;
2065 }
2066 }
2067 *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
2068 cache_ele = cache_ele->next;
2069 }
2070 }
2071 }
2072
2073
2074 /*
2075 * internal sub-function for object space lookup
2076 * returns the target element and the address of the
2077 * previous pointer. The previous pointer is the address
2078 * of the pointer pointing to the target element.
2079 * TWS must be locked
2080 */
2081
2082
2083 void
2084 tws_traverse_object_hash_list (
2085 tws_hash_t tws,
2086 unsigned int index,
2087 vm_object_t object,
2088 vm_object_offset_t offset,
2089 unsigned int page_mask,
2090 tws_hash_ptr_t *target_ele,
2091 tws_hash_ptr_t **previous_ptr,
2092 tws_hash_ptr_t **free_list)
2093 {
2094 int k;
2095 tws_hash_ptr_t cache_ele;
2096 tws_hash_ptr_t base_ele;
2097
2098 *target_ele = NULL;
2099 *previous_ptr = NULL;
2100
2101 for(k=0; k<tws->expansion_count; k++) {
2102 cache_ele = tws->table[k][index];
2103 base_ele = cache_ele;
2104 *previous_ptr = &(tws->table[k][index]);
2105 while(cache_ele != NULL) {
2106 if((((unsigned int)cache_ele->element)
2107 & TWS_ADDR_HASH) != 0) {
2108 *previous_ptr = &(cache_ele->next);
2109 cache_ele = cache_ele->next;
2110 continue;
2111 }
2112 if ((cache_ele->element == 0) ||
2113 (cache_ele->element->object == 0)) {
2114 /* A little clean-up of empty elements */
2115 cache_ele->element = 0;
2116 if(base_ele == cache_ele) {
2117 base_ele = cache_ele->next;
2118 tws->table[k][index] = cache_ele->next;
2119 cache_ele->next = tws->free_hash_ele[k];
2120 tws->free_hash_ele[k] = cache_ele;
2121 cache_ele = tws->table[k][index];
2122 } else {
2123 **previous_ptr = cache_ele->next;
2124 cache_ele->next = tws->free_hash_ele[k];
2125 tws->free_hash_ele[k] = cache_ele;
2126 cache_ele = **previous_ptr;
2127 }
2128 continue;
2129 }
2130 if ((cache_ele->element->object == object)
2131 && (cache_ele->element->offset ==
2132 (offset - (offset & ~TWS_HASH_OFF_MASK)))) {
2133 if((cache_ele->element->page_cache & page_mask)
2134 || (page_mask == 0xFFFFFFFF)) {
2135 /* We've found a match */
2136 *target_ele = cache_ele;
2137 *free_list = &(tws->free_hash_ele[k]);
2138 return;
2139 }
2140 }
2141 *previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
2142 cache_ele = cache_ele->next;
2143 }
2144 }
2145 }
2146
2147
2148 /*
2149 * For a given object/offset, discover whether the indexed 32-page frame
2150 * containing the object/offset exists and whether there are at least
2151 * "threshold" pages present. Returns true if the population meets the
 * threshold.
2152 */
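
/*
 * Worked example (hypothetical bit pattern): with threshold == 3 and
 * the frame's page_cache == 0x113 (pages 0, 1, 4 and 8 resident), the
 * scan below counts its third set bit at page 4, reports a community,
 * and hands the full 0x113 mask back through *page_mask.
 */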
2153 int
2154 tws_test_for_community(
2155 tws_hash_t tws,
2156 vm_object_t object,
2157 vm_object_offset_t offset,
2158 unsigned int threshold,
2159 unsigned int *page_mask)
2160 {
2161 int index;
2162 tws_hash_ptr_t cache_ele;
2163 tws_hash_ptr_t *trailer;
2164 tws_hash_ptr_t *free_list;
2165 int community = 0;
2166
2167 index = do_tws_hash(object, offset,
2168 tws->number_of_elements, tws->number_of_lines);
2169 tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
2170 &cache_ele, &trailer, &free_list);
2171
2172 if(cache_ele != NULL) {
2173 int i;
2174 int ctr;
2175 ctr = 0;
2176 for(i=1; i!=0; i=i<<1) {
2177 if(i & cache_ele->element->page_cache)
2178 ctr++;
2179 if(ctr == threshold) {
2180 community = 1;
2181 *page_mask = cache_ele->element->page_cache;
2182 break;
2183 }
2184 }
2185 }
2186
2187 return community;
2188
2189 }
2190
2191
2192 /*
2193 * Gets new hash element for object hash from free pools
2194 * TWS must be locked
2195 */
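
/*
 * Allocation order (as implemented below): fresh elements are carved
 * from the preallocated table_ele[set] pool first, with
 * obj_free_count[set] acting as the high-water mark; only then does
 * the code fall back to free_hash_ele[set], the list of recycled
 * elements. NULL is returned when both are exhausted.
 */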
2196
2197 tws_hash_ptr_t
2198 new_obj_hash(
2199 tws_hash_t tws,
2200 unsigned int set,
2201 unsigned int index)
2202 {
2203 tws_hash_ptr_t element;
2204
2205 if(tws->obj_free_count[set]
 < tws->number_of_lines * tws->number_of_elements) {
2206 element = &(tws->table_ele[set][tws->obj_free_count[set]]);
2207 tws->obj_free_count[set]+=1;
2208 } else if(tws->free_hash_ele[set] == NULL) {
2209 return NULL;
2210 } else {
2211 element = tws->free_hash_ele[set];
2212 if(element == NULL)
2213 return element;
2214 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2215 }
2216 element->element = 0;
2217 element->next = tws->table[set][index];
2218 tws->table[set][index] = element;
2219 return element;
2220 }
2221
2222 /*
2223 * Gets new hash element for addr hash from free pools
2224 * TWS must be locked
2225 */
2226
2227 tws_hash_ptr_t
2228 new_addr_hash(
2229 tws_hash_t tws,
2230 unsigned int set,
2231 unsigned int index)
2232 {
2233 tws_hash_ptr_t element;
2234
2235 if(tws->addr_free_count[set]
2236 < tws->number_of_lines * tws->number_of_elements) {
2237 element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
2238 tws->addr_free_count[set]+=1;
2239 } else if(tws->free_hash_ele[set] == NULL) {
2240 return NULL;
2241 } else {
2242 element = tws->free_hash_ele[set];
2243 if(element == NULL)
2244 return element;
2245 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2246 }
2247 element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
2248 element->next = tws->table[set][index];
2249 tws->table[set][index] = element;
2250 return element;
2251 }