int startup_miss = 0;
/*
 * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 */
/*
 *	File:	vm/task_working_set.c
 *	Author:	Chris Youngworth
 *	Date:	2001
 *
 *	Working set detection and maintenance module
 */


#include <mach/mach_types.h>
#include <mach/shared_memory_server.h>
#include <vm/task_working_set.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/sched.h>

extern unsigned sched_tick;
extern zone_t lsf_zone;

/* declarations for internal use only routines */

tws_startup_t
tws_create_startup_list(
	tws_hash_t	tws);

unsigned int
tws_startup_list_lookup(
	tws_startup_t	startup,
	vm_offset_t	addr);

kern_return_t
tws_internal_startup_send(
	tws_hash_t	tws);

void
tws_traverse_address_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr);

void
tws_traverse_object_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		page_mask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list);

tws_hash_ptr_t
new_addr_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index);

tws_hash_ptr_t
new_obj_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index);

int tws_test_for_community(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		threshold,
	unsigned int		*page_mask);

/* Note: all of the routines below depend on the associated map lock for */
/* synchronization.  The map lock will be held when the routines are */
/* called and when they return. */
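
/*
 * Overview of the data structure: a tws_hash consists of up to
 * TWS_HASH_EXPANSION_MAX "expansion" sets.  Each set holds
 * number_of_lines cache lines of number_of_elements elements.  An
 * element (tws_hash_ele) describes one 32-page, TWS_HASH_OFF_MASK
 * aligned region of an object, with per-page residency recorded in the
 * 32-bit page_cache bitmap.  Every element is reachable through two
 * hash chains rooted in table[set]: an object/offset chain
 * (do_tws_hash, entries from table_ele) and an address chain
 * (alt_tws_hash, entries from alt_ele), the latter distinguished by
 * the TWS_ADDR_HASH tag in the element pointer.
 */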

tws_hash_t
tws_hash_create(
	unsigned int lines,
	unsigned int rows,
	unsigned int style)
{
	tws_hash_t	tws;
	int		i,j;


	if ((style != TWS_HASH_STYLE_BASIC) &&
			(style != TWS_HASH_STYLE_SIGNAL)) {
		return((tws_hash_t)NULL);
	}


	tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
	if(tws == (tws_hash_t)NULL)
		return tws;

	if((tws->table[0] = (tws_hash_ptr_t *)
			kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
			== NULL) {
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	if((tws->table_ele[0] = (tws_hash_ptr_t)
			kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
			== NULL) {
		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
				* lines * rows);
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	if((tws->alt_ele[0] = (tws_hash_ptr_t)
			kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
			== NULL) {
		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
				* lines * rows);
		kfree((vm_offset_t)tws->table_ele[0],
				sizeof(struct tws_hash_ptr)
				* lines * rows);
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	if((tws->cache[0] = (struct tws_hash_line *)
			kalloc(sizeof(struct tws_hash_line) * lines))
			== NULL) {
		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
				* lines * rows);
		kfree((vm_offset_t)tws->table_ele[0],
				sizeof(struct tws_hash_ptr)
				* lines * rows);
		kfree((vm_offset_t)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
				* lines * rows);
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
	tws->obj_free_count[0] = 0;
	tws->addr_free_count[0] = 0;

	/* most defaults are such that a bzero will initialize */
	bzero((char *)tws->table[0], sizeof(tws_hash_ptr_t)
			* lines * rows);
	bzero((char *)tws->table_ele[0], sizeof(struct tws_hash_ptr)
			* lines * rows);
	bzero((char *)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
			* lines * rows);
	bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
			* lines);

	mutex_init(&tws->lock, ETAP_VM_MAP);
	tws->style = style;
	tws->current_line = 0;
	tws->pageout_count = 0;
	tws->line_count = 0;
	tws->startup_cache = NULL;
	tws->startup_name = NULL;
	tws->number_of_lines = lines;
	tws->number_of_elements = rows;
	tws->expansion_count = 1;
	tws->lookup_count = 0;
	tws->insert_count = 0;
	tws->time_of_creation = sched_tick;

	return tws;
}
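
/*
 * Callers normally reach tws_hash_create() through
 * task_working_set_create() below, which substitutes the defaults
 * TWS_HASH_LINE_COUNT, TWS_ARRAY_SIZE and TWS_HASH_STYLE_BASIC for
 * zero/default arguments.
 */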

int newtest = 0;
void
tws_hash_line_clear(
	tws_hash_t	tws,
	tws_hash_line_t hash_line,
	boolean_t	live)
{
	struct tws_hash_ele	*hash_ele;
	struct tws_hash_ptr	**trailer;
	struct tws_hash_ptr	**free_list;
	tws_hash_ele_t		addr_ele;
	int			index;
	unsigned int		i, j, k;
	int			dump_pmap;
	int			hash_loop;


	if(tws->line_count < tws->number_of_lines) {
		tws->line_count++;
		dump_pmap = 1;
	} else {
		if(tws->pageout_count != vm_pageout_scan_event_counter) {
			tws->pageout_count =
				vm_pageout_scan_event_counter;
			tws->line_count = 0;
			dump_pmap = 1;
		} else {
			dump_pmap = 0;
		}
	}
	hash_line->ele_count = 0;

	for (i=0; i<tws->number_of_elements; i++) {
		hash_loop = 0;
		hash_ele = &(hash_line->list[i]);
		if(hash_ele->object != 0) {

			vm_object_offset_t	local_off = 0;
			tws_hash_ptr_t		cache_ele;

			index = alt_tws_hash(
				hash_ele->page_addr & TWS_HASH_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);
			if(cache_ele != NULL) {
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if(addr_ele != hash_ele)
					panic("tws_hash_line_clear:"
						" out of sync\n");
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}

			index = alt_tws_hash(
				(hash_ele->page_addr - 0x1f000)
						& TWS_HASH_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);

			if(cache_ele != NULL) {
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if(addr_ele != hash_ele)
					panic("tws_hash_line_clear: "
						"out of sync\n");
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}


			if((hash_ele->map != NULL) && (live)) {
				vm_page_t	p;

				for (j = 0x1; j != 0; j = j<<1) {
					if(j & hash_ele->page_cache) {
						p = vm_page_lookup(hash_ele->object,
							hash_ele->offset + local_off);
						if((p != NULL) && (p->wire_count == 0)
								&& (dump_pmap == 1)) {
							pmap_remove_some_phys((pmap_t)
								vm_map_pmap(
									current_map()),
								p->phys_page);
						}
					}
					local_off += PAGE_SIZE_64;
				}
			}

			if(tws->style == TWS_HASH_STYLE_SIGNAL) {
				vm_object_deallocate(hash_ele->object);
				vm_map_deallocate(hash_ele->map);
			}

			index = do_tws_hash(hash_ele->object, hash_ele->offset,
					tws->number_of_elements,
					tws->number_of_lines);

			tws_traverse_object_hash_list(tws,
				index, hash_ele->object, hash_ele->offset,
				0xFFFFFFFF, &cache_ele, &trailer, &free_list);
			if((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}
			hash_ele->object = 0;
		}
	}
}

kern_return_t
tws_internal_lookup(
	tws_hash_t		tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	tws_hash_line_t		*line)
{
	int		index;
	int		loop;
	int		set;
	int		ele_line;
	vm_offset_t	pagenum;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	*trailer;
	tws_hash_ptr_t	*free_list;

	/* don't cache private objects */
	if(object->private)
		return KERN_SUCCESS;

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);
	loop = 0;

	tws->lookup_count++;
	if(tws->lookup_count == 0)
		tws->insert_count = 0;
	if(tws->startup_name != NULL) {
		int age_of_cache;
		age_of_cache = ((sched_tick
			- tws->time_of_creation) >> SCHED_TICK_SHIFT);
		if (age_of_cache > 35) {
			return KERN_OPERATION_TIMED_OUT;
		}
	}

	if(tws->lookup_count > (4 * tws->expansion_count
			* tws->number_of_elements * tws->number_of_lines) &&
			(tws->lookup_count > (2 * tws->insert_count))) {
		if(tws->startup_cache) {
			int age_of_cache;
			age_of_cache = ((sched_tick
				- tws->time_of_creation) >> SCHED_TICK_SHIFT);
			if (age_of_cache > 60) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
	}

	pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
	pagenum = pagenum >> 12;
	pagenum = 1 << pagenum;	/* get the appropriate page in 32 page block */
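	/*
	 * pagenum now holds a single-bit mask: TWS_INDEX_MASK isolates
	 * the offset's position within its 32-page frame, the >>12
	 * converts bytes to a page number (0-31), and the shift selects
	 * that page's bit in an element's page_cache bitmap.  For
	 * example, an offset of 0x5000 into the frame gives page
	 * number 5 and mask 0x20.
	 */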
	tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
			&cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		set = cache_ele->element->line/tws->number_of_lines;
		ele_line = cache_ele->element->line
				- (set * tws->number_of_lines);
		*line = &tws->cache[set][ele_line];
		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}

kern_return_t
tws_lookup(
	tws_hash_t		tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	tws_hash_line_t		*line)
{
	kern_return_t kr;

	if(!tws_lock_try(tws)) {
		return KERN_FAILURE;
	}
	kr = tws_internal_lookup(tws,
			offset, object, line);
	tws_unlock(tws);
	return kr;
}

kern_return_t
tws_expand_working_set(
	vm_offset_t	tws,
	int		line_count,
	boolean_t	dump_data)
{
	tws_hash_t		new_tws;
	tws_hash_t		old_tws;
	unsigned int		i,j,k;
	struct tws_hash		temp;

	old_tws = (tws_hash_t)tws;

	/* Note we do an elaborate dance to preserve the header that */
	/* task is pointing to. In this way we can avoid taking a task */
	/* lock every time we want to access the tws */

	if (old_tws->number_of_lines >= line_count) {
		return KERN_FAILURE;
	}
	if((new_tws = tws_hash_create(line_count,
			old_tws->number_of_elements, old_tws->style)) == 0) {
		return(KERN_NO_SPACE);
	}
	tws_lock(old_tws);

	if(!dump_data) {
		for(i = 0; i<old_tws->number_of_lines; i++) {
			for(j = 0; j<old_tws->number_of_elements; j++) {
				for(k = 0; k<old_tws->expansion_count; k++) {
					tws_hash_ele_t		entry;
					vm_object_offset_t	paddr;
					unsigned int		page_index;
					entry = &old_tws->cache[k][i].list[j];
					if(entry->object != 0) {
						paddr = 0;
						for(page_index = 1; page_index != 0;
								page_index = page_index << 1) {
							if (entry->page_cache & page_index) {
								tws_insert(new_tws,
									entry->offset+paddr,
									entry->object,
									entry->page_addr+paddr,
									entry->map);
							}
							paddr += PAGE_SIZE;
						}

					}
				}
			}
		}
	}

	temp.style = new_tws->style;
	temp.current_line = new_tws->current_line;
	temp.pageout_count = new_tws->pageout_count;
	temp.line_count = new_tws->line_count;
	temp.number_of_lines = new_tws->number_of_lines;
	temp.number_of_elements = new_tws->number_of_elements;
	temp.expansion_count = new_tws->expansion_count;
	temp.lookup_count = new_tws->lookup_count;
	temp.insert_count = new_tws->insert_count;
	for(i = 0; i<new_tws->expansion_count; i++) {
		temp.obj_free_count[i] = new_tws->obj_free_count[i];
		temp.addr_free_count[i] = new_tws->addr_free_count[i];
		temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
		temp.table[i] = new_tws->table[i];
		temp.table_ele[i] = new_tws->table_ele[i];
		temp.alt_ele[i] = new_tws->alt_ele[i];
		temp.cache[i] = new_tws->cache[i];
	}

	new_tws->style = old_tws->style;
	new_tws->current_line = old_tws->current_line;
	new_tws->pageout_count = old_tws->pageout_count;
	new_tws->line_count = old_tws->line_count;
	new_tws->number_of_lines = old_tws->number_of_lines;
	new_tws->number_of_elements = old_tws->number_of_elements;
	new_tws->expansion_count = old_tws->expansion_count;
	new_tws->lookup_count = old_tws->lookup_count;
	new_tws->insert_count = old_tws->insert_count;
	for(i = 0; i<old_tws->expansion_count; i++) {
		new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
		new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
		new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
		new_tws->table[i] = old_tws->table[i];
		new_tws->table_ele[i] = old_tws->table_ele[i];
		new_tws->alt_ele[i] = old_tws->alt_ele[i];
		new_tws->cache[i] = old_tws->cache[i];
	}

	old_tws->style = temp.style;
	old_tws->current_line = temp.current_line;
	old_tws->pageout_count = temp.pageout_count;
	old_tws->line_count = temp.line_count;
	old_tws->number_of_lines = temp.number_of_lines;
	old_tws->number_of_elements = temp.number_of_elements;
	old_tws->expansion_count = temp.expansion_count;
	old_tws->lookup_count = temp.lookup_count;
	old_tws->insert_count = temp.insert_count;
	for(i = 0; i<temp.expansion_count; i++) {
		old_tws->obj_free_count[i] = temp.obj_free_count[i];
		old_tws->addr_free_count[i] = temp.addr_free_count[i];
		old_tws->free_hash_ele[i] = NULL;
		old_tws->table[i] = temp.table[i];
		old_tws->table_ele[i] = temp.table_ele[i];
		old_tws->alt_ele[i] = temp.alt_ele[i];
		old_tws->cache[i] = temp.cache[i];
	}

	tws_hash_destroy(new_tws);
	tws_unlock(old_tws);
	return KERN_SUCCESS;
}

tws_hash_t test_tws = 0;

kern_return_t
tws_insert(
	tws_hash_t		tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	vm_offset_t		page_addr,
	vm_map_t		map)
{
	queue_t		bucket;
	unsigned int	index;
	unsigned int	alt_index;
	unsigned int	index_enum[2];
	unsigned int	ele_index;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	obj_ele = NULL;
	tws_hash_ptr_t	addr_ele = NULL;
	tws_hash_ptr_t	*trailer;
	tws_hash_ptr_t	*free_list;
	tws_hash_ele_t	target_element = NULL;
	int		i,k;
	int		current_line;
	int		set;
	int		ctr;
	unsigned int	startup_cache_line;
	vm_offset_t	startup_page_addr;
	int		cache_full = 0;
	int		ask_for_startup_cache_release = 0;


	if(!tws_lock_try(tws)) {
		return KERN_FAILURE;
	}
	tws->insert_count++;
	current_line = 0xFFFFFFFF;

	startup_cache_line = 0;
	startup_page_addr =
		page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	if(tws->startup_cache) {
		int age_of_cache;
		age_of_cache = ((sched_tick - tws->time_of_creation)
				>> SCHED_TICK_SHIFT);
		startup_cache_line = tws_startup_list_lookup(
				tws->startup_cache, startup_page_addr);
		if(tws == test_tws) {
			printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", startup_cache_line, startup_page_addr, object, offset);
		}
		if(age_of_cache > 60) {
			ask_for_startup_cache_release = 1;
		}
	}
	/* This next bit of code, the address hash and the alternate */
	/* hash, is all made necessary because of IPC COW */

	/* Note: the use of page_addr modified by delta from offset */
	/* frame base means we may miss some previous entries. However */
	/* we will not miss the present entry. This is most important */
	/* in avoiding duplication of entries against long lived non-cow */
	/* objects */
	index_enum[0] = alt_tws_hash(
			page_addr & TWS_HASH_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	index_enum[1] = alt_tws_hash(
			(page_addr - 0x1f000) & TWS_HASH_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);
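
	/*
	 * Two probes are needed because an element's page_addr is
	 * aligned to a 32-page frame: the page being inserted may fall
	 * either in the frame that contains page_addr itself or in one
	 * whose base lies up to 31 pages (0x1f000 bytes) below it.
	 */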

	for(ctr = 0; ctr < 2;) {
		tws_hash_ele_t	resident;
		tws_traverse_address_hash_list(tws,
				index_enum[ctr], page_addr, NULL,
				0, NULL,
				&cache_ele, &trailer, &free_list, 1);
		if(cache_ele != NULL) {
			/* found one */
			resident = (tws_hash_ele_t)((unsigned int)
					cache_ele->element & ~TWS_ADDR_HASH);
			if((object == resident->object) &&
					resident->offset ==
					(offset & TWS_HASH_OFF_MASK)) {
				/* This is our object/offset */
				resident->page_cache
						|= startup_cache_line;
				resident->page_cache |=
					(1<<(((vm_offset_t)
					(offset & TWS_INDEX_MASK))>>12));
				tws_unlock(tws);
				if(ask_for_startup_cache_release)
					return KERN_OPERATION_TIMED_OUT;
				return KERN_SUCCESS;
			}
			if((object->shadow ==
					resident->object) &&
					((resident->offset
					+ object->shadow_offset)
					== (offset & TWS_HASH_OFF_MASK))) {
				/* if we just shadowed, inherit */
				/* access pattern from parent */
				startup_cache_line |=
					resident->page_cache;
				/* throw out old entry */
				resident->page_cache = 0;
				break;
			} else {
				resident->page_cache &=
					~(1<<(((vm_offset_t)(page_addr
						- resident->page_addr))
						>>12));
			}
			/* Throw out old entry if there are no */
			/* more pages in cache */
			if(resident->page_cache == 0) {
				/* delete addr hash entry */
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
				/* go after object hash */
				index = do_tws_hash(
						resident->object,
						resident->offset,
						tws->number_of_elements,
						tws->number_of_lines);
				tws_traverse_object_hash_list(tws,
						index, resident->object,
						resident->offset,
						0xFFFFFFFF, &cache_ele,
						&trailer, &free_list);
				if(cache_ele != NULL) {
					if(tws->style ==
						TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(
							cache_ele->element->object);
						vm_map_deallocate(
							cache_ele->element->map);
					}
					current_line =
						cache_ele->element->line;
					set = current_line
						/tws->number_of_lines;
					current_line -= set *
						tws->number_of_lines;
					if(cache_ele->element->object != 0) {
						cache_ele->element->object = 0;
						tws->cache[set]
							[current_line].ele_count--;
					}
					cache_ele->element = 0;
					*trailer = cache_ele->next;
					cache_ele->next = *free_list;
					*free_list = cache_ele;
				}
			}
			continue;
		}
		ctr+=1;
	}

	/*
	 * We may or may not have a current line setting coming out of
	 * the code above. If we have a current line it means we can
	 * choose to back-fill the spot vacated by a previous entry.
	 * We have yet to do a definitive check using the original
	 * object/offset; we will do that now and override the current
	 * line if we find an element.
	 */

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);

	alt_index = index_enum[0];

	tws_traverse_object_hash_list(tws, index, object, offset,
			0xFFFFFFFF, &cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		obj_ele = cache_ele;
		current_line = cache_ele->element->line;
		set = current_line/tws->number_of_lines;
		current_line -= set * tws->number_of_lines;
		target_element = cache_ele->element;

		/* Now check to see if we have a hash addr for it */
		tws_traverse_address_hash_list(tws,
				alt_index, obj_ele->element->page_addr,
				obj_ele->element->object,
				obj_ele->element->offset,
				obj_ele->element->map,
				&cache_ele, &trailer, &free_list, 0);
		if(cache_ele != NULL) {
			addr_ele = cache_ele;
		} else {
			addr_ele = new_addr_hash(tws, set, alt_index);
			/* if cannot allocate just do without */
			/* we'll get it next time around */
		}
	}


	if(tws->style == TWS_HASH_STYLE_SIGNAL) {
		vm_object_reference(object);
		vm_map_reference(map);
	}

	if(current_line == 0xFFFFFFFF) {
		current_line = tws->current_line;
		set = current_line/tws->number_of_lines;
		current_line = current_line - (set * tws->number_of_lines);

#ifdef notdef
		if(cache_full) {
			tws->current_line = tws->number_of_lines - 1;
		}
#endif
		if(tws->cache[set][current_line].ele_count
				>= tws->number_of_elements) {
			current_line++;
			tws->current_line++;
			if(current_line == tws->number_of_lines) {
				set++;
				current_line = 0;
				if (set == tws->expansion_count) {
					if((tws->lookup_count <
						(2 * tws->insert_count)) &&
						(set<TWS_HASH_EXPANSION_MAX)) {
						tws->lookup_count = 0;
						tws->insert_count = 0;
						if(tws->number_of_lines
							< TWS_HASH_LINE_COUNT) {
							tws->current_line--;
							tws_unlock(tws);
							return KERN_NO_SPACE;
						}
						/* object persistence is guaranteed by */
						/* an elevated paging or object */
						/* reference count in the caller. */
						vm_object_unlock(object);
						if((tws->table[set] = (tws_hash_ptr_t *)
							kalloc(sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements))
							== NULL) {
							set = 0;
						} else if((tws->table_ele[set] =
							(tws_hash_ptr_t)
							kalloc(sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements))
							== NULL) {
							kfree((vm_offset_t)tws->table[set],
								sizeof(tws_hash_ptr_t)
								* tws->number_of_lines
								* tws->number_of_elements);
							set = 0;
						} else if((tws->alt_ele[set] =
							(tws_hash_ptr_t)
							kalloc(sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements))
							== NULL) {
							kfree((vm_offset_t)tws->table_ele[set],
								sizeof(struct tws_hash_ptr)
								* tws->number_of_lines
								* tws->number_of_elements);
							kfree((vm_offset_t)tws->table[set],
								sizeof(tws_hash_ptr_t)
								* tws->number_of_lines
								* tws->number_of_elements);
							tws->table[set] = NULL;
							set = 0;

						} else if((tws->cache[set] =
							(struct tws_hash_line *)
							kalloc(sizeof
							(struct tws_hash_line)
							* tws->number_of_lines))
							== NULL) {
							kfree((vm_offset_t)tws->alt_ele[set],
								sizeof(struct tws_hash_ptr)
								* tws->number_of_lines
								* tws->number_of_elements);
							kfree((vm_offset_t)tws->table_ele[set],
								sizeof(struct tws_hash_ptr)
								* tws->number_of_lines
								* tws->number_of_elements);
							kfree((vm_offset_t)tws->table[set],
								sizeof(tws_hash_ptr_t)
								* tws->number_of_lines
								* tws->number_of_elements);
							tws->table[set] = NULL;
							set = 0;

						} else {
							tws->free_hash_ele[set] =
								(tws_hash_ptr_t)0;
							tws->obj_free_count[set] = 0;
							tws->addr_free_count[set] = 0;
							bzero((char *)tws->table[set],
								sizeof(tws_hash_ptr_t)
								* tws->number_of_lines
								* tws->number_of_elements);
							bzero((char *)tws->table_ele[set],
								sizeof(struct tws_hash_ptr)
								* tws->number_of_lines
								* tws->number_of_elements);
							bzero((char *)tws->alt_ele[set],
								sizeof(struct tws_hash_ptr)
								* tws->number_of_lines
								* tws->number_of_elements);
							bzero((char *)tws->cache[set],
								sizeof(struct tws_hash_line)
								* tws->number_of_lines);
						}
						vm_object_lock(object);
					} else {
						int age_of_cache;
						age_of_cache =
							((sched_tick -
							tws->time_of_creation)
							>> SCHED_TICK_SHIFT);

						if((tws->startup_cache) &&
							(age_of_cache > 60)) {
							ask_for_startup_cache_release = 1;
						}
						if((tws->startup_name != NULL) &&
							(age_of_cache > 15)) {
							tws->current_line--;
							tws_unlock(tws);
							return KERN_OPERATION_TIMED_OUT;
						}
						if((tws->startup_name != NULL) &&
							(age_of_cache < 15)) {
							/* If we are creating a */
							/* cache, don't lose the */
							/* early info */
							tws->current_line--;
							tws_unlock(tws);
							return KERN_FAILURE;
						}
						tws->lookup_count = 0;
						tws->insert_count = 0;
						set = 0;
					}
				}
				tws->current_line = set * tws->number_of_lines;
			}
			if(set < tws->expansion_count) {
				tws_hash_line_clear(tws,
					&(tws->cache[set][current_line]), TRUE);
				if(tws->cache[set][current_line].ele_count
						>= tws->number_of_elements) {
					if(tws->style == TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(object);
						vm_map_deallocate(map);
					}
					tws_unlock(tws);
					return KERN_FAILURE;
				}
			} else {
				tws->expansion_count++;
			}
		}
	}


	/* set object hash element */
	if(obj_ele == NULL) {
		obj_ele = new_obj_hash(tws, set, index);
		if(obj_ele == NULL) {
			tws->cache[set][current_line].ele_count
				= tws->number_of_elements;
			tws_unlock(tws);
			return KERN_FAILURE;
		}
	}

	/* set address hash element */
	if(addr_ele == NULL) {
		addr_ele = new_addr_hash(tws, set, alt_index);
	}

	if(target_element == NULL) {
		ele_index = 0;
		for(i = 0; i<tws->number_of_elements; i++) {
			if(tws->cache[set][current_line].
					list[ele_index].object == 0) {
				break;
			}
			ele_index++;
			if(ele_index >= tws->number_of_elements)
				ele_index = 0;

		}

		if(i == tws->number_of_elements)
			panic("tws_insert: no free elements");

		target_element =
			&(tws->cache[set][current_line].list[ele_index]);

		tws->cache[set][current_line].ele_count++;
	}

	obj_ele->element = target_element;
	if(addr_ele) {
		addr_ele->element = (tws_hash_ele_t)
			(((unsigned int)target_element) | TWS_ADDR_HASH);
	}
	target_element->object = object;
	target_element->offset = offset & TWS_HASH_OFF_MASK;
	target_element->page_addr =
		page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	target_element->map = map;
	target_element->line =
		current_line + (set * tws->number_of_lines);
	if(startup_cache_line) {
		target_element->page_cache = startup_cache_line;
	}
	target_element->page_cache |=
		1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);


	tws_unlock(tws);
	if(ask_for_startup_cache_release)
		return KERN_OPERATION_TIMED_OUT;
	return KERN_SUCCESS;
}

/*
 * tws_build_cluster
 * lengthen the cluster of pages by the number of pages encountered in the
 * working set up to the limit requested by the caller. The object needs
 * to be locked on entry. The map does not because the tws_lookup function
 * is used only to find if there is an entry in the cache. No transient
 * data from the cache is de-referenced.
 *
 */
#if MACH_PAGEMAP
/*
 * MACH page map - an optional optimization where a bit map is maintained
 * by the VM subsystem for internal objects to indicate which pages of
 * the object currently reside on backing store. This existence map
 * duplicates information maintained by the vnode pager. It is
 * created at the time of the first pageout against the object, i.e.
 * at the same time the pager for the object is created. The optimization
 * is designed to eliminate pager interaction overhead, if it is
 * 'known' that the page does not exist on backing store.
 *
 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
 * either marked as paged out in the existence map for the object or no
 * existence map exists for the object. LOOK_FOR() is one of the
 * criteria in the decision to invoke the pager. It is also used as one
 * of the criteria to terminate the scan for adjacent pages in a clustered
 * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for
 * permanent objects. Note also that if the pager for an internal object
 * has not been created, the pager is not invoked regardless of the value
 * of LOOK_FOR() and that clustered pagein scans are only done on an object
 * for which a pager has been created.
 *
 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
 * is marked as paged out in the existence map for the object.
 * PAGED_OUT() is used to determine if a page has already been pushed
 * into a copy object in order to avoid a redundant page out operation.
 */
#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			!= VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			== VM_EXTERNAL_STATE_EXISTS)
#else /* MACH_PAGEMAP */
/*
 * If the MACH page map optimization is not enabled,
 * LOOK_FOR() always evaluates to TRUE. The pager will always be
 * invoked to resolve missing pages in an object, assuming the pager
 * has been created for the object. In a clustered page operation, the
 * absence of a page on backing store cannot be used to terminate
 * a scan for adjacent pages since that information is available only in
 * the pager. Hence pages that may not be paged out are potentially
 * included in a clustered request. The vnode pager is coded to deal
 * with any combination of absent/present pages in a clustered
 * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager
 * will always be invoked to push a dirty page into a copy object assuming
 * a pager has been created. If the page has already been pushed, the
 * pager will ignore the new request.
 */
#define LOOK_FOR(o, f) TRUE
#define PAGED_OUT(o, f) FALSE
#endif /* MACH_PAGEMAP */
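
/*
 * tws_build_cluster below consults LOOK_FOR() (when an existence map
 * is present) to stop extending a cluster at pages known to be absent
 * from backing store; otherwise it extends until it meets a resident
 * page, exhausts the pre-heat/lookup criteria, or reaches the
 * caller's max_length.
 */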

void
tws_build_cluster(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	*start,
	vm_object_offset_t	*end,
	vm_size_t		max_length)
{
	tws_hash_line_t		line;
	task_t			task;
	vm_object_offset_t	before = *start;
	vm_object_offset_t	after = *end;
	vm_object_offset_t	original_start = *start;
	vm_object_offset_t	original_end = *end;
	vm_size_t		length = (vm_size_t)(*end - *start);
	vm_page_t		m;
	kern_return_t		kret;
	vm_object_offset_t	object_size;
	int			age_of_cache;
	int			pre_heat_size;
	unsigned int		ele_cache;
	unsigned int		end_cache = 0;
	unsigned int		start_cache = 0;

	if((object->private) || !(object->pager))
		return;

	if (!object->internal) {
		kret = vnode_pager_get_object_size(
			object->pager,
			&object_size);
	} else {
		object_size = object->size;
	}

	if((!tws) || (!tws_lock_try(tws))) {
		return;
	}

	age_of_cache = ((sched_tick
			- tws->time_of_creation) >> SCHED_TICK_SHIFT);

	/* When pre-heat files are not available, resort to speculation */
	/* based on size of file */

	if(tws->startup_cache || object->internal || age_of_cache > 15 ||
			(age_of_cache > 5 &&
			vm_page_free_count < (vm_page_free_target * 2))) {
		pre_heat_size = 0;
	} else {
		if (object_size > (vm_object_offset_t)(1024 * 1024))
			pre_heat_size = 8 * PAGE_SIZE;
		else if (object_size > (vm_object_offset_t)(128 * 1024))
			pre_heat_size = 4 * PAGE_SIZE;
		else
			pre_heat_size = 2 * PAGE_SIZE;
	}
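
	/*
	 * pre_heat_size caps how far the loops below extend the cluster
	 * without requiring working-set hits: external files over 1 MB
	 * pre-heat 8 pages, over 128 KB 4 pages, anything smaller 2
	 * pages; a young or startup-cached working set skips
	 * speculation entirely.
	 */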

	if ((age_of_cache < 10) && (tws->startup_cache)) {
		if ((max_length >= ((*end - *start)
					+ (32 * PAGE_SIZE))) &&
				(tws_test_for_community(tws, object,
					*start, 3, &ele_cache))) {
			int expanded;
			start_cache = ele_cache;
			*start = *start & TWS_HASH_OFF_MASK;
			*end = *start + (32 * PAGE_SIZE_64);
			if(*end > object_size) {
				*end = trunc_page_64(object_size);
				max_length = 0;
				if(before >= *end) {
					*end = after;
				} else {
					end_cache = ele_cache;
				}
			} else {
				end_cache = ele_cache;
			}
			while (max_length > ((*end - *start)
						+ (32 * PAGE_SIZE))) {
				expanded = 0;
				after = *end;
				before = *start - PAGE_SIZE_64;
				if((*end <= (object->size
						+ (32 * PAGE_SIZE_64))) &&
						(tws_test_for_community(tws,
							object, after,
							5, &ele_cache))) {
					*end = after +
						(32 * PAGE_SIZE_64);
					if(*end > object_size) {
						*end = trunc_page_64(object_size);
						max_length = 0;
						if(*start >= *end) {
							*end = after;
						}
					}
					end_cache = ele_cache;
					expanded = 1;
				}
				if (max_length > ((*end - *start)
						+ (32 * PAGE_SIZE_64))) {
					break;
				}
				if((*start >= (32 * PAGE_SIZE_64)) &&
						(tws_test_for_community(tws, object,
							before, 5, &ele_cache))) {
					*start = before;
					start_cache = ele_cache;
					expanded = 1;
				}
				if(expanded == 0)
					break;
			}

			if(start_cache != 0) {
				unsigned int mask;

				for (mask = 1; mask != 0; mask = mask << 1) {
					if (*start == original_start)
						break;
					if (!(start_cache & mask))
						*start += PAGE_SIZE_64;
					else
						break;
				}
			}
			if(end_cache != 0) {
				unsigned int mask;

				for (mask = 0x80000000;
						mask != 0; mask = mask >> 1) {
					if (*end == original_end)
						break;
					if(!(end_cache & mask))
						*end -= PAGE_SIZE_64;
					else
						break;
				}
			}

			if (*start >= *end)
				panic("bad clipping occurred\n");

			tws_unlock(tws);
			return;
		}
	}

	while ((length < max_length) &&
			(object_size >=
				(after + PAGE_SIZE_64))) {
		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, after, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t	extend;

				extend = after + PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}

		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, after))) {
			break;
		}

		if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
			/*
			 * don't bridge resident pages
			 */
			break;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager. If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, after);
		object->absent_count++;
		after += PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	*end = after;
	while (length < max_length) {
		if (before == 0)
			break;
		before -= PAGE_SIZE_64;

		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, before, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t	extend;

				extend = before;
				if (extend == 0)
					break;
				extend -= PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}
		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, before))) {
			break;
		}

		if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
			/*
			 * don't bridge resident pages
			 */
			break;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager. If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, before);
		object->absent_count++;
		*start -= PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	tws_unlock(tws);
}

void
tws_line_signal(
	tws_hash_t	tws,
	vm_map_t	map,
	tws_hash_line_t hash_line,
	vm_offset_t	target_page)
{
	unsigned int		i,j;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_offset_t	before;
	vm_object_offset_t	after;
	struct tws_hash_ele	*element;
	vm_page_t		m,p;
	kern_return_t		rc;

	if(tws->style != TWS_HASH_STYLE_SIGNAL)
		return;

	vm_map_lock(map);
	for (i=0; i<tws->number_of_elements; i++) {

		vm_object_offset_t local_off = 0;

		if(hash_line->list[i].object == 0)
			continue;

		element = &hash_line->list[i];

		if (element->page_addr == target_page)
			continue;

		j = 1;
		while (j != 0) {
			if(j & element->page_cache)
				break;
			j = j << 1;
			local_off += PAGE_SIZE_64;
		}
		object = element->object;
		offset = element->offset + local_off;

		/* first try a fast test to speed up no-op signal */
		if (((p = vm_page_lookup(object, offset)) != NULL)
				|| (object->pager == NULL)
				|| (object->shadow_severed)) {
			continue;
		}

		if((!object->alive) ||
				(!object->pager_created) || (!object->pager_ready))
			continue;

		if (object->internal) {
			if (object->existence_map == NULL) {
				if (object->shadow)
					continue;
			} else {
				if(!LOOK_FOR(object, offset))
					continue;
			}
		}

		vm_object_reference(object);
		vm_map_unlock(map);

		if(object->internal) {
			m = vm_page_grab();
		} else {
			m = vm_page_grab_fictitious();
		}

		if(m == NULL) {
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		vm_object_lock(object);
		if (((p = vm_page_lookup(object, offset)) != NULL)
				|| (object->pager == NULL)
				|| (object->shadow_severed)) {
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		vm_page_insert(m, object, offset);

		if (object->absent_count > vm_object_absent_max) {
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			break;
		}
		m->list_req_pending = TRUE;
		m->absent = TRUE;
		m->unusual = TRUE;
		object->absent_count++;

		before = offset;
		after = offset + PAGE_SIZE_64;
		tws_build_cluster(tws, object, &before, &after, 0x16000);
		vm_object_unlock(object);

		rc = memory_object_data_request(object->pager,
				before + object->paging_offset,
				(vm_size_t)(after - before), VM_PROT_READ);
		if (rc != KERN_SUCCESS) {
			offset = before;
			vm_object_lock(object);
			while (offset < after) {
				m = vm_page_lookup(object, offset);
				if(m && m->absent && m->busy)
					VM_PAGE_FREE(m);
				offset += PAGE_SIZE;
			}
			vm_object_unlock(object);
			vm_object_deallocate(object);
		} else {
			vm_object_deallocate(object);
		}
		vm_map_lock(map);
		continue;
	}
	vm_map_unlock(map);
}

/* tws locked on entry */

tws_startup_t
tws_create_startup_list(
	tws_hash_t	tws)
{

	tws_startup_t		startup;
	unsigned int		i,j,k;
	unsigned int		total_elements;
	unsigned int		startup_size;
	unsigned int		sindex;
	unsigned int		hash_index;
	tws_startup_ptr_t	element;

	total_elements = tws->expansion_count *
		(tws->number_of_lines * tws->number_of_elements);

	startup_size = sizeof(struct tws_startup)
		+ (total_elements * sizeof(tws_startup_ptr_t *))
		+ (total_elements * sizeof(struct tws_startup_ptr))
		+ (total_elements * sizeof(struct tws_startup_ele));
	startup = (tws_startup_t)(kalloc(startup_size));

	if(startup == NULL)
		return startup;

	bzero((char *) startup, startup_size);

	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
			(total_elements * sizeof(tws_startup_ptr_t)));

	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
			(total_elements * sizeof(struct tws_startup_ptr)));

	startup->tws_hash_size = startup_size;
	startup->ele_count = 0; /* burn first hash ele, else we can't tell from zero */
	startup->array_size = total_elements;
	startup->hash_count = 1;
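
	/*
	 * The startup cache is one contiguous kalloc block: the header,
	 * then table[], ele[] and array[] carved out of it.  Chain
	 * links (table[] slots and each element's next/element fields)
	 * hold byte offsets relative to ele[0]/array[0] rather than raw
	 * pointers, so the blob can be written to disk by
	 * bsd_write_page_cache_file() and re-based by
	 * tws_read_startup_file() when it is mapped back in.
	 */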

	sindex = 0;


	for(i = 0; i<tws->number_of_lines; i++) {
		for(j = 0; j<tws->number_of_elements; j++) {
			for(k = 0; k<tws->expansion_count; k++) {
				tws_hash_ele_t	entry;
				unsigned int	hash_retry;
				vm_offset_t	addr;

				entry = &tws->cache[k][i].list[j];
				addr = entry->page_addr;
				hash_retry = 0;
				if(entry->object != 0) {
					/* get a hash element */
					hash_index = do_startup_hash(addr,
						startup->array_size);

					if(startup->hash_count < total_elements) {
						element = &(startup->ele[startup->hash_count]);
						startup->hash_count += 1;
					} else {
						/* exit; we're out of elements */
						break;
					}
					/* place the hash element */
					element->next = startup->table[hash_index];
					startup->table[hash_index] = (tws_startup_ptr_t)
						((int)element - (int)&startup->ele[0]);

					/* set entry OFFSET in hash element */
					element->element = (tws_startup_ele_t)
						((int)&startup->array[sindex] -
						(int)&startup->array[0]);

					startup->array[sindex].page_addr = entry->page_addr;
					startup->array[sindex].page_cache = entry->page_cache;
					startup->ele_count++;
					sindex++;

				}
			}
		}
	}

	return startup;
}


/*
 * Returns an entire cache line. The line is deleted from the startup
 * cache on return. The caller can check startup->ele_count for an empty
 * list. Access synchronization is the responsibility of the caller.
 */

unsigned int
tws_startup_list_lookup(
	tws_startup_t	startup,
	vm_offset_t	addr)
{
	unsigned int		hash_index;
	unsigned int		page_cache_bits;
	unsigned int		startup_shift;
	tws_startup_ele_t	entry;
	vm_offset_t		next_addr;
	tws_startup_ptr_t	element;
	tws_startup_ptr_t	base_ele;
	tws_startup_ptr_t	*previous_ptr;

	page_cache_bits = 0;

	hash_index = do_startup_hash(addr, startup->array_size);

	/* sanity-check the stored offset before using it */
	if(((unsigned int)startup->table[hash_index]) >= startup->tws_hash_size) {
		return page_cache_bits;
	}
	element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
			(int)&startup->ele[0]);
	base_ele = element;
	previous_ptr = &(startup->table[hash_index]);
	while(element > &startup->ele[0]) {
		if (((int)element + sizeof(struct tws_startup_ptr))
				> ((int)startup + startup->tws_hash_size)) {
			return page_cache_bits;
		}
		entry = (tws_startup_ele_t)
			((int)element->element
				+ (int)&startup->array[0]);
		if((((int)entry + sizeof(struct tws_startup_ele))
				> ((int)startup + startup->tws_hash_size))
				|| ((int)entry < (int)startup)) {
			return page_cache_bits;
		}
		if ((addr >= entry->page_addr) &&
				(addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache >> startup_shift;
			/* don't dump the pages, unless the addresses */
			/* line up perfectly. The cache may be used */
			/* by other mappings */
			entry->page_cache &= (1 << startup_shift) - 1;
			if(addr == entry->page_addr) {
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		next_addr = addr + 0x1F000;
		if ((next_addr >= entry->page_addr) &&
				(next_addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (next_addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
			entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
			if(entry->page_cache == 0) {
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		previous_ptr = &(element->next);
		element = (tws_startup_ptr_t)
			((int) element->next + (int) &startup->ele[0]);
	}

	return page_cache_bits;
}

kern_return_t
tws_send_startup_info(
	task_t	task)
{

	tws_hash_t	tws;

	task_lock(task);
	tws = (tws_hash_t)task->dynamic_working_set;
	task_unlock(task);
	if(tws == NULL) {
		return KERN_FAILURE;
	}
	return tws_internal_startup_send(tws);
}


kern_return_t
tws_internal_startup_send(
	tws_hash_t	tws)
{

	tws_startup_t	scache;

	if(tws == NULL) {
		return KERN_FAILURE;
	}
	tws_lock(tws);
	/* used to signal write or release depending on state of tws */
	if(tws->startup_cache) {
		vm_offset_t	startup_buf;
		vm_size_t	size;
		startup_buf = (vm_offset_t)tws->startup_cache;
		size = tws->startup_cache->tws_hash_size;
		tws->startup_cache = 0;
		tws_unlock(tws);
		kmem_free(kernel_map, startup_buf, size);
		return KERN_SUCCESS;
	}
	if(tws->startup_name == NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}
	scache = tws_create_startup_list(tws);
	if(scache == NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}
	bsd_write_page_cache_file(tws->uid, tws->startup_name,
				scache, scache->tws_hash_size,
				tws->mod, tws->fid);
	kfree((vm_offset_t)scache, scache->tws_hash_size);
	kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
	tws->startup_name = NULL;
	tws_unlock(tws);
	return KERN_SUCCESS;
}

kern_return_t
tws_handle_startup_file(
	task_t		task,
	unsigned int	uid,
	char		*app_name,
	vm_offset_t	app_vp,
	boolean_t	*new_info)

{
	tws_startup_t	startup;
	vm_offset_t	cache_size;
	kern_return_t	error;
	int		fid;
	int		mod;

	*new_info = FALSE;
	/* don't pre-heat kernel task */
	if(task == kernel_task)
		return KERN_SUCCESS;
	error = bsd_read_page_cache_file(uid, &fid,
					&mod, app_name,
					app_vp, &startup,
					&cache_size);
	if(error) {
		return KERN_FAILURE;
	}
	if(startup == NULL) {
		/* Entry for app does not exist, make one. */
		/* We will want our own copy of the shared */
		/* regions to pick up a true picture of all */
		/* the pages we will touch. */
		if((lsf_zone->count * lsf_zone->elem_size)
				> (lsf_zone->max_size >> 1)) {
			/* We don't want to run out of shared memory */
			/* map entries by starting too many private versions */
			/* of the shared library structures */
			return KERN_SUCCESS;
		}
		*new_info = TRUE;

		error = tws_write_startup_file(task,
					fid, mod, app_name, uid);
		if(error)
			return error;

	} else {
		error = tws_read_startup_file(task,
					(tws_startup_t)startup,
					cache_size);
		if(error) {
			kmem_free(kernel_map,
				(vm_offset_t)startup, cache_size);
			return error;
		}
	}
	return KERN_SUCCESS;
}

kern_return_t
tws_write_startup_file(
	task_t		task,
	int		fid,
	int		mod,
	char		*name,
	unsigned int	uid)
{
	tws_hash_t	tws;
	unsigned int	string_length;

	string_length = strlen(name);

	task_lock(task);
	tws = (tws_hash_t)task->dynamic_working_set;
	task_unlock(task);

	if(tws == NULL) {
		/* create a dynamic working set of normal size */
		task_working_set_create(task, 0,
				0, TWS_HASH_STYLE_DEFAULT);
		task_lock(task);
		tws = (tws_hash_t)task->dynamic_working_set;
		task_unlock(task);
		if(tws == NULL)
			return KERN_FAILURE;
	}
	tws_lock(tws);

	if(tws->startup_name != NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}

	tws->startup_name = (char *)
		kalloc((string_length + 1) * (sizeof(char)));
	if(tws->startup_name == NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}

	bcopy(name, (char *)tws->startup_name, string_length + 1);
	tws->startup_name_length = (string_length + 1) * sizeof(char);
	tws->uid = uid;
	tws->fid = fid;
	tws->mod = mod;

	tws_unlock(tws);
	return KERN_SUCCESS;
}

kern_return_t
tws_read_startup_file(
	task_t		task,
	tws_startup_t	startup,
	vm_offset_t	cache_size)
{
	tws_hash_t	tws;
	int		error;
	int		lines;
	int		old_exp_count;

	task_lock(task);
	tws = (tws_hash_t)task->dynamic_working_set;

	if(cache_size < sizeof(struct tws_hash)) {
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	}

	/* create a dynamic working set to match file size */
	lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
	/* we now need to divide out element size and word size */
	/* all fields are 4 bytes. There are 8 bytes in each hash element */
	/* entry, 4 bytes in each table ptr location and 8 bytes in each */
	/* page_cache entry, making a total of 20 bytes for each entry */
	lines = (lines/(20));
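	/*
	 * Net effect: lines is roughly
	 * (cache_size - sizeof(struct tws_hash)) / (20 * TWS_ARRAY_SIZE),
	 * i.e. one cache line per 20 bytes of on-disk state per element.
	 */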
	if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
		lines = TWS_SMALL_HASH_LINE_COUNT;
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	} else {
		old_exp_count = lines/TWS_HASH_LINE_COUNT;
		if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
			lines = (old_exp_count + 1)
					* TWS_HASH_LINE_COUNT;
		}
		if(tws == NULL) {
			task_unlock(task);
			task_working_set_create(task, lines,
					0, TWS_HASH_STYLE_DEFAULT);
			task_lock(task);
			tws = (tws_hash_t)task->dynamic_working_set;
			task_unlock(task);
			if(tws == NULL)
				return KERN_FAILURE;
		} else {
			task_unlock(task);
			tws_expand_working_set(
				(vm_offset_t)tws, lines, TRUE);
		}
	}


	tws_lock(tws);

	if(tws->startup_cache != NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}


	/* now need to fix up internal table pointers */
	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
			(startup->array_size * sizeof(tws_startup_ptr_t)));
	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
			(startup->array_size * sizeof(struct tws_startup_ptr)));
	/* the allocation size and file size should be the same */
	/* just in case they're not, make sure we dealloc correctly */
	startup->tws_hash_size = cache_size;

	tws->startup_cache = startup;
	tws_unlock(tws);
	return KERN_SUCCESS;
}


void
tws_hash_ws_flush(tws_hash_t tws)
{
	tws_startup_t	scache;

	if(tws == NULL) {
		return;
	}
	tws_lock(tws);
	if(tws->startup_name != NULL) {
		scache = tws_create_startup_list(tws);
		if(scache == NULL) {
			/* dump the name cache, we'll */
			/* get it next time */
			kfree((vm_offset_t)
				tws->startup_name,
				tws->startup_name_length);
			tws->startup_name = NULL;
			tws_unlock(tws);
			return;
		}
		bsd_write_page_cache_file(tws->uid, tws->startup_name,
				scache, scache->tws_hash_size,
				tws->mod, tws->fid);
		kfree((vm_offset_t)scache,
				scache->tws_hash_size);
		kfree((vm_offset_t)
			tws->startup_name,
			tws->startup_name_length);
		tws->startup_name = NULL;
	}
	tws_unlock(tws);
	return;
}

void
tws_hash_destroy(tws_hash_t tws)
{
	int	i,k;

	if(tws->startup_cache != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)tws->startup_cache,
			tws->startup_cache->tws_hash_size);
		tws->startup_cache = NULL;
	}
	if(tws->startup_name != NULL) {
		tws_internal_startup_send(tws);
	}
	for (i=0; i<tws->number_of_lines; i++) {
		for(k=0; k<tws->expansion_count; k++) {
			/* clear the object refs */
			tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
		}
	}
	i = 0;
	while (i < tws->expansion_count) {

		kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ptr_t)
				* tws->number_of_lines
				* tws->number_of_elements);
		kfree((vm_offset_t)tws->table_ele[i],
				sizeof(struct tws_hash_ptr)
				* tws->number_of_lines
				* tws->number_of_elements);
		kfree((vm_offset_t)tws->alt_ele[i],
				sizeof(struct tws_hash_ptr)
				* tws->number_of_lines
				* tws->number_of_elements);
		kfree((vm_offset_t)tws->cache[i], sizeof(struct tws_hash_line)
				* tws->number_of_lines);
		i++;
	}
	if(tws->startup_name != NULL) {
		kfree((vm_offset_t)tws->startup_name,
				tws->startup_name_length);
	}
	kfree((vm_offset_t)tws, sizeof(struct tws_hash));
}

void
tws_hash_clear(tws_hash_t tws)
{
	int	i, k;

	for (i=0; i<tws->number_of_lines; i++) {
		for(k=0; k<tws->expansion_count; k++) {
			/* clear the object refs */
			tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
		}
	}
}

kern_return_t
task_working_set_create(
	task_t		task,
	unsigned int	lines,
	unsigned int	rows,
	unsigned int	style)
{

	if (lines == 0) {
		lines = TWS_HASH_LINE_COUNT;
	}
	if (rows == 0) {
		rows = TWS_ARRAY_SIZE;
	}
	if (style == TWS_HASH_STYLE_DEFAULT) {
		style = TWS_HASH_STYLE_BASIC;
	}
	task_lock(task);
	if(task->dynamic_working_set != 0) {
		task_unlock(task);
		return(KERN_FAILURE);
	} else if((task->dynamic_working_set
			= (vm_offset_t) tws_hash_create(lines, rows, style)) == 0) {
		task_unlock(task);
		return(KERN_NO_SPACE);
	}
	task_unlock(task);
	return KERN_SUCCESS;
}


/* Internal use only routines */


/*
 * internal sub-function for address space lookup
 * returns the target element and the address of the
 * previous pointer. The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked
 */

void
tws_traverse_address_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr)
{
	int		k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		tws_hash_ele_t ele;
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
		while(cache_ele != NULL) {
			if(((unsigned int)
					cache_ele->element & TWS_ADDR_HASH) == 0) {
				*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			ele = (tws_hash_ele_t)((unsigned int)
					cache_ele->element & ~TWS_ADDR_HASH);
			if ((ele == 0) || (ele->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = base_ele;
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}

			if ((ele->page_addr <= page_addr)
					&& (page_addr <= (ele->page_addr +
						(vm_offset_t)TWS_INDEX_MASK))
					&& ((object == NULL)
						|| ((object == ele->object)
						&& (offset == ele->offset)
						&& (map == ele->map)))) {
				if(exclusive_addr) {
					int delta;
					delta = ((page_addr - ele->page_addr)
							>> 12);
					if((1 << delta) & ele->page_cache) {
						/* We've found a match */
						*target_ele = cache_ele;
						*free_list =
							(tws_hash_ptr_t *)
							&(tws->free_hash_ele[k]);
						return;
					}
				} else {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = (tws_hash_ptr_t *)
						&(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}


/*
 * internal sub-function for object space lookup
 * returns the target element and the address of the
 * previous pointer. The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked
 */


void
tws_traverse_object_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		page_mask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list)
{
	int		k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = &(tws->table[k][index]);
		while(cache_ele != NULL) {
			if((((unsigned int)cache_ele->element)
					& TWS_ADDR_HASH) != 0) {
				*previous_ptr = &(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			if ((cache_ele->element == 0) ||
					(cache_ele->element->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = tws->table[k][index];
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}
			if ((cache_ele->element->object == object)
					&& (cache_ele->element->offset ==
					(offset - (offset & ~TWS_HASH_OFF_MASK)))) {
				if((cache_ele->element->page_cache & page_mask)
						|| (page_mask == 0xFFFFFFFF)) {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = &(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}


/*
 * For a given object/offset, discover whether the indexed 32 page frame
 * containing the object/offset exists and if there are at least threshold
 * pages present. Returns true if the population meets the threshold.
 */
int
tws_test_for_community(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		threshold,
	unsigned int		*page_mask)
{
	int		index;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	*trailer;
	tws_hash_ptr_t	*free_list;
	int		community = 0;

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);
	tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
			&cache_ele, &trailer, &free_list);

	if(cache_ele != NULL) {
		int i;
		int ctr;
		ctr = 0;
		for(i=1; i!=0; i=i<<1) {
			if(i & cache_ele->element->page_cache)
				ctr++;
			if(ctr == threshold) {
				community = 1;
				*page_mask = cache_ele->element->page_cache;
				break;
			}
		}
	}

	return community;

}


/*
 * Gets a new hash element for the object hash from the free pools.
 * TWS must be locked
 */

tws_hash_ptr_t
new_obj_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index)
{
	tws_hash_ptr_t element;

	if(tws->obj_free_count[set] < tws->number_of_lines * tws->number_of_elements) {
		element = &(tws->table_ele[set][tws->obj_free_count[set]]);
		tws->obj_free_count[set] += 1;
	} else if(tws->free_hash_ele[set] == NULL) {
		return NULL;
	} else {
		element = tws->free_hash_ele[set];
		if(element == NULL)
			return element;
		tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
	}
	element->element = 0;
	element->next = tws->table[set][index];
	tws->table[set][index] = element;
	return element;
}
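
/*
 * new_obj_hash above and new_addr_hash below draw first from their
 * preallocated arrays (table_ele for object entries, alt_ele for
 * address entries), using obj_free_count/addr_free_count as a
 * high-water mark, and only then from the shared free_hash_ele
 * recycle list refilled by the traverse routines above.
 */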

/*
 * Gets a new hash element for the addr hash from the free pools.
 * TWS must be locked
 */

tws_hash_ptr_t
new_addr_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index)
{
	tws_hash_ptr_t element;

	if(tws->addr_free_count[set]
			< tws->number_of_lines * tws->number_of_elements) {
		element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
		tws->addr_free_count[set] += 1;
	} else if(tws->free_hash_ele[set] == NULL) {
		return NULL;
	} else {
		element = tws->free_hash_ele[set];
		if(element == NULL)
			return element;
		tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
	}
	element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
	element->next = tws->table[set][index];
	tws->table[set][index] = element;
	return element;
}