/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File:	vm/task_working_set.c
 * Author:	Chris Youngworth
 * Date:	2001
 *
 * Working set detection and maintenance module
 */

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/shared_memory_server.h>
#include <vm/task_working_set.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/sched.h>
#include <kern/kalloc.h>

#include <vm/vm_protos.h>

/*
 * LP64todo - Task Working Set Support is for 32-bit only
 */
extern zone_t lsf_zone;

/* declarations for internal use only routines */
int startup_miss = 0;

tws_hash_t
tws_hash_create(
    unsigned int lines,
    unsigned int rows,
    unsigned int style);

kern_return_t
tws_write_startup_file(
    task_t task,
    int fid,
    int mod,
    char *name,
    unsigned int uid);

kern_return_t
tws_read_startup_file(
    task_t task,
    tws_startup_t startup,
    vm_offset_t cache_size);

tws_startup_t
tws_create_startup_list(
    tws_hash_t tws);

unsigned int
tws_startup_list_lookup(
    tws_startup_t startup,
    vm_offset_t addr);

kern_return_t
tws_internal_startup_send(
    tws_hash_t tws);

void
tws_hash_line_clear(
    tws_hash_t tws,
    tws_hash_line_t hash_line,
    vm_object_t object,
    boolean_t live);

void
tws_hash_clear(
    tws_hash_t tws);

void
tws_traverse_address_hash_list(
    tws_hash_t tws,
    unsigned int index,
    vm_offset_t page_addr,
    vm_object_t object,
    vm_object_offset_t offset,
    vm_map_t map,
    tws_hash_ptr_t *target_ele,
    tws_hash_ptr_t **previous_ptr,
    tws_hash_ptr_t **free_list,
    unsigned int exclusive_addr);

void
tws_traverse_object_hash_list(
    tws_hash_t tws,
    unsigned int index,
    vm_object_t object,
    vm_object_offset_t offset,
    unsigned int pagemask,
    tws_hash_ptr_t *target_ele,
    tws_hash_ptr_t **previous_ptr,
    tws_hash_ptr_t **free_list);

tws_hash_ptr_t
new_addr_hash(
    tws_hash_t tws,
    unsigned int set,
    unsigned int index);

tws_hash_ptr_t
new_obj_hash(
    tws_hash_t tws,
    unsigned int set,
    unsigned int index);

int tws_test_for_community(
    tws_hash_t tws,
    vm_object_t object,
    vm_object_offset_t offset,
    unsigned int threshold,
    unsigned int *pagemask);

kern_return_t
tws_internal_lookup(
    tws_hash_t tws,
    vm_object_offset_t offset,
    vm_object_t object,
    tws_hash_line_t *line);

/* Note: all of the routines below depend on the associated map lock for */
/* synchronization; the map lock will be held when the routines are called */
/* and still held when they return */
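
/*
 * Illustrative call flow (a sketch, not a contract -- the actual call
 * sites live in the fault and task-management paths outside this file):
 *
 *	task_working_set_create(task, 0, 0, TWS_HASH_STYLE_DEFAULT);
 *	...
 *	// on each fault, remember the page in the working set:
 *	tws_insert(tws, offset, object, page_addr, map);
 *	// on a pagein, widen the request window around the faulting page:
 *	tws_build_cluster(tws, object, &start, &end, max_length);
 */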

tws_hash_t
tws_hash_create(
    unsigned int lines,
    unsigned int rows,
    unsigned int style)
{
    tws_hash_t tws;

    /* only the basic and signal styles are supported */
    if ((style != TWS_HASH_STYLE_BASIC) &&
        (style != TWS_HASH_STYLE_SIGNAL)) {
        return (tws_hash_t)NULL;
    }

    tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
    if (tws == (tws_hash_t)NULL)
        return tws;

    if ((tws->table[0] = (tws_hash_ptr_t *)
        kalloc(sizeof(tws_hash_ptr_t) * lines * rows)) == NULL) {
        kfree(tws, sizeof(struct tws_hash));
        return (tws_hash_t)NULL;
    }
    if ((tws->table_ele[0] = (tws_hash_ptr_t)
        kalloc(sizeof(struct tws_hash_ptr) * lines * rows)) == NULL) {
        kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
        kfree(tws, sizeof(struct tws_hash));
        return (tws_hash_t)NULL;
    }
    if ((tws->alt_ele[0] = (tws_hash_ptr_t)
        kalloc(sizeof(struct tws_hash_ptr) * lines * rows)) == NULL) {
        kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
        kfree(tws->table_ele[0], sizeof(struct tws_hash_ptr) * lines * rows);
        kfree(tws, sizeof(struct tws_hash));
        return (tws_hash_t)NULL;
    }
    if ((tws->cache[0] = (struct tws_hash_line *)
        kalloc(sizeof(struct tws_hash_line) * lines)) == NULL) {
        kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
        kfree(tws->table_ele[0], sizeof(struct tws_hash_ptr) * lines * rows);
        kfree(tws->alt_ele[0], sizeof(struct tws_hash_ptr) * lines * rows);
        kfree(tws, sizeof(struct tws_hash));
        return (tws_hash_t)NULL;
    }
    tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
    tws->obj_free_count[0] = 0;
    tws->addr_free_count[0] = 0;

    /* most defaults are such that a bzero will initialize */
    bzero((char *)tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
    bzero((char *)tws->table_ele[0], sizeof(struct tws_hash_ptr) * lines * rows);
    bzero((char *)tws->alt_ele[0], sizeof(struct tws_hash_ptr) * lines * rows);
    bzero((char *)tws->cache[0], sizeof(struct tws_hash_line) * lines);

    mutex_init(&tws->lock, 0);
    tws->style = style;
    tws->current_line = 0;
    tws->pageout_count = 0;
    tws->line_count = 0;
    tws->startup_cache = NULL;
    tws->startup_name = NULL;
    tws->number_of_lines = lines;
    tws->number_of_elements = rows;
    tws->expansion_count = 1;
    tws->lookup_count = 0;
    tws->insert_count = 0;
    tws->time_of_creation = sched_tick;

    return tws;
}
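
/*
 * Sizing note (illustrative, not original commentary): the first expansion
 * set allocated above holds lines * rows hash buckets, one object-hash
 * element and one address-hash element per bucket slot, plus one cache
 * line per line.  A default-sized table, as chosen by
 * task_working_set_create() below, therefore occupies roughly:
 *
 *	TWS_HASH_LINE_COUNT * TWS_ARRAY_SIZE *
 *	    (sizeof(tws_hash_ptr_t) + 2 * sizeof(struct tws_hash_ptr))
 *	  + TWS_HASH_LINE_COUNT * sizeof(struct tws_hash_line)
 *	  + sizeof(struct tws_hash)
 */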

extern vm_page_t
vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);


void
tws_hash_line_clear(
    tws_hash_t tws,
    tws_hash_line_t hash_line,
    __unused vm_object_t object,
    boolean_t live)
{
    struct tws_hash_ele *hash_ele;
    struct tws_hash_ptr **trailer;
    struct tws_hash_ptr **free_list;
    tws_hash_ele_t addr_ele;
    int index;
    unsigned int i, j;
    int dump_pmap;
    int hash_loop;

    if (tws->line_count < tws->number_of_lines) {
        tws->line_count++;
        dump_pmap = 1;
    } else {
        if (tws->pageout_count != vm_pageout_scan_event_counter) {
            tws->pageout_count = vm_pageout_scan_event_counter;
            tws->line_count = 0;
            dump_pmap = 1;
        } else {
            dump_pmap = 0;
        }
    }
    hash_line->ele_count = 0;

    for (i = 0; i < tws->number_of_elements; i++) {
        hash_loop = 0;
        hash_ele = &(hash_line->list[i]);
        if (hash_ele->object != 0) {

            vm_object_offset_t local_off = 0;
            tws_hash_ptr_t cache_ele;

            index = alt_tws_hash(
                hash_ele->page_addr & TWS_ADDR_OFF_MASK,
                tws->number_of_elements,
                tws->number_of_lines);

            tws_traverse_address_hash_list(tws, index,
                hash_ele->page_addr, hash_ele->object,
                hash_ele->offset, hash_ele->map,
                &cache_ele, &trailer, &free_list, 0);
            if (cache_ele != NULL) {
                addr_ele = (tws_hash_ele_t)((unsigned int)
                    (cache_ele->element) & ~TWS_ADDR_HASH);
                if (addr_ele != hash_ele)
                    panic("tws_hash_line_clear:"
                        " out of sync\n");
                cache_ele->element = 0;
                *trailer = cache_ele->next;
                cache_ele->next = *free_list;
                *free_list = cache_ele;
            }

            index = alt_tws_hash(
                (hash_ele->page_addr - 0x1f000)
                    & TWS_ADDR_OFF_MASK,
                tws->number_of_elements,
                tws->number_of_lines);

            tws_traverse_address_hash_list(tws, index,
                hash_ele->page_addr, hash_ele->object,
                hash_ele->offset, hash_ele->map,
                &cache_ele, &trailer, &free_list, 0);

            if (cache_ele != NULL) {
                addr_ele = (tws_hash_ele_t)((unsigned int)
                    (cache_ele->element) & ~TWS_ADDR_HASH);
                if (addr_ele != hash_ele)
                    panic("tws_hash_line_clear: "
                        "out of sync\n");
                cache_ele->element = 0;
                *trailer = cache_ele->next;
                cache_ele->next = *free_list;
                *free_list = cache_ele;
            }

            if ((hash_ele->map != NULL) && (live)) {
                vm_page_t p;

#if 0
                if (object != hash_ele->object) {
                    if (object)
                        vm_object_unlock(object);
                    vm_object_lock(hash_ele->object);
                }
#endif
                if (dump_pmap == 1) {
                    for (j = 0x1; j != 0; j = j << 1) {
                        if (j & hash_ele->page_cache) {
                            p = vm_page_lookup_nohint(hash_ele->object,
                                hash_ele->offset + local_off);
                            if ((p != NULL) && (p->wire_count == 0)) {
                                pmap_remove_some_phys((pmap_t)vm_map_pmap(current_map()),
                                    p->phys_page);
                            }
                        }
                        local_off += PAGE_SIZE_64;
                    }
                }
#if 0
                if (object != hash_ele->object) {
                    vm_object_unlock(hash_ele->object);
                    if (object)
                        vm_object_lock(object);
                }
#endif
            }

            if (tws->style == TWS_HASH_STYLE_SIGNAL) {
                vm_object_deallocate(hash_ele->object);
                vm_map_deallocate(hash_ele->map);
            }

            index = do_tws_hash(hash_ele->object, hash_ele->offset,
                tws->number_of_elements,
                tws->number_of_lines);

            tws_traverse_object_hash_list(tws,
                index, hash_ele->object, hash_ele->offset,
                0xFFFFFFFF, &cache_ele, &trailer, &free_list);
            if ((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
                cache_ele->element = 0;
                *trailer = cache_ele->next;
                cache_ele->next = *free_list;
                *free_list = cache_ele;
            }
            hash_ele->object = 0;
        }
    }
}

kern_return_t
tws_internal_lookup(
    tws_hash_t tws,
    vm_object_offset_t offset,
    vm_object_t object,
    tws_hash_line_t *line)
{
    int index;
    int loop;
    int set;
    int ele_line;
    vm_offset_t pagenum;
    tws_hash_ptr_t cache_ele;
    tws_hash_ptr_t *trailer;
    tws_hash_ptr_t *free_list;

    /* don't cache private objects */
    if (object->private)
        return KERN_SUCCESS;

    index = do_tws_hash(object, offset,
        tws->number_of_elements, tws->number_of_lines);
    loop = 0;

    tws->lookup_count++;
    if (tws->lookup_count == 0)
        tws->insert_count = 0;
    if (tws->startup_name != NULL) {
        int age_of_cache;
        age_of_cache = ((sched_tick
            - tws->time_of_creation) >> SCHED_TICK_SHIFT);
        if (age_of_cache > 45) {
            return KERN_OPERATION_TIMED_OUT;
        }
    }

    if (tws->lookup_count > (4 * tws->expansion_count
        * tws->number_of_elements * tws->number_of_lines) &&
        (tws->lookup_count > (2 * tws->insert_count))) {
        if (tws->startup_cache) {
            int age_of_cache;
            age_of_cache = ((sched_tick
                - tws->time_of_creation) >> SCHED_TICK_SHIFT);
            if (age_of_cache > 45) {
                return KERN_OPERATION_TIMED_OUT;
            }
        }
    }

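    /*
     * Note: each hash element covers a 32-page window of the object and
     * tracks residency in a 32-bit page_cache bitmap.  The shift below
     * assumes 4K pages: (offset & TWS_INDEX_MASK) >> 12 yields the page's
     * index 0..31 within its window, and 1 << index is the single-bit
     * mask handed to the traversal routine.
     */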
    pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
    pagenum = pagenum >> 12;
    pagenum = 1 << pagenum;	/* get the appropriate page in 32 page block */
    tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
        &cache_ele, &trailer, &free_list);
    if (cache_ele != NULL) {
        /* line is encoded as current_line + set * number_of_lines */
        set = cache_ele->element->line / tws->number_of_lines;
        ele_line = cache_ele->element->line - (set * tws->number_of_lines);
        *line = &tws->cache[set][ele_line];
        return KERN_SUCCESS;
    }

    return KERN_FAILURE;
}

kern_return_t
tws_lookup(
    tws_hash_t tws,
    vm_object_offset_t offset,
    vm_object_t object,
    tws_hash_line_t *line)
{
    kern_return_t kr;

    if (!tws_lock_try(tws)) {
        return KERN_FAILURE;
    }
    kr = tws_internal_lookup(tws, offset, object, line);
    tws_unlock(tws);
    return kr;
}

kern_return_t
tws_expand_working_set(
    tws_hash_t old_tws,
    unsigned int line_count,
    boolean_t dump_data)
{
    tws_hash_t new_tws;
    unsigned int i, j, k;
    struct tws_hash temp;

    /* Note we do an elaborate dance to preserve the header that */
    /* task is pointing to. In this way we can avoid taking a task */
    /* lock every time we want to access the tws */

    if (old_tws->number_of_lines >= line_count) {
        return KERN_FAILURE;
    }
    if ((new_tws = tws_hash_create(line_count,
        old_tws->number_of_elements, old_tws->style)) == 0) {
        return KERN_NO_SPACE;
    }
    tws_lock(old_tws);

    if (!dump_data) {
        for (i = 0; i < old_tws->number_of_lines; i++) {
            for (j = 0; j < old_tws->number_of_elements; j++) {
                for (k = 0; k < old_tws->expansion_count; k++) {
                    tws_hash_ele_t entry;
                    vm_object_offset_t paddr;
                    unsigned int page_index;
                    entry = &old_tws->cache[k][i].list[j];
                    if (entry->object != 0) {
                        paddr = 0;
                        for (page_index = 1; page_index != 0;
                            page_index = page_index << 1) {
                            if (entry->page_cache & page_index) {
                                tws_insert(new_tws,
                                    entry->offset + paddr,
                                    entry->object,
                                    entry->page_addr + paddr,
                                    entry->map);
                            }
                            paddr += PAGE_SIZE;
                        }
                    }
                }
            }
        }
    }

    temp.style = new_tws->style;
    temp.current_line = new_tws->current_line;
    temp.pageout_count = new_tws->pageout_count;
    temp.line_count = new_tws->line_count;
    temp.number_of_lines = new_tws->number_of_lines;
    temp.number_of_elements = new_tws->number_of_elements;
    temp.expansion_count = new_tws->expansion_count;
    temp.lookup_count = new_tws->lookup_count;
    temp.insert_count = new_tws->insert_count;
    for (i = 0; i < new_tws->expansion_count; i++) {
        temp.obj_free_count[i] = new_tws->obj_free_count[i];
        temp.addr_free_count[i] = new_tws->addr_free_count[i];
        temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
        temp.table[i] = new_tws->table[i];
        temp.table_ele[i] = new_tws->table_ele[i];
        temp.alt_ele[i] = new_tws->alt_ele[i];
        temp.cache[i] = new_tws->cache[i];
    }

    new_tws->style = old_tws->style;
    new_tws->current_line = old_tws->current_line;
    new_tws->pageout_count = old_tws->pageout_count;
    new_tws->line_count = old_tws->line_count;
    new_tws->number_of_lines = old_tws->number_of_lines;
    new_tws->number_of_elements = old_tws->number_of_elements;
    new_tws->expansion_count = old_tws->expansion_count;
    new_tws->lookup_count = old_tws->lookup_count;
    new_tws->insert_count = old_tws->insert_count;
    for (i = 0; i < old_tws->expansion_count; i++) {
        new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
        new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
        new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
        new_tws->table[i] = old_tws->table[i];
        new_tws->table_ele[i] = old_tws->table_ele[i];
        new_tws->alt_ele[i] = old_tws->alt_ele[i];
        new_tws->cache[i] = old_tws->cache[i];
    }

    old_tws->style = temp.style;
    old_tws->current_line = temp.current_line;
    old_tws->pageout_count = temp.pageout_count;
    old_tws->line_count = temp.line_count;
    old_tws->number_of_lines = temp.number_of_lines;
    old_tws->number_of_elements = temp.number_of_elements;
    old_tws->expansion_count = temp.expansion_count;
    old_tws->lookup_count = temp.lookup_count;
    old_tws->insert_count = temp.insert_count;
    for (i = 0; i < temp.expansion_count; i++) {
        old_tws->obj_free_count[i] = temp.obj_free_count[i];
        old_tws->addr_free_count[i] = temp.addr_free_count[i];
        old_tws->free_hash_ele[i] = NULL;
        old_tws->table[i] = temp.table[i];
        old_tws->table_ele[i] = temp.table_ele[i];
        old_tws->alt_ele[i] = temp.alt_ele[i];
        old_tws->cache[i] = temp.cache[i];
    }

    tws_hash_destroy(new_tws);
    tws_unlock(old_tws);
    return KERN_SUCCESS;
}

tws_hash_t test_tws = 0;

kern_return_t
tws_insert(
    tws_hash_t tws,
    vm_object_offset_t offset,
    vm_object_t object,
    vm_offset_t page_addr,
    vm_map_t map)
{
    unsigned int index;
    unsigned int alt_index;
    unsigned int index_enum[2];
    unsigned int ele_index;
    tws_hash_ptr_t cache_ele;
    tws_hash_ptr_t obj_ele = NULL;
    tws_hash_ptr_t addr_ele = NULL;
    tws_hash_ptr_t *trailer;
    tws_hash_ptr_t *free_list;
    tws_hash_ele_t target_element = NULL;
    unsigned int i;
    unsigned int current_line;
    unsigned int set;
    int ctr;
    unsigned int startup_cache_line;
    int age_of_cache = 0;

    if (!tws_lock_try(tws)) {
        return KERN_FAILURE;
    }
    tws->insert_count++;
    current_line = 0xFFFFFFFF;
    set = 0;

    startup_cache_line = 0;

    if (tws->startup_cache) {
        vm_offset_t startup_page_addr;

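        /*
         * Back page_addr up to the base of its 32-page window:
         * (offset - (offset & TWS_HASH_OFF_MASK)) is this page's
         * distance from the window-aligned offset, so subtracting it
         * yields the address the startup cache was keyed by.
         */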
        startup_page_addr = page_addr - (offset - (offset & TWS_HASH_OFF_MASK));

        age_of_cache = ((sched_tick - tws->time_of_creation) >> SCHED_TICK_SHIFT);

        startup_cache_line = tws_startup_list_lookup(tws->startup_cache, startup_page_addr);
    }
    /* This next bit of code, the hash lookup and the alternate hash */
    /* lookup, is made necessary because of IPC COW */

    /* Note: the use of page_addr modified by delta from offset */
    /* frame base means we may miss some previous entries. However */
    /* we will not miss the present entry. This is most important */
    /* in avoiding duplication of entries against long lived non-cow */
    /* objects */
    index_enum[0] = alt_tws_hash(
        page_addr & TWS_ADDR_OFF_MASK,
        tws->number_of_elements, tws->number_of_lines);

    index_enum[1] = alt_tws_hash(
        (page_addr - 0x1f000) & TWS_ADDR_OFF_MASK,
        tws->number_of_elements, tws->number_of_lines);

    for (ctr = 0; ctr < 2;) {
        tws_hash_ele_t resident;
        tws_traverse_address_hash_list(tws,
            index_enum[ctr], page_addr, NULL,
            0, NULL,
            &cache_ele, &trailer, &free_list, 1);
        if (cache_ele != NULL) {
            /* found one */
            resident = (tws_hash_ele_t)((unsigned int)
                cache_ele->element & ~TWS_ADDR_HASH);
            if ((object == resident->object) &&
                resident->offset ==
                    (offset & TWS_HASH_OFF_MASK)) {
                /* This is our object/offset */
                resident->page_cache |= startup_cache_line;
                resident->page_cache |=
                    (1 << (((vm_offset_t)
                    (offset & TWS_INDEX_MASK)) >> 12));
                tws_unlock(tws);
                if (age_of_cache > 45)
                    return KERN_OPERATION_TIMED_OUT;
                return KERN_SUCCESS;
            }
            if ((object->shadow ==
                resident->object) &&
                ((resident->offset
                    + object->shadow_offset)
                    == (offset & TWS_HASH_OFF_MASK))) {
                /* if we just shadowed, inherit */
                /* access pattern from parent */
                startup_cache_line |= resident->page_cache;
                /* throw out old entry */
                resident->page_cache = 0;
                break;
            } else {
                resident->page_cache &=
                    ~(1 << (((vm_offset_t)(page_addr
                        - resident->page_addr))
                        >> 12));
            }
            /* Throw out old entry if there are no */
            /* more pages in cache */
            if (resident->page_cache == 0) {
                /* delete addr hash entry */
                cache_ele->element = 0;
                *trailer = cache_ele->next;
                cache_ele->next = *free_list;
                *free_list = cache_ele;
                /* go after object hash */
                index = do_tws_hash(
                    resident->object,
                    resident->offset,
                    tws->number_of_elements,
                    tws->number_of_lines);
                tws_traverse_object_hash_list(tws,
                    index, resident->object,
                    resident->offset,
                    0xFFFFFFFF, &cache_ele,
                    &trailer, &free_list);
                if (cache_ele != NULL) {
                    if (tws->style ==
                        TWS_HASH_STYLE_SIGNAL) {
                        vm_object_deallocate(
                            cache_ele->element->object);
                        vm_map_deallocate(
                            cache_ele->element->map);
                    }
                    current_line = cache_ele->element->line;
                    set = current_line / tws->number_of_lines;
                    current_line -= set * tws->number_of_lines;
                    if (cache_ele->element->object != 0) {
                        cache_ele->element->object = 0;
                        tws->cache[set][current_line].ele_count--;
                    }
                    cache_ele->element = 0;
                    *trailer = cache_ele->next;
                    cache_ele->next = *free_list;
                    *free_list = cache_ele;
                }
            }
            continue;
        }
        ctr += 1;
    }

    /*
     * We may or may not have a current line setting coming out of
     * the code above. If we have a current line it means we can
     * choose to back-fill the spot vacated by a previous entry.
     * We have yet to do a definitive check using the original obj/off;
     * we will do that now and override the current line if we
     * find an element.
     */

    index = do_tws_hash(object, offset,
        tws->number_of_elements, tws->number_of_lines);

    alt_index = index_enum[0];

    tws_traverse_object_hash_list(tws, index, object, offset,
        0xFFFFFFFF, &cache_ele, &trailer, &free_list);
    if (cache_ele != NULL) {
        obj_ele = cache_ele;
        current_line = cache_ele->element->line;
        set = current_line / tws->number_of_lines;
        current_line -= set * tws->number_of_lines;
        target_element = cache_ele->element;

        /* Now check to see if we have a hash addr for it */
        tws_traverse_address_hash_list(tws,
            alt_index, obj_ele->element->page_addr,
            obj_ele->element->object,
            obj_ele->element->offset,
            obj_ele->element->map,
            &cache_ele, &trailer, &free_list, 0);
        if (cache_ele != NULL) {
            addr_ele = cache_ele;
        } else {
            addr_ele = new_addr_hash(tws, set, alt_index);
            /* if cannot allocate just do without */
            /* we'll get it next time around */
        }
    }

    if (tws->style == TWS_HASH_STYLE_SIGNAL) {
        vm_object_reference(object);
        vm_map_reference(map);
    }

    if (current_line == 0xFFFFFFFF) {
        current_line = tws->current_line;
        set = current_line / tws->number_of_lines;
        current_line = current_line - (set * tws->number_of_lines);

#ifdef notdef
        if (cache_full) {
            tws->current_line = tws->number_of_lines - 1;
        }
#endif
        if (tws->cache[set][current_line].ele_count
            >= tws->number_of_elements) {
            current_line++;
            tws->current_line++;
            if (current_line == tws->number_of_lines) {
                set++;
                current_line = 0;
                if (set == tws->expansion_count) {
                    if ((tws->lookup_count <
                        (2 * tws->insert_count)) &&
                        (set < TWS_HASH_EXPANSION_MAX)) {
                        tws->lookup_count = 0;
                        tws->insert_count = 0;
                        if (tws->number_of_lines
                            < TWS_HASH_LINE_COUNT) {
                            tws->current_line--;
                            tws_unlock(tws);
                            return KERN_NO_SPACE;
                        }
                        /* object persistence is guaranteed by */
                        /* an elevated paging or object */
                        /* reference count in the caller. */
                        vm_object_unlock(object);
                        if ((tws->table[set] = (tws_hash_ptr_t *)
                            kalloc(sizeof(tws_hash_ptr_t)
                                * tws->number_of_lines
                                * tws->number_of_elements))
                            == NULL) {
                            set = 0;
                        } else if ((tws->table_ele[set] =
                            (tws_hash_ptr_t)
                            kalloc(sizeof(struct tws_hash_ptr)
                                * tws->number_of_lines
                                * tws->number_of_elements))
                            == NULL) {
                            kfree(tws->table[set],
                                sizeof(tws_hash_ptr_t)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            set = 0;
                        } else if ((tws->alt_ele[set] =
                            (tws_hash_ptr_t)
                            kalloc(sizeof(struct tws_hash_ptr)
                                * tws->number_of_lines
                                * tws->number_of_elements))
                            == NULL) {
                            kfree(tws->table_ele[set],
                                sizeof(struct tws_hash_ptr)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            kfree(tws->table[set],
                                sizeof(tws_hash_ptr_t)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            tws->table[set] = NULL;
                            set = 0;

                        } else if ((tws->cache[set] =
                            (struct tws_hash_line *)
                            kalloc(sizeof
                                (struct tws_hash_line)
                                * tws->number_of_lines))
                            == NULL) {
                            kfree(tws->alt_ele[set],
                                sizeof(struct tws_hash_ptr)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            kfree(tws->table_ele[set],
                                sizeof(struct tws_hash_ptr)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            kfree(tws->table[set],
                                sizeof(tws_hash_ptr_t)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            tws->table[set] = NULL;
                            set = 0;

                        } else {
                            tws->free_hash_ele[set] =
                                (tws_hash_ptr_t)0;
                            tws->obj_free_count[set] = 0;
                            tws->addr_free_count[set] = 0;
                            bzero((char *)tws->table[set],
                                sizeof(tws_hash_ptr_t)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            bzero((char *)tws->table_ele[set],
                                sizeof(struct tws_hash_ptr)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            bzero((char *)tws->alt_ele[set],
                                sizeof(struct tws_hash_ptr)
                                * tws->number_of_lines
                                * tws->number_of_elements);
                            bzero((char *)tws->cache[set],
                                sizeof(struct tws_hash_line)
                                * tws->number_of_lines);
                        }
                        vm_object_lock(object);
                    } else {
                        if (tws->startup_name != NULL) {
                            tws->current_line--;

                            age_of_cache = ((sched_tick - tws->time_of_creation) >> SCHED_TICK_SHIFT);

                            tws_unlock(tws);

                            if (age_of_cache > 45)
                                return KERN_OPERATION_TIMED_OUT;

                            return KERN_FAILURE;
                        }
                        tws->lookup_count = 0;
                        tws->insert_count = 0;
                        set = 0;
                    }
                }
                tws->current_line = set * tws->number_of_lines;
            }
            if (set < tws->expansion_count) {
                tws_hash_line_clear(tws,
                    &(tws->cache[set][current_line]), object, TRUE);
                if (tws->cache[set][current_line].ele_count
                    >= tws->number_of_elements) {
                    if (tws->style == TWS_HASH_STYLE_SIGNAL) {
                        vm_object_deallocate(object);
                        vm_map_deallocate(map);
                    }
                    tws_unlock(tws);
                    return KERN_FAILURE;
                }
            } else {
                tws->expansion_count++;
            }
        }
    }

    /* set object hash element */
    if (obj_ele == NULL) {
        obj_ele = new_obj_hash(tws, set, index);
        if (obj_ele == NULL) {
            tws->cache[set][current_line].ele_count
                = tws->number_of_elements;
            tws_unlock(tws);
            return KERN_FAILURE;
        }
    }

    /* set address hash element */
    if (addr_ele == NULL) {
        addr_ele = new_addr_hash(tws, set, alt_index);
    }

    if (target_element == NULL) {
        ele_index = 0;
        for (i = 0; i < tws->number_of_elements; i++) {
            if (tws->cache[set][current_line].
                list[ele_index].object == 0) {
                break;
            }
            ele_index++;
            if (ele_index >= tws->number_of_elements)
                ele_index = 0;
        }

        if (i == tws->number_of_elements)
            panic("tws_insert: no free elements");

        target_element =
            &(tws->cache[set][current_line].list[ele_index]);

        tws->cache[set][current_line].ele_count++;
    }

    obj_ele->element = target_element;
    if (addr_ele) {
        addr_ele->element = (tws_hash_ele_t)
            (((unsigned int)target_element) | TWS_ADDR_HASH);
    }
    target_element->object = object;
    target_element->offset = offset & TWS_HASH_OFF_MASK;
    target_element->page_addr =
        page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
    target_element->map = map;
    target_element->line =
        current_line + (set * tws->number_of_lines);

    target_element->page_cache |= startup_cache_line;
    target_element->page_cache |= 1 << (((vm_offset_t)(offset & TWS_INDEX_MASK)) >> 12);

    tws_unlock(tws);

    if (age_of_cache > 45)
        return KERN_OPERATION_TIMED_OUT;

    return KERN_SUCCESS;
}

/*
 * tws_build_cluster
 * lengthen the cluster of pages by the number of pages encountered in the
 * working set up to the limit requested by the caller. The object needs
 * to be locked on entry. The map does not, because the tws_lookup function
 * is used only to find if there is an entry in the cache. No transient
 * data from the cache is de-referenced.
 *
 */
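
/*
 * A typical use, mirroring tws_line_signal() below (illustrative sketch
 * only; the real pagein path lives in the fault code outside this file):
 *
 *	before = offset;
 *	after  = offset + PAGE_SIZE_64;
 *	tws_build_cluster(tws, object, &before, &after, 0x16000);
 *	memory_object_data_request(object->pager,
 *		before + object->paging_offset,
 *		(vm_size_t)(after - before), VM_PROT_READ);
 */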
#if MACH_PAGEMAP
/*
 * MACH page map - an optional optimization where a bit map is maintained
 * by the VM subsystem for internal objects to indicate which pages of
 * the object currently reside on backing store. This existence map
 * duplicates information maintained by the vnode pager. It is
 * created at the time of the first pageout against the object, i.e.
 * at the same time the pager for the object is created. The optimization
 * is designed to eliminate pager interaction overhead, if it is
 * 'known' that the page does not exist on backing store.
 *
 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
 * either marked as paged out in the existence map for the object or no
 * existence map exists for the object. LOOK_FOR() is one of the
 * criteria in the decision to invoke the pager. It is also used as one
 * of the criteria to terminate the scan for adjacent pages in a clustered
 * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for
 * permanent objects. Note also that if the pager for an internal object
 * has not been created, the pager is not invoked regardless of the value
 * of LOOK_FOR() and that clustered pagein scans are only done on an object
 * for which a pager has been created.
 *
 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
 * is marked as paged out in the existence map for the object.
 * PAGED_OUT() is used to determine if a page has already been pushed
 * into a copy object in order to avoid a redundant page out operation.
 */
#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			!= VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			== VM_EXTERNAL_STATE_EXISTS)
#else /* MACH_PAGEMAP */
/*
 * If the MACH page map optimization is not enabled,
 * LOOK_FOR() always evaluates to TRUE. The pager will always be
 * invoked to resolve missing pages in an object, assuming the pager
 * has been created for the object. In a clustered page operation, the
 * absence of a page on backing store cannot be used to terminate
 * a scan for adjacent pages since that information is available only in
 * the pager. Hence pages that may not be paged out are potentially
 * included in a clustered request. The vnode pager is coded to deal
 * with any combination of absent/present pages in a clustered
 * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager
 * will always be invoked to push a dirty page into a copy object assuming
 * a pager has been created. If the page has already been pushed, the
 * pager will ignore the new request.
 */
#define LOOK_FOR(o, f) TRUE
#define PAGED_OUT(o, f) FALSE
#endif /* MACH_PAGEMAP */


void
tws_build_cluster(
    tws_hash_t tws,
    vm_object_t object,
    vm_object_offset_t *start,
    vm_object_offset_t *end,
    vm_size_t max_length)
{
    tws_hash_line_t line;
    vm_object_offset_t before = *start;
    vm_object_offset_t after = *end;
    vm_object_offset_t original_start = *start;
    vm_object_offset_t original_end = *end;
    vm_size_t length = (vm_size_t)(*end - *start);
    vm_page_t m;
    kern_return_t kret;
    vm_object_offset_t object_size;
    int age_of_cache;
    vm_size_t pre_heat_size;
    unsigned int ele_cache;
    unsigned int end_cache = 0;
    unsigned int start_cache = 0;
    unsigned int memory_scarce = 0;

    if ((object->private) || !(object->pager))
        return;

    if (!object->internal) {
        /* XXX FBDP !internal doesn't mean vnode pager */
        kret = vnode_pager_get_object_size(
            object->pager,
            &object_size);
        if (kret != KERN_SUCCESS) {
            object_size = object->size;
        }
    } else {
        object_size = object->size;
    }

    if ((!tws) || (!tws_lock_try(tws))) {
        return;
    }
    age_of_cache = ((sched_tick
        - tws->time_of_creation) >> SCHED_TICK_SHIFT);

    if (vm_page_free_count < (2 * vm_page_free_target))
        memory_scarce = 1;

    /* When pre-heat files are not available, resort to speculation */
    /* based on size of file */

    if (tws->startup_cache || object->internal || age_of_cache > 45) {
        pre_heat_size = 0;
    } else {
        if (object_size > (vm_object_offset_t)(1024 * 1024))
            pre_heat_size = 8 * PAGE_SIZE;
        else if (object_size > (vm_object_offset_t)(128 * 1024))
            pre_heat_size = 4 * PAGE_SIZE;
        else
            pre_heat_size = 2 * PAGE_SIZE;
    }

    if (tws->startup_cache) {
        int target_page_count;

        if (memory_scarce)
            target_page_count = 16;
        else
            target_page_count = 4;

        if (tws_test_for_community(tws, object, *start, target_page_count, &ele_cache)) {
            start_cache = ele_cache;
            *start = *start & TWS_HASH_OFF_MASK;
            *end = *start + (32 * PAGE_SIZE_64);

            if (*end > object_size) {
                *end = round_page_64(object_size);
                max_length = 0;
            } else
                end_cache = ele_cache;

            while (max_length > ((*end - *start) + (32 * PAGE_SIZE))) {
                int expanded;

                expanded = 0;
                after = *end;

                if ((after + (32 * PAGE_SIZE_64)) <= object_size &&
                    (tws_test_for_community(tws, object, after, 8, &ele_cache))) {

                    *end = after + (32 * PAGE_SIZE_64);
                    end_cache = ele_cache;
                    expanded = 1;
                }
                if (max_length < ((*end - *start) + (32 * PAGE_SIZE_64))) {
                    break;
                }
                if (*start) {
                    before = (*start - PAGE_SIZE_64) & TWS_HASH_OFF_MASK;

                    if (tws_test_for_community(tws, object, before, 8, &ele_cache)) {

                        *start = before;
                        start_cache = ele_cache;
                        expanded = 1;
                    }
                }
                if (expanded == 0)
                    break;
            }
            if (end_cache)
                *end -= PAGE_SIZE_64;

            if (start_cache != 0) {
                unsigned int mask;

                for (mask = 1; mask != 0; mask = mask << 1) {
                    if (*start == original_start)
                        break;
                    if (!(start_cache & mask))
                        *start += PAGE_SIZE_64;
                    else
                        break;
                }
            }
            if (end_cache != 0) {
                unsigned int mask;

                for (mask = 0x80000000;
                    mask != 0; mask = mask >> 1) {
                    if (*end == original_end)
                        break;
                    if (!(end_cache & mask))
                        *end -= PAGE_SIZE_64;
                    else
                        break;
                }
            }
            tws_unlock(tws);

            if (*end < original_end)
                *end = original_end;
            return;
        }
    }

    while ((length < max_length) &&
        (object_size >= (after + PAGE_SIZE_64))) {
        if (length >= pre_heat_size) {
            if (tws_internal_lookup(tws, after, object,
                &line) != KERN_SUCCESS) {
                vm_object_offset_t extend;

                extend = after + PAGE_SIZE_64;
                if (tws_internal_lookup(tws, extend, object,
                    &line) != KERN_SUCCESS) {
                    break;
                }
            }
        }

        if ((object->existence_map != NULL)
            && (!LOOK_FOR(object, after))) {
            break;
        }

        if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
            /*
             * don't bridge resident pages
             */
            break;
        }

        if (object->internal) {
            /*
             * need to acquire a real page in
             * advance because this acts as
             * a throttling mechanism for
             * data_requests to the default
             * pager. If this fails, give up
             * trying to find any more pages
             * in the cluster and send off the
             * request for what we already have.
             */
            if ((m = vm_page_grab()) == VM_PAGE_NULL) {
                break;
            }
        } else if ((m = vm_page_grab_fictitious())
            == VM_PAGE_NULL) {
            break;
        }
        m->absent = TRUE;
        m->unusual = TRUE;
        m->clustered = TRUE;
        m->list_req_pending = TRUE;

        vm_page_insert(m, object, after);
        object->absent_count++;
        after += PAGE_SIZE_64;
        length += PAGE_SIZE;
    }
    *end = after;
    while (length < max_length) {
        if (before == 0)
            break;
        before -= PAGE_SIZE_64;

        if (length >= pre_heat_size) {
            if (tws_internal_lookup(tws, before, object,
                &line) != KERN_SUCCESS) {
                vm_object_offset_t extend;

                extend = before;
                if (extend == 0)
                    break;
                extend -= PAGE_SIZE_64;
                if (tws_internal_lookup(tws, extend, object,
                    &line) != KERN_SUCCESS) {
                    break;
                }
            }
        }
        if ((object->existence_map != NULL)
            && (!LOOK_FOR(object, before))) {
            break;
        }

        if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
            /*
             * don't bridge resident pages
             */
            break;
        }

        if (object->internal) {
            /*
             * need to acquire a real page in
             * advance because this acts as
             * a throttling mechanism for
             * data_requests to the default
             * pager. If this fails, give up
             * trying to find any more pages
             * in the cluster and send off the
             * request for what we already have.
             */
            if ((m = vm_page_grab()) == VM_PAGE_NULL) {
                break;
            }
        } else if ((m = vm_page_grab_fictitious())
            == VM_PAGE_NULL) {
            break;
        }
        m->absent = TRUE;
        m->unusual = TRUE;
        m->clustered = TRUE;
        m->list_req_pending = TRUE;

        vm_page_insert(m, object, before);
        object->absent_count++;
        *start -= PAGE_SIZE_64;
        length += PAGE_SIZE;
    }
    tws_unlock(tws);
}

void
tws_line_signal(
    tws_hash_t tws,
    vm_map_t map,
    tws_hash_line_t hash_line,
    vm_offset_t target_page)
{
    unsigned int i, j;
    vm_object_t object;
    vm_object_offset_t offset;
    vm_object_offset_t before;
    vm_object_offset_t after;
    struct tws_hash_ele *element;
    vm_page_t m, p;
    kern_return_t rc;

    if (tws->style != TWS_HASH_STYLE_SIGNAL)
        return;

    vm_map_lock(map);
    for (i = 0; i < tws->number_of_elements; i++) {

        vm_object_offset_t local_off = 0;

        if (hash_line->list[i].object == 0)
            continue;

        element = &hash_line->list[i];

        if (element->page_addr == target_page)
            continue;

        j = 1;
        while (j != 0) {
            if (j & element->page_cache)
                break;
            j <<= 1;
            local_off += PAGE_SIZE_64;
        }
        object = element->object;
        offset = element->offset + local_off;

        /* first try a fast test to speed up no-op signal */
        if (((p = vm_page_lookup(object, offset)) != NULL)
            || (object->pager == NULL)
            || (object->shadow_severed)) {
            continue;
        }

        if ((!object->alive) ||
            (!object->pager_created) || (!object->pager_ready))
            continue;

        if (object->internal) {
            if (object->existence_map == NULL) {
                if (object->shadow)
                    continue;
            } else {
                if (!LOOK_FOR(object, offset))
                    continue;
            }
        }

        vm_object_reference(object);
        vm_map_unlock(map);

        if (object->internal) {
            m = vm_page_grab();
        } else {
            m = vm_page_grab_fictitious();
        }

        if (m == NULL) {
            vm_object_deallocate(object);
            vm_map_lock(map);
            continue;
        }

        vm_object_lock(object);
        if (((p = vm_page_lookup(object, offset)) != NULL)
            || (object->pager == NULL)
            || (object->shadow_severed)) {
            VM_PAGE_FREE(m);
            vm_object_unlock(object);
            vm_object_deallocate(object);
            vm_map_lock(map);
            continue;
        }

        vm_page_insert(m, object, offset);

        if (object->absent_count > vm_object_absent_max) {
            VM_PAGE_FREE(m);
            vm_object_unlock(object);
            vm_object_deallocate(object);
            vm_map_lock(map);
            break;
        }
        m->list_req_pending = TRUE;
        m->absent = TRUE;
        m->unusual = TRUE;
        object->absent_count++;

        before = offset;
        after = offset + PAGE_SIZE_64;
        tws_build_cluster(tws, object, &before, &after, 0x16000);
        vm_object_unlock(object);

        rc = memory_object_data_request(object->pager,
            before + object->paging_offset,
            (vm_size_t)(after - before), VM_PROT_READ);
        if (rc != KERN_SUCCESS) {
            offset = before;
            vm_object_lock(object);
            while (offset < after) {
                m = vm_page_lookup(object, offset);
                if (m && m->absent && m->busy)
                    VM_PAGE_FREE(m);
                offset += PAGE_SIZE;
            }
            vm_object_unlock(object);
            vm_object_deallocate(object);
        } else {
            vm_object_deallocate(object);
        }
        vm_map_lock(map);
        continue;
    }
    vm_map_unlock(map);
}

/* tws locked on entry */

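/*
 * Layout note: the startup list is built as one contiguous kalloc'd
 * blob -- header, then table[], then ele[], then array[] -- and the
 * "pointers" stored in ele[] are byte offsets relative to
 * &startup->ele[0] / &startup->array[0] rather than real addresses.
 * That keeps the blob position-independent, so it can be written out
 * via bsd_write_page_cache_file() and re-based by tws_read_startup_file()
 * when it is read back in.
 */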
tws_startup_t
tws_create_startup_list(
    tws_hash_t tws)
{
    tws_startup_t startup;
    unsigned int i, j, k;
    unsigned int total_elements;
    unsigned int startup_size;
    unsigned int sindex;
    unsigned int hash_index;
    tws_startup_ptr_t element;

    total_elements = tws->expansion_count *
        (tws->number_of_lines * tws->number_of_elements);

    startup_size = sizeof(struct tws_startup)
        + (total_elements * sizeof(tws_startup_ptr_t *))
        + (total_elements * sizeof(struct tws_startup_ptr))
        + (total_elements * sizeof(struct tws_startup_ele));
    startup = (tws_startup_t)(kalloc(startup_size));

    if (startup == NULL)
        return startup;

    bzero((char *)startup, startup_size);

    startup->table = (tws_startup_ptr_t *)
        (((int)startup) + (sizeof(struct tws_startup)));
    startup->ele = (struct tws_startup_ptr *)
        (((vm_offset_t)startup->table) +
        (total_elements * sizeof(tws_startup_ptr_t)));

    startup->array = (struct tws_startup_ele *)
        (((vm_offset_t)startup->ele) +
        (total_elements * sizeof(struct tws_startup_ptr)));

    startup->tws_hash_size = startup_size;
    startup->ele_count = 0;	/* burn first hash ele, else we can't tell from zero */
    startup->array_size = total_elements;
    startup->hash_count = 1;

    sindex = 0;

    for (i = 0; i < tws->number_of_lines; i++) {
        for (j = 0; j < tws->number_of_elements; j++) {
            for (k = 0; k < tws->expansion_count; k++) {
                tws_hash_ele_t entry;
                unsigned int hash_retry;
                vm_offset_t addr;

                entry = &tws->cache[k][i].list[j];
                addr = entry->page_addr;
                hash_retry = 0;
                if (entry->object != 0) {
                    /* get a hash element */
                    hash_index = do_startup_hash(addr,
                        startup->array_size);

                    if (startup->hash_count < total_elements) {
                        element = &(startup->ele[startup->hash_count]);
                        startup->hash_count += 1;
                    } else {
                        /* exit, we're out of elements */
                        break;
                    }
                    /* place the hash element */
                    element->next = startup->table[hash_index];
                    startup->table[hash_index] = (tws_startup_ptr_t)
                        ((int)element - (int)&startup->ele[0]);

                    /* set entry OFFSET in hash element */
                    element->element = (tws_startup_ele_t)
                        ((int)&startup->array[sindex] -
                        (int)&startup->array[0]);

                    startup->array[sindex].page_addr = entry->page_addr;
                    startup->array[sindex].page_cache = entry->page_cache;
                    startup->ele_count++;
                    sindex++;
                }
            }
        }
    }

    return startup;
}


/*
 * Returns an entire cache line. The line is deleted from the startup
 * cache on return. The caller can check startup->ele_count for an empty
 * list. Access synchronization is the responsibility of the caller.
 */
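
/*
 * Note on the window arithmetic below: a startup entry describes a
 * 32-page window starting at entry->page_addr, so the 32-page span
 * beginning at addr can overlap up to two entries.  The first test
 * catches the entry whose window contains addr itself; the second,
 * using next_addr = addr + 0x1F000 (31 pages up), catches an entry
 * whose window begins above addr but inside addr's span, shifting that
 * entry's bits into place before merging them into page_cache_bits.
 */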

unsigned int
tws_startup_list_lookup(
    tws_startup_t startup,
    vm_offset_t addr)
{
    unsigned int hash_index;
    unsigned int page_cache_bits;
    unsigned int startup_shift;
    tws_startup_ele_t entry;
    vm_offset_t next_addr;
    tws_startup_ptr_t element;
    tws_startup_ptr_t base_ele;
    tws_startup_ptr_t *previous_ptr;

    page_cache_bits = 0;

    hash_index = do_startup_hash(addr, startup->array_size);

    if (((unsigned int)&(startup->table[hash_index])) >=
        ((unsigned int)startup + startup->tws_hash_size)) {
        return 0;
    }
    element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
        (int)&startup->ele[0]);
    base_ele = element;
    previous_ptr = &(startup->table[hash_index]);
    while (element > &startup->ele[0]) {
        if (((int)element + sizeof(struct tws_startup_ptr))
            > ((int)startup + startup->tws_hash_size)) {
            return page_cache_bits;
        }
        entry = (tws_startup_ele_t)
            ((int)element->element
                + (int)&startup->array[0]);
        if ((((int)entry + sizeof(struct tws_startup_ele))
            > ((int)startup + startup->tws_hash_size))
            || ((int)entry < (int)startup)) {
            return page_cache_bits;
        }
        if ((addr >= entry->page_addr) &&
            (addr <= (entry->page_addr + 0x1F000))) {
            startup_shift = (addr - entry->page_addr) >> 12;
            page_cache_bits |= entry->page_cache >> startup_shift;
            /* don't dump the pages, unless the addresses */
            /* line up perfectly. The cache may be used */
            /* by other mappings */
            entry->page_cache &= (1 << startup_shift) - 1;
            if (addr == entry->page_addr) {
                if (base_ele == element) {
                    base_ele = (tws_startup_ptr_t)
                        ((int)element->next
                            + (int)&startup->ele[0]);
                    startup->table[hash_index] = element->next;
                    element = base_ele;
                } else {
                    *previous_ptr = element->next;
                    element = (tws_startup_ptr_t)
                        ((int)*previous_ptr
                            + (int)&startup->ele[0]);
                }
                entry->page_addr = 0;
                startup->ele_count--;
                continue;
            }
        }
        next_addr = addr + 0x1F000;
        if ((next_addr >= entry->page_addr) &&
            (next_addr <= (entry->page_addr + 0x1F000))) {
            startup_shift = (next_addr - entry->page_addr) >> 12;
            page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
            entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
            if (entry->page_cache == 0) {
                if (base_ele == element) {
                    base_ele = (tws_startup_ptr_t)
                        ((int)element->next
                            + (int)&startup->ele[0]);
                    startup->table[hash_index] = element->next;
                    element = base_ele;
                } else {
                    *previous_ptr = element->next;
                    element = (tws_startup_ptr_t)
                        ((int)*previous_ptr
                            + (int)&startup->ele[0]);
                }
                entry->page_addr = 0;
                startup->ele_count--;
                continue;
            }
        }
        previous_ptr = &(element->next);
        element = (tws_startup_ptr_t)
            ((int)element->next + (int)&startup->ele[0]);
    }

    return page_cache_bits;
}

kern_return_t
tws_send_startup_info(
    task_t task)
{
    tws_hash_t tws;

    task_lock(task);
    tws = task->dynamic_working_set;
    task_unlock(task);
    if (tws == NULL) {
        return KERN_FAILURE;
    }
    return tws_internal_startup_send(tws);
}


kern_return_t
tws_internal_startup_send(
    tws_hash_t tws)
{
    tws_startup_t scache;

    if (tws == NULL) {
        return KERN_FAILURE;
    }
    tws_lock(tws);
    /* used to signal write or release depending on state of tws */
    if (tws->startup_cache) {
        vm_offset_t startup_buf;
        vm_size_t size;
        startup_buf = (vm_offset_t)tws->startup_cache;
        size = tws->startup_cache->tws_hash_size;
        tws->startup_cache = 0;
        tws_unlock(tws);
        kmem_free(kernel_map, startup_buf, size);
        return KERN_SUCCESS;
    }
    if (tws->startup_name == NULL) {
        tws_unlock(tws);
        return KERN_FAILURE;
    }
    scache = tws_create_startup_list(tws);
    if (scache == NULL) {
        /* don't leak the tws lock on the failure path */
        tws_unlock(tws);
        return KERN_FAILURE;
    }
    bsd_write_page_cache_file(tws->uid, tws->startup_name,
        (caddr_t)scache, scache->tws_hash_size,
        tws->mod, tws->fid);
    kfree(scache, scache->tws_hash_size);
    kfree(tws->startup_name, tws->startup_name_length);
    tws->startup_name = NULL;
    tws_unlock(tws);
    return KERN_SUCCESS;
}

kern_return_t
tws_handle_startup_file(
    task_t task,
    unsigned int uid,
    char *app_name,
    void *app_vp,
    boolean_t *new_info)
{
    tws_startup_t startup;
    vm_offset_t cache_size;
    kern_return_t error;
    int fid;
    int mod;

    *new_info = FALSE;
    /* don't pre-heat kernel task */
    if (task == kernel_task)
        return KERN_SUCCESS;
    error = bsd_read_page_cache_file(uid, &fid,
        &mod, app_name,
        app_vp,
        (vm_offset_t *)&startup,
        &cache_size);
    if (error) {
        return KERN_FAILURE;
    }
    if (startup == NULL) {
        /* Entry for app does not exist, make one. */
        /* We will want our own copy of the shared */
        /* regions to pick up a true picture of all */
        /* the pages we will touch. */
        if ((lsf_zone->count * lsf_zone->elem_size)
            > (lsf_zone->max_size >> 1)) {
            /* We don't want to run out of shared memory */
            /* map entries by starting too many private versions */
            /* of the shared library structures */
            return KERN_SUCCESS;
        }
        *new_info = TRUE;

        error = tws_write_startup_file(task,
            fid, mod, app_name, uid);
        if (error)
            return error;
    } else {
        error = tws_read_startup_file(task,
            (tws_startup_t)startup,
            cache_size);
        if (error) {
            kmem_free(kernel_map,
                (vm_offset_t)startup, cache_size);
            return error;
        }
    }
    return KERN_SUCCESS;
}

kern_return_t
tws_write_startup_file(
    task_t task,
    int fid,
    int mod,
    char *name,
    unsigned int uid)
{
    tws_hash_t tws;
    unsigned int string_length;

    string_length = strlen(name);

restart:
    task_lock(task);
    tws = task->dynamic_working_set;
    task_unlock(task);

    if (tws == NULL) {
        kern_return_t error;

        /* create a dynamic working set of normal size */
        if ((error = task_working_set_create(task, 0, 0,
            TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
            return error;
        /* we need to reset tws and relock */
        goto restart;
    }
    tws_lock(tws);

    if (tws->startup_name != NULL) {
        tws_unlock(tws);
        return KERN_FAILURE;
    }

    tws->startup_name = (char *)
        kalloc((string_length + 1) * (sizeof(char)));
    if (tws->startup_name == NULL) {
        tws_unlock(tws);
        return KERN_FAILURE;
    }

    bcopy(name, (char *)tws->startup_name, string_length + 1);
    tws->startup_name_length = (string_length + 1) * sizeof(char);
    tws->uid = uid;
    tws->fid = fid;
    tws->mod = mod;

    tws_unlock(tws);
    return KERN_SUCCESS;
}

unsigned long tws_read_startup_file_rejects = 0;

kern_return_t
tws_read_startup_file(
    task_t task,
    tws_startup_t startup,
    vm_offset_t cache_size)
{
    tws_hash_t tws;
    int lines;
    int old_exp_count;
    unsigned int ele_count;

restart:
    task_lock(task);
    tws = task->dynamic_working_set;

    /* create a dynamic working set to match file size */

    /* start with total size of the data we got from app_profile */
    ele_count = cache_size;
    /* skip the startup header */
    ele_count -= sizeof(struct tws_startup);
    /*
     * For each startup cache entry, we have one of these:
     *	tws_startup_ptr_t	startup->table[];
     *	struct tws_startup_ptr	startup->ele[];
     *	struct tws_startup_ele	startup->array[];
     */
    ele_count /= (sizeof(tws_startup_ptr_t) +
        sizeof(struct tws_startup_ptr) +
        sizeof(struct tws_startup_ele));

    /*
     * Sanity check: make sure the value for startup->array_size
     * that we read from the app_profile file matches the size
     * of the data we read from disk. If it doesn't match, we
     * can't trust the data and we just drop it all.
     */
    if (cache_size < sizeof(struct tws_startup) ||
        startup->array_size != ele_count) {
        tws_read_startup_file_rejects++;
        task_unlock(task);
        kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
        return KERN_SUCCESS;
    }

    /*
     * We'll create the task working set with the default row size
     * (TWS_ARRAY_SIZE), so this will give us the number of lines
     * we need to store all the data from the app_profile startup
     * cache.
     */
    lines = ele_count / TWS_ARRAY_SIZE;

    if (lines <= TWS_SMALL_HASH_LINE_COUNT) {
        lines = TWS_SMALL_HASH_LINE_COUNT;
        task_unlock(task);
        kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
        return KERN_SUCCESS;
    } else {
        old_exp_count = lines / TWS_HASH_LINE_COUNT;
        if ((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
            lines = (old_exp_count + 1)
                * TWS_HASH_LINE_COUNT;
        }
        if (tws == NULL) {
            kern_return_t error;

            task_unlock(task);
            if ((error = task_working_set_create(task, lines, 0,
                TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
                return error;
            /* we need to reset tws and relock */
            goto restart;
        } else {
            task_unlock(task);
            tws_expand_working_set(
                (void *)tws, lines, TRUE);
        }
    }

    tws_lock(tws);

    if (tws->startup_cache != NULL) {
        tws_unlock(tws);
        return KERN_FAILURE;
    }

    /* now need to fix up internal table pointers */
    startup->table = (tws_startup_ptr_t *)
        (((int)startup) + (sizeof(struct tws_startup)));
    startup->ele = (struct tws_startup_ptr *)
        (((vm_offset_t)startup->table) +
        (startup->array_size * sizeof(tws_startup_ptr_t)));
    startup->array = (struct tws_startup_ele *)
        (((vm_offset_t)startup->ele) +
        (startup->array_size * sizeof(struct tws_startup_ptr)));
    /* the allocation size and file size should be the same; */
    /* just in case they're not, make sure we dealloc correctly */
    startup->tws_hash_size = cache_size;

    tws->startup_cache = startup;
    tws_unlock(tws);
    return KERN_SUCCESS;
}


void
tws_hash_ws_flush(tws_hash_t tws)
{
    tws_startup_t scache;
    if (tws == NULL) {
        return;
    }
    tws_lock(tws);
    if (tws->startup_name != NULL) {
        scache = tws_create_startup_list(tws);
        if (scache == NULL) {
            /* dump the name cache, we'll */
            /* get it next time */
            kfree(tws->startup_name, tws->startup_name_length);
            tws->startup_name = NULL;
            tws_unlock(tws);
            return;
        }
        bsd_write_page_cache_file(tws->uid, tws->startup_name,
            (caddr_t)scache, scache->tws_hash_size,
            tws->mod, tws->fid);
        kfree(scache, scache->tws_hash_size);
        kfree(tws->startup_name, tws->startup_name_length);
        tws->startup_name = NULL;
    }
    tws_unlock(tws);
    return;
}

void
tws_hash_destroy(tws_hash_t tws)
{
    unsigned int i, k;

    if (tws->startup_cache != NULL) {
        kmem_free(kernel_map,
            (vm_offset_t)tws->startup_cache,
            tws->startup_cache->tws_hash_size);
        tws->startup_cache = NULL;
    }
    if (tws->startup_name != NULL) {
        tws_internal_startup_send(tws);
    }
    for (i = 0; i < tws->number_of_lines; i++) {
        for (k = 0; k < tws->expansion_count; k++) {
            /* clear the object refs */
            tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
        }
    }
    i = 0;
    while (i < tws->expansion_count) {

        kfree(tws->table[i],
            sizeof(tws_hash_ptr_t)
            * tws->number_of_lines
            * tws->number_of_elements);
        kfree(tws->table_ele[i],
            sizeof(struct tws_hash_ptr)
            * tws->number_of_lines
            * tws->number_of_elements);
        kfree(tws->alt_ele[i],
            sizeof(struct tws_hash_ptr)
            * tws->number_of_lines
            * tws->number_of_elements);
        kfree(tws->cache[i],
            sizeof(struct tws_hash_line) * tws->number_of_lines);
        i++;
    }
    if (tws->startup_name != NULL) {
        kfree(tws->startup_name, tws->startup_name_length);
    }
    kfree(tws, sizeof(struct tws_hash));
}

void
tws_hash_clear(tws_hash_t tws)
{
    unsigned int i, k;

    for (i = 0; i < tws->number_of_lines; i++) {
        for (k = 0; k < tws->expansion_count; k++) {
            /* clear the object refs */
            tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
        }
    }
}

kern_return_t
task_working_set_create(
	task_t		task,
	unsigned int	lines,
	unsigned int	rows,
	unsigned int	style)
{

	if (lines == 0) {
		lines = TWS_HASH_LINE_COUNT;
	}
	if (rows == 0) {
		rows = TWS_ARRAY_SIZE;
	}
	if (style == TWS_HASH_STYLE_DEFAULT) {
		style = TWS_HASH_STYLE_BASIC;
	}
	task_lock(task);
	if(task->dynamic_working_set != 0) {
		task_unlock(task);
		return(KERN_FAILURE);
	} else if((task->dynamic_working_set =
			tws_hash_create(lines, rows, style)) == 0) {
		task_unlock(task);
		return(KERN_NO_SPACE);
	}
	task_unlock(task);
	return KERN_SUCCESS;
}
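
/*
 * Illustrative usage (not part of the original source): zero
 * lines/rows and TWS_HASH_STYLE_DEFAULT select the built-in
 * defaults above (TWS_HASH_LINE_COUNT lines, TWS_ARRAY_SIZE rows,
 * basic style).
 */
static kern_return_t
tws_create_default_sketch(task_t task)
{
	kern_return_t kr;

	kr = task_working_set_create(task, 0, 0, TWS_HASH_STYLE_DEFAULT);
	/* KERN_FAILURE: the task already has a working set       */
	/* KERN_NO_SPACE: tws_hash_create() could not allocate one */
	return kr;
}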


/* Internal use only routines */


/*
 * internal sub-function for address space lookup;
 * returns the target element and the address of the
 * previous pointer.  The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked.
 */

void
tws_traverse_address_hash_list (
	tws_hash_t		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		tws_hash_ele_t ele;
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
		while(cache_ele != NULL) {
			if(((unsigned int)
				cache_ele->element & TWS_ADDR_HASH) == 0) {
				/* skip object-hash entries on this chain */
				*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			ele = (tws_hash_ele_t)((unsigned int)
				cache_ele->element & ~TWS_ADDR_HASH);
			if ((ele == 0) || (ele->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = base_ele;
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}

			if ((ele->page_addr <= page_addr)
				&& (page_addr <= (ele->page_addr +
				(vm_offset_t)TWS_INDEX_MASK))
				&& ((object == NULL)
					|| ((object == ele->object)
					&& (offset == ele->offset)
					&& (map == ele->map)))) {
				if(exclusive_addr) {
					int delta;
					/* page index of page_addr within the
					   element's 32-page run */
					delta = ((page_addr - ele->page_addr)
						>> 12);
					if((1 << delta) & ele->page_cache) {
						/* We've found a match */
						*target_ele = cache_ele;
						*free_list =
							(tws_hash_ptr_t *)
							&(tws->free_hash_ele[k]);
						return;
					}
				} else {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = (tws_hash_ptr_t *)
						&(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
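
/*
 * Illustrative sketch (not part of the original source): how a
 * caller consumes the triple the traversal returns.  With the tws
 * lock still held, *previous_ptr is the address of the link that
 * points at the match, so the element can be spliced out in O(1)
 * and pushed onto the per-expansion free pool named by free_list.
 */
static void
tws_unlink_match_sketch(
	tws_hash_ptr_t	target_ele,
	tws_hash_ptr_t	*previous_ptr,
	tws_hash_ptr_t	*free_list)
{
	if (target_ele == NULL)
		return;				/* no match was found */
	*previous_ptr = target_ele->next;	/* splice out of the chain */
	target_ele->element = 0;		/* mark the slot empty */
	target_ele->next = *free_list;		/* push onto the free pool */
	*free_list = target_ele;
}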


/*
 * internal sub-function for object space lookup;
 * returns the target element and the address of the
 * previous pointer.  The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked.
 */


void
tws_traverse_object_hash_list (
	tws_hash_t		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		pagemask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = &(tws->table[k][index]);
		while(cache_ele != NULL) {
			if((((unsigned int)cache_ele->element)
				& TWS_ADDR_HASH) != 0) {
				/* skip address-hash entries on this chain */
				*previous_ptr = &(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			if ((cache_ele->element == 0) ||
				(cache_ele->element->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = tws->table[k][index];
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}
			if ((cache_ele->element->object == object)
				&& (cache_ele->element->offset ==
				(offset - (offset & ~TWS_HASH_OFF_MASK)))) {
				/* pagemask of 0xFFFFFFFF matches any page */
				if((cache_ele->element->page_cache & pagemask)
					|| (pagemask == 0xFFFFFFFF)) {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = &(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = &(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}


/*
 * For a given object/offset, discover whether the indexed 32-page
 * frame containing the object/offset exists and whether there are
 * at least threshold pages present.  Returns true if the population
 * meets the threshold.
 */
int
tws_test_for_community(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		threshold,
	unsigned int		*pagemask)
{
	int		index;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	*trailer;
	tws_hash_ptr_t	*free_list;
	int		community = 0;

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);
	tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
			&cache_ele, &trailer, &free_list);

	if(cache_ele != NULL) {
		int i;
		unsigned int ctr;
		ctr = 0;
		/* count set bits in the 32-bit residency bitmap */
		for(i=1; i!=0; i=i<<1) {
			if(i & cache_ele->element->page_cache)
				ctr++;
			if(ctr == threshold) {
				community = 1;
				*pagemask = cache_ele->element->page_cache;
				break;
			}
		}
	}

	return community;

}
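
/*
 * Worked example (not part of the original source): page_cache is a
 * 32-bit bitmap with one bit per page of the 32-page run a hash
 * element covers, so the loop above is a population count with an
 * early exit.  With page_cache = 0x0000001B (pages 0, 1, 3 and 4
 * resident) and threshold = 3, the walk stops at bit 3 and reports
 * a community.  A stand-alone form of the same test:
 */
static int
tws_count_to_threshold_sketch(unsigned int page_cache, unsigned int threshold)
{
	unsigned int i, ctr = 0;

	for (i = 1; i != 0; i <<= 1) {	/* visit each of the 32 bits */
		if (i & page_cache)
			ctr++;
		if (ctr == threshold)
			return 1;	/* enough pages are resident */
	}
	return 0;
}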


/*
 * Gets new hash element for object hash from free pools
 * TWS must be locked
 */

tws_hash_ptr_t
new_obj_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index)
{
	tws_hash_ptr_t element;

	if(tws->obj_free_count[set] < tws->number_of_lines * tws->number_of_elements) {
		element = &(tws->table_ele[set][tws->obj_free_count[set]]);
		tws->obj_free_count[set]+=1;
	} else if(tws->free_hash_ele[set] == NULL) {
		return NULL;
	} else {
		element = tws->free_hash_ele[set];
		tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
	}
	element->element = 0;
	element->next = tws->table[set][index];
	tws->table[set][index] = element;
	return element;
}

/*
 * Gets new hash element for addr hash from free pools
 * TWS must be locked
 */

tws_hash_ptr_t
new_addr_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index)
{
	tws_hash_ptr_t element;

	if(tws->addr_free_count[set]
			< tws->number_of_lines * tws->number_of_elements) {
		element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
		tws->addr_free_count[set]+=1;
	} else if(tws->free_hash_ele[set] == NULL) {
		return NULL;
	} else {
		element = tws->free_hash_ele[set];
		tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
	}
	element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
	element->next = tws->table[set][index];
	tws->table[set][index] = element;
	return element;
}
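
/*
 * Illustrative sketch (not part of the original source): both
 * allocators above share a two-tier strategy, unified here for
 * clarity.  `pool` is the preallocated element array for this
 * expansion set (table_ele or alt_ele) and `used` its bump cursor
 * (obj_free_count or addr_free_count); once the array is exhausted,
 * elements recycled onto free_hash_ele[set] by the traversal
 * routines are reused.  TWS must be locked.
 */
static tws_hash_ptr_t
tws_pool_alloc_sketch(
	tws_hash_t	tws,
	tws_hash_ptr_t	pool,		/* preallocated element array */
	unsigned int	*used,		/* bump-allocation cursor */
	unsigned int	set)
{
	tws_hash_ptr_t	element;
	unsigned int	limit = tws->number_of_lines * tws->number_of_elements;

	if (*used < limit) {
		/* first tier: hand out the next preallocated slot */
		element = &pool[*used];
		*used += 1;
	} else if (tws->free_hash_ele[set] != NULL) {
		/* second tier: recycle an element a traversal freed */
		element = tws->free_hash_ele[set];
		tws->free_hash_ele[set] = element->next;
	} else {
		return NULL;		/* both tiers exhausted */
	}
	return element;
}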