/*
 * Retrieved from the git.saurik.com mirror of apple/xnu.git,
 * blob 8965549c7beba67a87e1a460b5b82d1a193bf975 (osfmk/vm/task_working_set.c).
 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 */
25 /*
26 * File: vm/task_working_set.c
27 * Author: Chris Youngworth
28 * Date: 2001
29 *
 *	Working set detection and maintenance module
31 */
32
33
34 #include <mach/mach_types.h>
35 #include <mach/memory_object.h>
36 #include <mach/shared_memory_server.h>
37 #include <vm/task_working_set.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_map.h>
40 #include <vm/vm_page.h>
41 #include <vm/vm_pageout.h>
42 #include <kern/sched.h>
43 #include <kern/kalloc.h>
44
45 #include <vm/vm_protos.h>
46
47 /*
48 * LP64todo - Task Working Set Support is for 32-bit only
49 */
50 extern zone_t lsf_zone;
51
52 /* declarations for internal use only routines */
53 int startup_miss = 0;
54
55 tws_hash_t
56 tws_hash_create(
57 unsigned int lines,
58 unsigned int rows,
59 unsigned int style);
60
61 kern_return_t
62 tws_write_startup_file(
63 task_t task,
64 int fid,
65 int mod,
66 char *name,
67 unsigned int string_length);
68
69 kern_return_t
70 tws_read_startup_file(
71 task_t task,
72 tws_startup_t startup,
73 vm_offset_t cache_size);
74
75 tws_startup_t
76 tws_create_startup_list(
77 tws_hash_t tws);
78
79 unsigned int
80 tws_startup_list_lookup(
81 tws_startup_t startup,
82 vm_offset_t addr);
83
84 kern_return_t
85 tws_internal_startup_send(
86 tws_hash_t tws);
87
88 void
89 tws_hash_line_clear(
90 tws_hash_t tws,
91 tws_hash_line_t hash_line,
92 vm_object_t object,
93 boolean_t live);
94
95 void
96 tws_hash_clear(
97 tws_hash_t tws);
98
99 void
100 tws_traverse_address_hash_list (
101 tws_hash_t tws,
102 unsigned int index,
103 vm_offset_t page_addr,
104 vm_object_t object,
105 vm_object_offset_t offset,
106 vm_map_t map,
107 tws_hash_ptr_t *target_ele,
108 tws_hash_ptr_t **previous_ptr,
109 tws_hash_ptr_t **free_list,
110 unsigned int exclusive_addr);
111
112 void
113 tws_traverse_object_hash_list (
114 tws_hash_t tws,
115 unsigned int index,
116 vm_object_t object,
117 vm_object_offset_t offset,
118 unsigned int pagemask,
119 tws_hash_ptr_t *target_ele,
120 tws_hash_ptr_t **previous_ptr,
121 tws_hash_ptr_t **free_list);
122
123 tws_hash_ptr_t
124 new_addr_hash(
125 tws_hash_t tws,
126 unsigned int set,
127 unsigned int index);
128
129 tws_hash_ptr_t
130 new_obj_hash(
131 tws_hash_t tws,
132 unsigned int set,
133 unsigned int index);
134
135 int tws_test_for_community(
136 tws_hash_t tws,
137 vm_object_t object,
138 vm_object_offset_t offset,
139 unsigned int threshold,
140 unsigned int *pagemask);
141
142 kern_return_t
143 tws_internal_lookup(
144 tws_hash_t tws,
145 vm_object_offset_t offset,
146 vm_object_t object,
147 tws_hash_line_t *line);
148
149 /* Note: all of the routines below depend on the associated map lock for */
150 /* synchronization, the map lock will be on when the routines are called */
151 /* and on when they return */
152
153 tws_hash_t
154 tws_hash_create(
155 unsigned int lines,
156 unsigned int rows,
157 unsigned int style)
158 {
159 tws_hash_t tws;
160
161 if ((style != TWS_HASH_STYLE_BASIC) &&
162 (style != TWS_HASH_STYLE_BASIC)) {
163 return((tws_hash_t)NULL);
164 }
165
166
167 tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
168 if(tws == (tws_hash_t)NULL)
169 return tws;
170
171 if((tws->table[0] = (tws_hash_ptr_t *)
172 kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
173 == NULL) {
174 kfree(tws, sizeof(struct tws_hash));
175 return (tws_hash_t)NULL;
176 }
177 if((tws->table_ele[0] = (tws_hash_ptr_t)
178 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
179 == NULL) {
180 kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
181 kfree(tws, sizeof(struct tws_hash));
182 return (tws_hash_t)NULL;
183 }
184 if((tws->alt_ele[0] = (tws_hash_ptr_t)
185 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
186 == NULL) {
187 kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
188 kfree(tws->table_ele[0],
189 sizeof(struct tws_hash_ptr) * lines * rows);
190 kfree(tws, sizeof(struct tws_hash));
191 return (tws_hash_t)NULL;
192 }
193 if((tws->cache[0] = (struct tws_hash_line *)
194 kalloc(sizeof(struct tws_hash_line) * lines))
195 == NULL) {
196 kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
197 kfree(tws->table_ele[0],
198 sizeof(struct tws_hash_ptr) * lines * rows);
199 kfree(tws->alt_ele[0],
200 sizeof(struct tws_hash_ptr) * lines * rows);
201 kfree(tws, sizeof(struct tws_hash));
202 return (tws_hash_t)NULL;
203 }
204 tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
205 tws->obj_free_count[0] = 0;
206 tws->addr_free_count[0] = 0;
207
208 /* most defaults are such that a bzero will initialize */
209 bzero((char *)tws->table[0],sizeof(tws_hash_ptr_t)
210 * lines * rows);
211 bzero((char *)tws->table_ele[0],sizeof(struct tws_hash_ptr)
212 * lines * rows);
213 bzero((char *)tws->alt_ele[0],sizeof(struct tws_hash_ptr)
214 * lines * rows);
215 bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
216 * lines);
217
218 mutex_init(&tws->lock, 0);
219 tws->style = style;
220 tws->current_line = 0;
221 tws->pageout_count = 0;
222 tws->line_count = 0;
223 tws->startup_cache = NULL;
224 tws->startup_name = NULL;
225 tws->number_of_lines = lines;
226 tws->number_of_elements = rows;
227 tws->expansion_count = 1;
228 tws->lookup_count = 0;
229 tws->insert_count = 0;
230 tws->time_of_creation = sched_tick;
231
232 return tws;
233 }
234
235
236 extern vm_page_t
237 vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);
238
239
240 void
241 tws_hash_line_clear(
242 tws_hash_t tws,
243 tws_hash_line_t hash_line,
244 __unused vm_object_t object,
245 boolean_t live)
246 {
247 struct tws_hash_ele *hash_ele;
248 struct tws_hash_ptr **trailer;
249 struct tws_hash_ptr **free_list;
250 tws_hash_ele_t addr_ele;
251 int index;
252 unsigned int i, j;
253 int dump_pmap;
254 int hash_loop;
255
256
257 if(tws->line_count < tws->number_of_lines) {
258 tws->line_count++;
259 dump_pmap = 1;
260 } else {
261 if(tws->pageout_count != vm_pageout_scan_event_counter) {
262 tws->pageout_count =
263 vm_pageout_scan_event_counter;
264 tws->line_count = 0;
265 dump_pmap = 1;
266 } else {
267 dump_pmap = 0;
268 }
269 }
270 hash_line->ele_count = 0;
271
272 for (i=0; i<tws->number_of_elements; i++) {
273 hash_loop = 0;
274 hash_ele = &(hash_line->list[i]);
275 if(hash_ele->object != 0) {
276
277 vm_object_offset_t local_off = 0;
278 tws_hash_ptr_t cache_ele;
279
280 index = alt_tws_hash(
281 hash_ele->page_addr & TWS_ADDR_OFF_MASK,
282 tws->number_of_elements,
283 tws->number_of_lines);
284
285 tws_traverse_address_hash_list(tws, index,
286 hash_ele->page_addr, hash_ele->object,
287 hash_ele->offset, hash_ele->map,
288 &cache_ele, &trailer, &free_list, 0);
289 if(cache_ele != NULL) {
290 addr_ele = (tws_hash_ele_t)((unsigned int)
291 (cache_ele->element) & ~TWS_ADDR_HASH);
292 if(addr_ele != hash_ele)
293 panic("tws_hash_line_clear:"
294 " out of sync\n");
295 cache_ele->element = 0;
296 *trailer = cache_ele->next;
297 cache_ele->next = *free_list;
298 *free_list = cache_ele;
299 }
300
301 index = alt_tws_hash(
302 (hash_ele->page_addr - 0x1f000)
303 & TWS_ADDR_OFF_MASK,
304 tws->number_of_elements,
305 tws->number_of_lines);
306
307 tws_traverse_address_hash_list(tws, index,
308 hash_ele->page_addr, hash_ele->object,
309 hash_ele->offset, hash_ele->map,
310 &cache_ele, &trailer, &free_list, 0);
311
312 if(cache_ele != NULL) {
313 addr_ele = (tws_hash_ele_t)((unsigned int)
314 (cache_ele->element) & ~TWS_ADDR_HASH);
315 if(addr_ele != hash_ele)
316 panic("tws_hash_line_clear: "
317 "out of sync\n");
318 cache_ele->element = 0;
319 *trailer = cache_ele->next;
320 cache_ele->next = *free_list;
321 *free_list = cache_ele;
322 }
323
324
325 if((hash_ele->map != NULL) && (live)) {
326 vm_page_t p;
327
328 #if 0
329 if (object != hash_ele->object) {
330 if (object)
331 vm_object_unlock(object);
332 vm_object_lock(hash_ele->object);
333 }
334 #endif
335 if (dump_pmap == 1) {
336 for (j = 0x1; j != 0; j = j<<1) {
337 if(j & hash_ele->page_cache) {
338 p = vm_page_lookup_nohint(hash_ele->object,
339 hash_ele->offset + local_off);
340 if((p != NULL) && (p->wire_count == 0)) {
341 pmap_remove_some_phys((pmap_t)vm_map_pmap(current_map()),
342 p->phys_page);
343 }
344 }
345 local_off += PAGE_SIZE_64;
346 }
347 }
348 #if 0
349 if (object != hash_ele->object) {
350 vm_object_unlock(hash_ele->object);
351 if (object)
352 vm_object_lock(object);
353 }
354 #endif
355 }
356
357 if(tws->style == TWS_HASH_STYLE_SIGNAL) {
358 vm_object_deallocate(hash_ele->object);
359 vm_map_deallocate(hash_ele->map);
360 }
361
362 index = do_tws_hash(hash_ele->object, hash_ele->offset,
363 tws->number_of_elements,
364 tws->number_of_lines);
365
366 tws_traverse_object_hash_list(tws,
367 index, hash_ele->object, hash_ele->offset,
368 0xFFFFFFFF, &cache_ele, &trailer, &free_list);
369 if((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
370 cache_ele->element = 0;
371 *trailer = cache_ele->next;
372 cache_ele->next = *free_list;
373 *free_list = cache_ele;
374 }
375 hash_ele->object = 0;
376 }
377 }
378 }
379
380 kern_return_t
381 tws_internal_lookup(
382 tws_hash_t tws,
383 vm_object_offset_t offset,
384 vm_object_t object,
385 tws_hash_line_t *line)
386 {
387 int index;
388 int loop;
389 int set;
390 int ele_line;
391 vm_offset_t pagenum;
392 tws_hash_ptr_t cache_ele;
393 tws_hash_ptr_t *trailer;
394 tws_hash_ptr_t *free_list;
395
396 /* don't cache private objects */
397 if(object->private)
398 return KERN_SUCCESS;
399
400 index = do_tws_hash(object, offset,
401 tws->number_of_elements, tws->number_of_lines);
402 loop = 0;
403
404 tws->lookup_count++;
405 if(tws->lookup_count == 0)
406 tws->insert_count = 0;
407 if(tws->startup_name != NULL) {
408 int age_of_cache;
409 age_of_cache = ((sched_tick
410 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
411 if (age_of_cache > 45) {
412 return KERN_OPERATION_TIMED_OUT;
413 }
414 }
415
416 if(tws->lookup_count > (4 * tws->expansion_count
417 * tws->number_of_elements * tws->number_of_lines) &&
418 (tws->lookup_count > (2 * tws->insert_count))) {
419 if(tws->startup_cache) {
420 int age_of_cache;
421 age_of_cache = ((sched_tick
422 - tws->time_of_creation) >> SCHED_TICK_SHIFT);
423 if (age_of_cache > 45) {
424 return KERN_OPERATION_TIMED_OUT;
425 }
426 }
427 }
428
429 pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
430 pagenum = pagenum >> 12;
431 pagenum = 1 << pagenum; /* get the appropriate page in 32 page block */
432 tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
433 &cache_ele, &trailer, &free_list);
434 if(cache_ele != NULL) {
435 set = cache_ele->element->line/tws->number_of_lines;
436 ele_line = cache_ele->element->line - set;
437 *line = &tws->cache[set][ele_line];
438 return KERN_SUCCESS;
439 }
440
441 return KERN_FAILURE;
442
443
444 }
445
446 kern_return_t
447 tws_lookup(
448 tws_hash_t tws,
449 vm_object_offset_t offset,
450 vm_object_t object,
451 tws_hash_line_t *line)
452 {
453 kern_return_t kr;
454
455 if(!tws_lock_try(tws)) {
456 return KERN_FAILURE;
457 }
458 kr = tws_internal_lookup(tws,
459 offset, object, line);
460 tws_unlock(tws);
461 return kr;
462 }
463
464 kern_return_t
465 tws_expand_working_set(
466 tws_hash_t old_tws,
467 unsigned int line_count,
468 boolean_t dump_data)
469 {
470 tws_hash_t new_tws;
471 unsigned int i,j,k;
472 struct tws_hash temp;
473
474 /* Note we do an elaborate dance to preserve the header that */
475 /* task is pointing to. In this way we can avoid taking a task */
476 /* lock every time we want to access the tws */
477
478 if (old_tws->number_of_lines >= line_count) {
479 return KERN_FAILURE;
480 }
481 if((new_tws = tws_hash_create(line_count,
482 old_tws->number_of_elements, old_tws->style)) == 0) {
483 return(KERN_NO_SPACE);
484 }
485 tws_lock(old_tws);
486
487 if(!dump_data) {
488 for(i = 0; i<old_tws->number_of_lines; i++) {
489 for(j = 0; j<old_tws->number_of_elements; j++) {
490 for(k = 0; k<old_tws->expansion_count; k++) {
491 tws_hash_ele_t entry;
492 vm_object_offset_t paddr;
493 unsigned int page_index;
494 entry = &old_tws->cache[k][i].list[j];
495 if(entry->object != 0) {
496 paddr = 0;
497 for(page_index = 1; page_index != 0;
498 page_index = page_index << 1) {
499 if (entry->page_cache & page_index) {
500 tws_insert(new_tws,
501 entry->offset+paddr,
502 entry->object,
503 entry->page_addr+paddr,
504 entry->map);
505 }
506 paddr+=PAGE_SIZE;
507 }
508
509 }
510 }
511 }
512 }
513 }
514
515 temp.style = new_tws->style;
516 temp.current_line = new_tws->current_line;
517 temp.pageout_count = new_tws->pageout_count;
518 temp.line_count = new_tws->line_count;
519 temp.number_of_lines = new_tws->number_of_lines;
520 temp.number_of_elements = new_tws->number_of_elements;
521 temp.expansion_count = new_tws->expansion_count;
522 temp.lookup_count = new_tws->lookup_count;
523 temp.insert_count = new_tws->insert_count;
524 for(i = 0; i<new_tws->expansion_count; i++) {
525 temp.obj_free_count[i] = new_tws->obj_free_count[i];
526 temp.addr_free_count[i] = new_tws->addr_free_count[i];
527 temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
528 temp.table[i] = new_tws->table[i];
529 temp.table_ele[i] = new_tws->table_ele[i];
530 temp.alt_ele[i] = new_tws->alt_ele[i];
531 temp.cache[i] = new_tws->cache[i];
532 }
533
534 new_tws->style = old_tws->style;
535 new_tws->current_line = old_tws->current_line;
536 new_tws->pageout_count = old_tws->pageout_count;
537 new_tws->line_count = old_tws->line_count;
538 new_tws->number_of_lines = old_tws->number_of_lines;
539 new_tws->number_of_elements = old_tws->number_of_elements;
540 new_tws->expansion_count = old_tws->expansion_count;
541 new_tws->lookup_count = old_tws->lookup_count;
542 new_tws->insert_count = old_tws->insert_count;
543 for(i = 0; i<old_tws->expansion_count; i++) {
544 new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
545 new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
546 new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
547 new_tws->table[i] = old_tws->table[i];
548 new_tws->table_ele[i] = old_tws->table_ele[i];
549 new_tws->alt_ele[i] = old_tws->alt_ele[i];
550 new_tws->cache[i] = old_tws->cache[i];
551 }
552
553 old_tws->style = temp.style;
554 old_tws->current_line = temp.current_line;
555 old_tws->pageout_count = temp.pageout_count;
556 old_tws->line_count = temp.line_count;
557 old_tws->number_of_lines = temp.number_of_lines;
558 old_tws->number_of_elements = temp.number_of_elements;
559 old_tws->expansion_count = temp.expansion_count;
560 old_tws->lookup_count = temp.lookup_count;
561 old_tws->insert_count = temp.insert_count;
562 for(i = 0; i<temp.expansion_count; i++) {
563 old_tws->obj_free_count[i] = temp.obj_free_count[i];;
564 old_tws->addr_free_count[i] = temp.addr_free_count[i];;
565 old_tws->free_hash_ele[i] = NULL;
566 old_tws->table[i] = temp.table[i];
567 old_tws->table_ele[i] = temp.table_ele[i];
568 old_tws->alt_ele[i] = temp.alt_ele[i];
569 old_tws->cache[i] = temp.cache[i];
570 }
571
572 tws_hash_destroy(new_tws);
573 tws_unlock(old_tws);
574 return KERN_SUCCESS;
575 }
576
/* debugging hook: can be pointed at a live hash from a debugger */
tws_hash_t test_tws = 0;

/*
 * tws_insert:
 *
 * Record that page "page_addr" of "object/offset" (in "map") is part of
 * the task's working set.  Merges into an existing element when the
 * object/offset block is already cached, otherwise claims a slot in the
 * current cache line, expanding the hash by another "set" of tables when
 * the line is full.
 *
 * Non-blocking: returns KERN_FAILURE if the tws lock is contended or no
 * slot can be found; KERN_NO_SPACE when the caller should grow the hash
 * via tws_expand_working_set(); KERN_OPERATION_TIMED_OUT when a startup
 * cache has aged out (> 45 tick units).  Caller holds the map lock and
 * the object lock (the object lock is dropped around table allocation).
 */
kern_return_t
tws_insert(
	tws_hash_t	tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	vm_offset_t		page_addr,
	vm_map_t		map)
{
	unsigned int		index;
	unsigned int		alt_index;
	unsigned int		index_enum[2];
	unsigned int		ele_index;
	tws_hash_ptr_t		cache_ele;
	tws_hash_ptr_t		obj_ele = NULL;
	tws_hash_ptr_t		addr_ele = NULL;
	tws_hash_ptr_t		*trailer;
	tws_hash_ptr_t		*free_list;
	tws_hash_ele_t		target_element = NULL;
	unsigned int		i;
	unsigned int		current_line;
	unsigned int		set;
	int			ctr;
	unsigned int		startup_cache_line;
	int			age_of_cache = 0;

	if(!tws_lock_try(tws)) {
		return KERN_FAILURE;
	}
	tws->insert_count++;
	current_line = 0xFFFFFFFF;	/* sentinel: no line chosen yet */
	set = 0;

	startup_cache_line = 0;

	/* seed the page bitmap from the persisted startup cache, if any */
	if (tws->startup_cache) {
		vm_offset_t	startup_page_addr;

		startup_page_addr = page_addr - (offset - (offset & TWS_HASH_OFF_MASK));

		age_of_cache = ((sched_tick - tws->time_of_creation) >> SCHED_TICK_SHIFT);

		startup_cache_line = tws_startup_list_lookup(tws->startup_cache, startup_page_addr);
	}
	/* This next bit of code, and the alternate hash, */
	/* are made necessary because of IPC COW */

	/* Note: the use of page_addr modified by delta from offset */
	/* frame base means we may miss some previous entries.  However */
	/* we will not miss the present entry.  This is most important */
	/* in avoiding duplication of entries against long lived non-cow */
	/* objects */
	index_enum[0] = alt_tws_hash(
			page_addr & TWS_ADDR_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	index_enum[1] = alt_tws_hash(
			(page_addr - 0x1f000) & TWS_ADDR_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	/*
	 * Probe both address-hash buckets (primary + COW alternate) for an
	 * existing element covering this virtual address.
	 */
	for(ctr = 0; ctr < 2;) {
		tws_hash_ele_t	resident;
		tws_traverse_address_hash_list(tws,
				index_enum[ctr], page_addr, NULL,
				0, NULL,
				&cache_ele, &trailer, &free_list, 1);
		if(cache_ele != NULL) {
			/* found one */
			resident = (tws_hash_ele_t)((unsigned int)
					cache_ele->element & ~TWS_ADDR_HASH);
			if((object == resident->object) &&
					resident->offset ==
					(offset & TWS_HASH_OFF_MASK)) {
				/* This is our object/offset */
				resident->page_cache
						|= startup_cache_line;
				resident->page_cache |=
				   (1<<(((vm_offset_t)
				   (offset & TWS_INDEX_MASK))>>12));
				tws_unlock(tws);
				if (age_of_cache > 45)
					return KERN_OPERATION_TIMED_OUT;
				return KERN_SUCCESS;
			}
			if((object->shadow ==
					resident->object) &&
					((resident->offset
					+ object->shadow_offset)
					== (offset & TWS_HASH_OFF_MASK))) {
				/* if we just shadowed, inherit */
				/* access pattern from parent */
				startup_cache_line |=
				    resident->page_cache;
				/* throw out old entry */
				resident->page_cache = 0;
				break;
			} else {
				/* stale alias: drop this page's bit */
				resident->page_cache &=
					~(1<<(((vm_offset_t)(page_addr
						- resident->page_addr))
						>>12));
			}
			/* Throw out old entry if there are no */
			/* more pages in cache */
			if(resident->page_cache == 0) {
				/* delete addr hash entry */
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
				/* go after object hash */
				index = do_tws_hash(
						resident->object,
						resident->offset,
						tws->number_of_elements,
						tws->number_of_lines);
				tws_traverse_object_hash_list(tws,
						index, resident->object,
						resident->offset,
						0xFFFFFFFF, &cache_ele,
						&trailer, &free_list);
				if(cache_ele != NULL) {
					if(tws->style ==
						TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(
							cache_ele->element->object);
						vm_map_deallocate(
							cache_ele->element->map);
					}
					/* remember the vacated line for back-fill */
					current_line =
						cache_ele->element->line;
					set = current_line
							/tws->number_of_lines;
					current_line -= set *
							tws->number_of_lines;
					if(cache_ele->element->object != 0) {
						cache_ele->element->object = 0;
						tws->cache[set]
							[current_line].ele_count--;
					}
					cache_ele->element = 0;
					*trailer = cache_ele->next;
					cache_ele->next = *free_list;
					*free_list = cache_ele;
				}
			}
			continue;	/* re-scan same bucket after removal */
		}
		ctr+=1;
	}

	/*
	 * We may or may not have a current line setting coming out of
	 * the code above. If we have a current line it means we can
	 * choose to back-fill the spot vacated by a previous entry.
	 * We have yet to do a definitive check using the original obj/off
	 * We will do that now and override the current line if we
	 * find an element
	 */

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);

	alt_index = index_enum[0];

	tws_traverse_object_hash_list(tws, index, object, offset,
			0xFFFFFFFF, &cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		obj_ele = cache_ele;
		current_line = cache_ele->element->line;
		set = current_line/tws->number_of_lines;
		current_line -= set * tws->number_of_lines;
		target_element = cache_ele->element;

		/* Now check to see if we have a hash addr for it */
		tws_traverse_address_hash_list(tws,
				alt_index, obj_ele->element->page_addr,
				obj_ele->element->object,
				obj_ele->element->offset,
				obj_ele->element->map,
				&cache_ele, &trailer, &free_list, 0);
		if(cache_ele != NULL) {
			addr_ele = cache_ele;
		} else {
			addr_ele = new_addr_hash(tws, set, alt_index);
			/* if cannot allocate just do without */
			/* we'll get it next time around */
		}
	}

	/* SIGNAL style keeps a reference per cached element */
	if(tws->style == TWS_HASH_STYLE_SIGNAL) {
		vm_object_reference(object);
		vm_map_reference(map);
	}

	/*
	 * No line chosen yet: take the hash's current line, advancing (and
	 * possibly allocating a whole new expansion set of tables) when it
	 * is full.
	 */
	if(current_line == 0xFFFFFFFF) {
		current_line = tws->current_line;
		set = current_line/tws->number_of_lines;
		current_line = current_line - (set * tws->number_of_lines);

#ifdef notdef
		if(cache_full) {
			tws->current_line = tws->number_of_lines - 1;
		}
#endif
		if(tws->cache[set][current_line].ele_count
				>= tws->number_of_elements) {
			current_line++;
			tws->current_line++;
			if(current_line == tws->number_of_lines) {
				set++;
				current_line = 0;
				if (set == tws->expansion_count) {
				   if((tws->lookup_count <
					(2 * tws->insert_count)) &&
					(set<TWS_HASH_EXPANSION_MAX)) {
					tws->lookup_count = 0;
					tws->insert_count = 0;
					if(tws->number_of_lines
						< TWS_HASH_LINE_COUNT) {
						tws->current_line--;
						tws_unlock(tws);
						return KERN_NO_SPACE;
					}
					/* object persistence is guaranteed by */
					/* an elevated paging or object */
					/* reference count in the caller. */
					vm_object_unlock(object);
					if((tws->table[set] = (tws_hash_ptr_t *)
						kalloc(sizeof(tws_hash_ptr_t)
						* tws->number_of_lines
						* tws->number_of_elements))
						== NULL) {
						set = 0;
					} else if((tws->table_ele[set] =
						(tws_hash_ptr_t)
						kalloc(sizeof(struct tws_hash_ptr)
						* tws->number_of_lines
						* tws->number_of_elements))
						== NULL) {
						kfree(tws->table[set],
						      sizeof(tws_hash_ptr_t)
						      * tws->number_of_lines
						      * tws->number_of_elements);
						set = 0;
					} else if((tws->alt_ele[set] =
						(tws_hash_ptr_t)
						kalloc(sizeof(struct tws_hash_ptr)
						* tws->number_of_lines
						* tws->number_of_elements))
						== NULL) {
						kfree(tws->table_ele[set],
						      sizeof(struct tws_hash_ptr)
						      * tws->number_of_lines
						      * tws->number_of_elements);
						kfree(tws->table[set],
						      sizeof(tws_hash_ptr_t)
						      * tws->number_of_lines
						      * tws->number_of_elements);
						tws->table[set] = NULL;
						set = 0;

					} else if((tws->cache[set] =
						(struct tws_hash_line *)
						kalloc(sizeof
						(struct tws_hash_line)
						* tws->number_of_lines))
						== NULL) {
						kfree(tws->alt_ele[set],
						      sizeof(struct tws_hash_ptr)
						      * tws->number_of_lines
						      * tws->number_of_elements);
						kfree(tws->table_ele[set],
						      sizeof(struct tws_hash_ptr)
						      * tws->number_of_lines
						      * tws->number_of_elements);
						kfree(tws->table[set],
						      sizeof(tws_hash_ptr_t)
						      * tws->number_of_lines
						      * tws->number_of_elements);
						tws->table[set] = NULL;
						set = 0;

					} else {
						/* new set allocated: zero it out */
						tws->free_hash_ele[set] =
								(tws_hash_ptr_t)0;
						tws->obj_free_count[set] = 0;
						tws->addr_free_count[set] = 0;
						bzero((char *)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->table_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->alt_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->cache[set],
							sizeof(struct tws_hash_line)
							* tws->number_of_lines);
					}
					vm_object_lock(object);
				   } else {
					if (tws->startup_name != NULL) {
						tws->current_line--;

						age_of_cache = ((sched_tick - tws->time_of_creation) >> SCHED_TICK_SHIFT);

						tws_unlock(tws);

						if (age_of_cache > 45)
							return KERN_OPERATION_TIMED_OUT;

						return KERN_FAILURE;
					}
					/* wrap around to set 0 */
					tws->lookup_count = 0;
					tws->insert_count = 0;
					set = 0;
				   }
				}
				tws->current_line = set * tws->number_of_lines;
			}
			if(set < tws->expansion_count) {
			   /* evict the line we are about to reuse */
			   tws_hash_line_clear(tws,
				&(tws->cache[set][current_line]), object, TRUE);
			   if(tws->cache[set][current_line].ele_count
						>= tws->number_of_elements) {
				if(tws->style == TWS_HASH_STYLE_SIGNAL) {
					vm_object_deallocate(object);
					vm_map_deallocate(map);
				}
				tws_unlock(tws);
				return KERN_FAILURE;
			   }
			} else {
				tws->expansion_count++;
			}
		}
	}


	/* set object hash element */
	if(obj_ele == NULL) {
		obj_ele = new_obj_hash(tws, set, index);
		if(obj_ele == NULL) {
			/* mark line full so the next insert advances past it */
			tws->cache[set][current_line].ele_count
						= tws->number_of_elements;
			tws_unlock(tws);
			return KERN_FAILURE;
		}
	}

	/* set address hash element */
	if(addr_ele == NULL) {
		addr_ele = new_addr_hash(tws, set, alt_index);
	}

	/* find a free slot in the chosen line */
	if(target_element == NULL) {
		ele_index = 0;
		for(i = 0; i<tws->number_of_elements; i++) {
			if(tws->cache[set][current_line].
					list[ele_index].object == 0) {
				break;
			}
			ele_index++;
			if(ele_index >= tws->number_of_elements)
				ele_index = 0;

		}

		if(i == tws->number_of_elements)
			panic("tws_insert: no free elements");

		target_element =
			&(tws->cache[set][current_line].list[ele_index]);

		tws->cache[set][current_line].ele_count++;
	}

	/* publish the element through both hashes and fill it in */
	obj_ele->element = target_element;
	if(addr_ele) {
		/* low bit tags this pointer as an address-hash entry */
		addr_ele->element = (tws_hash_ele_t)
			(((unsigned int)target_element) | TWS_ADDR_HASH);
	}
	target_element->object = object;
	target_element->offset = offset & TWS_HASH_OFF_MASK;
	target_element->page_addr =
		page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	target_element->map = map;
	/* line is encoded as current_line + set * number_of_lines */
	target_element->line =
		current_line + (set * tws->number_of_lines);

	target_element->page_cache |= startup_cache_line;
	target_element->page_cache |= 1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);

	tws_unlock(tws);

	if (age_of_cache > 45)
		return KERN_OPERATION_TIMED_OUT;

	return KERN_SUCCESS;
}
985
986 /*
987 * tws_build_cluster
988 * lengthen the cluster of pages by the number of pages encountered in the
989 * working set up to the limit requested by the caller. The object needs
990 * to be locked on entry. The map does not because the tws_lookup function
 * is used only to find if there is an entry in the cache.  No transient
992 * data from the cache is de-referenced.
993 *
994 */
995 #if MACH_PAGEMAP
996 /*
997 * MACH page map - an optional optimization where a bit map is maintained
998 * by the VM subsystem for internal objects to indicate which pages of
999 * the object currently reside on backing store. This existence map
1000 * duplicates information maintained by the vnode pager. It is
1001 * created at the time of the first pageout against the object, i.e.
1002 * at the same time pager for the object is created. The optimization
1003 * is designed to eliminate pager interaction overhead, if it is
1004 * 'known' that the page does not exist on backing store.
1005 *
1006 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
1007 * either marked as paged out in the existence map for the object or no
1008 * existence map exists for the object. LOOK_FOR() is one of the
1009 * criteria in the decision to invoke the pager. It is also used as one
1010 * of the criteria to terminate the scan for adjacent pages in a clustered
1011 * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for
1012 * permanent objects. Note also that if the pager for an internal object
1013 * has not been created, the pager is not invoked regardless of the value
1014 * of LOOK_FOR() and that clustered pagein scans are only done on an object
1015 * for which a pager has been created.
1016 *
1017 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
 * is marked as paged out in the existence map for the object.
 * PAGED_OUT() is used to determine if a page has already been pushed
1020 * into a copy object in order to avoid a redundant page out operation.
1021 */
1022 #define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
1023 != VM_EXTERNAL_STATE_ABSENT)
1024 #define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
1025 == VM_EXTERNAL_STATE_EXISTS)
1026 #else /* MACH_PAGEMAP */
1027 /*
1028 * If the MACH page map optimization is not enabled,
1029 * LOOK_FOR() always evaluates to TRUE. The pager will always be
1030 * invoked to resolve missing pages in an object, assuming the pager
1031 * has been created for the object. In a clustered page operation, the
 * absence of a page on backing store cannot be used to terminate
1033 * a scan for adjacent pages since that information is available only in
1034 * the pager. Hence pages that may not be paged out are potentially
1035 * included in a clustered request. The vnode pager is coded to deal
1036 * with any combination of absent/present pages in a clustered
1037 * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager
1038 * will always be invoked to push a dirty page into a copy object assuming
1039 * a pager has been created. If the page has already been pushed, the
 * pager will ignore the new request.
1041 */
1042 #define LOOK_FOR(o, f) TRUE
1043 #define PAGED_OUT(o, f) FALSE
1044 #endif /* MACH_PAGEMAP */
1045
1046
/*
 * tws_build_cluster:  given a faulting range [*start, *end) in `object',
 * grow the range into a larger pagein cluster based on the task working
 * set (and, when present, the startup cache), bounded by `max_length'
 * bytes.  On return *start/*end bound the cluster; an absent placeholder
 * page has been inserted into the object for every page added, which
 * also throttles requests to the default pager.
 */
void
tws_build_cluster(
		tws_hash_t		tws,
		vm_object_t		object,
		vm_object_offset_t	*start,
		vm_object_offset_t	*end,
		vm_size_t		max_length)
{
	tws_hash_line_t		line;
	vm_object_offset_t	before = *start;
	vm_object_offset_t	after = *end;
	vm_object_offset_t	original_start = *start;
	vm_object_offset_t	original_end = *end;
	vm_size_t		length = (vm_size_t)(*end - *start);
	vm_page_t		m;
	kern_return_t		kret;
	vm_object_offset_t	object_size;
	int			age_of_cache;
	vm_size_t		pre_heat_size;
	unsigned int		ele_cache;
	unsigned int		end_cache = 0;
	unsigned int		start_cache = 0;
	unsigned int		memory_scarce = 0;

	/* private objects and pagerless objects cannot be clustered */
	if((object->private) || !(object->pager))
		return;

	if (!object->internal) {
		/* external (vnode-backed) object: ask the pager for its size */
		kret = vnode_pager_get_object_size(
			object->pager,
			&object_size);
	} else {
		object_size = object->size;
	}

	/* best effort: give up rather than block on the hash lock */
	if((!tws) || (!tws_lock_try(tws))) {
		return;
	}
	age_of_cache = ((sched_tick
			- tws->time_of_creation) >> SCHED_TICK_SHIFT);

	if (vm_page_free_count < (2 * vm_page_free_target))
		memory_scarce = 1;

	/* When pre-heat files are not available, resort to speculation */
	/* based on size of file */

	if (tws->startup_cache || object->internal || age_of_cache > 45) {
		pre_heat_size = 0;
	} else {
		if (object_size > (vm_object_offset_t)(1024 * 1024))
			pre_heat_size = 8 * PAGE_SIZE;
		else if (object_size > (vm_object_offset_t)(128 * 1024))
			pre_heat_size = 4 * PAGE_SIZE;
		else
			pre_heat_size = 2 * PAGE_SIZE;
	}

	if (tws->startup_cache) {
		int target_page_count;

		/* demand a denser page community when memory is scarce */
		if (memory_scarce)
			target_page_count = 16;
		else
			target_page_count = 4;

		if (tws_test_for_community(tws, object, *start, target_page_count, &ele_cache))
		{
			start_cache = ele_cache;
			/* align to the 32-page frame and take the whole frame */
			*start = *start & TWS_HASH_OFF_MASK;
			*end = *start + (32 * PAGE_SIZE_64);

			if (*end > object_size) {
				*end = round_page_64(object_size);
				max_length = 0;
			} else
				end_cache = ele_cache;

			/*
			 * Grow the cluster a 32-page frame at a time, forward
			 * then backward, while neighbouring frames also show a
			 * community of cached pages and max_length allows.
			 */
			while (max_length > ((*end - *start) + (32 * PAGE_SIZE))) {
				int expanded;

				expanded = 0;
				after = *end;

				if ((after + (32 * PAGE_SIZE_64)) <= object_size &&
				    (tws_test_for_community(tws, object, after, 8, &ele_cache))) {

					*end = after + (32 * PAGE_SIZE_64);
					end_cache = ele_cache;
					expanded = 1;
				}
				if (max_length < ((*end - *start) + (32 * PAGE_SIZE_64))) {
					break;
				}
				if (*start) {
					before = (*start - PAGE_SIZE_64) & TWS_HASH_OFF_MASK;

					if (tws_test_for_community(tws, object, before, 8, &ele_cache)) {

						*start = before;
						start_cache = ele_cache;
						expanded = 1;
					}
				}
				if (expanded == 0)
					break;
			}
			if (end_cache)
				*end -= PAGE_SIZE_64;

			/* trim leading pages the start frame's bitmap marks absent */
			if (start_cache != 0) {
				unsigned int mask;

				for (mask = 1; mask != 0; mask = mask << 1) {
					if (*start == original_start)
						break;
					if (!(start_cache & mask))
						*start += PAGE_SIZE_64;
					else
						break;
				}
			}
			/* trim trailing pages the end frame's bitmap marks absent */
			if (end_cache != 0) {
				unsigned int mask;

				for (mask = 0x80000000;
				     mask != 0; mask = mask >> 1) {
					if (*end == original_end)
						break;
					if (!(end_cache & mask))
						*end -= PAGE_SIZE_64;
					else
						break;
				}
			}
			tws_unlock(tws);

			/* never shrink below the caller's original request */
			if (*end < original_end)
				*end = original_end;
			return;
		}
	}

	/* no startup-cache hit: extend the range forward page by page */
	while ((length < max_length) &&
	       (object_size >=
			(after + PAGE_SIZE_64))) {
		if(length >= pre_heat_size) {
			/*
			 * Beyond the speculation window, only continue while
			 * the working set knows about this page (or the one
			 * immediately after it).
			 */
			if(tws_internal_lookup(tws, after, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t extend;

				extend = after + PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}

		if ((object->existence_map != NULL)
		    && (!LOOK_FOR(object, after))) {
			break;
		}

		if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
			/*
			 * don't bridge resident pages
			 */
			break;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager. If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, after);
		object->absent_count++;
		after += PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	*end = after;
	/* then extend the range backward page by page */
	while (length < max_length) {
		if (before == 0)
			break;
		before -= PAGE_SIZE_64;

		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, before, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t extend;

				extend = before;
				if (extend == 0)
					break;
				extend -= PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}
		if ((object->existence_map != NULL)
		    && (!LOOK_FOR(object, before))) {
			break;
		}

		if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
			/*
			 * don't bridge resident pages
			 */
			break;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager. If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, before);
		object->absent_count++;
		*start -= PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	tws_unlock(tws);
}
1309
/*
 * tws_line_signal:  for a working set in SIGNAL style, walk one hash
 * line and issue a pre-heating pagein request for each element's pages,
 * skipping the element for the page that just faulted.  The map lock is
 * taken here and dropped around each pager interaction.
 */
void
tws_line_signal(
	tws_hash_t	tws,
	vm_map_t	map,
	tws_hash_line_t hash_line,
	vm_offset_t	target_page)
{
	unsigned int		i,j;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_offset_t	before;
	vm_object_offset_t	after;
	struct tws_hash_ele	*element;
	vm_page_t		m,p;
	kern_return_t		rc;

	if(tws->style != TWS_HASH_STYLE_SIGNAL)
		return;

	vm_map_lock(map);
	for (i=0; i<tws->number_of_elements; i++) {

		vm_object_offset_t local_off = 0;

		if(hash_line->list[i].object == 0)
			continue;

		element = &hash_line->list[i];

		/* skip the element covering the page that just faulted */
		if (element->page_addr == target_page)
			continue;

		/* locate the first page recorded in this element's bitmap */
		j = 1;
		while (j != 0) {
			if(j & element->page_cache)
				break;
			j <<= 1;
			local_off += PAGE_SIZE_64;
		}
		object = element->object;
		offset = element->offset + local_off;

		/* first try a fast test to speed up no-op signal */
		if (((p = vm_page_lookup(object, offset)) != NULL)
		    || (object->pager == NULL)
		    || (object->shadow_severed)) {
			continue;
		}

		if((!object->alive) ||
		   (!object->pager_created) || (!object->pager_ready))
			continue;

		if (object->internal) {
			if (object->existence_map == NULL) {
				if (object->shadow)
					continue;
			} else {
				if(!LOOK_FOR(object, offset))
					continue;
			}
		}

		/* keep the object alive across the map-unlock window */
		vm_object_reference(object);
		vm_map_unlock(map);

		if(object->internal) {
			/* a real page throttles default-pager data requests */
			m = vm_page_grab();
		} else {
			m = vm_page_grab_fictitious();
		}

		if(m == NULL) {
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		vm_object_lock(object);
		/* re-check the fast-path conditions under the object lock */
		if (((p = vm_page_lookup(object, offset)) != NULL)
		    || (object->pager == NULL)
		    || (object->shadow_severed)) {
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		vm_page_insert(m, object, offset);

		if (object->absent_count > vm_object_absent_max) {
			/* too many outstanding absent pages: stop signalling */
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			break;
		}
		m->list_req_pending = TRUE;
		m->absent = TRUE;
		m->unusual = TRUE;
		object->absent_count++;

		/* widen the single page into a cluster before requesting */
		before = offset;
		after = offset + PAGE_SIZE_64;
		tws_build_cluster(tws, object, &before, &after, 0x16000);
		vm_object_unlock(object);

		rc = memory_object_data_request(object->pager,
				before + object->paging_offset,
				(vm_size_t)(after - before), VM_PROT_READ);
		if (rc != KERN_SUCCESS) {
			/* request failed: tear down the absent pages we inserted */
			offset = before;
			vm_object_lock(object);
			while (offset < after) {
				m = vm_page_lookup(object, offset);
				if(m && m->absent && m->busy)
					VM_PAGE_FREE(m);
				offset += PAGE_SIZE;
			}
			vm_object_unlock(object);
			vm_object_deallocate(object);
		} else {
			vm_object_deallocate(object);
		}
		vm_map_lock(map);
		continue;
	}
	vm_map_unlock(map);
}
1440
/* tws locked on entry */

/*
 * tws_create_startup_list:  snapshot the working set into a freshly
 * kalloc'ed startup structure suitable for writing to the app_profile
 * file.  The hash table, chain elements and page-address array are all
 * carved from one allocation, with chain links and element references
 * stored as byte offsets so the image is position independent.
 * Returns NULL on allocation failure.
 */
tws_startup_t
tws_create_startup_list(
	tws_hash_t	tws)
{

	tws_startup_t		startup;
	unsigned int		i,j,k;
	unsigned int		total_elements;
	unsigned int		startup_size;
	unsigned int		sindex;
	unsigned int		hash_index;
	tws_startup_ptr_t	element;

	total_elements = tws->expansion_count *
			(tws->number_of_lines * tws->number_of_elements);

	/* one table slot, one chain element and one array entry per
	 * potential cache entry, all in a single allocation */
	startup_size = sizeof(struct tws_startup)
			+ (total_elements * sizeof(tws_startup_ptr_t *))
			+ (total_elements * sizeof(struct tws_startup_ptr))
			+ (total_elements * sizeof(struct tws_startup_ele));
	startup = (tws_startup_t)(kalloc(startup_size));

	if(startup == NULL)
		return startup;

	bzero((char *) startup, startup_size);

	/* carve the three regions out of the single allocation */
	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
			(total_elements * sizeof(tws_startup_ptr_t)));

	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
			(total_elements * sizeof(struct tws_startup_ptr)));

	startup->tws_hash_size = startup_size;
	startup->ele_count = 0; /* burn first hash ele, else we can't tell from zero */
	startup->array_size = total_elements;
	startup->hash_count = 1;

	sindex = 0;


	for(i = 0; i<tws->number_of_lines; i++) {
		for(j = 0; j<tws->number_of_elements; j++) {
			for(k = 0; k<tws->expansion_count; k++) {
				tws_hash_ele_t	entry;
				unsigned int	hash_retry;
				vm_offset_t	addr;

				entry = &tws->cache[k][i].list[j];
				addr = entry->page_addr;
				hash_retry = 0;
				if(entry->object != 0) {
					/* get a hash element */
					hash_index = do_startup_hash(addr,
						startup->array_size);

					if(startup->hash_count < total_elements) {
						element = &(startup->ele[startup->hash_count]);
						startup->hash_count += 1;
					} else {
						/* exit we're out of elements */
						break;
					}
					/* place the hash element */
					/* links are byte offsets from ele[0] so the
					 * snapshot survives relocation on reload */
					element->next = startup->table[hash_index];
					startup->table[hash_index] = (tws_startup_ptr_t)
						((int)element - (int)&startup->ele[0]);

					/* set entry OFFSET in hash element */
					element->element = (tws_startup_ele_t)
						((int)&startup->array[sindex] -
						(int)&startup->array[0]);

					startup->array[sindex].page_addr = entry->page_addr;
					startup->array[sindex].page_cache = entry->page_cache;
					startup->ele_count++;
					sindex++;

				}
			}
		}
	}

	return startup;
}
1532
1533
1534 /*
1535 * Returns an entire cache line. The line is deleted from the startup
1536 * cache on return. The caller can check startup->ele_count for an empty
1537 * list. Access synchronization is the responsibility of the caller.
1538 */
1539
unsigned int
tws_startup_list_lookup(
	tws_startup_t	startup,
	vm_offset_t	addr)
{
	unsigned int	hash_index;
	unsigned int	page_cache_bits;
	unsigned int	startup_shift;
	tws_startup_ele_t	entry;
	vm_offset_t	next_addr;
	tws_startup_ptr_t	element;
	tws_startup_ptr_t	base_ele;
	tws_startup_ptr_t	*previous_ptr;

	page_cache_bits = 0;

	hash_index = do_startup_hash(addr, startup->array_size);

	/* the startup image came from a file: bounds-check every
	 * self-relative pointer before dereferencing it */
	if(((unsigned int)&(startup->table[hash_index])) >= ((unsigned int)startup + startup->tws_hash_size)) {
		return page_cache_bits = 0;
	}
	/* chain links are byte offsets from ele[0]; convert to a pointer */
	element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
			(int)&startup->ele[0]);
	base_ele = element;
	previous_ptr = &(startup->table[hash_index]);
	while(element > &startup->ele[0]) {
		if (((int)element + sizeof(struct tws_startup_ptr))
			> ((int)startup + startup->tws_hash_size)) {
			return page_cache_bits;
		}
		entry = (tws_startup_ele_t)
			((int)element->element
				+ (int)&startup->array[0]);
		if((((int)entry + sizeof(struct tws_startup_ele))
			> ((int)startup + startup->tws_hash_size))
			|| ((int)entry < (int)startup)) {
			return page_cache_bits;
		}
		/* does addr fall inside this entry's 32-page window? */
		if ((addr >= entry->page_addr) &&
			(addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache >> startup_shift;
			/* don't dump the pages, unless the addresses */
			/* line up perfectly. The cache may be used */
			/* by other mappings */
			entry->page_cache &= (1 << startup_shift) - 1;
			if(addr == entry->page_addr) {
				/* exact match: unlink this element from the chain */
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		/* does the entry overlap the tail of addr's 32-page span? */
		next_addr = addr + 0x1F000;
		if ((next_addr >= entry->page_addr) &&
			(next_addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (next_addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
			entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
			if(entry->page_cache == 0) {
				/* entry fully consumed: unlink it */
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		previous_ptr = &(element->next);
		element = (tws_startup_ptr_t)
				((int) element->next + (int) &startup->ele[0]);
	}

	return page_cache_bits;
}
1635
1636 kern_return_t
1637 tws_send_startup_info(
1638 task_t task)
1639 {
1640
1641 tws_hash_t tws;
1642
1643 task_lock(task);
1644 tws = task->dynamic_working_set;
1645 task_unlock(task);
1646 if(tws == NULL) {
1647 return KERN_FAILURE;
1648 }
1649 return tws_internal_startup_send(tws);
1650 }
1651
1652
1653 kern_return_t
1654 tws_internal_startup_send(
1655 tws_hash_t tws)
1656 {
1657
1658 tws_startup_t scache;
1659
1660 if(tws == NULL) {
1661 return KERN_FAILURE;
1662 }
1663 tws_lock(tws);
1664 /* used to signal write or release depending on state of tws */
1665 if(tws->startup_cache) {
1666 vm_offset_t startup_buf;
1667 vm_size_t size;
1668 startup_buf = (vm_offset_t)tws->startup_cache;
1669 size = tws->startup_cache->tws_hash_size;
1670 tws->startup_cache = 0;
1671 tws_unlock(tws);
1672 kmem_free(kernel_map, startup_buf, size);
1673 return KERN_SUCCESS;
1674 }
1675 if(tws->startup_name == NULL) {
1676 tws_unlock(tws);
1677 return KERN_FAILURE;
1678 }
1679 scache = tws_create_startup_list(tws);
1680 if(scache == NULL)
1681 return KERN_FAILURE;
1682 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1683 (caddr_t) scache, scache->tws_hash_size,
1684 tws->mod, tws->fid);
1685 kfree(scache, scache->tws_hash_size);
1686 kfree(tws->startup_name, tws->startup_name_length);
1687 tws->startup_name = NULL;
1688 tws_unlock(tws);
1689 return KERN_SUCCESS;
1690 }
1691
1692 kern_return_t
1693 tws_handle_startup_file(
1694 task_t task,
1695 unsigned int uid,
1696 char *app_name,
1697 void *app_vp,
1698 boolean_t *new_info)
1699
1700 {
1701 tws_startup_t startup;
1702 vm_offset_t cache_size;
1703 kern_return_t error;
1704 int fid;
1705 int mod;
1706
1707 *new_info = FALSE;
1708 /* don't pre-heat kernel task */
1709 if(task == kernel_task)
1710 return KERN_SUCCESS;
1711 error = bsd_read_page_cache_file(uid, &fid,
1712 &mod, app_name,
1713 app_vp,
1714 (vm_offset_t *) &startup,
1715 &cache_size);
1716 if(error) {
1717 return KERN_FAILURE;
1718 }
1719 if(startup == NULL) {
1720 /* Entry for app does not exist, make */
1721 /* one */
1722 /* we will want our own copy of the shared */
1723 /* regions to pick up a true picture of all */
1724 /* the pages we will touch. */
1725 if((lsf_zone->count * lsf_zone->elem_size)
1726 > (lsf_zone->max_size >> 1)) {
1727 /* We don't want to run out of shared memory */
1728 /* map entries by starting too many private versions */
1729 /* of the shared library structures */
1730 return KERN_SUCCESS;
1731 }
1732 *new_info = TRUE;
1733
1734 error = tws_write_startup_file(task,
1735 fid, mod, app_name, uid);
1736 if(error)
1737 return error;
1738
1739 } else {
1740 error = tws_read_startup_file(task,
1741 (tws_startup_t)startup,
1742 cache_size);
1743 if(error) {
1744 kmem_free(kernel_map,
1745 (vm_offset_t)startup, cache_size);
1746 return error;
1747 }
1748 }
1749 return KERN_SUCCESS;
1750 }
1751
1752 kern_return_t
1753 tws_write_startup_file(
1754 task_t task,
1755 int fid,
1756 int mod,
1757 char *name,
1758 unsigned int uid)
1759 {
1760 tws_hash_t tws;
1761 unsigned int string_length;
1762
1763 string_length = strlen(name);
1764
1765 restart:
1766 task_lock(task);
1767 tws = task->dynamic_working_set;
1768 task_unlock(task);
1769
1770 if(tws == NULL) {
1771 kern_return_t error;
1772
1773 /* create a dynamic working set of normal size */
1774 if((error = task_working_set_create(task, 0, 0, TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
1775 return error;
1776 /* we need to reset tws and relock */
1777 goto restart;
1778 }
1779 tws_lock(tws);
1780
1781 if(tws->startup_name != NULL) {
1782 tws_unlock(tws);
1783 return KERN_FAILURE;
1784 }
1785
1786 tws->startup_name = (char *)
1787 kalloc((string_length + 1) * (sizeof(char)));
1788 if(tws->startup_name == NULL) {
1789 tws_unlock(tws);
1790 return KERN_FAILURE;
1791 }
1792
1793 bcopy(name, (char *)tws->startup_name, string_length + 1);
1794 tws->startup_name_length = (string_length + 1) * sizeof(char);
1795 tws->uid = uid;
1796 tws->fid = fid;
1797 tws->mod = mod;
1798
1799 tws_unlock(tws);
1800 return KERN_SUCCESS;
1801 }
1802
1803 unsigned long tws_read_startup_file_rejects = 0;
1804
/*
 * tws_read_startup_file:  validate a startup cache image read from the
 * app_profile file, size the task's working set to hold it, and install
 * it as tws->startup_cache.  On every path that returns an error, the
 * caller still owns (and must free) `startup'; on KERN_SUCCESS paths
 * the buffer is either installed here or freed here.
 */
kern_return_t
tws_read_startup_file(
		task_t			task,
		tws_startup_t		startup,
		vm_offset_t		cache_size)
{
	tws_hash_t		tws;
	int			lines;
	int			old_exp_count;
	unsigned int		ele_count;

restart:
	task_lock(task);
	tws = task->dynamic_working_set;

	/* create a dynamic working set to match file size */

	/* start with total size of the data we got from app_profile */
	ele_count = cache_size;
	/* skip the startup header */
	ele_count -= sizeof(struct tws_startup);
	/*
	 * For each startup cache entry, we have one of these:
	 *	tws_startup_ptr_t startup->table[];
	 *	struct tws_startup_ptr startup->ele[];
	 *	struct tws_startup_ele startup->array[];
	 */
	ele_count /= (sizeof (tws_startup_ptr_t) +
		      sizeof (struct tws_startup_ptr) +
		      sizeof (struct tws_startup_ele));

	/*
	 * Sanity check: make sure the value for startup->array_size
	 * that we read from the app_profile file matches the size
	 * of the data we read from disk.  If it doesn't match, we
	 * can't trust the data and we just drop it all.
	 */
	if (cache_size < sizeof(struct tws_startup) ||
	    startup->array_size != ele_count) {
		tws_read_startup_file_rejects++;
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	}

	/*
	 * We'll create the task working set with the default row size
	 * (TWS_ARRAY_SIZE), so this will give us the number of lines
	 * we need to store all the data from the app_profile startup
	 * cache.
	 */
	lines = ele_count / TWS_ARRAY_SIZE;

	if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
		lines = TWS_SMALL_HASH_LINE_COUNT;
		/* too small to be worth keeping: drop the cache */
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	} else {
		/* round up to a whole multiple of TWS_HASH_LINE_COUNT */
		old_exp_count = lines/TWS_HASH_LINE_COUNT;
		if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
			lines = (old_exp_count + 1)
					* TWS_HASH_LINE_COUNT;
		}
		if(tws == NULL) {
			kern_return_t error;

			task_unlock(task);
			/* no working set yet: create one sized to fit, then retry */
			if ((error = task_working_set_create(task, lines, 0, TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
				return error;
			/* we need to reset tws and relock */
			goto restart;
		} else {
			task_unlock(task);
			tws_expand_working_set(
				(void *)tws, lines, TRUE);
		}
	}

	tws_lock(tws);

	if(tws->startup_cache != NULL) {
		/* caller frees `startup' when we report failure */
		tws_unlock(tws);
		return KERN_FAILURE;
	}


	/* now need to fix up internal table pointers */
	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
			(startup->array_size * sizeof(tws_startup_ptr_t)));
	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
			(startup->array_size * sizeof(struct tws_startup_ptr)));
	/* the allocation size and file size should be the same */
	/* just in case they're not, make sure we dealloc correctly  */
	startup->tws_hash_size = cache_size;

	tws->startup_cache = startup;
	tws_unlock(tws);
	return KERN_SUCCESS;
}
1909
1910
1911 void
1912 tws_hash_ws_flush(tws_hash_t tws) {
1913 tws_startup_t scache;
1914 if(tws == NULL) {
1915 return;
1916 }
1917 tws_lock(tws);
1918 if(tws->startup_name != NULL) {
1919 scache = tws_create_startup_list(tws);
1920 if(scache == NULL) {
1921 /* dump the name cache, we'll */
1922 /* get it next time */
1923 kfree(tws->startup_name, tws->startup_name_length);
1924 tws->startup_name = NULL;
1925 tws_unlock(tws);
1926 return;
1927 }
1928 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1929 (caddr_t) scache, scache->tws_hash_size,
1930 tws->mod, tws->fid);
1931 kfree(scache, scache->tws_hash_size);
1932 kfree(tws->startup_name, tws->startup_name_length);
1933 tws->startup_name = NULL;
1934 }
1935 tws_unlock(tws);
1936 return;
1937 }
1938
/*
 * tws_hash_destroy:  release everything owned by a working set: the
 * startup cache, any pending startup-file state, the per-line object
 * references, and finally the hash arrays and the tws_hash itself.
 */
void
tws_hash_destroy(tws_hash_t tws)
{
	unsigned int	i,k;

	if(tws->startup_cache != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)tws->startup_cache,
			tws->startup_cache->tws_hash_size);
		tws->startup_cache = NULL;
	}
	if(tws->startup_name != NULL) {
		/* flush the pending startup file; on success this frees
		 * and clears startup_name as a side effect */
		tws_internal_startup_send(tws);
	}
	for (i=0; i<tws->number_of_lines; i++) {
		for(k=0; k<tws->expansion_count; k++) {
			/* clear the object refs */
			tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
		}
	}
	i = 0;
	while (i < tws->expansion_count) {

		kfree(tws->table[i],
		      sizeof(tws_hash_ptr_t)
		      * tws->number_of_lines
		      * tws->number_of_elements);
		kfree(tws->table_ele[i],
		      sizeof(struct tws_hash_ptr)
		      * tws->number_of_lines
		      * tws->number_of_elements);
		kfree(tws->alt_ele[i],
		      sizeof(struct tws_hash_ptr)
		      * tws->number_of_lines
		      * tws->number_of_elements);
		kfree(tws->cache[i],
		      sizeof(struct tws_hash_line) * tws->number_of_lines);
		i++;
	}
	if(tws->startup_name != NULL) {
		/* only reached if the startup send did not consume the name */
		kfree(tws->startup_name, tws->startup_name_length);
	}
	kfree(tws, sizeof(struct tws_hash));
}
1983
1984 void
1985 tws_hash_clear(tws_hash_t tws)
1986 {
1987 unsigned int i, k;
1988
1989 for (i=0; i<tws->number_of_lines; i++) {
1990 for(k=0; k<tws->expansion_count; k++) {
1991 /* clear the object refs */
1992 tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
1993 }
1994 }
1995 }
1996
1997 kern_return_t
1998 task_working_set_create(
1999 task_t task,
2000 unsigned int lines,
2001 unsigned int rows,
2002 unsigned int style)
2003 {
2004
2005 if (lines == 0) {
2006 lines = TWS_HASH_LINE_COUNT;
2007 }
2008 if (rows == 0) {
2009 rows = TWS_ARRAY_SIZE;
2010 }
2011 if (style == TWS_HASH_STYLE_DEFAULT) {
2012 style = TWS_HASH_STYLE_BASIC;
2013 }
2014 task_lock(task);
2015 if(task->dynamic_working_set != 0) {
2016 task_unlock(task);
2017 return(KERN_FAILURE);
2018 } else if((task->dynamic_working_set =
2019 tws_hash_create(lines, rows, style)) == 0) {
2020 task_unlock(task);
2021 return(KERN_NO_SPACE);
2022 }
2023 task_unlock(task);
2024 return KERN_SUCCESS;
2025 }
2026
2027
2028 /* Internal use only routines */
2029
2030
2031 /*
2032 * internal sub-function for address space lookup
2033 * returns the target element and the address of the
2034 * previous pointer The previous pointer is the address
2035 * of the pointer pointing to the target element.
2036 * TWS must be locked
2037 */
2038
void
tws_traverse_address_hash_list (
	tws_hash_t 		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		tws_hash_ele_t ele;
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
		while(cache_ele != NULL) {
			/* the chain mixes object-hash and addr-hash entries;
			 * only entries tagged TWS_ADDR_HASH are of interest here */
			if(((unsigned int)
				cache_ele->element & TWS_ADDR_HASH) == 0) {
				*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			/* strip the tag bit to recover the element pointer */
			ele = (tws_hash_ele_t)((unsigned int)
				cache_ele->element & ~TWS_ADDR_HASH);
			if ((ele == 0) || (ele->object == 0)) {
				/* A little clean-up of empty elements:
				 * unlink and return to the free pool */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = base_ele;
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}

			/* the address must fall within the element's 32-page
			 * window, and object/offset/map must match when given */
			if ((ele->page_addr <= page_addr)
				&& (page_addr <= (ele->page_addr +
				(vm_offset_t)TWS_INDEX_MASK))
				&& ((object == NULL)
					|| ((object == ele->object)
					&& (offset == ele->offset)
					&& (map == ele->map)))) {
				if(exclusive_addr) {
					int delta;
					delta = ((page_addr - ele->page_addr)
						>> 12);
					/* only match when that exact page's
					 * bit is set in the bitmap */
					if((1 << delta) & ele->page_cache) {
						/* We've found a match */
						*target_ele = cache_ele;
						*free_list =
						    (tws_hash_ptr_t *)
						    &(tws->free_hash_ele[k]);
						return;
					}
				} else {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = (tws_hash_ptr_t *)
						&(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
2123
2124
2125 /*
2126 * internal sub-function for object space lookup
2127 * returns the target element and the address of the
2128 * previous pointer The previous pointer is the address
2129 * of the pointer pointing to the target element.
2130 * TWS must be locked
2131 */
2132
2133
void
tws_traverse_object_hash_list (
	tws_hash_t		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		pagemask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = &(tws->table[k][index]);
		while(cache_ele != NULL) {
			/* skip address-hash entries sharing this chain */
			if((((unsigned int)cache_ele->element)
				& TWS_ADDR_HASH) != 0) {
				*previous_ptr = &(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			if ((cache_ele->element == 0) ||
				(cache_ele->element->object == 0)) {
				/* A little clean-up of empty elements:
				 * unlink and return to the free pool */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = tws->table[k][index];
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}
			/* compare against the offset within the 32-page frame */
			if ((cache_ele->element->object == object)
				&& (cache_ele->element->offset ==
				(offset - (offset & ~TWS_HASH_OFF_MASK)))) {
				if((cache_ele->element->page_cache & pagemask)
					|| (pagemask == 0xFFFFFFFF)) {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = &(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
2197
2198
2199 /*
2200 * For a given object/offset, discover whether the indexed 32 page frame
2201 * containing the object/offset exists and if their are at least threshold
2202 * pages present. Returns true if population meets threshold.
2203 */
2204 int
2205 tws_test_for_community(
2206 tws_hash_t tws,
2207 vm_object_t object,
2208 vm_object_offset_t offset,
2209 unsigned int threshold,
2210 unsigned int *pagemask)
2211 {
2212 int index;
2213 tws_hash_ptr_t cache_ele;
2214 tws_hash_ptr_t *trailer;
2215 tws_hash_ptr_t *free_list;
2216 int community = 0;
2217
2218 index = do_tws_hash(object, offset,
2219 tws->number_of_elements, tws->number_of_lines);
2220 tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
2221 &cache_ele, &trailer, &free_list);
2222
2223 if(cache_ele != NULL) {
2224 int i;
2225 unsigned int ctr;
2226 ctr = 0;
2227 for(i=1; i!=0; i=i<<1) {
2228 if(i & cache_ele->element->page_cache)
2229 ctr++;
2230 if(ctr == threshold) {
2231 community = 1;
2232 *pagemask = cache_ele->element->page_cache;
2233 break;
2234 }
2235 }
2236 }
2237
2238 return community;
2239
2240 }
2241
2242
2243 /*
2244 * Gets new hash element for object hash from free pools
2245 * TWS must be locked
2246 */
2247
2248 tws_hash_ptr_t
2249 new_obj_hash(
2250 tws_hash_t tws,
2251 unsigned int set,
2252 unsigned int index)
2253 {
2254 tws_hash_ptr_t element;
2255
2256 if(tws->obj_free_count[set] < tws->number_of_lines * tws->number_of_elements) {
2257 element = &(tws->table_ele[set][tws->obj_free_count[set]]);
2258 tws->obj_free_count[set]+=1;
2259 } else if(tws->free_hash_ele[set] == NULL) {
2260 return NULL;
2261 } else {
2262 element = tws->free_hash_ele[set];
2263 if(element == NULL)
2264 return element;
2265 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2266 }
2267 element->element = 0;
2268 element->next = tws->table[set][index];
2269 tws->table[set][index] = element;
2270 return element;
2271 }
2272
2273 /*
2274 * Gets new hash element for addr hash from free pools
2275 * TWS must be locked
2276 */
2277
2278 tws_hash_ptr_t
2279 new_addr_hash(
2280 tws_hash_t tws,
2281 unsigned int set,
2282 unsigned int index)
2283 {
2284 tws_hash_ptr_t element;
2285
2286 if(tws->addr_free_count[set]
2287 < tws->number_of_lines * tws->number_of_elements) {
2288 element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
2289 tws->addr_free_count[set]+=1;
2290 } else if(tws->free_hash_ele[set] == NULL) {
2291 return NULL;
2292 } else {
2293 element = tws->free_hash_ele[set];
2294 if(element == NULL)
2295 return element;
2296 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2297 }
2298 element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
2299 element->next = tws->table[set][index];
2300 tws->table[set][index] = element;
2301 return element;
2302 }