]> git.saurik.com Git - apple/xnu.git/blob - osfmk/vm/task_working_set.c
bd3ca3a68f76db0b4c316a4b56a34f661eb7145b
[apple/xnu.git] / osfmk / vm / task_working_set.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 */
30 /*
31 * File: vm/task_working_set.c
32 * Author: Chris Youngworth
33 * Date: 2001
34 *
35 * Working set detection and maintainence module
36 */
37
38
39 #include <mach/mach_types.h>
40 #include <mach/memory_object.h>
41 #include <mach/shared_memory_server.h>
42 #include <vm/task_working_set.h>
43 #include <vm/vm_kern.h>
44 #include <vm/vm_map.h>
45 #include <vm/vm_page.h>
46 #include <vm/vm_pageout.h>
47 #include <kern/sched.h>
48 #include <kern/kalloc.h>
49
50 #include <vm/vm_protos.h>
51
52 /*
53 * LP64todo - Task Working Set Support is for 32-bit only
54 */
55 extern zone_t lsf_zone;
56
57 /* declarations for internal use only routines */
58 int startup_miss = 0;
59
60 tws_hash_t
61 tws_hash_create(
62 unsigned int lines,
63 unsigned int rows,
64 unsigned int style);
65
66 kern_return_t
67 tws_write_startup_file(
68 task_t task,
69 int fid,
70 int mod,
71 char *name,
72 unsigned int string_length);
73
74 kern_return_t
75 tws_read_startup_file(
76 task_t task,
77 tws_startup_t startup,
78 vm_offset_t cache_size);
79
80 tws_startup_t
81 tws_create_startup_list(
82 tws_hash_t tws);
83
84 unsigned int
85 tws_startup_list_lookup(
86 tws_startup_t startup,
87 vm_offset_t addr);
88
89 kern_return_t
90 tws_internal_startup_send(
91 tws_hash_t tws);
92
93 void
94 tws_hash_line_clear(
95 tws_hash_t tws,
96 tws_hash_line_t hash_line,
97 vm_object_t object,
98 boolean_t live);
99
100 void
101 tws_hash_clear(
102 tws_hash_t tws);
103
104 void
105 tws_traverse_address_hash_list (
106 tws_hash_t tws,
107 unsigned int index,
108 vm_offset_t page_addr,
109 vm_object_t object,
110 vm_object_offset_t offset,
111 vm_map_t map,
112 tws_hash_ptr_t *target_ele,
113 tws_hash_ptr_t **previous_ptr,
114 tws_hash_ptr_t **free_list,
115 unsigned int exclusive_addr);
116
117 void
118 tws_traverse_object_hash_list (
119 tws_hash_t tws,
120 unsigned int index,
121 vm_object_t object,
122 vm_object_offset_t offset,
123 unsigned int pagemask,
124 tws_hash_ptr_t *target_ele,
125 tws_hash_ptr_t **previous_ptr,
126 tws_hash_ptr_t **free_list);
127
128 tws_hash_ptr_t
129 new_addr_hash(
130 tws_hash_t tws,
131 unsigned int set,
132 unsigned int index);
133
134 tws_hash_ptr_t
135 new_obj_hash(
136 tws_hash_t tws,
137 unsigned int set,
138 unsigned int index);
139
140 int tws_test_for_community(
141 tws_hash_t tws,
142 vm_object_t object,
143 vm_object_offset_t offset,
144 unsigned int threshold,
145 unsigned int *pagemask);
146
147 kern_return_t
148 tws_internal_lookup(
149 tws_hash_t tws,
150 vm_object_offset_t offset,
151 vm_object_t object,
152 tws_hash_line_t *line);
153
154 /* Note: all of the routines below depend on the associated map lock for */
155 /* synchronization, the map lock will be on when the routines are called */
156 /* and on when they return */
157
158 tws_hash_t
159 tws_hash_create(
160 unsigned int lines,
161 unsigned int rows,
162 unsigned int style)
163 {
164 tws_hash_t tws;
165
166 if ((style != TWS_HASH_STYLE_BASIC) &&
167 (style != TWS_HASH_STYLE_BASIC)) {
168 return((tws_hash_t)NULL);
169 }
170
171
172 tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
173 if(tws == (tws_hash_t)NULL)
174 return tws;
175
176 if((tws->table[0] = (tws_hash_ptr_t *)
177 kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
178 == NULL) {
179 kfree(tws, sizeof(struct tws_hash));
180 return (tws_hash_t)NULL;
181 }
182 if((tws->table_ele[0] = (tws_hash_ptr_t)
183 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
184 == NULL) {
185 kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
186 kfree(tws, sizeof(struct tws_hash));
187 return (tws_hash_t)NULL;
188 }
189 if((tws->alt_ele[0] = (tws_hash_ptr_t)
190 kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
191 == NULL) {
192 kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
193 kfree(tws->table_ele[0],
194 sizeof(struct tws_hash_ptr) * lines * rows);
195 kfree(tws, sizeof(struct tws_hash));
196 return (tws_hash_t)NULL;
197 }
198 if((tws->cache[0] = (struct tws_hash_line *)
199 kalloc(sizeof(struct tws_hash_line) * lines))
200 == NULL) {
201 kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
202 kfree(tws->table_ele[0],
203 sizeof(struct tws_hash_ptr) * lines * rows);
204 kfree(tws->alt_ele[0],
205 sizeof(struct tws_hash_ptr) * lines * rows);
206 kfree(tws, sizeof(struct tws_hash));
207 return (tws_hash_t)NULL;
208 }
209 tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
210 tws->obj_free_count[0] = 0;
211 tws->addr_free_count[0] = 0;
212
213 /* most defaults are such that a bzero will initialize */
214 bzero((char *)tws->table[0],sizeof(tws_hash_ptr_t)
215 * lines * rows);
216 bzero((char *)tws->table_ele[0],sizeof(struct tws_hash_ptr)
217 * lines * rows);
218 bzero((char *)tws->alt_ele[0],sizeof(struct tws_hash_ptr)
219 * lines * rows);
220 bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
221 * lines);
222
223 mutex_init(&tws->lock, 0);
224 tws->style = style;
225 tws->current_line = 0;
226 tws->pageout_count = 0;
227 tws->line_count = 0;
228 tws->startup_cache = NULL;
229 tws->startup_name = NULL;
230 tws->number_of_lines = lines;
231 tws->number_of_elements = rows;
232 tws->expansion_count = 1;
233 tws->lookup_count = 0;
234 tws->insert_count = 0;
235 tws->time_of_creation = sched_tick;
236
237 return tws;
238 }
239
240
241 extern vm_page_t
242 vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);
243
244
/*
 * tws_hash_line_clear:
 *	Evict every element on one cache line.  For each occupied element
 *	this unlinks its (up to two) address-hash entries and its
 *	object-hash entry onto the per-set free lists, optionally strips
 *	the physical mappings for the element's resident pages, and, for
 *	signal-style hashes, drops the object/map references taken by
 *	tws_insert().  Called with the tws lock held.
 */
void
tws_hash_line_clear(
	tws_hash_t tws,
	tws_hash_line_t hash_line,
	__unused vm_object_t object,
	boolean_t live)
{
	struct tws_hash_ele *hash_ele;
	struct tws_hash_ptr **trailer;
	struct tws_hash_ptr **free_list;
	tws_hash_ele_t addr_ele;
	int index;
	unsigned int i, j;
	int dump_pmap;
	int hash_loop;

	/*
	 * Decide whether to dump pmap entries this pass.  Mappings are
	 * stripped until the line count catches up with the configured
	 * number of lines, then only once per pageout-scan event (the
	 * counter ticks in vm_pageout_scan).
	 */
	if (tws->line_count < tws->number_of_lines) {
		tws->line_count++;
		dump_pmap = 1;
	} else {
		if (tws->pageout_count != vm_pageout_scan_event_counter) {
			tws->pageout_count =
				vm_pageout_scan_event_counter;
			tws->line_count = 0;
			dump_pmap = 1;
		} else {
			dump_pmap = 0;
		}
	}
	hash_line->ele_count = 0;

	for (i = 0; i < tws->number_of_elements; i++) {
		hash_loop = 0;
		hash_ele = &(hash_line->list[i]);
		if (hash_ele->object != 0) {

			vm_object_offset_t local_off = 0;
			tws_hash_ptr_t cache_ele;

			/*
			 * Unlink the primary address-hash entry for this
			 * element.  TWS_ADDR_HASH tags address entries;
			 * mask it off before comparing with the element.
			 */
			index = alt_tws_hash(
				hash_ele->page_addr & TWS_ADDR_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);
			if (cache_ele != NULL) {
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if (addr_ele != hash_ele)
					panic("tws_hash_line_clear:"
							" out of sync\n");
				/* splice out of chain, push on free list */
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}

			/*
			 * The alternate address hash (page_addr - 0x1f000)
			 * mirrors the COW-motivated double hashing done in
			 * tws_insert(); unlink that entry too if present.
			 */
			index = alt_tws_hash(
				(hash_ele->page_addr - 0x1f000)
						& TWS_ADDR_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);

			if (cache_ele != NULL) {
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if (addr_ele != hash_ele)
					panic("tws_hash_line_clear: "
							"out of sync\n");
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}

			/*
			 * For a live eviction, walk the 32-bit page_cache
			 * bitmap (one bit per page in the 32-page block)
			 * and remove the physical mapping of each resident,
			 * unwired page from the current map's pmap.
			 */
			if ((hash_ele->map != NULL) && (live)) {
				vm_page_t p;

#if 0
				if (object != hash_ele->object) {
					if (object)
						vm_object_unlock(object);
					vm_object_lock(hash_ele->object);
				}
#endif
				if (dump_pmap == 1) {
					for (j = 0x1; j != 0; j = j<<1) {
						if (j & hash_ele->page_cache) {
							p = vm_page_lookup_nohint(hash_ele->object,
								hash_ele->offset + local_off);
							if ((p != NULL) && (p->wire_count == 0)) {
								pmap_remove_some_phys((pmap_t)vm_map_pmap(current_map()),
									p->phys_page);
							}
						}
						local_off += PAGE_SIZE_64;
					}
				}
#if 0
				if (object != hash_ele->object) {
					vm_object_unlock(hash_ele->object);
					if (object)
						vm_object_lock(object);
				}
#endif
			}

			/* signal-style hashes hold real refs; drop them */
			if (tws->style == TWS_HASH_STYLE_SIGNAL) {
				vm_object_deallocate(hash_ele->object);
				vm_map_deallocate(hash_ele->map);
			}

			/* finally, unlink the object-hash entry */
			index = do_tws_hash(hash_ele->object, hash_ele->offset,
					tws->number_of_elements,
					tws->number_of_lines);

			tws_traverse_object_hash_list(tws,
				index, hash_ele->object, hash_ele->offset,
				0xFFFFFFFF, &cache_ele, &trailer, &free_list);
			if ((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}
			hash_ele->object = 0;
		}
	}
}
384
/*
 * tws_internal_lookup:
 *	Look up object/offset in the working-set hash and, on a hit,
 *	return the containing cache line through 'line'.  Returns
 *	KERN_SUCCESS on a hit (or for private objects, which are never
 *	cached), KERN_FAILURE on a miss, and KERN_OPERATION_TIMED_OUT
 *	when an aged (>45 tick-shifted units) startup cache suggests the
 *	caller should give up on this hash.  Caller holds the tws lock.
 */
kern_return_t
tws_internal_lookup(
	tws_hash_t tws,
	vm_object_offset_t offset,
	vm_object_t object,
	tws_hash_line_t *line)
{
	int index;
	int loop;
	int set;
	int ele_line;
	vm_offset_t pagenum;
	tws_hash_ptr_t cache_ele;
	tws_hash_ptr_t *trailer;
	tws_hash_ptr_t *free_list;

	/* don't cache private objects */
	if(object->private)
		return KERN_SUCCESS;

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);
	loop = 0;

	/* lookup_count wrapped to zero: restart the insert ratio too */
	tws->lookup_count++;
	if(tws->lookup_count == 0)
		tws->insert_count = 0;
	if(tws->startup_name != NULL) {
		int age_of_cache;
		age_of_cache = ((sched_tick
				- tws->time_of_creation) >> SCHED_TICK_SHIFT);
		if (age_of_cache > 45) {
			return KERN_OPERATION_TIMED_OUT;
		}
	}

	/*
	 * Lookups heavily outnumbering inserts on an old startup cache is
	 * another signal that the cache has outlived its usefulness.
	 */
	if(tws->lookup_count > (4 * tws->expansion_count
		* tws->number_of_elements * tws->number_of_lines) &&
		(tws->lookup_count > (2 * tws->insert_count))) {
		if(tws->startup_cache) {
			int age_of_cache;
			age_of_cache = ((sched_tick
				- tws->time_of_creation) >> SCHED_TICK_SHIFT);
			if (age_of_cache > 45) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
	}

	/* bit mask selecting this page within its 32-page block */
	pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
	pagenum = pagenum >> 12;
	pagenum = 1 << pagenum;	/* get the appropriate page in 32 page block */
	tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
		&cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		/*
		 * NOTE(review): the decode here subtracts only 'set',
		 * whereas tws_insert() encodes line as
		 * current_line + set * number_of_lines and decodes with
		 * 'line - set * number_of_lines'.  This looks like an
		 * off-by-factor bug whenever set > 0 — confirm against
		 * the element 'line' encoding before relying on it.
		 */
		set = cache_ele->element->line/tws->number_of_lines;
		ele_line = cache_ele->element->line - set;
		*line = &tws->cache[set][ele_line];
		return KERN_SUCCESS;
	}

	return KERN_FAILURE;


}
450
451 kern_return_t
452 tws_lookup(
453 tws_hash_t tws,
454 vm_object_offset_t offset,
455 vm_object_t object,
456 tws_hash_line_t *line)
457 {
458 kern_return_t kr;
459
460 if(!tws_lock_try(tws)) {
461 return KERN_FAILURE;
462 }
463 kr = tws_internal_lookup(tws,
464 offset, object, line);
465 tws_unlock(tws);
466 return kr;
467 }
468
/*
 * tws_expand_working_set:
 *	Grow the working-set hash to 'line_count' lines.  A fresh hash is
 *	built, the old entries are re-inserted into it (unless dump_data),
 *	and then the *contents* of the two headers are swapped via 'temp'
 *	so that the task's existing pointer keeps addressing the live
 *	hash without taking the task lock.  The displaced (old) arrays
 *	travel with new_tws into tws_hash_destroy().  Returns KERN_FAILURE
 *	if no growth is needed, KERN_NO_SPACE if allocation fails.
 */
kern_return_t
tws_expand_working_set(
	tws_hash_t old_tws,
	unsigned int line_count,
	boolean_t dump_data)
{
	tws_hash_t new_tws;
	unsigned int i,j,k;
	struct	tws_hash temp;

	/* Note we do an elaborate dance to preserve the header that */
	/* task is pointing to. In this way we can avoid taking a task */
	/* lock every time we want to access the tws */

	if (old_tws->number_of_lines >= line_count) {
		return KERN_FAILURE;
	}
	if((new_tws = tws_hash_create(line_count,
			old_tws->number_of_elements, old_tws->style)) == 0) {
		return(KERN_NO_SPACE);
	}
	tws_lock(old_tws);

	/*
	 * Replay every cached page of every element (across all expansion
	 * sets) into the new hash.  page_cache is a 32-bit bitmap, one
	 * bit per page of the element's 32-page block.
	 */
	if(!dump_data) {
		for(i = 0; i<old_tws->number_of_lines; i++) {
			for(j = 0; j<old_tws->number_of_elements; j++) {
				for(k = 0; k<old_tws->expansion_count; k++) {
					tws_hash_ele_t		entry;
					vm_object_offset_t	paddr;
					unsigned int		page_index;
					entry = &old_tws->cache[k][i].list[j];
					if(entry->object != 0) {
						paddr = 0;
						for(page_index = 1; page_index != 0;
							page_index = page_index << 1) {
							if (entry->page_cache & page_index) {
								tws_insert(new_tws,
									entry->offset+paddr,
									entry->object,
									entry->page_addr+paddr,
									entry->map);
							}
							paddr+=PAGE_SIZE;
						}

					}
				}
			}
		}
	}

	/* stash new_tws's header fields ... */
	temp.style = new_tws->style;
	temp.current_line = new_tws->current_line;
	temp.pageout_count = new_tws->pageout_count;
	temp.line_count = new_tws->line_count;
	temp.number_of_lines = new_tws->number_of_lines;
	temp.number_of_elements = new_tws->number_of_elements;
	temp.expansion_count = new_tws->expansion_count;
	temp.lookup_count = new_tws->lookup_count;
	temp.insert_count = new_tws->insert_count;
	for(i = 0; i<new_tws->expansion_count; i++) {
		temp.obj_free_count[i] = new_tws->obj_free_count[i];
		temp.addr_free_count[i] = new_tws->addr_free_count[i];
		temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
		temp.table[i] = new_tws->table[i];
		temp.table_ele[i] = new_tws->table_ele[i];
		temp.alt_ele[i] = new_tws->alt_ele[i];
		temp.cache[i] = new_tws->cache[i];
	}

	/* ... move old_tws's fields into new_tws (to be destroyed) ... */
	new_tws->style = old_tws->style;
	new_tws->current_line = old_tws->current_line;
	new_tws->pageout_count = old_tws->pageout_count;
	new_tws->line_count = old_tws->line_count;
	new_tws->number_of_lines = old_tws->number_of_lines;
	new_tws->number_of_elements = old_tws->number_of_elements;
	new_tws->expansion_count = old_tws->expansion_count;
	new_tws->lookup_count = old_tws->lookup_count;
	new_tws->insert_count = old_tws->insert_count;
	for(i = 0; i<old_tws->expansion_count; i++) {
		new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
		new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
		new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
		new_tws->table[i] = old_tws->table[i];
		new_tws->table_ele[i] = old_tws->table_ele[i];
		new_tws->alt_ele[i] = old_tws->alt_ele[i];
		new_tws->cache[i] = old_tws->cache[i];
	}

	/*
	 * ... and land the stashed (grown) fields in old_tws, which the
	 * task still points at.  free_hash_ele is deliberately NULLed:
	 * the free chains threaded through the moved arrays are rebuilt
	 * lazily.  (The stray ';;' below are harmless empty statements,
	 * preserved from the original.)
	 */
	old_tws->style = temp.style;
	old_tws->current_line = temp.current_line;
	old_tws->pageout_count = temp.pageout_count;
	old_tws->line_count = temp.line_count;
	old_tws->number_of_lines = temp.number_of_lines;
	old_tws->number_of_elements = temp.number_of_elements;
	old_tws->expansion_count = temp.expansion_count;
	old_tws->lookup_count = temp.lookup_count;
	old_tws->insert_count = temp.insert_count;
	for(i = 0; i<temp.expansion_count; i++) {
		old_tws->obj_free_count[i] = temp.obj_free_count[i];;
		old_tws->addr_free_count[i] = temp.addr_free_count[i];;
		old_tws->free_hash_ele[i] = NULL;
		old_tws->table[i] = temp.table[i];
		old_tws->table_ele[i] = temp.table_ele[i];
		old_tws->alt_ele[i] = temp.alt_ele[i];
		old_tws->cache[i] = temp.cache[i];
	}

	tws_hash_destroy(new_tws);
	tws_unlock(old_tws);
	return	KERN_SUCCESS;
}
581
582 tws_hash_t test_tws = 0;
583
/*
 * tws_insert:
 *	Record that page_addr (object/offset, in map) is part of the task's
 *	working set.  The element is found or created via the object hash;
 *	address-hash entries (tagged with TWS_ADDR_HASH) are maintained in
 *	parallel so COW'd pages can be matched by address.  When the
 *	current cache line is full the hash advances lines and, if needed,
 *	grows a new expansion set in place.  Returns KERN_SUCCESS,
 *	KERN_FAILURE (lock contention or no room), KERN_NO_SPACE (caller
 *	should expand the line count), or KERN_OPERATION_TIMED_OUT (aged
 *	startup cache).  Called with the map lock held.
 */
kern_return_t
tws_insert(
	tws_hash_t tws,
	vm_object_offset_t offset,
	vm_object_t object,
	vm_offset_t page_addr,
	vm_map_t map)
{
	unsigned int index;
	unsigned int alt_index;
	unsigned int index_enum[2];
	unsigned int ele_index;
	tws_hash_ptr_t cache_ele;
	tws_hash_ptr_t obj_ele = NULL;
	tws_hash_ptr_t addr_ele = NULL;
	tws_hash_ptr_t *trailer;
	tws_hash_ptr_t *free_list;
	tws_hash_ele_t target_element = NULL;
	unsigned int i;
	unsigned int current_line;
	unsigned int set;
	int ctr;
	unsigned int startup_cache_line;
	int age_of_cache = 0;

	if(!tws_lock_try(tws)) {
		return KERN_FAILURE;
	}
	tws->insert_count++;
	current_line = 0xFFFFFFFF;	/* sentinel: no line chosen yet */
	set = 0;

	startup_cache_line = 0;

	/*
	 * Seed page_cache bits from the persisted startup list, keyed by
	 * the base address of this element's 32-page block.
	 */
	if (tws->startup_cache) {
		vm_offset_t startup_page_addr;

		startup_page_addr = page_addr - (offset - (offset & TWS_HASH_OFF_MASK));

		age_of_cache = ((sched_tick - tws->time_of_creation) >> SCHED_TICK_SHIFT);

		startup_cache_line = tws_startup_list_lookup(tws->startup_cache, startup_page_addr);
	}
	/* This next bit of code, the and alternate hash */
	/* are all made necessary because of IPC COW */

	/* Note: the use of page_addr modified by delta from offset */
	/* frame base means we may miss some previous entries. However */
	/* we will not miss the present entry. This is most important */
	/* in avoiding duplication of entries against long lived non-cow */
	/* objects */
	index_enum[0] = alt_tws_hash(
			page_addr & TWS_ADDR_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	index_enum[1] = alt_tws_hash(
			(page_addr - 0x1f000) & TWS_ADDR_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	/*
	 * Probe both address hashes for an existing entry at this page
	 * address.  Three outcomes per hit: exact object/offset match
	 * (just set the page bit and return), a shadow-parent match
	 * (inherit its page_cache and evict it), or a stale entry whose
	 * page bit gets cleared.  Entries left with an empty page_cache
	 * are fully unlinked from both hash chains.
	 */
	for(ctr = 0; ctr < 2;) {
		tws_hash_ele_t resident;
		tws_traverse_address_hash_list(tws,
				index_enum[ctr], page_addr, NULL,
				0, NULL,
				&cache_ele, &trailer, &free_list, 1);
		if(cache_ele != NULL) {
			/* found one */
			resident = (tws_hash_ele_t)((unsigned int)
				cache_ele->element & ~TWS_ADDR_HASH);
			if((object == resident->object) &&
				resident->offset ==
				(offset & TWS_HASH_OFF_MASK)) {
				/* This is our object/offset */
				resident->page_cache
					|= startup_cache_line;
				resident->page_cache |=
					(1<<(((vm_offset_t)
					(offset & TWS_INDEX_MASK))>>12));
				tws_unlock(tws);
				if (age_of_cache > 45)
					return KERN_OPERATION_TIMED_OUT;
				return KERN_SUCCESS;
			}
			if((object->shadow ==
				resident->object) &&
				((resident->offset
				+ object->shadow_offset)
				== (offset & TWS_HASH_OFF_MASK))) {
				/* if we just shadowed, inherit */
				/* access pattern from parent */
				startup_cache_line |=
					resident->page_cache;
				/* thow out old entry */
				resident->page_cache = 0;
				break;
			} else {
				resident->page_cache &=
					~(1<<(((vm_offset_t)(page_addr
					- resident->page_addr))
					>>12));
			}
			/* Throw out old entry if there are no */
			/* more pages in cache */
			if(resident->page_cache == 0) {
				/* delete addr hash entry */
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
				/* go after object hash */
				index = do_tws_hash(
						resident->object,
						resident->offset,
						tws->number_of_elements,
						tws->number_of_lines);
				tws_traverse_object_hash_list(tws,
					index, resident->object,
					resident->offset,
					0xFFFFFFFF, &cache_ele,
					&trailer, &free_list);
				if(cache_ele != NULL) {
					if(tws->style ==
						TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(
							cache_ele->element->object);
						vm_map_deallocate(
							cache_ele->element->map);
					}
					/* remember the vacated slot as a
					 * back-fill candidate */
					current_line =
						cache_ele->element->line;
					set = current_line
						/tws->number_of_lines;
					current_line -= set *
						tws->number_of_lines;
					if(cache_ele->element->object != 0) {
						cache_ele->element->object = 0;
						tws->cache[set]
							[current_line].ele_count--;
					}
					cache_ele->element = 0;
					*trailer = cache_ele->next;
					cache_ele->next = *free_list;
					*free_list = cache_ele;
				}
			}
			continue;
		}
		ctr+=1;
	}

	/*
	 * We may or may not have a current line setting coming out of
	 * the code above. If we have a current line it means we can
	 * choose to back-fill the spot vacated by a previous entry.
	 * We have yet to do a definitive check using the original obj/off
	 * We will do that now and override the current line if we
	 * find an element
	 */

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);

	alt_index = index_enum[0];

	tws_traverse_object_hash_list(tws, index, object, offset,
			0xFFFFFFFF, &cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		obj_ele = cache_ele;
		current_line = cache_ele->element->line;
		set = current_line/tws->number_of_lines;
		current_line -= set * tws->number_of_lines;
		target_element = cache_ele->element;

		/* Now check to see if we have a hash addr for it */
		tws_traverse_address_hash_list(tws,
				alt_index, obj_ele->element->page_addr,
				obj_ele->element->object,
				obj_ele->element->offset,
				obj_ele->element->map,
				&cache_ele, &trailer, &free_list, 0);
		if(cache_ele != NULL) {
			addr_ele = cache_ele;
		} else {
			addr_ele = new_addr_hash(tws, set, alt_index);
			/* if cannot allocate just do without */
			/* we'll get it next time around */
		}
	}



	/* signal-style hashes hold real refs on each cached entry */
	if(tws->style == TWS_HASH_STYLE_SIGNAL) {
		vm_object_reference(object);
		vm_map_reference(map);
	}

	/*
	 * No existing element and no vacated slot: place in the hash's
	 * current line, advancing (and possibly growing a new expansion
	 * set in place) when the line is full.
	 */
	if(current_line == 0xFFFFFFFF) {
		current_line = tws->current_line;
		set = current_line/tws->number_of_lines;
		current_line = current_line - (set * tws->number_of_lines);

#ifdef notdef
		if(cache_full) {
			tws->current_line = tws->number_of_lines - 1;
		}
#endif
		if(tws->cache[set][current_line].ele_count
				>= tws->number_of_elements) {
			current_line++;
			tws->current_line++;
			if(current_line == tws->number_of_lines) {
				set++;
				current_line = 0;
				if (set == tws->expansion_count) {
					if((tws->lookup_count <
					    (2 * tws->insert_count)) &&
					    (set<TWS_HASH_EXPANSION_MAX)) {
						tws->lookup_count = 0;
						tws->insert_count = 0;
						/* small hashes are grown by the
						 * caller (line-count expansion)
						 * rather than by adding a set */
						if(tws->number_of_lines
						    < TWS_HASH_LINE_COUNT) {
							tws->current_line--;
							tws_unlock(tws);
							return KERN_NO_SPACE;
						}
						/* object persistence is guaranteed by */
						/* an elevated paging or object */
						/* reference count in the caller. */
						vm_object_unlock(object);
						if((tws->table[set] = (tws_hash_ptr_t *)
						   kalloc(sizeof(tws_hash_ptr_t)
						   * tws->number_of_lines
						   * tws->number_of_elements))
						   == NULL) {
							set = 0;
						} else if((tws->table_ele[set] =
						   (tws_hash_ptr_t)
						   kalloc(sizeof(struct tws_hash_ptr)
						   * tws->number_of_lines
						   * tws->number_of_elements))
						   == NULL) {
							kfree(tws->table[set],
							      sizeof(tws_hash_ptr_t)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							set = 0;
						} else if((tws->alt_ele[set] =
						   (tws_hash_ptr_t)
						   kalloc(sizeof(struct tws_hash_ptr)
						   * tws->number_of_lines
						   * tws->number_of_elements))
						   == NULL) {
							kfree(tws->table_ele[set],
							      sizeof(struct tws_hash_ptr)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							kfree(tws->table[set],
							      sizeof(tws_hash_ptr_t)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							tws->table[set] = NULL;
							set = 0;

						} else if((tws->cache[set] =
						   (struct tws_hash_line *)
						   kalloc(sizeof
						   (struct tws_hash_line)
						   * tws->number_of_lines))
						   == NULL) {
							kfree(tws->alt_ele[set],
							      sizeof(struct tws_hash_ptr)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							kfree(tws->table_ele[set],
							      sizeof(struct tws_hash_ptr)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							kfree(tws->table[set],
							      sizeof(tws_hash_ptr_t)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							tws->table[set] = NULL;
							set = 0;

						} else {
							tws->free_hash_ele[set] =
								(tws_hash_ptr_t)0;
							tws->obj_free_count[set] = 0;
							tws->addr_free_count[set] = 0;
							bzero((char *)tws->table[set],
							      sizeof(tws_hash_ptr_t)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							bzero((char *)tws->table_ele[set],
							      sizeof(struct tws_hash_ptr)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							bzero((char *)tws->alt_ele[set],
							      sizeof(struct tws_hash_ptr)
							      * tws->number_of_lines
							      * tws->number_of_elements);
							bzero((char *)tws->cache[set],
							      sizeof(struct tws_hash_line)
							      * tws->number_of_lines);
						}
						vm_object_lock(object);
					} else {
						if (tws->startup_name != NULL) {
							tws->current_line--;

							age_of_cache = ((sched_tick - tws->time_of_creation) >> SCHED_TICK_SHIFT);

							tws_unlock(tws);

							if (age_of_cache > 45)
								return KERN_OPERATION_TIMED_OUT;

							return KERN_FAILURE;
						}
						/* no expansion: wrap back to
						 * set 0 and recycle lines */
						tws->lookup_count = 0;
						tws->insert_count = 0;
						set = 0;
					}
				}
				tws->current_line = set * tws->number_of_lines;
			}
			if(set < tws->expansion_count) {
				tws_hash_line_clear(tws,
					&(tws->cache[set][current_line]), object, TRUE);
				if(tws->cache[set][current_line].ele_count
						>= tws->number_of_elements) {
					if(tws->style == TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(object);
						vm_map_deallocate(map);
					}
					tws_unlock(tws);
					return KERN_FAILURE;
				}
			} else {
				tws->expansion_count++;
			}
		}
	}


	/* set object hash element */
	if(obj_ele == NULL) {
		obj_ele = new_obj_hash(tws, set, index);
		if(obj_ele == NULL) {
			/* mark line full so the next insert advances */
			tws->cache[set][current_line].ele_count
				= tws->number_of_elements;
			tws_unlock(tws);
			return KERN_FAILURE;
		}
	}

	/* set address hash element */
	if(addr_ele == NULL) {
		addr_ele = new_addr_hash(tws, set, alt_index);
	}

	/* find a free slot on the chosen line (round-robin scan) */
	if(target_element == NULL) {
		ele_index = 0;
		for(i = 0; i<tws->number_of_elements; i++) {
			if(tws->cache[set][current_line].
				list[ele_index].object == 0) {
				break;
			}
			ele_index++;
			if(ele_index >= tws->number_of_elements)
				ele_index = 0;

		}

		if(i == tws->number_of_elements)
			panic("tws_insert: no free elements");

		target_element =
			&(tws->cache[set][current_line].list[ele_index]);

		tws->cache[set][current_line].ele_count++;
	}

	/* publish: object hash points at the element; the address hash
	 * carries the TWS_ADDR_HASH tag in its element pointer */
	obj_ele->element = target_element;
	if(addr_ele) {
		addr_ele->element = (tws_hash_ele_t)
			(((unsigned int)target_element) | TWS_ADDR_HASH);
	}
	target_element->object = object;
	target_element->offset = offset & TWS_HASH_OFF_MASK;
	target_element->page_addr =
		page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	target_element->map = map;
	target_element->line =
		current_line + (set * tws->number_of_lines);

	target_element->page_cache |= startup_cache_line;
	target_element->page_cache |= 1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);

	tws_unlock(tws);

	if (age_of_cache > 45)
		return KERN_OPERATION_TIMED_OUT;

	return KERN_SUCCESS;
}
990
991 /*
992 * tws_build_cluster
993 * lengthen the cluster of pages by the number of pages encountered in the
994 * working set up to the limit requested by the caller. The object needs
995 * to be locked on entry. The map does not because the tws_lookup function
996 * is used only to find if their is an entry in the cache. No transient
997 * data from the cache is de-referenced.
998 *
999 */
1000 #if MACH_PAGEMAP
1001 /*
1002 * MACH page map - an optional optimization where a bit map is maintained
1003 * by the VM subsystem for internal objects to indicate which pages of
1004 * the object currently reside on backing store. This existence map
1005 * duplicates information maintained by the vnode pager. It is
1006 * created at the time of the first pageout against the object, i.e.
1007 * at the same time pager for the object is created. The optimization
1008 * is designed to eliminate pager interaction overhead, if it is
1009 * 'known' that the page does not exist on backing store.
1010 *
1011 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
1012 * either marked as paged out in the existence map for the object or no
1013 * existence map exists for the object. LOOK_FOR() is one of the
1014 * criteria in the decision to invoke the pager. It is also used as one
1015 * of the criteria to terminate the scan for adjacent pages in a clustered
1016 * pagein operation. Note that LOOK_FOR() always evaluates to TRUE for
1017 * permanent objects. Note also that if the pager for an internal object
1018 * has not been created, the pager is not invoked regardless of the value
1019 * of LOOK_FOR() and that clustered pagein scans are only done on an object
1020 * for which a pager has been created.
1021 *
1022 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1023 * is marked as paged out in the existence map for the object. PAGED_OUT()
1024 * PAGED_OUT() is used to determine if a page has already been pushed
1025 * into a copy object in order to avoid a redundant page out operation.
1026 */
1027 #define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
1028 != VM_EXTERNAL_STATE_ABSENT)
1029 #define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
1030 == VM_EXTERNAL_STATE_EXISTS)
1031 #else /* MACH_PAGEMAP */
1032 /*
1033 * If the MACH page map optimization is not enabled,
1034 * LOOK_FOR() always evaluates to TRUE. The pager will always be
1035 * invoked to resolve missing pages in an object, assuming the pager
1036 * has been created for the object. In a clustered page operation, the
1037 * absence of a page on backing backing store cannot be used to terminate
1038 * a scan for adjacent pages since that information is available only in
1039 * the pager. Hence pages that may not be paged out are potentially
1040 * included in a clustered request. The vnode pager is coded to deal
1041 * with any combination of absent/present pages in a clustered
1042 * pagein request. PAGED_OUT() always evaluates to FALSE, i.e. the pager
1043 * will always be invoked to push a dirty page into a copy object assuming
1044 * a pager has been created. If the page has already been pushed, the
1045 * pager will ingore the new request.
1046 */
1047 #define LOOK_FOR(o, f) TRUE
1048 #define PAGED_OUT(o, f) FALSE
1049 #endif /* MACH_PAGEMAP */
1050
1051
/*
 * tws_build_cluster:
 *
 *	Given a fault window [*start, *end) in "object", grow it into a
 *	larger cluster of pages worth bringing in with a single pager
 *	request.  On return *start and *end bound the (possibly widened)
 *	cluster; absent placeholder pages have been inserted into the
 *	object for every page added so the pager request is throttled
 *	and tracked.  "max_length" caps the cluster size in bytes.
 *
 *	Two strategies are used:
 *	  - if a startup (pre-heat) cache is present, the recorded
 *	    page-residency bitmaps (tws_test_for_community) drive an
 *	    aggressive 32-page-aligned expansion;
 *	  - otherwise a small speculative read-ahead sized by the file's
 *	    length ("pre_heat_size") is attempted, extended page by page
 *	    while the working-set hash says adjacent pages are hot.
 *
 *	Returns silently without side effects if the object is private,
 *	pagerless, or the tws lock cannot be taken without blocking.
 */
void
tws_build_cluster(
		tws_hash_t	tws,
		vm_object_t	object,
		vm_object_offset_t  *start,
		vm_object_offset_t  *end,
		vm_size_t	max_length)
{
	tws_hash_line_t		line;
	vm_object_offset_t	before = *start;
	vm_object_offset_t	after = *end;
	vm_object_offset_t	original_start = *start;
	vm_object_offset_t	original_end = *end;
	vm_size_t		length = (vm_size_t)(*end - *start);
	vm_page_t		m;
	kern_return_t		kret;
	vm_object_offset_t	object_size;
	int			age_of_cache;
	vm_size_t		pre_heat_size;
	unsigned int		ele_cache;
	unsigned int		end_cache = 0;
	unsigned int		start_cache = 0;
	unsigned int		memory_scarce = 0;

	/* private objects and objects with no pager cannot be clustered */
	if((object->private) || !(object->pager))
		return;

	/* external objects: ask the vnode pager for the true file size */
	if (!object->internal) {
		kret = vnode_pager_get_object_size(
       			object->pager,
			&object_size);
	} else {
		object_size = object->size;
	}

	/*
	 * Best-effort only: if the hash is busy, skip clustering rather
	 * than block the fault path.
	 */
	if((!tws) || (!tws_lock_try(tws))) {
		return;
	}
	age_of_cache = ((sched_tick
			- tws->time_of_creation) >> SCHED_TICK_SHIFT);

	if (vm_page_free_count < (2 * vm_page_free_target))
		memory_scarce = 1;

	/* When pre-heat files are not available, resort to speculation */
	/* based on size of file */

	if (tws->startup_cache || object->internal || age_of_cache > 45) {
		pre_heat_size = 0;
	} else {
		if (object_size > (vm_object_offset_t)(1024 * 1024))
			pre_heat_size = 8 * PAGE_SIZE;
		else if (object_size > (vm_object_offset_t)(128 * 1024))
			pre_heat_size = 4 * PAGE_SIZE;
		else
			pre_heat_size = 2 * PAGE_SIZE;
	}

	if (tws->startup_cache) {
		int target_page_count;

		/*
		 * Under memory pressure demand stronger evidence (more
		 * recorded pages) before committing to a big cluster.
		 */
		if (memory_scarce)
			target_page_count = 16;
		else
			target_page_count = 4;

		if (tws_test_for_community(tws, object, *start, target_page_count, &ele_cache))
		{
			start_cache = ele_cache;
			/* align the cluster to a 32-page frame */
			*start = *start & TWS_HASH_OFF_MASK;
			*end = *start + (32 * PAGE_SIZE_64);

			if (*end > object_size) {
				*end = round_page_64(object_size);
				max_length = 0;
			} else
				end_cache = ele_cache;

			/*
			 * Keep appending/prepending whole 32-page frames
			 * while neighbouring frames also show community
			 * and the max_length budget allows it.
			 */
			while (max_length > ((*end - *start) + (32 * PAGE_SIZE))) {
				int	expanded;

				expanded = 0;
				after = *end;

				if ((after + (32 * PAGE_SIZE_64)) <= object_size &&
				    (tws_test_for_community(tws, object, after, 8, &ele_cache))) {

					*end = after + (32 * PAGE_SIZE_64);
					end_cache = ele_cache;
					expanded = 1;
				}
				if (max_length < ((*end - *start) + (32 * PAGE_SIZE_64))) {
					break;
				}
				if (*start) {
					before = (*start - PAGE_SIZE_64) & TWS_HASH_OFF_MASK;

					if (tws_test_for_community(tws, object, before, 8, &ele_cache)) {

						*start = before;
						start_cache = ele_cache;
						expanded = 1;
					}
				}
				if (expanded == 0)
					break;
			}
			if (end_cache)
				*end -= PAGE_SIZE_64;

			/*
			 * Trim leading pages the bitmap says were never
			 * touched (low-order bits of start_cache), but
			 * never trim past the original fault page.
			 */
			if (start_cache != 0) {
				unsigned int mask;

				for (mask = 1; mask != 0; mask = mask << 1) {
					if (*start == original_start)
						break;
					if (!(start_cache & mask))
						*start += PAGE_SIZE_64;
					else
						break;
				}
			}
			/* likewise trim untouched trailing pages */
			if (end_cache != 0) {
				unsigned int mask;

				for (mask = 0x80000000;
						mask != 0; mask = mask >> 1) {
					if (*end == original_end)
						break;
					if (!(end_cache & mask))
						*end -= PAGE_SIZE_64;
					else
						break;
				}
			}
			tws_unlock(tws);

			if (*end < original_end)
				*end = original_end;
			return;
		}
	}

	/*
	 * Speculative path: extend forward one page at a time.  Beyond
	 * pre_heat_size, only continue while the working-set hash knows
	 * this page (or the next one) — i.e. it was hot before.
	 */
	while ((length < max_length) &&
			(object_size >=
				(after + PAGE_SIZE_64))) {
		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, after, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t extend;

				extend = after + PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}

		/* stop if the pagemap says the page was never paged out */
		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, after))) {
			break;
		}

		if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
			/*
			 * don't bridge resident pages
			 */
			break;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager.  If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		/* mark as an absent placeholder awaiting pager data */
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, after);
		object->absent_count++;
		after += PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	*end = after;
	/* then extend backward under the same rules */
	while (length < max_length) {
		if (before == 0)
			break;
		before -= PAGE_SIZE_64;

		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, before, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t extend;

				extend = before;
				if (extend == 0)
					break;
				extend -= PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}
		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, before))) {
			break;
		}

		if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
			/*
			 * don't bridge resident pages
			 */
			break;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager.  If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, before);
		object->absent_count++;
		*start -= PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	tws_unlock(tws);
}
1314
/*
 * tws_line_signal:
 *
 *	For a TWS_HASH_STYLE_SIGNAL working set, walk one hash line and
 *	issue asynchronous pager read requests for each element's first
 *	recorded page (skipping the page that triggered the signal).
 *	Each candidate gets an absent placeholder page inserted, the
 *	request is widened via tws_build_cluster, and the data request is
 *	sent to the pager.  On pager failure, the placeholder pages are
 *	torn back out.
 *
 *	Locking: entered with nothing held; takes the map lock for the
 *	scan, and drops/retakes it around each per-object operation
 *	(vm_map_lock must not be held across page grabs or pager calls).
 *	Each object is referenced while the map lock is dropped.
 */
void
tws_line_signal(
	tws_hash_t	tws,
	vm_map_t	map,
	tws_hash_line_t hash_line,
	vm_offset_t	target_page)
{
	unsigned int		i,j;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_offset_t	before;
	vm_object_offset_t	after;
	struct tws_hash_ele	*element;
	vm_page_t		m,p;
	kern_return_t		rc;

	if(tws->style != TWS_HASH_STYLE_SIGNAL)
		return;

	vm_map_lock(map);
	for (i=0; i<tws->number_of_elements; i++) {

		vm_object_offset_t local_off = 0;

		if(hash_line->list[i].object == 0)
			continue;

		element = &hash_line->list[i];

		if (element->page_addr == target_page)
			continue;

		/*
		 * Find the lowest set bit in the 32-bit residency map;
		 * local_off becomes the offset of that first recorded
		 * page within the element's 32-page frame.
		 */
		j = 1;
		while (j != 0) {
			if(j & element->page_cache)
				break;
			j <<= 1;
			local_off += PAGE_SIZE_64;
		}
		object = element->object;
		offset = element->offset + local_off;

		/* first try a fast test to speed up no-op signal */
		if (((p = vm_page_lookup(object, offset)) != NULL)
				|| (object->pager == NULL)
				|| (object->shadow_severed)) {
			continue;
		}

		if((!object->alive) ||
			(!object->pager_created) || (!object->pager_ready))
			continue;

		if (object->internal) {
			if (object->existence_map == NULL) {
				if (object->shadow)
					continue;
			} else {
				/* page never paged out -> nothing to fetch */
				if(!LOOK_FOR(object, offset))
					continue;
			}
		}

		/* hold the object while we drop the map lock */
		vm_object_reference(object);
		vm_map_unlock(map);

		if(object->internal) {
			/* real page: throttles default-pager requests */
			m = vm_page_grab();
		} else {
			m = vm_page_grab_fictitious();
		}

		if(m == NULL) {
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		/* re-check under the object lock: state may have changed */
		vm_object_lock(object);
		if (((p = vm_page_lookup(object, offset)) != NULL)
				|| (object->pager == NULL)
				|| (object->shadow_severed)) {
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		vm_page_insert(m, object, offset);

		/* too many outstanding absent pages: stop signalling */
		if (object->absent_count > vm_object_absent_max) {
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			break;
		}
		m->list_req_pending = TRUE;
		m->absent = TRUE;
		m->unusual = TRUE;
		object->absent_count++;

		/* widen [before, after) around the page before requesting */
		before = offset;
		after = offset + PAGE_SIZE_64;
		tws_build_cluster(tws, object, &before, &after, 0x16000);
		vm_object_unlock(object);

		rc = memory_object_data_request(object->pager,
				before + object->paging_offset,
				(vm_size_t)(after - before), VM_PROT_READ);
		if (rc != KERN_SUCCESS) {
			/*
			 * Pager refused: free every still-absent, busy
			 * placeholder we inserted for this request.
			 */
			offset = before;
			vm_object_lock(object);
			while (offset < after) {
				m = vm_page_lookup(object, offset);
				if(m && m->absent && m->busy)
					VM_PAGE_FREE(m);
				offset += PAGE_SIZE;
			}
			vm_object_unlock(object);
			vm_object_deallocate(object);
		} else {
			vm_object_deallocate(object);
		}
		vm_map_lock(map);
		continue;
	}
	vm_map_unlock(map);
}
1445
/*
 * tws_create_startup_list:
 *
 *	Serialize the current working-set hash into a single contiguous
 *	"startup cache" suitable for writing to the app_profile file.
 *	The blob is laid out as:
 *
 *		struct tws_startup        header
 *		tws_startup_ptr_t         table[total_elements]   (hash buckets)
 *		struct tws_startup_ptr    ele[total_elements]     (chain nodes)
 *		struct tws_startup_ele    array[total_elements]   (payload)
 *
 *	Chain links and element references are stored as byte OFFSETS
 *	relative to the start of their respective regions (not pointers),
 *	so the blob is position-independent and can be fixed up after
 *	being read back (see tws_read_startup_file).
 *
 *	Returns the kalloc'ed blob (caller frees, size is
 *	startup->tws_hash_size) or NULL on allocation failure.
 *
 *	tws locked on entry.
 */
tws_startup_t
tws_create_startup_list(
	tws_hash_t	tws)
{

	tws_startup_t 		startup;
	unsigned int		i,j,k;
	unsigned int		total_elements;
	unsigned int		startup_size;
	unsigned int		sindex;
	unsigned int		hash_index;
	tws_startup_ptr_t	element;

	total_elements = tws->expansion_count *
			(tws->number_of_lines * tws->number_of_elements);

	startup_size = sizeof(struct tws_startup)
			+ (total_elements * sizeof(tws_startup_ptr_t *))
			+ (total_elements * sizeof(struct tws_startup_ptr))
			+ (total_elements * sizeof(struct tws_startup_ele));
	startup = (tws_startup_t)(kalloc(startup_size));

	if(startup == NULL)
		return startup;

	bzero((char *) startup, startup_size);

	/* carve the three regions out of the single allocation */
	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
				(total_elements * sizeof(tws_startup_ptr_t)));

	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
				(total_elements * sizeof(struct tws_startup_ptr)));

	startup->tws_hash_size = startup_size;
	startup->ele_count = 0; /* burn first hash ele, else we can't tell from zero */
	startup->array_size = total_elements;
	/* hash_count starts at 1: offset 0 must stay distinguishable
	 * from a NULL chain link, so ele[0] is sacrificed */
	startup->hash_count = 1;

	sindex = 0;


	for(i = 0; i<tws->number_of_lines; i++) {
		for(j = 0; j<tws->number_of_elements; j++) {
			for(k = 0; k<tws->expansion_count; k++) {
				tws_hash_ele_t		entry;
				unsigned int		hash_retry;
				vm_offset_t		addr;

				entry = &tws->cache[k][i].list[j];
				addr = entry->page_addr;
				hash_retry = 0;
				if(entry->object != 0) {
					/* get a hash element */
					hash_index = do_startup_hash(addr,
						startup->array_size);

					if(startup->hash_count < total_elements) {
						element = &(startup->ele[startup->hash_count]);
						startup->hash_count += 1;
					} else {
						/* exit we're out of elements */
						break;
					}
					/* place the hash element */
					element->next = startup->table[hash_index];
					startup->table[hash_index] = (tws_startup_ptr_t)
						((int)element - (int)&startup->ele[0]);

					/* set entry OFFSET in hash element */
					element->element = (tws_startup_ele_t)
						((int)&startup->array[sindex] -
						(int)&startup->array[0]);

					startup->array[sindex].page_addr = entry->page_addr;
					startup->array[sindex].page_cache = entry->page_cache;
					startup->ele_count++;
					sindex++;

				}
			}
		}
	}

	return startup;
}
1537
1538
/*
 * Returns an entire cache line.  The line is deleted from the startup
 * cache on return.  The caller can check startup->ele_count for an empty
 * list.  Access synchronization is the responsibility of the caller.
 *
 * Implementation notes:
 *	The startup cache is the position-independent blob built by
 *	tws_create_startup_list: chain links and element references are
 *	byte offsets, converted back to pointers by adding the base of
 *	the ele[]/array[] regions.  Because the blob came from a file,
 *	every reconstructed pointer is range-checked against
 *	startup->tws_hash_size before use; an out-of-range value aborts
 *	the walk and returns whatever bits were gathered so far.
 *
 *	Both the 32-page frame containing "addr" and the preceding frame
 *	(whose high-order pages can overlap addr+0x1F000) contribute to
 *	the returned residency bitmap.  Fully consumed entries are
 *	unlinked from their bucket chain and zeroed.
 */
unsigned int
tws_startup_list_lookup(
	tws_startup_t	startup,
	vm_offset_t	addr)
{
	unsigned int	hash_index;
	unsigned int	page_cache_bits;
	unsigned int	startup_shift;
	tws_startup_ele_t	entry;
	vm_offset_t	next_addr;
	tws_startup_ptr_t	element;
	tws_startup_ptr_t	base_ele;
	tws_startup_ptr_t	*previous_ptr;

	page_cache_bits = 0;

	hash_index = do_startup_hash(addr, startup->array_size);

	/* bucket slot itself must lie inside the blob */
	if(((unsigned int)&(startup->table[hash_index])) >= ((unsigned int)startup + startup->tws_hash_size)) {
		return page_cache_bits = 0;
	}
	element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
			(int)&startup->ele[0]);
	base_ele = element;
	previous_ptr = &(startup->table[hash_index]);
	/* offset 0 encodes end-of-chain (ele[0] is the burned sentinel) */
	while(element > &startup->ele[0]) {
		if (((int)element + sizeof(struct tws_startup_ptr))
			> ((int)startup + startup->tws_hash_size)) {
			return page_cache_bits;
		}
		entry = (tws_startup_ele_t)
			((int)element->element
				+ (int)&startup->array[0]);
		if((((int)entry + sizeof(struct tws_startup_ele))
			> ((int)startup + startup->tws_hash_size))
			|| ((int)entry < (int)startup)) {
			return page_cache_bits;
		}
		/* does this entry's 32-page frame cover addr? */
		if ((addr >= entry->page_addr) &&
			(addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache >> startup_shift;
			/* don't dump the pages, unless the addresses */
			/* line up perfectly.  The cache may be used */
			/* by other mappings */
			entry->page_cache &= (1 << startup_shift) - 1;
			if(addr == entry->page_addr) {
				/* exact match: unlink and retire the entry */
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		/* preceding frame may overlap the tail of our window */
		next_addr = addr + 0x1F000;
		if ((next_addr >= entry->page_addr) &&
			(next_addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (next_addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
			entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
			if(entry->page_cache == 0) {
				/* nothing left in the entry: unlink it */
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		previous_ptr = &(element->next);
		element = (tws_startup_ptr_t)
			((int) element->next + (int) &startup->ele[0]);
	}

	return page_cache_bits;
}
1640
1641 kern_return_t
1642 tws_send_startup_info(
1643 task_t task)
1644 {
1645
1646 tws_hash_t tws;
1647
1648 task_lock(task);
1649 tws = task->dynamic_working_set;
1650 task_unlock(task);
1651 if(tws == NULL) {
1652 return KERN_FAILURE;
1653 }
1654 return tws_internal_startup_send(tws);
1655 }
1656
1657
1658 kern_return_t
1659 tws_internal_startup_send(
1660 tws_hash_t tws)
1661 {
1662
1663 tws_startup_t scache;
1664
1665 if(tws == NULL) {
1666 return KERN_FAILURE;
1667 }
1668 tws_lock(tws);
1669 /* used to signal write or release depending on state of tws */
1670 if(tws->startup_cache) {
1671 vm_offset_t startup_buf;
1672 vm_size_t size;
1673 startup_buf = (vm_offset_t)tws->startup_cache;
1674 size = tws->startup_cache->tws_hash_size;
1675 tws->startup_cache = 0;
1676 tws_unlock(tws);
1677 kmem_free(kernel_map, startup_buf, size);
1678 return KERN_SUCCESS;
1679 }
1680 if(tws->startup_name == NULL) {
1681 tws_unlock(tws);
1682 return KERN_FAILURE;
1683 }
1684 scache = tws_create_startup_list(tws);
1685 if(scache == NULL)
1686 return KERN_FAILURE;
1687 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1688 (caddr_t) scache, scache->tws_hash_size,
1689 tws->mod, tws->fid);
1690 kfree(scache, scache->tws_hash_size);
1691 kfree(tws->startup_name, tws->startup_name_length);
1692 tws->startup_name = NULL;
1693 tws_unlock(tws);
1694 return KERN_SUCCESS;
1695 }
1696
/*
 * tws_handle_startup_file:
 *
 *	Entry point used at exec time: try to load the per-user
 *	app_profile startup cache for "app_name".  If no cache exists
 *	yet, register the name so a cache will be written later
 *	(tws_write_startup_file) and set *new_info so the caller knows
 *	to build private shared-region copies.  If a cache was read,
 *	hand it to the task's working set (tws_read_startup_file), which
 *	takes ownership of the buffer on success.
 *
 *	Returns KERN_SUCCESS when there is nothing to do (kernel task,
 *	or shared-memory pressure forbids new profiles), otherwise the
 *	status of the read/write registration.
 */
kern_return_t
tws_handle_startup_file(
		task_t			task,
		unsigned int		uid,
		char			*app_name,
		void			*app_vp,
		boolean_t		*new_info)

{
	tws_startup_t	startup;
	vm_offset_t	cache_size;
	kern_return_t	error;
	int		fid;
	int		mod;

	*new_info = FALSE;
	/* don't pre-heat kernel task */
	if(task == kernel_task)
		return KERN_SUCCESS;
	error = bsd_read_page_cache_file(uid, &fid,
				&mod, app_name,
				app_vp,
				(vm_offset_t *) &startup,
				&cache_size);
	if(error) {
		return KERN_FAILURE;
	}
	if(startup == NULL) {
		/* Entry for app does not exist, make */
		/* one */
		/* we will want our own copy of the shared */
		/* regions to pick up a true picture of all */
		/* the pages we will touch. */
		if((lsf_zone->count * lsf_zone->elem_size)
				> (lsf_zone->max_size >> 1)) {
			/* We don't want to run out of shared memory */
			/* map entries by starting too many private versions */
			/* of the shared library structures */
			return KERN_SUCCESS;
		}
		*new_info = TRUE;

		error = tws_write_startup_file(task,
					fid, mod, app_name, uid);
		if(error)
			return error;

	} else {
		error = tws_read_startup_file(task,
					(tws_startup_t)startup,
					cache_size);
		if(error) {
			/* reader rejected the buffer: we still own it */
			kmem_free(kernel_map,
				(vm_offset_t)startup, cache_size);
			return error;
		}
	}
	return KERN_SUCCESS;
}
1756
1757 kern_return_t
1758 tws_write_startup_file(
1759 task_t task,
1760 int fid,
1761 int mod,
1762 char *name,
1763 unsigned int uid)
1764 {
1765 tws_hash_t tws;
1766 unsigned int string_length;
1767
1768 string_length = strlen(name);
1769
1770 restart:
1771 task_lock(task);
1772 tws = task->dynamic_working_set;
1773 task_unlock(task);
1774
1775 if(tws == NULL) {
1776 kern_return_t error;
1777
1778 /* create a dynamic working set of normal size */
1779 if((error = task_working_set_create(task, 0, 0, TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
1780 return error;
1781 /* we need to reset tws and relock */
1782 goto restart;
1783 }
1784 tws_lock(tws);
1785
1786 if(tws->startup_name != NULL) {
1787 tws_unlock(tws);
1788 return KERN_FAILURE;
1789 }
1790
1791 tws->startup_name = (char *)
1792 kalloc((string_length + 1) * (sizeof(char)));
1793 if(tws->startup_name == NULL) {
1794 tws_unlock(tws);
1795 return KERN_FAILURE;
1796 }
1797
1798 bcopy(name, (char *)tws->startup_name, string_length + 1);
1799 tws->startup_name_length = (string_length + 1) * sizeof(char);
1800 tws->uid = uid;
1801 tws->fid = fid;
1802 tws->mod = mod;
1803
1804 tws_unlock(tws);
1805 return KERN_SUCCESS;
1806 }
1807
/* count of startup caches dropped because their size/contents
 * failed validation (diagnostic counter, never reset) */
unsigned long tws_read_startup_file_rejects = 0;

/*
 * tws_read_startup_file:
 *
 *	Validate a startup-cache blob read from the app_profile file and
 *	attach it to the task's dynamic working set, sizing (or
 *	expanding) the working set to hold the cached data.  On success
 *	the working set takes ownership of "startup"; on KERN_SUCCESS
 *	paths that reject the blob it is freed here, while on
 *	KERN_FAILURE the caller still owns it (see
 *	tws_handle_startup_file).
 */
kern_return_t
tws_read_startup_file(
		task_t			task,
		tws_startup_t		startup,
		vm_offset_t		cache_size)
{
	tws_hash_t	tws;
	int		lines;
	int		old_exp_count;
	unsigned int	ele_count;

restart:
	task_lock(task);
	tws = task->dynamic_working_set;

	/* create a dynamic working set to match file size */

	/* start with total size of the data we got from app_profile */
	ele_count = cache_size;
	/* skip the startup header */
	ele_count -= sizeof(struct tws_startup);
	/*
	 * For each startup cache entry, we have one of these:
	 *	tws_startup_ptr_t	startup->table[];
	 *	struct tws_startup_ptr	startup->ele[];
	 *	struct tws_startup_ele	startup->array[];
	 */
	ele_count /= (sizeof (tws_startup_ptr_t) +
		      sizeof (struct tws_startup_ptr) +
		      sizeof (struct tws_startup_ele));

	/*
	 * Sanity check: make sure the value for startup->array_size
	 * that we read from the app_profile file matches the size
	 * of the data we read from disk.  If it doesn't match, we
	 * can't trust the data and we just drop it all.
	 */
	if (cache_size < sizeof(struct tws_startup) ||
	    startup->array_size != ele_count) {
		tws_read_startup_file_rejects++;
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	}

	/*
	 * We'll create the task working set with the default row size
	 * (TWS_ARRAY_SIZE), so this will give us the number of lines
	 * we need to store all the data from the app_profile startup
	 * cache.
	 */
	lines = ele_count / TWS_ARRAY_SIZE;

	if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
		/* too small to be worth pre-heating: drop it */
		lines = TWS_SMALL_HASH_LINE_COUNT;
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	} else {
		/* round lines up to a whole number of expansion units */
		old_exp_count = lines/TWS_HASH_LINE_COUNT;
		if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
			lines = (old_exp_count + 1)
					* TWS_HASH_LINE_COUNT;
		}
		if(tws == NULL) {
			kern_return_t	error;

			task_unlock(task);
			if ((error = task_working_set_create(task, lines, 0, TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
				return error;
			/* we need to reset tws and relock */
			goto restart;
		} else {
			task_unlock(task);
			tws_expand_working_set(
				(void *)tws, lines, TRUE);
		}
	}

	tws_lock(tws);

	/* a startup cache is already attached: caller keeps the buffer */
	if(tws->startup_cache != NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}


	/* now need to fix up internal table pointers */
	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
			(startup->array_size * sizeof(tws_startup_ptr_t)));
	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
			(startup->array_size * sizeof(struct tws_startup_ptr)));
	/* the allocation size and file size should be the same */
	/* just in case their not, make sure we dealloc correctly  */
	startup->tws_hash_size = cache_size;

	tws->startup_cache = startup;
	tws_unlock(tws);
	return KERN_SUCCESS;
}
1914
1915
1916 void
1917 tws_hash_ws_flush(tws_hash_t tws) {
1918 tws_startup_t scache;
1919 if(tws == NULL) {
1920 return;
1921 }
1922 tws_lock(tws);
1923 if(tws->startup_name != NULL) {
1924 scache = tws_create_startup_list(tws);
1925 if(scache == NULL) {
1926 /* dump the name cache, we'll */
1927 /* get it next time */
1928 kfree(tws->startup_name, tws->startup_name_length);
1929 tws->startup_name = NULL;
1930 tws_unlock(tws);
1931 return;
1932 }
1933 bsd_write_page_cache_file(tws->uid, tws->startup_name,
1934 (caddr_t) scache, scache->tws_hash_size,
1935 tws->mod, tws->fid);
1936 kfree(scache, scache->tws_hash_size);
1937 kfree(tws->startup_name, tws->startup_name_length);
1938 tws->startup_name = NULL;
1939 }
1940 tws_unlock(tws);
1941 return;
1942 }
1943
1944 void
1945 tws_hash_destroy(tws_hash_t tws)
1946 {
1947 unsigned int i,k;
1948
1949 if(tws->startup_cache != NULL) {
1950 kmem_free(kernel_map,
1951 (vm_offset_t)tws->startup_cache,
1952 tws->startup_cache->tws_hash_size);
1953 tws->startup_cache = NULL;
1954 }
1955 if(tws->startup_name != NULL) {
1956 tws_internal_startup_send(tws);
1957 }
1958 for (i=0; i<tws->number_of_lines; i++) {
1959 for(k=0; k<tws->expansion_count; k++) {
1960 /* clear the object refs */
1961 tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
1962 }
1963 }
1964 i = 0;
1965 while (i < tws->expansion_count) {
1966
1967 kfree(tws->table[i],
1968 sizeof(tws_hash_ptr_t)
1969 * tws->number_of_lines
1970 * tws->number_of_elements);
1971 kfree(tws->table_ele[i],
1972 sizeof(struct tws_hash_ptr)
1973 * tws->number_of_lines
1974 * tws->number_of_elements);
1975 kfree(tws->alt_ele[i],
1976 sizeof(struct tws_hash_ptr)
1977 * tws->number_of_lines
1978 * tws->number_of_elements);
1979 kfree(tws->cache[i],
1980 sizeof(struct tws_hash_line) * tws->number_of_lines);
1981 i++;
1982 }
1983 if(tws->startup_name != NULL) {
1984 kfree(tws->startup_name, tws->startup_name_length);
1985 }
1986 kfree(tws, sizeof(struct tws_hash));
1987 }
1988
1989 void
1990 tws_hash_clear(tws_hash_t tws)
1991 {
1992 unsigned int i, k;
1993
1994 for (i=0; i<tws->number_of_lines; i++) {
1995 for(k=0; k<tws->expansion_count; k++) {
1996 /* clear the object refs */
1997 tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
1998 }
1999 }
2000 }
2001
2002 kern_return_t
2003 task_working_set_create(
2004 task_t task,
2005 unsigned int lines,
2006 unsigned int rows,
2007 unsigned int style)
2008 {
2009
2010 if (lines == 0) {
2011 lines = TWS_HASH_LINE_COUNT;
2012 }
2013 if (rows == 0) {
2014 rows = TWS_ARRAY_SIZE;
2015 }
2016 if (style == TWS_HASH_STYLE_DEFAULT) {
2017 style = TWS_HASH_STYLE_BASIC;
2018 }
2019 task_lock(task);
2020 if(task->dynamic_working_set != 0) {
2021 task_unlock(task);
2022 return(KERN_FAILURE);
2023 } else if((task->dynamic_working_set =
2024 tws_hash_create(lines, rows, style)) == 0) {
2025 task_unlock(task);
2026 return(KERN_NO_SPACE);
2027 }
2028 task_unlock(task);
2029 return KERN_SUCCESS;
2030 }
2031
2032
2033 /* Internal use only routines */
2034
2035
/*
 * internal sub-function for address space lookup
 * returns the target element and the address of the
 * previous pointer  The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked
 *
 * Notes:
 *	Walks the address-hash chains (entries tagged with
 *	TWS_ADDR_HASH in their element pointer) of bucket "index" in
 *	every expansion set, looking for an element whose 32-page frame
 *	covers "page_addr" and — when object is non-NULL — matches
 *	object/offset/map exactly.  With exclusive_addr set, the
 *	specific page's bit must also be set in the residency map.
 *	Empty elements encountered on the way are unlinked and pushed
 *	onto that set's free list as a side effect.  On a hit,
 *	*target_ele/*previous_ptr/*free_list are filled in; on a miss
 *	*target_ele stays NULL.
 */
void
tws_traverse_address_hash_list (
	tws_hash_t 		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		tws_hash_ele_t ele;
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
		while(cache_ele != NULL) {
			/* skip object-hash entries (no TWS_ADDR_HASH tag) */
			if(((unsigned int)
				cache_ele->element & TWS_ADDR_HASH) == 0) {
				*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			/* strip the tag bit to recover the real pointer */
			ele = (tws_hash_ele_t)((unsigned int)
				cache_ele->element & ~TWS_ADDR_HASH);
			if ((ele == 0) || (ele->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					/* unlinking the chain head */
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = base_ele;
				} else {
					/* unlink via the trailing pointer */
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}

			if ((ele->page_addr <= page_addr)
				&& (page_addr <= (ele->page_addr +
						(vm_offset_t)TWS_INDEX_MASK))
				&& ((object == NULL)
					|| ((object == ele->object)
					&& (offset == ele->offset)
					&& (map == ele->map)))) {
				if(exclusive_addr) {
					int delta;
					/* require this exact page's bit */
					delta = ((page_addr - ele->page_addr)
						>> 12);
					if((1 << delta) & ele->page_cache) {
						/* We've found a match */
						*target_ele = cache_ele;
						*free_list =
						    (tws_hash_ptr_t *)
						    &(tws->free_hash_ele[k]);
						return;
					}
				} else {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = (tws_hash_ptr_t *)
						&(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
2128
2129
/*
 * internal sub-function for object space lookup
 * returns the target element and the address of the
 * previous pointer  The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked
 *
 * Notes:
 *	Counterpart of tws_traverse_address_hash_list for the object
 *	hash: entries tagged TWS_ADDR_HASH are skipped instead of
 *	matched.  A hit requires the same object and the same 32-page
 *	frame (offset masked by TWS_HASH_OFF_MASK); "pagemask" of
 *	0xFFFFFFFF matches any residency, otherwise at least one bit
 *	must overlap the element's page_cache.  Empty elements are
 *	unlinked to the free list in passing.
 */
void
tws_traverse_object_hash_list (
	tws_hash_t 		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		pagemask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = &(tws->table[k][index]);
		while(cache_ele != NULL) {
			/* skip address-hash entries */
			if((((unsigned int)cache_ele->element)
				& TWS_ADDR_HASH) != 0) {
				*previous_ptr = &(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			if ((cache_ele->element == 0) ||
				(cache_ele->element->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					/* unlinking the chain head */
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = tws->table[k][index];
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}
			if ((cache_ele->element->object == object)
				&& (cache_ele->element->offset ==
				(offset - (offset & ~TWS_HASH_OFF_MASK)))) {
				if((cache_ele->element->page_cache & pagemask)
					|| (pagemask == 0xFFFFFFFF)) {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = &(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
2202
2203
2204 /*
2205 * For a given object/offset, discover whether the indexed 32 page frame
2206 * containing the object/offset exists and if their are at least threshold
2207 * pages present. Returns true if population meets threshold.
2208 */
2209 int
2210 tws_test_for_community(
2211 tws_hash_t tws,
2212 vm_object_t object,
2213 vm_object_offset_t offset,
2214 unsigned int threshold,
2215 unsigned int *pagemask)
2216 {
2217 int index;
2218 tws_hash_ptr_t cache_ele;
2219 tws_hash_ptr_t *trailer;
2220 tws_hash_ptr_t *free_list;
2221 int community = 0;
2222
2223 index = do_tws_hash(object, offset,
2224 tws->number_of_elements, tws->number_of_lines);
2225 tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
2226 &cache_ele, &trailer, &free_list);
2227
2228 if(cache_ele != NULL) {
2229 int i;
2230 unsigned int ctr;
2231 ctr = 0;
2232 for(i=1; i!=0; i=i<<1) {
2233 if(i & cache_ele->element->page_cache)
2234 ctr++;
2235 if(ctr == threshold) {
2236 community = 1;
2237 *pagemask = cache_ele->element->page_cache;
2238 break;
2239 }
2240 }
2241 }
2242
2243 return community;
2244
2245 }
2246
2247
2248 /*
2249 * Gets new hash element for object hash from free pools
2250 * TWS must be locked
2251 */
2252
2253 tws_hash_ptr_t
2254 new_obj_hash(
2255 tws_hash_t tws,
2256 unsigned int set,
2257 unsigned int index)
2258 {
2259 tws_hash_ptr_t element;
2260
2261 if(tws->obj_free_count[set] < tws->number_of_lines * tws->number_of_elements) {
2262 element = &(tws->table_ele[set][tws->obj_free_count[set]]);
2263 tws->obj_free_count[set]+=1;
2264 } else if(tws->free_hash_ele[set] == NULL) {
2265 return NULL;
2266 } else {
2267 element = tws->free_hash_ele[set];
2268 if(element == NULL)
2269 return element;
2270 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2271 }
2272 element->element = 0;
2273 element->next = tws->table[set][index];
2274 tws->table[set][index] = element;
2275 return element;
2276 }
2277
2278 /*
2279 * Gets new hash element for addr hash from free pools
2280 * TWS must be locked
2281 */
2282
2283 tws_hash_ptr_t
2284 new_addr_hash(
2285 tws_hash_t tws,
2286 unsigned int set,
2287 unsigned int index)
2288 {
2289 tws_hash_ptr_t element;
2290
2291 if(tws->addr_free_count[set]
2292 < tws->number_of_lines * tws->number_of_elements) {
2293 element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
2294 tws->addr_free_count[set]+=1;
2295 } else if(tws->free_hash_ele[set] == NULL) {
2296 return NULL;
2297 } else {
2298 element = tws->free_hash_ele[set];
2299 if(element == NULL)
2300 return element;
2301 tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
2302 }
2303 element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
2304 element->next = tws->table[set][index];
2305 tws->table[set][index] = element;
2306 return element;
2307 }