/*
 * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 *	File:	vm/task_working_set.c
 *	Author:	Chris Youngworth
 *
 *	Working set detection and maintenance module
 */
#include <mach/mach_types.h>
#include <mach/shared_memory_server.h>
#include <vm/task_working_set.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/sched.h>

extern unsigned sched_tick;
extern zone_t lsf_zone;
/* declarations for internal use only routines */

tws_startup_t
tws_create_startup_list(
	tws_hash_t	tws);

unsigned int
tws_startup_list_lookup(
	tws_startup_t	startup,
	vm_offset_t	addr);

kern_return_t
tws_internal_startup_send(
	tws_hash_t	tws);

void
tws_traverse_address_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr);

void
tws_traverse_object_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		page_mask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list);

tws_hash_ptr_t
new_obj_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index);

tws_hash_ptr_t
new_addr_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index);

int tws_test_for_community(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		threshold,
	unsigned int		*page_mask);
/* Note: all of the routines below depend on the associated map lock for */
/* synchronization; the map lock is held when the routines are called    */
/* and when they return.                                                  */
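/*
 * Illustrative sketch (an assumption for clarity, not code from this file):
 * a caller takes the map lock around these routines, e.g.
 *
 *	vm_map_lock(map);
 *	kr = tws_lookup(tws, offset, object, &line);
 *	vm_map_unlock(map);
 *
 * The names mirror the lookup routine defined later in this file; the
 * surrounding calls are the usual vm_map locking macros.
 */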
tws_hash_t
tws_hash_create(
	unsigned int lines,
	unsigned int rows,
	unsigned int style)
{
	tws_hash_t	tws;

	if ((style != TWS_HASH_STYLE_BASIC) &&
			(style != TWS_HASH_STYLE_SIGNAL)) {
		return((tws_hash_t)NULL);
	}

	tws = (tws_hash_t)(kalloc(sizeof(struct tws_hash)));
	if(tws == (tws_hash_t)NULL)
		return tws;

	if((tws->table[0] = (tws_hash_ptr_t *)
			kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
			== NULL) {
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	if((tws->table_ele[0] = (tws_hash_ptr_t)
			kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
			== NULL) {
		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
				* lines * rows);
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	if((tws->alt_ele[0] = (tws_hash_ptr_t)
			kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
			== NULL) {
		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
				* lines * rows);
		kfree((vm_offset_t)tws->table_ele[0],
				sizeof(struct tws_hash_ptr)
				* lines * rows);
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	if((tws->cache[0] = (struct tws_hash_line *)
			kalloc(sizeof(struct tws_hash_line) * lines))
			== NULL) {
		kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
				* lines * rows);
		kfree((vm_offset_t)tws->table_ele[0],
				sizeof(struct tws_hash_ptr)
				* lines * rows);
		kfree((vm_offset_t)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
				* lines * rows);
		kfree((vm_offset_t)tws, sizeof(struct tws_hash));
		return (tws_hash_t)NULL;
	}
	tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
	tws->obj_free_count[0] = 0;
	tws->addr_free_count[0] = 0;

	/* most defaults are such that a bzero will initialize */
	bzero((char *)tws->table[0], sizeof(tws_hash_ptr_t)
			* lines * rows);
	bzero((char *)tws->table_ele[0], sizeof(struct tws_hash_ptr)
			* lines * rows);
	bzero((char *)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
			* lines * rows);
	bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
			* lines);

	mutex_init(&tws->lock, ETAP_VM_MAP);
	tws->style = style;
	tws->current_line = 0;
	tws->pageout_count = 0;
	tws->line_count = 0;
	tws->startup_cache = NULL;
	tws->startup_name = NULL;
	tws->number_of_lines = lines;
	tws->number_of_elements = rows;
	tws->expansion_count = 1;
	tws->lookup_count = 0;
	tws->insert_count = 0;
	tws->time_of_creation = sched_tick;

	return tws;
}
void
tws_hash_line_clear(
	tws_hash_t	tws,
	tws_hash_line_t	hash_line,
	boolean_t	live)
{
	struct tws_hash_ele	*hash_ele;
	struct tws_hash_ptr	**trailer;
	struct tws_hash_ptr	**free_list;
	tws_hash_ele_t		addr_ele;
	int			index;
	unsigned int		i, j, k;
	int			dump_pmap;
	if(tws->line_count < tws->number_of_lines) {
		tws->line_count++;
		dump_pmap = 1;
	} else {
		if(tws->pageout_count != vm_pageout_scan_event_counter) {
			tws->pageout_count =
				vm_pageout_scan_event_counter;
			dump_pmap = 1;
		} else {
			dump_pmap = 0;
		}
	}
	hash_line->ele_count = 0;

	for (i=0; i<tws->number_of_elements; i++) {
		hash_ele = &(hash_line->list[i]);
		if(hash_ele->object != 0) {

			vm_object_offset_t	local_off = 0;
			tws_hash_ptr_t		cache_ele;

			index = alt_tws_hash(
				hash_ele->page_addr & TWS_HASH_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);
			if(cache_ele != NULL) {
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if(addr_ele != hash_ele)
					panic("tws_hash_line_clear:"
						" out of sync\n");
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}

			index = alt_tws_hash(
				(hash_ele->page_addr - 0x1f000)
						& TWS_HASH_OFF_MASK,
				tws->number_of_elements,
				tws->number_of_lines);

			tws_traverse_address_hash_list(tws, index,
				hash_ele->page_addr, hash_ele->object,
				hash_ele->offset, hash_ele->map,
				&cache_ele, &trailer, &free_list, 0);

			if(cache_ele != NULL) {
				addr_ele = (tws_hash_ele_t)((unsigned int)
					(cache_ele->element) & ~TWS_ADDR_HASH);
				if(addr_ele != hash_ele)
					panic("tws_hash_line_clear: "
						"out of sync\n");
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}

			if((hash_ele->map != NULL) && (live)) {
				vm_page_t	p;

				for (j = 0x1; j != 0; j = j<<1) {
					if(j & hash_ele->page_cache) {
						p = vm_page_lookup(hash_ele->object,
							hash_ele->offset + local_off);
						if((p != NULL) && (p->wire_count == 0)
								&& (dump_pmap == 1)) {
							pmap_remove_some_phys((pmap_t)
								vm_map_pmap(current_map()),
								p->phys_page);
						}
					}
					local_off += PAGE_SIZE_64;
				}
			}

			if(tws->style == TWS_HASH_STYLE_SIGNAL) {
				vm_object_deallocate(hash_ele->object);
				vm_map_deallocate(hash_ele->map);
			}

			index = do_tws_hash(hash_ele->object, hash_ele->offset,
					tws->number_of_elements,
					tws->number_of_lines);

			tws_traverse_object_hash_list(tws,
				index, hash_ele->object, hash_ele->offset,
				0xFFFFFFFF, &cache_ele, &trailer, &free_list);
			if((cache_ele != NULL) && (cache_ele->element == hash_ele)) {
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
			}
			hash_ele->object = 0;
		}
	}
}
kern_return_t
tws_internal_lookup(
	tws_hash_t		tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	tws_hash_line_t		*line)
{
	int		index;
	int		set;
	int		ele_line;
	vm_offset_t	pagenum;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	*trailer;
	tws_hash_ptr_t	*free_list;

	/* don't cache private objects */
	if(object->private)
		return KERN_SUCCESS;

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);

	tws->lookup_count++;
	if(tws->lookup_count == 0)
		tws->insert_count = 0;
	if(tws->startup_name != NULL) {
		int age_of_cache;
		age_of_cache = ((sched_tick
			- tws->time_of_creation) >> SCHED_TICK_SHIFT);
		if (age_of_cache > 35) {
			return KERN_OPERATION_TIMED_OUT;
		}
	}

	if(tws->lookup_count > (4 * tws->expansion_count
			* tws->number_of_elements * tws->number_of_lines) &&
			(tws->lookup_count > (2 * tws->insert_count))) {
		if(tws->startup_cache) {
			int age_of_cache;
			age_of_cache = ((sched_tick
				- tws->time_of_creation) >> SCHED_TICK_SHIFT);
			if (age_of_cache > 60) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
	}

	pagenum = (vm_offset_t)(offset & TWS_INDEX_MASK);
	pagenum = pagenum >> 12;
	pagenum = 1 << pagenum;	/* get the appropriate page in 32 page block */
	tws_traverse_object_hash_list(tws, index, object, offset, pagenum,
			&cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		set = cache_ele->element->line/tws->number_of_lines;
		ele_line = cache_ele->element->line
				- (set * tws->number_of_lines);
		*line = &tws->cache[set][ele_line];
		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}
kern_return_t
tws_lookup(
	tws_hash_t		tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	tws_hash_line_t		*line)
{
	kern_return_t	kr;

	if(!tws_lock_try(tws)) {
		return KERN_FAILURE;
	}
	kr = tws_internal_lookup(tws,
			offset, object, line);
	tws_unlock(tws);
	return kr;
}
kern_return_t
tws_expand_working_set(
	vm_offset_t	tws,
	int		line_count,
	boolean_t	dump_data)
{
	tws_hash_t		new_tws;
	tws_hash_t		old_tws;
	unsigned int		i, j, k;
	struct tws_hash		temp;

	old_tws = (tws_hash_t)tws;

	/* Note we do an elaborate dance to preserve the header that */
	/* task is pointing to.  In this way we can avoid taking a task */
	/* lock every time we want to access the tws */
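	/*
	 * Sketch of the dance (summarizing the code below): the fields of
	 * new_tws are staged into the stack temporary, new_tws then takes
	 * on old_tws's fields, and finally old_tws -- the header the task
	 * points at -- receives the staged fields.  The task's pointer
	 * never has to move.
	 */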
	if (old_tws->number_of_lines >= line_count) {
		return KERN_FAILURE;
	}
	if((new_tws = tws_hash_create(line_count,
			old_tws->number_of_elements, old_tws->style)) == 0) {
		return(KERN_NO_SPACE);
	}
	tws_lock(old_tws);

	if(!dump_data) {
	   for(i = 0; i<old_tws->number_of_lines; i++) {
	      for(j = 0; j<old_tws->number_of_elements; j++) {
		 for(k = 0; k<old_tws->expansion_count; k++) {
			tws_hash_ele_t		entry;
			vm_object_offset_t	paddr;
			unsigned int		page_index;
			entry = &old_tws->cache[k][i].list[j];
			if(entry->object != 0) {
				paddr = 0;
				for(page_index = 1; page_index != 0;
						page_index = page_index << 1) {
					if (entry->page_cache & page_index) {
						tws_insert(new_tws,
							entry->offset+paddr,
							entry->object,
							entry->page_addr+paddr,
							entry->map);
					}
					paddr += PAGE_SIZE;
				}
			}
		 }
	      }
	   }
	}

	temp.style = new_tws->style;
	temp.current_line = new_tws->current_line;
	temp.pageout_count = new_tws->pageout_count;
	temp.line_count = new_tws->line_count;
	temp.number_of_lines = new_tws->number_of_lines;
	temp.number_of_elements = new_tws->number_of_elements;
	temp.expansion_count = new_tws->expansion_count;
	temp.lookup_count = new_tws->lookup_count;
	temp.insert_count = new_tws->insert_count;
	for(i = 0; i<new_tws->expansion_count; i++) {
		temp.obj_free_count[i] = new_tws->obj_free_count[i];
		temp.addr_free_count[i] = new_tws->addr_free_count[i];
		temp.free_hash_ele[i] = new_tws->free_hash_ele[i];
		temp.table[i] = new_tws->table[i];
		temp.table_ele[i] = new_tws->table_ele[i];
		temp.alt_ele[i] = new_tws->alt_ele[i];
		temp.cache[i] = new_tws->cache[i];
	}

	new_tws->style = old_tws->style;
	new_tws->current_line = old_tws->current_line;
	new_tws->pageout_count = old_tws->pageout_count;
	new_tws->line_count = old_tws->line_count;
	new_tws->number_of_lines = old_tws->number_of_lines;
	new_tws->number_of_elements = old_tws->number_of_elements;
	new_tws->expansion_count = old_tws->expansion_count;
	new_tws->lookup_count = old_tws->lookup_count;
	new_tws->insert_count = old_tws->insert_count;
	for(i = 0; i<old_tws->expansion_count; i++) {
		new_tws->obj_free_count[i] = old_tws->obj_free_count[i];
		new_tws->addr_free_count[i] = old_tws->addr_free_count[i];
		new_tws->free_hash_ele[i] = old_tws->free_hash_ele[i];
		new_tws->table[i] = old_tws->table[i];
		new_tws->table_ele[i] = old_tws->table_ele[i];
		new_tws->alt_ele[i] = old_tws->alt_ele[i];
		new_tws->cache[i] = old_tws->cache[i];
	}

	old_tws->style = temp.style;
	old_tws->current_line = temp.current_line;
	old_tws->pageout_count = temp.pageout_count;
	old_tws->line_count = temp.line_count;
	old_tws->number_of_lines = temp.number_of_lines;
	old_tws->number_of_elements = temp.number_of_elements;
	old_tws->expansion_count = temp.expansion_count;
	old_tws->lookup_count = temp.lookup_count;
	old_tws->insert_count = temp.insert_count;
	for(i = 0; i<temp.expansion_count; i++) {
		old_tws->obj_free_count[i] = temp.obj_free_count[i];
		old_tws->addr_free_count[i] = temp.addr_free_count[i];
		old_tws->free_hash_ele[i] = NULL;
		old_tws->table[i] = temp.table[i];
		old_tws->table_ele[i] = temp.table_ele[i];
		old_tws->alt_ele[i] = temp.alt_ele[i];
		old_tws->cache[i] = temp.cache[i];
	}

	tws_unlock(old_tws);
	tws_hash_destroy(new_tws);
	return KERN_SUCCESS;
}
tws_hash_t test_tws = 0;
kern_return_t
tws_insert(
	tws_hash_t		tws,
	vm_object_offset_t	offset,
	vm_object_t		object,
	vm_offset_t		page_addr,
	vm_map_t		map)
{
	unsigned int		index;
	unsigned int		alt_index;
	unsigned int		index_enum[2];
	unsigned int		ele_index;
	tws_hash_ptr_t		cache_ele;
	tws_hash_ptr_t		obj_ele = NULL;
	tws_hash_ptr_t		addr_ele = NULL;
	tws_hash_ptr_t		*trailer;
	tws_hash_ptr_t		*free_list;
	tws_hash_ele_t		target_element = NULL;
	unsigned int		i;
	unsigned int		current_line;
	unsigned int		set;
	int			ctr;
	unsigned int		startup_cache_line;
	vm_offset_t		startup_page_addr;
	int			age_of_cache;
	int			ask_for_startup_cache_release = 0;

	if(!tws_lock_try(tws)) {
		return KERN_FAILURE;
	}
	tws->insert_count++;
	current_line = 0xFFFFFFFF;

	startup_cache_line = 0;
	startup_page_addr =
		page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	if(tws->startup_cache) {
		age_of_cache = ((sched_tick - tws->time_of_creation)
						>> SCHED_TICK_SHIFT);
		startup_cache_line = tws_startup_list_lookup(
					tws->startup_cache, startup_page_addr);
		if(tws == test_tws) {
			printf("cache_lookup, result = 0x%x, addr = 0x%x, object 0x%x, offset 0x%x%x\n", startup_cache_line, startup_page_addr, object, offset);
		}
		if(age_of_cache > 60) {
			ask_for_startup_cache_release = 1;
		}
	}
	if((tws->startup_name != NULL) && (tws->mod == 0)) {
		/* Ensure as good a working set as possible */
		pmap_remove(map->pmap, 0, GLOBAL_SHARED_TEXT_SEGMENT);
		pmap_remove(map->pmap,
			GLOBAL_SHARED_DATA_SEGMENT
			+ SHARED_DATA_REGION_SIZE, 0xFFFFFFFFFFFFF000);
	}

	/* This next bit of code, the hash and the alternate hash, */
	/* is all made necessary because of IPC COW */

	/* Note: the use of page_addr modified by delta from offset */
	/* frame base means we may miss some previous entries.  However */
	/* we will not miss the present entry.  This is most important */
	/* in avoiding duplication of entries against long lived non-cow */
	/* objects */
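	/*
	 * Worked example (illustrative, assuming the 32-page (0x20000 byte)
	 * frame implied by TWS_INDEX_MASK == 0x1f000): for offset 0x21000,
	 * offset & TWS_HASH_OFF_MASK == 0x20000, so the frame base address
	 * is page_addr - (offset - 0x20000) == page_addr - 0x1000, i.e. the
	 * address at which the first page of this 32 page block would map.
	 */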
	index_enum[0] = alt_tws_hash(
			page_addr & TWS_HASH_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	index_enum[1] = alt_tws_hash(
			(page_addr - 0x1f000) & TWS_HASH_OFF_MASK,
			tws->number_of_elements, tws->number_of_lines);

	for(ctr = 0; ctr < 2;) {
		tws_hash_ele_t	resident;
		tws_traverse_address_hash_list(tws,
				index_enum[ctr], page_addr, NULL,
				0, NULL,
				&cache_ele, &trailer, &free_list, 1);
		if(cache_ele != NULL) {
			/* found one */
			resident = (tws_hash_ele_t)((unsigned int)
					cache_ele->element & ~TWS_ADDR_HASH);
			if((object == resident->object) &&
					(resident->offset ==
					(offset & TWS_HASH_OFF_MASK))) {
				/* This is our object/offset */
				resident->page_cache
						|= startup_cache_line;
				resident->page_cache |=
					(1<<(((vm_offset_t)
					(offset & TWS_INDEX_MASK))>>12));
				tws_unlock(tws);
				if(ask_for_startup_cache_release)
					return KERN_OPERATION_TIMED_OUT;
				return KERN_SUCCESS;
			}
			if((object->shadow ==
					resident->object) &&
					((resident->offset
					+ object->shadow_offset)
					== (offset & TWS_HASH_OFF_MASK))) {
				/* if we just shadowed, inherit */
				/* access pattern from parent */
				startup_cache_line |=
					resident->page_cache;
				/* throw out old entry */
				resident->page_cache = 0;
				break;
			} else {
				resident->page_cache &=
					~(1<<(((vm_offset_t)(page_addr
						- resident->page_addr))
								>>12));
			}
			/* Throw out old entry if there are no */
			/* more pages in cache */
			if(resident->page_cache == 0) {
				/* delete addr hash entry */
				cache_ele->element = 0;
				*trailer = cache_ele->next;
				cache_ele->next = *free_list;
				*free_list = cache_ele;
				/* go after object hash */
				index = do_tws_hash(
					resident->object,
					resident->offset,
					tws->number_of_elements,
					tws->number_of_lines);
				tws_traverse_object_hash_list(tws,
					index, resident->object,
					resident->offset,
					0xFFFFFFFF, &cache_ele,
					&trailer, &free_list);
				if(cache_ele != NULL) {
					if(tws->style ==
						TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(
						   cache_ele->element->object);
						vm_map_deallocate(
						   cache_ele->element->map);
					}
					current_line =
						cache_ele->element->line;
					set = current_line
						/tws->number_of_lines;
					current_line -= set *
						tws->number_of_lines;
					if(cache_ele->element->object != 0) {
						cache_ele->element->object = 0;
						tws->cache[set]
						   [current_line].ele_count--;
					}
					cache_ele->element = 0;
					*trailer = cache_ele->next;
					cache_ele->next = *free_list;
					*free_list = cache_ele;
				}
			}
			continue;
		}
		ctr+=1;
	}

	/*
	 * We may or may not have a current line setting coming out of
	 * the code above.  If we have a current line it means we can
	 * choose to back-fill the spot vacated by a previous entry.
	 * We have yet to do a definitive check using the original obj/off.
	 * We will do that now and override the current line if we
	 * find an element.
	 */
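	/*
	 * Illustrative note (assumed values): an element's line field
	 * encodes current_line + (set * number_of_lines), so with, say,
	 * 20 lines per expansion a stored line of 25 decodes to set 1,
	 * line 5, as the divisions below recover.
	 */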
	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);

	alt_index = index_enum[0];

	tws_traverse_object_hash_list(tws, index, object, offset,
			0xFFFFFFFF, &cache_ele, &trailer, &free_list);
	if(cache_ele != NULL) {
		obj_ele = cache_ele;
		current_line = cache_ele->element->line;
		set = current_line/tws->number_of_lines;
		current_line -= set * tws->number_of_lines;
		target_element = cache_ele->element;

		/* Now check to see if we have a hash addr for it */
		tws_traverse_address_hash_list(tws,
				alt_index, obj_ele->element->page_addr,
				obj_ele->element->object,
				obj_ele->element->offset,
				obj_ele->element->map,
				&cache_ele, &trailer, &free_list, 0);
		if(cache_ele != NULL) {
			addr_ele = cache_ele;
		} else {
			addr_ele = new_addr_hash(tws, set, alt_index);
			/* if cannot allocate just do without */
			/* we'll get it next time around */
		}
	}

	if(tws->style == TWS_HASH_STYLE_SIGNAL) {
		vm_object_reference(object);
		vm_map_reference(map);
	}

	if(current_line == 0xFFFFFFFF) {
		current_line = tws->current_line;
		set = current_line/tws->number_of_lines;
		current_line = current_line - (set * tws->number_of_lines);

#ifdef notdef
		if(cache_full) {
			tws->current_line = tws->number_of_lines - 1;
		}
#endif
		if(tws->cache[set][current_line].ele_count
				>= tws->number_of_elements) {
			current_line++;
			tws->current_line++;
			if(current_line == tws->number_of_lines) {
				set++;
				current_line = 0;
				if (set == tws->expansion_count) {
				   if((tws->lookup_count <
					(2 * tws->insert_count)) &&
					(set<TWS_HASH_EXPANSION_MAX)) {
					tws->lookup_count = 0;
					tws->insert_count = 0;
					if(tws->number_of_lines
						< TWS_HASH_LINE_COUNT) {
						tws->current_line--;
						tws_unlock(tws);
						return KERN_NO_SPACE;
					}
					if((tws->table[set] = (tws_hash_ptr_t *)
						kalloc(sizeof(tws_hash_ptr_t)
						* tws->number_of_lines
						* tws->number_of_elements))
							== NULL) {
						set = 0;
					} else if((tws->table_ele[set] =
						(tws_hash_ptr_t)
						kalloc(sizeof(struct tws_hash_ptr)
						* tws->number_of_lines
						* tws->number_of_elements))
							== NULL) {
						kfree((vm_offset_t)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						set = 0;
					} else if((tws->alt_ele[set] =
						(tws_hash_ptr_t)
						kalloc(sizeof(struct tws_hash_ptr)
						* tws->number_of_lines
						* tws->number_of_elements))
							== NULL) {
						kfree((vm_offset_t)tws->table_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						kfree((vm_offset_t)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						tws->table[set] = NULL;
						set = 0;
					} else if((tws->cache[set] =
						(struct tws_hash_line *)
						kalloc(sizeof
						(struct tws_hash_line)
						* tws->number_of_lines))
							== NULL) {
						kfree((vm_offset_t)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						kfree((vm_offset_t)tws->table_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						kfree((vm_offset_t)tws->alt_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						tws->table[set] = NULL;
						set = 0;
					} else {
						tws->free_hash_ele[set] =
							(tws_hash_ptr_t)0;
						tws->obj_free_count[set] = 0;
						tws->addr_free_count[set] = 0;
						bzero((char *)tws->table[set],
							sizeof(tws_hash_ptr_t)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->table_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->alt_ele[set],
							sizeof(struct tws_hash_ptr)
							* tws->number_of_lines
							* tws->number_of_elements);
						bzero((char *)tws->cache[set],
							sizeof(struct tws_hash_line)
							* tws->number_of_lines);
					}
				   } else {
					int age_of_cache;
					age_of_cache =
						((sched_tick -
						tws->time_of_creation)
						>> SCHED_TICK_SHIFT);

					if((tws->startup_cache) &&
						(age_of_cache > 60)) {
						ask_for_startup_cache_release = 1;
					}
					if((tws->startup_name != NULL) &&
						(age_of_cache > 15)) {
						tws->current_line--;
						tws_unlock(tws);
						return KERN_OPERATION_TIMED_OUT;
					}
					if((tws->startup_name != NULL) &&
						(age_of_cache < 15)) {
						/* If we are creating a */
						/* cache, don't lose the */
						/* early info */
						tws->current_line--;
						tws_unlock(tws);
						return KERN_FAILURE;
					}
					tws->lookup_count = 0;
					tws->insert_count = 0;
					set = 0;
				   }
				}
				tws->current_line = set * tws->number_of_lines;
			}
			if(set < tws->expansion_count) {
				tws_hash_line_clear(tws,
					&(tws->cache[set][current_line]), TRUE);
				if(tws->cache[set][current_line].ele_count
						>= tws->number_of_elements) {
					if(tws->style == TWS_HASH_STYLE_SIGNAL) {
						vm_object_deallocate(object);
						vm_map_deallocate(map);
					}
					tws_unlock(tws);
					return KERN_FAILURE;
				}
			} else {
				tws->expansion_count++;
			}
		}
	}

	/* set object hash element */
	if(obj_ele == NULL) {
		obj_ele = new_obj_hash(tws, set, index);
		if(obj_ele == NULL) {
			tws->cache[set][current_line].ele_count
				= tws->number_of_elements;
			tws_unlock(tws);
			return KERN_FAILURE;
		}
	}

	/* set address hash element */
	if(addr_ele == NULL) {
		addr_ele = new_addr_hash(tws, set, alt_index);
	}

	if(target_element == NULL) {
		ele_index = 0;
		for(i = 0; i<tws->number_of_elements; i++) {
			if(tws->cache[set][current_line].
					list[ele_index].object == 0) {
				break;
			}
			ele_index++;
			if(ele_index >= tws->number_of_elements)
				ele_index = 0;
		}

		if(i == tws->number_of_elements)
			panic("tws_insert: no free elements");

		target_element =
			&(tws->cache[set][current_line].list[ele_index]);

		tws->cache[set][current_line].ele_count++;
	}

	obj_ele->element = target_element;
	if(addr_ele) {
		addr_ele->element = (tws_hash_ele_t)
			(((unsigned int)target_element) | TWS_ADDR_HASH);
	}
	target_element->object = object;
	target_element->offset = offset & TWS_HASH_OFF_MASK;
	target_element->page_addr =
		page_addr - (offset - (offset & TWS_HASH_OFF_MASK));
	target_element->map = map;
	target_element->line =
		current_line + (set * tws->number_of_lines);
	if(startup_cache_line) {
		target_element->page_cache = startup_cache_line;
	}
	target_element->page_cache |=
		1<<(((vm_offset_t)(offset & TWS_INDEX_MASK))>>12);

	tws_unlock(tws);
	if(ask_for_startup_cache_release)
		return KERN_OPERATION_TIMED_OUT;
	return KERN_SUCCESS;
}
/*
 * lengthen the cluster of pages by the number of pages encountered in the
 * working set up to the limit requested by the caller.  The object needs
 * to be locked on entry.  The map does not because the tws_lookup function
 * is used only to find if there is an entry in the cache.  No transient
 * data from the cache is de-referenced.
 */
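/*
 * Illustrative call (mirrors the use in tws_line_signal() below; the
 * 0x16000 maximum cluster length is taken from that caller):
 *
 *	before = offset;
 *	after  = offset + PAGE_SIZE_64;
 *	tws_build_cluster(tws, object, &before, &after, 0x16000);
 *
 * On return, pages in [before, after) are requested from the pager.
 */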
#if	MACH_PAGEMAP
/*
 * MACH page map - an optional optimization where a bit map is maintained
 * by the VM subsystem for internal objects to indicate which pages of
 * the object currently reside on backing store.  This existence map
 * duplicates information maintained by the vnode pager.  It is
 * created at the time of the first pageout against the object, i.e.
 * at the same time the pager for the object is created.  The optimization
 * is designed to eliminate pager interaction overhead, if it is
 * 'known' that the page does not exist on backing store.
 *
 * LOOK_FOR() evaluates to TRUE if the page specified by object/offset is
 * either marked as paged out in the existence map for the object or no
 * existence map exists for the object.  LOOK_FOR() is one of the
 * criteria in the decision to invoke the pager.  It is also used as one
 * of the criteria to terminate the scan for adjacent pages in a clustered
 * pagein operation.  Note that LOOK_FOR() always evaluates to TRUE for
 * permanent objects.  Note also that if the pager for an internal object
 * has not been created, the pager is not invoked regardless of the value
 * of LOOK_FOR() and that clustered pagein scans are only done on an object
 * for which a pager has been created.
 *
 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
 * is marked as paged out in the existence map for the object.
 * PAGED_OUT() is used to determine if a page has already been pushed
 * into a copy object in order to avoid a redundant page out operation.
 */
#define LOOK_FOR(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			!= VM_EXTERNAL_STATE_ABSENT)
#define PAGED_OUT(o, f) (vm_external_state_get((o)->existence_map, (f)) \
			== VM_EXTERNAL_STATE_EXISTS)
#else /* MACH_PAGEMAP */
/*
 * If the MACH page map optimization is not enabled,
 * LOOK_FOR() always evaluates to TRUE.  The pager will always be
 * invoked to resolve missing pages in an object, assuming the pager
 * has been created for the object.  In a clustered page operation, the
 * absence of a page on backing store cannot be used to terminate
 * a scan for adjacent pages since that information is available only in
 * the pager.  Hence pages that may not be paged out are potentially
 * included in a clustered request.  The vnode pager is coded to deal
 * with any combination of absent/present pages in a clustered
 * pagein request.  PAGED_OUT() always evaluates to FALSE, i.e. the pager
 * will always be invoked to push a dirty page into a copy object assuming
 * a pager has been created.  If the page has already been pushed, the
 * pager will ignore the new request.
 */
#define LOOK_FOR(o, f) TRUE
#define PAGED_OUT(o, f) FALSE
#endif /* MACH_PAGEMAP */
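/*
 * Usage sketch (illustrative only): both cluster scans below gate pager
 * work on these macros, e.g.
 *
 *	if ((object->existence_map != NULL) && !LOOK_FOR(object, after))
 *		break;	(stop the scan, page is known absent)
 *
 * and a redundant push to a copy object can be skipped when
 * PAGED_OUT(object, offset) is TRUE.
 */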
void
tws_build_cluster(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	*start,
	vm_object_offset_t	*end,
	vm_size_t		max_length)
{
	tws_hash_line_t		line;
	vm_object_offset_t	before = *start;
	vm_object_offset_t	after = *end;
	vm_object_offset_t	original_start = *start;
	vm_object_offset_t	original_end = *end;
	vm_size_t		length = (vm_size_t)(*end - *start);
	vm_page_t		m;
	kern_return_t		kret;
	vm_object_offset_t	object_size;
	int			age_of_cache;
	int			pre_heat_size;
	unsigned int		ele_cache;
	unsigned int		end_cache = 0;
	unsigned int		start_cache = 0;

	if((object->private) || !(object->pager))
		return;

	if (!object->internal) {
		kret = vnode_pager_get_object_size(
			object->pager,
			&object_size);
	} else {
		object_size = object->size;
	}

	if((!tws) || (!tws_lock_try(tws))) {
		return;
	}

	age_of_cache = ((sched_tick
			- tws->time_of_creation) >> SCHED_TICK_SHIFT);

	/* When pre-heat files are not available, resort to speculation */
	/* based on size of file */

	if(tws->startup_cache || object->internal || age_of_cache > 15 ||
		(age_of_cache > 5 &&
		vm_page_free_count < (vm_page_free_target * 2) )) {
		pre_heat_size = 0;
	} else {
		if (object_size > (vm_object_offset_t)(1024 * 1024))
			pre_heat_size = 8 * PAGE_SIZE;
		else if (object_size > (vm_object_offset_t)(128 * 1024))
			pre_heat_size = 4 * PAGE_SIZE;
		else
			pre_heat_size = 2 * PAGE_SIZE;
	}

	if ((age_of_cache < 10) && (tws->startup_cache)) {
		if ((max_length >= ((*end - *start)
				+ (32 * PAGE_SIZE))) &&
				(tws_test_for_community(tws, object,
					*start, 3, &ele_cache))) {
			int	expanded;

			start_cache = ele_cache;
			*start = *start & TWS_HASH_OFF_MASK;
			*end = *start + (32 * PAGE_SIZE_64);
			if(*end > object_size) {
				*end = trunc_page_64(object_size);
				max_length = 0;
				if(before >= *end) {
					*end = after;
				} else {
					end_cache = ele_cache;
				}
			} else {
				end_cache = ele_cache;
			}
			while (max_length > ((*end - *start)
						+ (32 * PAGE_SIZE))) {
				expanded = 0;
				after = *end;
				before = *start - PAGE_SIZE_64;
				if((*end <= (object->size
						+ (32 * PAGE_SIZE_64))) &&
						(tws_test_for_community(tws,
							object, after,
							5, &ele_cache))) {
					*end = *end +
						(32 * PAGE_SIZE_64);
					if(*end > object_size) {
						*end = trunc_page_64(object_size);
						max_length = 0;
						if(*start >= *end) {
							*end = after;
						}
					}
					end_cache = ele_cache;
					expanded = 1;
				}
				if (max_length > ((*end - *start)
						+ (32 * PAGE_SIZE_64))) {
					if((*start >= (32 * PAGE_SIZE_64)) &&
						(tws_test_for_community(tws, object,
							before, 5, &ele_cache))) {
						*start = before;
						start_cache = ele_cache;
						expanded = 1;
					}
				}
				if(expanded == 0)
					break;
			}

			if(start_cache != 0) {
				unsigned int mask;

				for (mask = 1; mask != 0; mask = mask << 1) {
					if (*start == original_start)
						break;
					if (!(start_cache & mask))
						*start += PAGE_SIZE_64;
					else
						break;
				}
			}
			if(end_cache != 0) {
				unsigned int mask;

				for (mask = 0x80000000;
						mask != 0; mask = mask >> 1) {
					if (*end == original_end)
						break;
					if(!(end_cache & mask))
						*end -= PAGE_SIZE_64;
					else
						break;
				}
			}

			if (*start >= *end)
				panic("bad clipping occurred\n");

			tws_unlock(tws);
			return;
		}
	}

	while ((length < max_length) &&
			(object_size >=
			(after + PAGE_SIZE_64))) {
		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, after, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t	extend;

				extend = after + PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}

		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, after))) {
			break;
		}

		if (vm_page_lookup(object, after) != VM_PAGE_NULL) {
			/* we can bridge resident pages */
			after += PAGE_SIZE_64;
			length += PAGE_SIZE;
			continue;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager.  If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, after);
		object->absent_count++;
		after += PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	*end = after;

	while (length < max_length) {
		if (before == 0)
			break;
		before -= PAGE_SIZE_64;

		if(length >= pre_heat_size) {
			if(tws_internal_lookup(tws, before, object,
					&line) != KERN_SUCCESS) {
				vm_object_offset_t	extend;

				extend = before;
				if (extend == 0)
					break;
				extend -= PAGE_SIZE_64;
				if(tws_internal_lookup(tws, extend, object,
						&line) != KERN_SUCCESS) {
					break;
				}
			}
		}
		if ((object->existence_map != NULL)
				&& (!LOOK_FOR(object, before))) {
			break;
		}

		if (vm_page_lookup(object, before) != VM_PAGE_NULL) {
			/* we can bridge resident pages */
			*start -= PAGE_SIZE_64;
			length += PAGE_SIZE;
			continue;
		}

		if (object->internal) {
			/*
			 * need to acquire a real page in
			 * advance because this acts as
			 * a throttling mechanism for
			 * data_requests to the default
			 * pager.  If this fails, give up
			 * trying to find any more pages
			 * in the cluster and send off the
			 * request for what we already have.
			 */
			if ((m = vm_page_grab()) == VM_PAGE_NULL) {
				break;
			}
		} else if ((m = vm_page_grab_fictitious())
				== VM_PAGE_NULL) {
			break;
		}
		m->absent = TRUE;
		m->unusual = TRUE;
		m->clustered = TRUE;
		m->list_req_pending = TRUE;

		vm_page_insert(m, object, before);
		object->absent_count++;
		*start -= PAGE_SIZE_64;
		length += PAGE_SIZE;
	}
	tws_unlock(tws);
}
void
tws_line_signal(
	tws_hash_t	tws,
	vm_map_t	map,
	tws_hash_line_t	hash_line,
	vm_offset_t	target_page)
{
	unsigned int		i, j;
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_object_offset_t	before;
	vm_object_offset_t	after;
	struct tws_hash_ele	*element;
	vm_page_t		m, p;
	kern_return_t		rc;

	if(tws->style != TWS_HASH_STYLE_SIGNAL)
		return;

	for (i=0; i<tws->number_of_elements; i++) {

		vm_object_offset_t local_off = 0;

		if(hash_line->list[i].object == 0)
			continue;

		element = &hash_line->list[i];

		if (element->page_addr == target_page)
			continue;

		j = 1;
		while (j != 0) {
			if(j & element->page_cache)
				break;
			j <<= 1;
			local_off += PAGE_SIZE_64;
		}
		object = element->object;
		offset = element->offset + local_off;

		/* first try a fast test to speed up no-op signal */
		if (((p = vm_page_lookup(object, offset)) != NULL)
				|| (object->pager == NULL)
				|| (object->shadow_severed)) {
			continue;
		}

		if((!object->alive) ||
				(!object->pager_created) || (!object->pager_ready))
			continue;

		if (object->internal) {
			if (object->existence_map == NULL) {
				if (object->shadow)
					continue;
			} else {
				if(!LOOK_FOR(object, offset))
					continue;
			}
		}

		vm_object_reference(object);
		vm_map_unlock(map);

		if(object->internal) {
			m = vm_page_grab();
		} else {
			m = vm_page_grab_fictitious();
		}

		if(m == NULL) {
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		vm_object_lock(object);
		if (((p = vm_page_lookup(object, offset)) != NULL)
				|| (object->pager == NULL)
				|| (object->shadow_severed)) {
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			continue;
		}

		m->absent = TRUE;
		m->unusual = TRUE;
		vm_page_insert(m, object, offset);

		if (object->absent_count > vm_object_absent_max) {
			VM_PAGE_FREE(m);
			vm_object_unlock(object);
			vm_object_deallocate(object);
			vm_map_lock(map);
			break;
		}
		m->list_req_pending = TRUE;

		object->absent_count++;

		before = offset;
		after = offset + PAGE_SIZE_64;
		tws_build_cluster(tws, object, &before, &after, 0x16000);
		vm_object_unlock(object);

		rc = memory_object_data_request(object->pager,
				before + object->paging_offset,
				(vm_size_t)(after - before), VM_PROT_READ);
		if (rc != KERN_SUCCESS) {
			offset = before;
			vm_object_lock(object);
			while (offset < after) {
				m = vm_page_lookup(object, offset);
				if(m && m->absent && m->busy)
					VM_PAGE_FREE(m);
				offset += PAGE_SIZE;
			}
			vm_object_unlock(object);
			vm_object_deallocate(object);
		} else {
			vm_object_deallocate(object);
		}

		vm_map_lock(map);
	}
}
/* tws locked on entry */

tws_startup_t
tws_create_startup_list(
	tws_hash_t	tws)
{

	tws_startup_t		startup;
	unsigned int		i, j, k;
	unsigned int		total_elements;
	unsigned int		startup_size;
	unsigned int		sindex;
	unsigned int		hash_index;
	tws_startup_ptr_t	element;

	total_elements = tws->expansion_count *
			(tws->number_of_lines * tws->number_of_elements);

	startup_size = sizeof(struct tws_startup)
			+ (total_elements * sizeof(tws_startup_ptr_t *))
			+ (total_elements * sizeof(struct tws_startup_ptr))
			+ (total_elements * sizeof(struct tws_startup_ele));
	startup = (tws_startup_t)(kalloc(startup_size));

	if(startup == NULL)
		return startup;

	bzero((char *) startup, startup_size);

	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
		(total_elements * sizeof(tws_startup_ptr_t)));

	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
		(total_elements * sizeof(struct tws_startup_ptr)));

	startup->tws_hash_size = startup_size;
	startup->ele_count = 0; /* burn first hash ele, else we can't tell from zero */
	startup->array_size = total_elements;
	startup->hash_count = 1;
	sindex = 0;

	for(i = 0; i<tws->number_of_lines; i++) {
	   for(j = 0; j<tws->number_of_elements; j++) {
	      for(k = 0; k<tws->expansion_count; k++) {
		tws_hash_ele_t	entry;
		unsigned int	hash_retry;
		vm_offset_t	addr;

		entry = &tws->cache[k][i].list[j];
		addr = entry->page_addr;
		hash_retry = 0;
		if(entry->object != 0) {
			/* get a hash element */
			hash_index = do_startup_hash(addr,
						startup->array_size);

			if(startup->hash_count < total_elements) {
				element = &(startup->ele[startup->hash_count]);
				startup->hash_count += 1;
			} else {
				/* exit we're out of elements */
				break;
			}
			/* place the hash element */
			element->next = startup->table[hash_index];
			startup->table[hash_index] = (tws_startup_ptr_t)
				((int)element - (int)&startup->ele[0]);

			/* set entry OFFSET in hash element */
			element->element = (tws_startup_ele_t)
				((int)&startup->array[sindex] -
				(int)&startup->array[0]);

			startup->array[sindex].page_addr = entry->page_addr;
			startup->array[sindex].page_cache = entry->page_cache;
			startup->ele_count++;
			sindex++;
		}
	      }
	   }
	}

	return startup;
}
/*
 * Returns an entire cache line.  The line is deleted from the startup
 * cache on return. The caller can check startup->ele_count for an empty
 * list. Access synchronization is the responsibility of the caller.
 */
unsigned int
tws_startup_list_lookup(
	tws_startup_t	startup,
	vm_offset_t	addr)
{
	unsigned int		hash_index;
	unsigned int		page_cache_bits;
	unsigned int		startup_shift;
	tws_startup_ele_t	entry;
	vm_offset_t		next_addr;
	tws_startup_ptr_t	element;
	tws_startup_ptr_t	base_ele;
	tws_startup_ptr_t	*previous_ptr;

	page_cache_bits = 0;

	hash_index = do_startup_hash(addr, startup->array_size);

	if(((unsigned int)&(startup->table[hash_index])) >=
			((unsigned int)startup + startup->tws_hash_size)) {
		return page_cache_bits = 0;
	}
	element = (tws_startup_ptr_t)((int)startup->table[hash_index] +
			(int)&startup->ele[0]);
	base_ele = element;
	previous_ptr = &(startup->table[hash_index]);
	while(element > &startup->ele[0]) {
		if (((int)element + sizeof(struct tws_startup_ptr))
				> ((int)startup + startup->tws_hash_size)) {
			return page_cache_bits;
		}
		entry = (tws_startup_ele_t)
			((int)element->element
				+ (int)&startup->array[0]);
		if((((int)entry + sizeof(struct tws_startup_ele))
				> ((int)startup + startup->tws_hash_size))
				|| ((int)entry < (int)startup)) {
			return page_cache_bits;
		}
		if ((addr >= entry->page_addr) &&
				(addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache >> startup_shift;
			/* don't dump the pages, unless the addresses */
			/* line up perfectly.  The cache may be used */
			/* by other mappings */
			entry->page_cache &= (1 << startup_shift) - 1;
			if(addr == entry->page_addr) {
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		next_addr = addr + 0x1F000;
		if ((next_addr >= entry->page_addr) &&
				(next_addr <= (entry->page_addr + 0x1F000))) {
			startup_shift = (next_addr - entry->page_addr)>>12;
			page_cache_bits |= entry->page_cache << (0x1F - startup_shift);
			entry->page_cache &= ~((1 << (startup_shift + 1)) - 1);
			if(entry->page_cache == 0) {
				if(base_ele == element) {
					base_ele = (tws_startup_ptr_t)
						((int)element->next
						+ (int)&startup->ele[0]);
					startup->table[hash_index] = element->next;
					element = base_ele;
				} else {
					*previous_ptr = element->next;
					element = (tws_startup_ptr_t)
						((int)*previous_ptr
						+ (int)&startup->ele[0]);
				}
				entry->page_addr = 0;
				startup->ele_count--;
				continue;
			}
		}
		previous_ptr = &(element->next);
		element = (tws_startup_ptr_t)
			((int) element->next + (int) &startup->ele[0]);
	}

	return page_cache_bits;
}
kern_return_t
tws_send_startup_info(
	task_t	task)
{

	tws_hash_t	tws;

	task_lock(task);
	tws = (tws_hash_t)task->dynamic_working_set;
	task_unlock(task);
	if(tws == NULL) {
		return KERN_FAILURE;
	}
	return tws_internal_startup_send(tws);
}
kern_return_t
tws_internal_startup_send(
	tws_hash_t	tws)
{

	tws_startup_t	scache;

	if(tws == NULL) {
		return KERN_FAILURE;
	}
	tws_lock(tws);
	/* used to signal write or release depending on state of tws */
	if(tws->startup_cache) {
		vm_offset_t	startup_buf;
		vm_size_t	size;
		startup_buf = (vm_offset_t)tws->startup_cache;
		size = tws->startup_cache->tws_hash_size;
		tws->startup_cache = 0;
		tws_unlock(tws);
		kmem_free(kernel_map, startup_buf, size);
		return KERN_SUCCESS;
	}
	if(tws->startup_name == NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}
	scache = tws_create_startup_list(tws);
	if(scache == NULL)
		return KERN_FAILURE;
	bsd_write_page_cache_file(tws->uid, tws->startup_name,
			scache, scache->tws_hash_size,
			tws->mod, tws->fid);
	kfree((vm_offset_t)scache, scache->tws_hash_size);
	kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
	tws->startup_name = NULL;
	tws_unlock(tws);
	return KERN_SUCCESS;
}
kern_return_t
tws_handle_startup_file(
	task_t		task,
	unsigned int	uid,
	char		*app_name,
	vm_offset_t	app_vp,
	boolean_t	*new_info)

{
	tws_startup_t	startup;
	vm_offset_t	cache_size;
	kern_return_t	error;
	int		fid;
	int		mod;

	*new_info = FALSE;
	/* don't pre-heat kernel task */
	if(task == kernel_task)
		return KERN_SUCCESS;
	error = bsd_read_page_cache_file(uid, &fid,
				&mod, app_name,
				app_vp, (vm_offset_t *)&startup,
				&cache_size);
	if(error) {
		return KERN_FAILURE;
	}
	if(startup == NULL) {
		/* Entry for app does not exist, make */
		/* one */
		/* we will want our own copy of the shared */
		/* regions to pick up a true picture of all */
		/* the pages we will touch. */
		if((lsf_zone->count * lsf_zone->elem_size)
				> (lsf_zone->max_size >> 1)) {
			/* We don't want to run out of shared memory */
			/* map entries by starting too many private versions */
			/* of the shared library structures */
			return KERN_SUCCESS;
		}
		*new_info = TRUE;

		error = tws_write_startup_file(task,
					fid, mod, app_name, uid);
		if(error)
			return error;
		/* use the mod in the write case as an init */
		/* flag */
		mod = 0;
	} else {
		error = tws_read_startup_file(task,
				(tws_startup_t)startup,
				cache_size);
		if(error) {
			kmem_free(kernel_map,
				(vm_offset_t)startup, cache_size);
			return error;
		}
	}
	return KERN_SUCCESS;
}
kern_return_t
tws_write_startup_file(
	task_t		task,
	int		fid,
	int		mod,
	char		*name,
	unsigned int	uid)
{
	tws_hash_t	tws;
	unsigned int	string_length;

	string_length = strlen(name);

	task_lock(task);
	tws = (tws_hash_t)task->dynamic_working_set;
	task_unlock(task);
	if(tws == NULL) {
		/* create a dynamic working set of normal size */
		task_working_set_create(task, 0,
				0, TWS_HASH_STYLE_DEFAULT);
		/* re-read; the local copy was NULL before creation */
		task_lock(task);
		tws = (tws_hash_t)task->dynamic_working_set;
		task_unlock(task);
	}
	tws_lock(tws);

	if(tws->startup_name != NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}

	tws->startup_name = (char *)
		kalloc((string_length + 1) * (sizeof(char)));
	if(tws->startup_name == NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}

	bcopy(name, (char *)tws->startup_name, string_length + 1);
	tws->startup_name_length = (string_length + 1) * sizeof(char);
	tws->uid = uid;
	tws->fid = fid;
	tws->mod = mod;

	tws_unlock(tws);
	return KERN_SUCCESS;
}
kern_return_t
tws_read_startup_file(
	task_t		task,
	tws_startup_t	startup,
	vm_offset_t	cache_size)
{
	tws_hash_t	tws;
	int		lines;
	int		old_exp_count;

	task_lock(task);
	tws = (tws_hash_t)task->dynamic_working_set;

	if(cache_size < sizeof(struct tws_hash)) {
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	}

	/* create a dynamic working set to match file size */
	lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
	/* we now need to divide out element size and word size */
	/* all fields are 4 bytes.  There are 8 bytes in each hash element */
	/* entry, 4 bytes in each table ptr location and 8 bytes in each */
	/* page_cache entry, making a total of 20 bytes for each entry */
	lines = (lines/(20));
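	/*
	 * Worked arithmetic (from the byte counts above): 8 bytes of hash
	 * element + 4 bytes of table pointer + 8 bytes of page_cache entry
	 * = 20 bytes per entry, hence the division by 20.
	 */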
	if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
		lines = TWS_SMALL_HASH_LINE_COUNT;
		task_unlock(task);
		kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
		return(KERN_SUCCESS);
	}
	old_exp_count = lines/TWS_HASH_LINE_COUNT;
	if((old_exp_count * TWS_HASH_LINE_COUNT) != lines) {
		lines = (old_exp_count + 1)
				* TWS_HASH_LINE_COUNT;
	}
	if(tws == NULL) {
		task_working_set_create(task, lines,
				0, TWS_HASH_STYLE_DEFAULT);
		task_unlock(task);
	} else {
		task_unlock(task);
		tws_expand_working_set(
			(vm_offset_t)tws, lines, TRUE);
	}

	/* re-read; the working set may just have been created */
	task_lock(task);
	tws = (tws_hash_t)task->dynamic_working_set;
	task_unlock(task);

	tws_lock(tws);

	if(tws->startup_cache != NULL) {
		tws_unlock(tws);
		return KERN_FAILURE;
	}

	/* now need to fix up internal table pointers */
	startup->table = (tws_startup_ptr_t *)
		(((int)startup) + (sizeof(struct tws_startup)));
	startup->ele = (struct tws_startup_ptr *)
		(((vm_offset_t)startup->table) +
		(startup->array_size * sizeof(tws_startup_ptr_t)));
	startup->array = (struct tws_startup_ele *)
		(((vm_offset_t)startup->ele) +
		(startup->array_size * sizeof(struct tws_startup_ptr)));
	/* the allocation size and file size should be the same */
	/* just in case they're not, make sure we dealloc correctly */
	startup->tws_hash_size = cache_size;

	tws->startup_cache = startup;
	tws_unlock(tws);
	return KERN_SUCCESS;
}
void
tws_hash_ws_flush(tws_hash_t tws) {
	tws_startup_t	scache;
	if(tws == NULL) {
		return;
	}
	tws_lock(tws);
	if(tws->startup_name != NULL) {
		scache = tws_create_startup_list(tws);
		if(scache == NULL) {
			/* dump the name cache, we'll */
			/* get it next time */
			kfree((vm_offset_t)
				tws->startup_name,
				tws->startup_name_length);
			tws->startup_name = NULL;
			tws_unlock(tws);
			return;
		}
		bsd_write_page_cache_file(tws->uid, tws->startup_name,
				scache, scache->tws_hash_size,
				tws->mod, tws->fid);
		kfree((vm_offset_t)scache,
				scache->tws_hash_size);
		kfree((vm_offset_t)
			tws->startup_name,
			tws->startup_name_length);
		tws->startup_name = NULL;
	}
	tws_unlock(tws);
	return;
}
void
tws_hash_destroy(tws_hash_t tws)
{
	int		i, k;
	vm_size_t	cache_size;

	if(tws->startup_cache != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)tws->startup_cache,
			tws->startup_cache->tws_hash_size);
		tws->startup_cache = NULL;
	}
	if(tws->startup_name != NULL) {
		tws_internal_startup_send(tws);
	}
	for (i=0; i<tws->number_of_lines; i++) {
		for(k=0; k<tws->expansion_count; k++) {
			/* clear the object refs */
			tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
		}
	}
	i = 0;
	while (i < tws->expansion_count) {

		kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ptr_t)
				* tws->number_of_lines
				* tws->number_of_elements);
		kfree((vm_offset_t)tws->table_ele[i],
				sizeof(struct tws_hash_ptr)
				* tws->number_of_lines
				* tws->number_of_elements);
		kfree((vm_offset_t)tws->alt_ele[i],
				sizeof(struct tws_hash_ptr)
				* tws->number_of_lines
				* tws->number_of_elements);
		kfree((vm_offset_t)tws->cache[i], sizeof(struct tws_hash_line)
				* tws->number_of_lines);
		i++;
	}
	if(tws->startup_name != NULL) {
		kfree((vm_offset_t)tws->startup_name,
				tws->startup_name_length);
	}
	kfree((vm_offset_t)tws, sizeof(struct tws_hash));
}
void
tws_hash_clear(tws_hash_t tws)
{
	int	i, k;

	for (i=0; i<tws->number_of_lines; i++) {
		for(k=0; k<tws->expansion_count; k++) {
			/* clear the object refs */
			tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
		}
	}
}
kern_return_t
task_working_set_create(
	task_t		task,
	unsigned int	lines,
	unsigned int	rows,
	unsigned int	style)
{

	if (lines == 0) {
		lines = TWS_HASH_LINE_COUNT;
	}
	if (rows == 0) {
		rows = TWS_ARRAY_SIZE;
	}
	if (style == TWS_HASH_STYLE_DEFAULT) {
		style = TWS_HASH_STYLE_BASIC;
	}
	task_lock(task);
	if(task->dynamic_working_set != 0) {
		task_unlock(task);
		return(KERN_FAILURE);
	} else if((task->dynamic_working_set
			= (vm_offset_t) tws_hash_create(lines, rows, style)) == 0) {
		task_unlock(task);
		return(KERN_NO_SPACE);
	}
	task_unlock(task);
	return KERN_SUCCESS;
}
/* Internal use only routines */


/*
 * internal sub-function for address space lookup
 * returns the target element and the address of the
 * previous pointer.  The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked
 */
void
tws_traverse_address_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_offset_t		page_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_map_t		map,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list,
	unsigned int		exclusive_addr)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		tws_hash_ele_t	ele;
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = (tws_hash_ptr_t *)&(tws->table[k][index]);
		while(cache_ele != NULL) {
			if(((unsigned int)
				cache_ele->element & TWS_ADDR_HASH) == 0) {
				*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			ele = (tws_hash_ele_t)((unsigned int)
				cache_ele->element & ~TWS_ADDR_HASH);
			if ((ele == 0) || (ele->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = base_ele;
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}

			if ((ele->page_addr <= page_addr)
				&& (page_addr <= (ele->page_addr +
				(vm_offset_t)TWS_INDEX_MASK))
				&& ((object == NULL)
					|| ((object == ele->object)
					&& (offset == ele->offset)
					&& (map == ele->map)))) {
				if(exclusive_addr) {
					int delta;
					delta = ((page_addr - ele->page_addr)
						>> 12);
					if((1 << delta) & ele->page_cache) {
						/* We've found a match */
						*target_ele = cache_ele;
						*free_list =
							(tws_hash_ptr_t *)
							&(tws->free_hash_ele[k]);
						return;
					}
				} else {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = (tws_hash_ptr_t *)
						&(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
/*
 * internal sub-function for object space lookup
 * returns the target element and the address of the
 * previous pointer.  The previous pointer is the address
 * of the pointer pointing to the target element.
 * TWS must be locked
 */
void
tws_traverse_object_hash_list(
	tws_hash_t		tws,
	unsigned int		index,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		page_mask,
	tws_hash_ptr_t		*target_ele,
	tws_hash_ptr_t		**previous_ptr,
	tws_hash_ptr_t		**free_list)
{
	unsigned int	k;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	base_ele;

	*target_ele = NULL;
	*previous_ptr = NULL;

	for(k=0; k<tws->expansion_count; k++) {
		cache_ele = tws->table[k][index];
		base_ele = cache_ele;
		*previous_ptr = &(tws->table[k][index]);
		while(cache_ele != NULL) {
			if((((unsigned int)cache_ele->element)
					& TWS_ADDR_HASH) != 0) {
				*previous_ptr = &(cache_ele->next);
				cache_ele = cache_ele->next;
				continue;
			}
			if ((cache_ele->element == 0) ||
					(cache_ele->element->object == 0)) {
				/* A little clean-up of empty elements */
				cache_ele->element = 0;
				if(base_ele == cache_ele) {
					base_ele = cache_ele->next;
					tws->table[k][index] = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = tws->table[k][index];
				} else {
					**previous_ptr = cache_ele->next;
					cache_ele->next = tws->free_hash_ele[k];
					tws->free_hash_ele[k] = cache_ele;
					cache_ele = **previous_ptr;
				}
				continue;
			}
			if ((cache_ele->element->object == object)
					&& (cache_ele->element->offset ==
					(offset - (offset & ~TWS_HASH_OFF_MASK)))) {
				if((cache_ele->element->page_cache & page_mask)
						|| (page_mask == 0xFFFFFFFF)) {
					/* We've found a match */
					*target_ele = cache_ele;
					*free_list = &(tws->free_hash_ele[k]);
					return;
				}
			}
			*previous_ptr = (tws_hash_ptr_t *)&(cache_ele->next);
			cache_ele = cache_ele->next;
		}
	}
}
/*
 * For a given object/offset, discover whether the indexed 32 page frame
 * containing the object/offset exists and if there are at least threshold
 * pages present.  Returns true if population meets threshold.
 */
int
tws_test_for_community(
	tws_hash_t		tws,
	vm_object_t		object,
	vm_object_offset_t	offset,
	unsigned int		threshold,
	unsigned int		*page_mask)
{
	int		index;
	tws_hash_ptr_t	cache_ele;
	tws_hash_ptr_t	*trailer;
	tws_hash_ptr_t	*free_list;
	int		community = 0;

	index = do_tws_hash(object, offset,
			tws->number_of_elements, tws->number_of_lines);
	tws_traverse_object_hash_list(tws, index, object, offset, 0xFFFFFFFF,
			&cache_ele, &trailer, &free_list);

	if(cache_ele != NULL) {
		int	i;
		int	ctr;
		ctr = 0;
		for(i=1; i!=0; i=i<<1) {
			if(i & cache_ele->element->page_cache)
				ctr++;
			if(ctr == threshold) {
				community = 1;
				*page_mask = cache_ele->element->page_cache;
				break;
			}
		}
	}

	return community;
}
/*
 * Gets new hash element for object hash from free pools
 * TWS must be locked
 */

tws_hash_ptr_t
new_obj_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index)
{
	tws_hash_ptr_t	element;

	if(tws->obj_free_count[set] < tws->number_of_lines * tws->number_of_elements) {
		element = &(tws->table_ele[set][tws->obj_free_count[set]]);
		tws->obj_free_count[set]+=1;
	} else if(tws->free_hash_ele[set] == NULL) {
		return NULL;
	} else {
		element = tws->free_hash_ele[set];
		tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
	}
	element->element = 0;
	element->next = tws->table[set][index];
	tws->table[set][index] = element;
	return element;
}
/*
 * Gets new hash element for addr hash from free pools
 * TWS must be locked
 */

tws_hash_ptr_t
new_addr_hash(
	tws_hash_t	tws,
	unsigned int	set,
	unsigned int	index)
{
	tws_hash_ptr_t	element;

	if(tws->addr_free_count[set]
			< tws->number_of_lines * tws->number_of_elements) {
		element = &(tws->alt_ele[set][tws->addr_free_count[set]]);
		tws->addr_free_count[set]+=1;
	} else if(tws->free_hash_ele[set] == NULL) {
		return NULL;
	} else {
		element = tws->free_hash_ele[set];
		tws->free_hash_ele[set] = tws->free_hash_ele[set]->next;
	}
	element->element = (tws_hash_ele_t)TWS_ADDR_HASH;
	element->next = tws->table[set][index];
	tws->table[set][index] = element;
	return element;
}