-int startup_miss = 0;
/*
- * Copyright (c) 2002,2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <mach/mach_types.h>
+#include <mach/memory_object.h>
#include <mach/shared_memory_server.h>
#include <vm/task_working_set.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <kern/sched.h>
+#include <kern/kalloc.h>
+
+#include <vm/vm_protos.h>
-extern unsigned sched_tick;
+/*
+ * LP64todo - Task Working Set Support is for 32-bit only
+ */
extern zone_t lsf_zone;
/* declarations for internal use only routines */
+int startup_miss = 0;
+
+tws_hash_t
+tws_hash_create(
+ unsigned int lines,
+ unsigned int rows,
+ unsigned int style);
+
+kern_return_t
+tws_write_startup_file(
+ task_t task,
+ int fid,
+ int mod,
+ char *name,
+ unsigned int string_length);
+
+kern_return_t
+tws_read_startup_file(
+ task_t task,
+ tws_startup_t startup,
+ vm_offset_t cache_size);
tws_startup_t
tws_create_startup_list(
tws_internal_startup_send(
tws_hash_t tws);
+void
+tws_hash_line_clear(
+ tws_hash_t tws,
+ tws_hash_line_t hash_line,
+ vm_object_t object,
+ boolean_t live);
+
+void
+tws_hash_clear(
+ tws_hash_t tws);
+
void
tws_traverse_address_hash_list (
tws_hash_t tws,
unsigned int index,
vm_object_t object,
vm_object_offset_t offset,
- unsigned int page_mask,
+ unsigned int pagemask,
tws_hash_ptr_t *target_ele,
tws_hash_ptr_t **previous_ptr,
tws_hash_ptr_t **free_list);
vm_object_t object,
vm_object_offset_t offset,
unsigned int threshold,
- unsigned int *page_mask);
+ unsigned int *pagemask);
+
+kern_return_t
+tws_internal_lookup(
+ tws_hash_t tws,
+ vm_object_offset_t offset,
+ vm_object_t object,
+ tws_hash_line_t *line);
/* Note: all of the routines below depend on the associated map lock for */
/* synchronization, the map lock will be on when the routines are called */
unsigned int style)
{
tws_hash_t tws;
- int i,j;
-
if ((style != TWS_HASH_STYLE_BASIC) &&
(style != TWS_HASH_STYLE_BASIC)) {
if((tws->table[0] = (tws_hash_ptr_t *)
kalloc(sizeof(tws_hash_ptr_t) * lines * rows))
== NULL) {
- kfree((vm_offset_t)tws, sizeof(struct tws_hash));
+ kfree(tws, sizeof(struct tws_hash));
return (tws_hash_t)NULL;
}
if((tws->table_ele[0] = (tws_hash_ptr_t)
kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
== NULL) {
- kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
- * lines * rows);
- kfree((vm_offset_t)tws, sizeof(struct tws_hash));
+ kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
+ kfree(tws, sizeof(struct tws_hash));
return (tws_hash_t)NULL;
}
if((tws->alt_ele[0] = (tws_hash_ptr_t)
kalloc(sizeof(struct tws_hash_ptr) * lines * rows))
== NULL) {
- kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
- * lines * rows);
- kfree((vm_offset_t)tws->table_ele[0],
- sizeof(struct tws_hash_ptr)
- * lines * rows);
- kfree((vm_offset_t)tws, sizeof(struct tws_hash));
+ kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
+ kfree(tws->table_ele[0],
+ sizeof(struct tws_hash_ptr) * lines * rows);
+ kfree(tws, sizeof(struct tws_hash));
return (tws_hash_t)NULL;
}
if((tws->cache[0] = (struct tws_hash_line *)
kalloc(sizeof(struct tws_hash_line) * lines))
== NULL) {
- kfree((vm_offset_t)tws->table[0], sizeof(tws_hash_ptr_t)
- * lines * rows);
- kfree((vm_offset_t)tws->table_ele[0],
- sizeof(struct tws_hash_ptr)
- * lines * rows);
- kfree((vm_offset_t)tws->alt_ele[0], sizeof(struct tws_hash_ptr)
- * lines * rows);
- kfree((vm_offset_t)tws, sizeof(struct tws_hash));
+ kfree(tws->table[0], sizeof(tws_hash_ptr_t) * lines * rows);
+ kfree(tws->table_ele[0],
+ sizeof(struct tws_hash_ptr) * lines * rows);
+ kfree(tws->alt_ele[0],
+ sizeof(struct tws_hash_ptr) * lines * rows);
+ kfree(tws, sizeof(struct tws_hash));
return (tws_hash_t)NULL;
}
tws->free_hash_ele[0] = (tws_hash_ptr_t)0;
bzero((char *)tws->cache[0], sizeof(struct tws_hash_line)
* lines);
- mutex_init(&tws->lock, ETAP_VM_MAP);
+ mutex_init(&tws->lock, 0);
tws->style = style;
tws->current_line = 0;
tws->pageout_count = 0;
return tws;
}
-int newtest = 0;
+
+extern vm_page_t
+vm_page_lookup_nohint(vm_object_t object, vm_object_offset_t offset);
+
+
void
tws_hash_line_clear(
tws_hash_t tws,
- tws_hash_line_t hash_line,
+ tws_hash_line_t hash_line,
+ __unused vm_object_t object,
boolean_t live)
{
struct tws_hash_ele *hash_ele;
struct tws_hash_ptr **free_list;
tws_hash_ele_t addr_ele;
int index;
- unsigned int i, j, k;
+ unsigned int i, j;
int dump_pmap;
int hash_loop;
tws_hash_ptr_t cache_ele;
index = alt_tws_hash(
- hash_ele->page_addr & TWS_HASH_OFF_MASK,
+ hash_ele->page_addr & TWS_ADDR_OFF_MASK,
tws->number_of_elements,
tws->number_of_lines);
index = alt_tws_hash(
(hash_ele->page_addr - 0x1f000)
- & TWS_HASH_OFF_MASK,
+ & TWS_ADDR_OFF_MASK,
tws->number_of_elements,
tws->number_of_lines);
if((hash_ele->map != NULL) && (live)) {
vm_page_t p;
-
- for (j = 0x1; j != 0; j = j<<1) {
- if(j & hash_ele->page_cache) {
- p = vm_page_lookup(hash_ele->object,
- hash_ele->offset + local_off);
- if((p != NULL) && (p->wire_count == 0)
- && (dump_pmap == 1)) {
- pmap_remove_some_phys((pmap_t)
- vm_map_pmap(
- current_map()),
- p->phys_page);
+
+#if 0
+ if (object != hash_ele->object) {
+ if (object)
+ vm_object_unlock(object);
+ vm_object_lock(hash_ele->object);
+ }
+#endif
+ if (dump_pmap == 1) {
+ for (j = 0x1; j != 0; j = j<<1) {
+ if(j & hash_ele->page_cache) {
+ p = vm_page_lookup_nohint(hash_ele->object,
+ hash_ele->offset + local_off);
+ if((p != NULL) && (p->wire_count == 0)) {
+ pmap_remove_some_phys((pmap_t)vm_map_pmap(current_map()),
+ p->phys_page);
+ }
}
- }
- local_off += PAGE_SIZE_64;
+ local_off += PAGE_SIZE_64;
+ }
+ }
+#if 0
+ if (object != hash_ele->object) {
+ vm_object_unlock(hash_ele->object);
+ if (object)
+ vm_object_lock(object);
}
+#endif
}
if(tws->style == TWS_HASH_STYLE_SIGNAL) {
kern_return_t
tws_expand_working_set(
- vm_offset_t tws,
- int line_count,
+ tws_hash_t old_tws,
+ unsigned int line_count,
boolean_t dump_data)
{
tws_hash_t new_tws;
- tws_hash_t old_tws;
unsigned int i,j,k;
struct tws_hash temp;
- old_tws = (tws_hash_t)tws;
-
/* Note we do an elaborate dance to preserve the header that */
/* task is pointing to. In this way we can avoid taking a task */
/* lock every time we want to access the tws */
vm_offset_t page_addr,
vm_map_t map)
{
- queue_t bucket;
unsigned int index;
unsigned int alt_index;
unsigned int index_enum[2];
tws_hash_ptr_t *trailer;
tws_hash_ptr_t *free_list;
tws_hash_ele_t target_element = NULL;
- int i,k;
- int current_line;
- int set;
+ unsigned int i;
+ unsigned int current_line;
+ unsigned int set;
int ctr;
unsigned int startup_cache_line;
- vm_offset_t startup_page_addr;
- int cache_full = 0;
int age_of_cache = 0;
-
if(!tws_lock_try(tws)) {
return KERN_FAILURE;
}
tws->insert_count++;
current_line = 0xFFFFFFFF;
+ set = 0;
startup_cache_line = 0;
/* in avoiding duplication of entries against long lived non-cow */
/* objects */
index_enum[0] = alt_tws_hash(
- page_addr & TWS_HASH_OFF_MASK,
+ page_addr & TWS_ADDR_OFF_MASK,
tws->number_of_elements, tws->number_of_lines);
index_enum[1] = alt_tws_hash(
- (page_addr - 0x1f000) & TWS_HASH_OFF_MASK,
+ (page_addr - 0x1f000) & TWS_ADDR_OFF_MASK,
tws->number_of_elements, tws->number_of_lines);
for(ctr = 0; ctr < 2;) {
* tws->number_of_lines
* tws->number_of_elements))
== NULL) {
- kfree((vm_offset_t)tws->table[set],
- sizeof(tws_hash_ptr_t)
- * tws->number_of_lines
- * tws->number_of_elements);
+ kfree(tws->table[set],
+ sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
+ * tws->number_of_elements);
set = 0;
} else if((tws->alt_ele[set] =
(tws_hash_ptr_t)
* tws->number_of_lines
* tws->number_of_elements))
== NULL) {
- kfree((vm_offset_t)tws->table_ele[set],
- sizeof(struct tws_hash_ptr)
- * tws->number_of_lines
- * tws->number_of_elements);
- kfree((vm_offset_t)tws->table[set],
- sizeof(tws_hash_ptr_t)
+ kfree(tws->table_ele[set],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree(tws->table[set],
+ sizeof(tws_hash_ptr_t)
* tws->number_of_lines
* tws->number_of_elements);
tws->table[set] = NULL;
(struct tws_hash_line)
* tws->number_of_lines))
== NULL) {
- kfree((vm_offset_t)tws->alt_ele[set],
- sizeof(struct tws_hash_ptr)
- * tws->number_of_lines
- * tws->number_of_elements);
- kfree((vm_offset_t)tws->table_ele[set],
- sizeof(struct tws_hash_ptr)
- * tws->number_of_lines
- * tws->number_of_elements);
- kfree((vm_offset_t)tws->table[set],
- sizeof(tws_hash_ptr_t)
- * tws->number_of_lines
- * tws->number_of_elements);
+ kfree(tws->alt_ele[set],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree(tws->table_ele[set],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree(tws->table[set],
+ sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
+ * tws->number_of_elements);
tws->table[set] = NULL;
set = 0;
}
if(set < tws->expansion_count) {
tws_hash_line_clear(tws,
- &(tws->cache[set][current_line]), TRUE);
+ &(tws->cache[set][current_line]), object, TRUE);
if(tws->cache[set][current_line].ele_count
>= tws->number_of_elements) {
if(tws->style == TWS_HASH_STYLE_SIGNAL) {
#define PAGED_OUT(o, f) FALSE
#endif /* MACH_PAGEMAP */
+
void
tws_build_cluster(
tws_hash_t tws,
vm_size_t max_length)
{
tws_hash_line_t line;
- task_t task;
vm_object_offset_t before = *start;
vm_object_offset_t after = *end;
vm_object_offset_t original_start = *start;
kern_return_t kret;
vm_object_offset_t object_size;
int age_of_cache;
- int pre_heat_size;
+ vm_size_t pre_heat_size;
unsigned int ele_cache;
unsigned int end_cache = 0;
unsigned int start_cache = 0;
+ unsigned int memory_scarce = 0;
if((object->private) || !(object->pager))
return;
if((!tws) || (!tws_lock_try(tws))) {
return;
}
-
age_of_cache = ((sched_tick
- tws->time_of_creation) >> SCHED_TICK_SHIFT);
+ if (vm_page_free_count < (2 * vm_page_free_target))
+ memory_scarce = 1;
+
/* When pre-heat files are not available, resort to speculation */
/* based on size of file */
pre_heat_size = 0;
} else {
if (object_size > (vm_object_offset_t)(1024 * 1024))
- pre_heat_size = 16 * PAGE_SIZE;
- else if (object_size > (vm_object_offset_t)(128 * 1024))
pre_heat_size = 8 * PAGE_SIZE;
- else
+ else if (object_size > (vm_object_offset_t)(128 * 1024))
pre_heat_size = 4 * PAGE_SIZE;
+ else
+ pre_heat_size = 2 * PAGE_SIZE;
}
if (tws->startup_cache) {
+ int target_page_count;
+
+ if (memory_scarce)
+ target_page_count = 16;
+ else
+ target_page_count = 4;
- if (tws_test_for_community(tws, object, *start, 4, &ele_cache))
+ if (tws_test_for_community(tws, object, *start, target_page_count, &ele_cache))
{
start_cache = ele_cache;
*start = *start & TWS_HASH_OFF_MASK;
tws_unlock(tws);
}
+void
tws_line_signal(
tws_hash_t tws,
vm_map_t map,
while (j != 0) {
if(j & element->page_cache)
break;
- j << 1;
+ j <<= 1;
local_off += PAGE_SIZE_64;
}
object = element->object;
{
tws_hash_t tws;
- tws_startup_t scache;
task_lock(task);
- tws = (tws_hash_t)task->dynamic_working_set;
+ tws = task->dynamic_working_set;
task_unlock(task);
if(tws == NULL) {
return KERN_FAILURE;
if(scache == NULL)
return KERN_FAILURE;
bsd_write_page_cache_file(tws->uid, tws->startup_name,
- scache, scache->tws_hash_size,
- tws->mod, tws->fid);
- kfree((vm_offset_t)scache, scache->tws_hash_size);
- kfree((vm_offset_t) tws->startup_name, tws->startup_name_length);
+ (caddr_t) scache, scache->tws_hash_size,
+ tws->mod, tws->fid);
+ kfree(scache, scache->tws_hash_size);
+ kfree(tws->startup_name, tws->startup_name_length);
tws->startup_name = NULL;
tws_unlock(tws);
return KERN_SUCCESS;
task_t task,
unsigned int uid,
char *app_name,
- vm_offset_t app_vp,
+ void *app_vp,
boolean_t *new_info)
{
return KERN_SUCCESS;
error = bsd_read_page_cache_file(uid, &fid,
&mod, app_name,
- app_vp, &startup,
+ app_vp,
+ (vm_offset_t *) &startup,
&cache_size);
if(error) {
return KERN_FAILURE;
string_length = strlen(name);
+restart:
task_lock(task);
- tws = (tws_hash_t)task->dynamic_working_set;
-
+ tws = task->dynamic_working_set;
task_unlock(task);
+
if(tws == NULL) {
+ kern_return_t error;
+
/* create a dynamic working set of normal size */
- task_working_set_create(task, 0,
- 0, TWS_HASH_STYLE_DEFAULT);
+ if((error = task_working_set_create(task, 0, 0, TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
+ return error;
+ /* we need to reset tws and relock */
+ goto restart;
}
tws_lock(tws);
return KERN_SUCCESS;
}
+unsigned long tws_read_startup_file_rejects = 0;
+
kern_return_t
tws_read_startup_file(
task_t task,
vm_offset_t cache_size)
{
tws_hash_t tws;
- int error;
int lines;
int old_exp_count;
+ unsigned int ele_count;
+restart:
task_lock(task);
- tws = (tws_hash_t)task->dynamic_working_set;
+ tws = task->dynamic_working_set;
+
+ /* create a dynamic working set to match file size */
- if(cache_size < sizeof(struct tws_hash)) {
+ /* start with total size of the data we got from app_profile */
+ ele_count = cache_size;
+ /* skip the startup header */
+ ele_count -= sizeof(struct tws_startup);
+ /*
+ * For each startup cache entry, we have one of these:
+ * tws_startup_ptr_t startup->table[];
+ * struct tws_startup_ptr startup->ele[];
+ * struct tws_startup_ele startup->array[];
+ */
+ ele_count /= (sizeof (tws_startup_ptr_t) +
+ sizeof (struct tws_startup_ptr) +
+ sizeof (struct tws_startup_ele));
+
+ /*
+ * Sanity check: make sure the value for startup->array_size
+ * that we read from the app_profile file matches the size
+ * of the data we read from disk. If it doesn't match, we
+ * can't trust the data and we just drop it all.
+ */
+ if (cache_size < sizeof(struct tws_startup) ||
+ startup->array_size != ele_count) {
+ tws_read_startup_file_rejects++;
task_unlock(task);
kmem_free(kernel_map, (vm_offset_t)startup, cache_size);
return(KERN_SUCCESS);
}
- /* create a dynamic working set to match file size */
- lines = (cache_size - sizeof(struct tws_hash))/TWS_ARRAY_SIZE;
- /* we now need to divide out element size and word size */
- /* all fields are 4 bytes. There are 8 bytes in each hash element */
- /* entry, 4 bytes in each table ptr location and 8 bytes in each */
- /* page_cache entry, making a total of 20 bytes for each entry */
- lines = (lines/(20));
+ /*
+ * We'll create the task working set with the default row size
+ * (TWS_ARRAY_SIZE), so this will give us the number of lines
+ * we need to store all the data from the app_profile startup
+ * cache.
+ */
+ lines = ele_count / TWS_ARRAY_SIZE;
+
if(lines <= TWS_SMALL_HASH_LINE_COUNT) {
lines = TWS_SMALL_HASH_LINE_COUNT;
task_unlock(task);
* TWS_HASH_LINE_COUNT;
}
if(tws == NULL) {
- task_working_set_create(task, lines,
- 0, TWS_HASH_STYLE_DEFAULT);
+ kern_return_t error;
+
task_unlock(task);
+ if ((error = task_working_set_create(task, lines, 0, TWS_HASH_STYLE_DEFAULT)) != KERN_SUCCESS)
+ return error;
+ /* we need to reset tws and relock */
+ goto restart;
} else {
task_unlock(task);
tws_expand_working_set(
- (vm_offset_t)tws, lines, TRUE);
+ (void *)tws, lines, TRUE);
}
}
-
tws_lock(tws);
if(tws->startup_cache != NULL) {
if(scache == NULL) {
/* dump the name cache, we'll */
/* get it next time */
- kfree((vm_offset_t)
- tws->startup_name,
- tws->startup_name_length);
+ kfree(tws->startup_name, tws->startup_name_length);
tws->startup_name = NULL;
tws_unlock(tws);
return;
}
bsd_write_page_cache_file(tws->uid, tws->startup_name,
- scache, scache->tws_hash_size,
+ (caddr_t) scache, scache->tws_hash_size,
tws->mod, tws->fid);
- kfree((vm_offset_t)scache,
- scache->tws_hash_size);
- kfree((vm_offset_t)
- tws->startup_name,
- tws->startup_name_length);
+ kfree(scache, scache->tws_hash_size);
+ kfree(tws->startup_name, tws->startup_name_length);
tws->startup_name = NULL;
}
tws_unlock(tws);
void
tws_hash_destroy(tws_hash_t tws)
{
- int i,k;
- vm_size_t cache_size;
+ unsigned int i,k;
if(tws->startup_cache != NULL) {
kmem_free(kernel_map,
for (i=0; i<tws->number_of_lines; i++) {
for(k=0; k<tws->expansion_count; k++) {
/* clear the object refs */
- tws_hash_line_clear(tws, &(tws->cache[k][i]), FALSE);
+ tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
}
}
i = 0;
while (i < tws->expansion_count) {
- kfree((vm_offset_t)tws->table[i], sizeof(tws_hash_ptr_t)
- * tws->number_of_lines
- * tws->number_of_elements);
- kfree((vm_offset_t)tws->table_ele[i],
- sizeof(struct tws_hash_ptr)
- * tws->number_of_lines
- * tws->number_of_elements);
- kfree((vm_offset_t)tws->alt_ele[i],
- sizeof(struct tws_hash_ptr)
- * tws->number_of_lines
- * tws->number_of_elements);
- kfree((vm_offset_t)tws->cache[i], sizeof(struct tws_hash_line)
- * tws->number_of_lines);
+ kfree(tws->table[i],
+ sizeof(tws_hash_ptr_t)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree(tws->table_ele[i],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree(tws->alt_ele[i],
+ sizeof(struct tws_hash_ptr)
+ * tws->number_of_lines
+ * tws->number_of_elements);
+ kfree(tws->cache[i],
+ sizeof(struct tws_hash_line) * tws->number_of_lines);
i++;
}
if(tws->startup_name != NULL) {
- kfree((vm_offset_t)tws->startup_name,
- tws->startup_name_length);
+ kfree(tws->startup_name, tws->startup_name_length);
}
- kfree((vm_offset_t)tws, sizeof(struct tws_hash));
+ kfree(tws, sizeof(struct tws_hash));
}
/*
 * tws_hash_clear: drop the object references held by every hash line
 * in every expansion set of the task working set.  Unlike
 * tws_hash_destroy, the tables and cache lines themselves remain
 * allocated for reuse.
 *
 * NOTE(review): reconstructed from patch residue (post-patch side of
 * the hunk); the NULL/FALSE arguments follow the updated
 * tws_hash_line_clear(tws, line, object, live) signature — no object
 * lock context, not a live teardown.
 */
void
tws_hash_clear(tws_hash_t tws)
{
	unsigned int i, k;

	for (i = 0; i < tws->number_of_lines; i++) {
		for (k = 0; k < tws->expansion_count; k++) {
			/* clear the object refs */
			tws_hash_line_clear(tws, &(tws->cache[k][i]), NULL, FALSE);
		}
	}
}
if(task->dynamic_working_set != 0) {
task_unlock(task);
return(KERN_FAILURE);
- } else if((task->dynamic_working_set
- = (vm_offset_t) tws_hash_create(lines, rows, style)) == 0) {
+ } else if((task->dynamic_working_set =
+ tws_hash_create(lines, rows, style)) == 0) {
task_unlock(task);
return(KERN_NO_SPACE);
}
tws_hash_ptr_t **free_list,
unsigned int exclusive_addr)
{
- int k;
+ unsigned int k;
tws_hash_ptr_t cache_ele;
tws_hash_ptr_t base_ele;
unsigned int index,
vm_object_t object,
vm_object_offset_t offset,
- unsigned int page_mask,
+ unsigned int pagemask,
tws_hash_ptr_t *target_ele,
tws_hash_ptr_t **previous_ptr,
tws_hash_ptr_t **free_list)
{
- int k;
+ unsigned int k;
tws_hash_ptr_t cache_ele;
tws_hash_ptr_t base_ele;
if ((cache_ele->element->object == object)
&& (cache_ele->element->offset ==
(offset - (offset & ~TWS_HASH_OFF_MASK)))) {
- if((cache_ele->element->page_cache & page_mask)
- || (page_mask == 0xFFFFFFFF)) {
+ if((cache_ele->element->page_cache & pagemask)
+ || (pagemask == 0xFFFFFFFF)) {
/* We've found a match */
*target_ele = cache_ele;
*free_list = &(tws->free_hash_ele[k]);
vm_object_t object,
vm_object_offset_t offset,
unsigned int threshold,
- unsigned int *page_mask)
+ unsigned int *pagemask)
{
int index;
tws_hash_ptr_t cache_ele;
if(cache_ele != NULL) {
int i;
- int ctr;
+ unsigned int ctr;
ctr = 0;
for(i=1; i!=0; i=i<<1) {
if(i & cache_ele->element->page_cache)
ctr++;
if(ctr == threshold) {
community = 1;
- *page_mask = cache_ele->element->page_cache;
+ *pagemask = cache_ele->element->page_cache;
break;
}
}