#define HFSIOC_GET_WRITE_GEN_COUNTER _IOR('h', 30, u_int32_t)
#define HFS_GET_WRITE_GEN_COUNTER IOCBASECMD(HFSIOC_GET_WRITE_GEN_COUNTER)
+/* revisiond uses this to allocate a doc-id for files from Cab and earlier systems that are marked tracked but don't have a doc-id */
#define HFS_DOCUMENT_ID_ALLOCATE 0x1
#define HFSIOC_GET_DOCUMENT_ID _IOR('h', 31, u_int32_t)
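A minimal userland sketch of how these new requests might be exercised through fsctl(2); the includes, helper name, and error handling are assumptions for illustration, not part of the diff:

#include <stdio.h>
#include <sys/types.h>
#include <sys/ioccom.h>
#include <sys/fsctl.h>                     /* fsctl(2) */

/* Mirrored from the definitions above; normally these come from the HFS headers. */
#define HFSIOC_GET_WRITE_GEN_COUNTER _IOR('h', 30, u_int32_t)
#define HFSIOC_GET_DOCUMENT_ID       _IOR('h', 31, u_int32_t)

static int
print_doc_tracking_state(const char *path)
{
        u_int32_t gen_count = 0, doc_id = 0;

        /* Per-file write generation counter. */
        if (fsctl(path, HFSIOC_GET_WRITE_GEN_COUNTER, &gen_count, 0) != 0)
                return -1;

        /* Document ID for the file; per the comment above, revisiond uses this
         * interface to allocate one for older tracked files. */
        if (fsctl(path, HFSIOC_GET_DOCUMENT_ID, &doc_id, 0) != 0)
                return -1;

        printf("%s: write-gen %u, doc-id %u\n", path, gen_count, doc_id);
        return 0;
}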
fp->f_data = kq;
proc_fdlock(p);
+ *fdflags(p, fd) |= UF_EXCLOSE;
procfdtbl_releasefd(p, fd, NULL);
fp_drop(p, fd, fp, 1);
proc_fdunlock(p);
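Since the descriptor is now created with UF_EXCLOSE already set, the userland-visible effect is that kqueue() hands back a close-on-exec fd; a quick check (illustrative only):

#include <stdio.h>
#include <fcntl.h>
#include <sys/event.h>

int
main(void)
{
        int kq = kqueue();
        if (kq < 0)
                return 1;

        /* FD_CLOEXEC is the userland view of the kernel's UF_EXCLOSE flag. */
        int fdflags = fcntl(kq, F_GETFD);
        printf("kqueue fd close-on-exec: %s\n",
            (fdflags != -1 && (fdflags & FD_CLOEXEC)) ? "yes" : "no");
        return 0;
}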
#include <kern/kpc.h>
#include <pexpert/pexpert.h>
+#include <kperf/kperf.h>
/* Various sysctl requests */
#define REQ_CLASSES (1)
if( !kpc_initted )
panic("kpc_init not called");
+ // Most sysctls require an access check, but a few are public.
+ switch( (uintptr_t) arg1 ) {
+ case REQ_CLASSES:
+ case REQ_CONFIG_COUNT:
+ case REQ_COUNTER_COUNT:
+ // These read-only sysctls are public.
+ break;
+
+ default:
+ // Require kperf access to read or write anything else.
+ // This is either root or the blessed pid.
+ ret = kperf_access_check();
+ if (ret) {
+ return ret;
+ }
+ break;
+ }
+
lck_mtx_lock(&sysctl_buffer_lock);
/* which request */
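With this check in place, only the whitelisted read-only requests are reachable without kperf access. A hedged userland probe follows; the "kpc.classes" sysctl name is an assumption about how REQ_CLASSES is exported, not something stated in this diff:

#include <stdio.h>
#include <stdint.h>
#include <sys/sysctl.h>

int
main(void)
{
        uint32_t classes = 0;
        size_t len = sizeof(classes);

        /* Assumed public (REQ_CLASSES-style) request: should succeed unprivileged. */
        if (sysctlbyname("kpc.classes", &classes, &len, NULL, 0) == 0)
                printf("kpc classes bitmask: 0x%x\n", classes);
        else
                perror("kpc.classes");
        return 0;
}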
/*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
if (tlen == 0 && tiwin == tp->snd_wnd) {
+ /*
+ * If both ends send FIN at the same time,
+ * then the ack will be a duplicate ack
+ * but we have to process the FIN. Check
+ * for this condition and process the FIN
+ * instead of the dupack
+ */
+ if ((thflags & TH_FIN) &&
+ (tp->t_flags & TF_SENTFIN) &&
+ !TCPS_HAVERCVDFIN(tp->t_state) &&
+ (th->th_ack + 1) == tp->snd_max) {
+ break;
+ }
process_dupack:
#if MPTCP
/*
/*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
recwin = tcp_sbspace(tp);
+
/*
* If the socket is capable of doing unordered send,
* pull the amount of data that can be sent from the
* If our state indicates that FIN should be sent
* and we have not yet done so, then we need to send.
*/
- if (flags & TH_FIN &&
- ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
+ if ((flags & TH_FIN) &&
+ (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
goto send;
/*
* In SACK, it is possible for tcp_output to fail to send a segment
if (flags & (TH_SYN|TH_FIN)) {
if (flags & TH_SYN)
tp->snd_nxt++;
- if (flags & TH_FIN) {
+ if ((flags & TH_FIN) &&
+ !(tp->t_flags & TF_SENTFIN)) {
tp->snd_nxt++;
tp->t_flags |= TF_SENTFIN;
}
timer:
if (tp->t_timer[TCPT_REXMT] == 0 &&
((sack_rxmit && tp->snd_nxt != tp->snd_max) ||
- tp->snd_nxt != tp->snd_una)) {
+ tp->snd_nxt != tp->snd_una ||
+ (flags & TH_FIN))) {
if (tp->t_timer[TCPT_PERSIST]) {
tp->t_timer[TCPT_PERSIST] = 0;
tp->t_rxtshift = 0;
int xlen = len;
if (flags & TH_SYN)
++xlen;
- if (flags & TH_FIN) {
+ if ((flags & TH_FIN) &&
+ !(tp->t_flags & TF_SENTFIN)) {
++xlen;
tp->t_flags |= TF_SENTFIN;
}
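The new TF_SENTFIN guards exist because a FIN consumes exactly one sequence number, no matter how many times it is retransmitted; a simplified sketch of that accounting (not kernel code):

/* Simplified model of the accounting the TF_SENTFIN checks protect. */
struct fin_state {
        unsigned int snd_nxt;   /* next sequence number to send */
        int          sentfin;   /* mirrors TF_SENTFIN */
};

static void
account_for_fin(struct fin_state *s)
{
        /* Mirrors the (flags & TH_FIN) && !(tp->t_flags & TF_SENTFIN) tests above:
         * only the first transmission of the FIN advances snd_nxt (or xlen). */
        if (!s->sentfin) {
                s->snd_nxt++;
                s->sentfin = 1;
        }
}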
/*
- * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/* Returns true if the timer is on the timer list */
#define TIMER_IS_ON_LIST(tp) ((tp)->t_flags & TF_TIMER_ONLIST)
+/* Run the TCP timerlist at least once every hour */
+#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)
static void add_to_time_wait_locked(struct tcpcb *tp, uint32_t delay);
void add_to_time_wait(struct tcpcb *tp, uint32_t delay) ;
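For scale: TCP_RETRANSHZ is the TCP timer tick rate in ticks per second, so the new cap amounts to one hour's worth of ticks. A standalone restatement, assuming the conventional value of 100 ticks per second:

/* Assumed tick rate; in this kernel family TCP_RETRANSHZ is 100 ticks/second. */
#define TCP_RETRANSHZ            100
#define TCP_TIMERLIST_MAX_OFFSET (60 * 60 * TCP_RETRANSHZ)  /* 360000 ticks == 1 hour */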
int32_t diff;
boolean_t is_fast;
- if (runtime == 0 || index == TCPT_NONE)
+ if (index == TCPT_NONE)
return FALSE;
is_fast = !(IS_TIMER_SLOW(index));
/* If the list is being processed then the state of the list is in flux.
* In this case always acquire the lock and set the state correctly.
*/
- if (listp->running) {
+ if (listp->running)
return TRUE;
- }
+
+ if (!listp->scheduled)
+ return (TRUE);
diff = timer_diff(listp->runtime, 0, runtime, 0);
if (diff <= 0) {
lck_mtx_assert(listp->mtx, LCK_MTX_ASSERT_OWNED);
+ offset = min(offset, TCP_TIMERLIST_MAX_OFFSET);
listp->runtime = tcp_now + offset;
+ if (listp->runtime == 0)
+ listp->runtime++;
clock_interval_to_deadline(offset, NSEC_PER_SEC / TCP_RETRANSHZ,
&deadline);
thread_call_enter_delayed(listp->call, deadline);
+ listp->scheduled = TRUE;
}
/* Function to run the timers for a connection.
* with another thread that can cancel or reschedule the timer that is
* about to run. Check if we need to run anything.
*/
- index = tp->tentry.index;
- timer_val = tp->t_timer[index];
-
- if (index == TCPT_NONE || tp->tentry.runtime == 0)
+ if ((index = tp->tentry.index) == TCPT_NONE)
goto done;
+ timer_val = tp->t_timer[index];
diff = timer_diff(tp->tentry.runtime, 0, tcp_now, 0);
if (diff > 0) {
tp->tentry.index = lo_index;
if (lo_index != TCPT_NONE) {
tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
- } else {
- tp->tentry.runtime = 0;
+ if (tp->tentry.runtime == 0)
+ tp->tentry.runtime++;
}
if (count > 0) {
if (needtorun[i]) {
tp->t_timer[i] = 0;
tp = tcp_timers(tp, i);
- if (tp == NULL)
+ if (tp == NULL) {
+ offset = 0;
+ *(next_index) = TCPT_NONE;
goto done;
+ }
}
}
tcp_set_lotimer_index(tp);
done:
if (tp != NULL && tp->tentry.index == TCPT_NONE) {
tcp_remove_timer(tp);
+ offset = 0;
}
tcp_unlock(so, 1, 0);
return offset;
LIST_FOREACH_SAFE(te, &listp->lhead, le, next_te) {
uint32_t offset = 0;
uint32_t runtime = te->runtime;
- if (TSTMP_GT(runtime, tcp_now)) {
+ if (te->index < TCPT_NONE && TSTMP_GT(runtime, tcp_now)) {
offset = timer_diff(runtime, 0, tcp_now, 0);
if (next_timer == 0 || offset < next_timer) {
next_timer = offset;
tcp_sched_timerlist(next_timer);
} else {
- /* No need to reschedule this timer */
- listp->runtime = 0;
+ /*
+ * Nothing needs to run soon, but keep the timer list running
+ * periodically, at most TCP_TIMERLIST_MAX_OFFSET (one hour) apart.
+ */
+ tcp_sched_timerlist(TCP_TIMERLIST_MAX_OFFSET);
}
listp->running = FALSE;
struct tcptimerentry *te = &tp->tentry;
uint16_t index = te->index;
struct tcptimerlist *listp = &tcp_timer_list;
- uint32_t offset = 0;
+ int32_t offset = 0;
boolean_t is_fast;
int list_locked = 0;
}
is_fast = !(IS_TIMER_SLOW(index));
- offset = te->runtime - tcp_now;
- if (offset == 0) {
+ offset = timer_diff(te->runtime, 0, tcp_now, 0);
+ if (offset <= 0) {
offset = 1;
tcp_timer_advanced++;
}
listp->maxentries = listp->entries;
/* if the list is not scheduled, just schedule it */
- if (listp->runtime == 0)
+ if (!listp->scheduled)
goto schedule;
}
if (is_fast) {
listp->pref_mode = TCP_TIMERLIST_FASTMODE;
} else if (listp->pref_offset == 0 ||
- ((int)offset) < listp->pref_offset) {
+ offset < listp->pref_offset) {
listp->pref_offset = offset;
}
} else {
- int32_t diff;
- diff = timer_diff(listp->runtime, 0, tcp_now, offset);
- if (diff <= 0) {
- /* The list is going to run before this timer */
- goto done;
+ /*
+ * The list could have been scheduled while this
+ * thread was waiting for the lock.
+ */
+ if (listp->scheduled) {
+ int32_t diff;
+ diff = timer_diff(listp->runtime, 0,
+ tcp_now, offset);
+ if (diff <= 0)
+ goto done;
+ else
+ goto schedule;
} else {
goto schedule;
}
tp->tentry.index = lo_index;
if (lo_index != TCPT_NONE) {
tp->tentry.runtime = tp->tentry.timer_start + tp->t_timer[lo_index];
- } else {
- tp->tentry.runtime = 0;
+ if (tp->tentry.runtime == 0)
+ tp->tentry.runtime++;
}
}
lck_mtx_assert(&tp->t_inpcb->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
+ if (tp->t_inpcb->inp_flags2 & INP2_TIMEWAIT)
+ return;
+
tcp_set_lotimer_index(tp);
tcp_sched_timers(tp);
/*
- * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/* Set desired mode when timer list running */
boolean_t running; /* Set when timer list is being processed */
+ boolean_t scheduled; /* Set when timer is scheduled */
#define TCP_TIMERLIST_FASTMODE 0x1
#define TCP_TIMERLIST_SLOWMODE 0x2
uint32_t mode; /* Current mode, fast or slow */
-13.2.0
+13.3.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
off_t discard_offset, off_t discard_end);
#endif /* _SYS_CONF_H_ */
+void
+vm_compressor_do_warmup(void);
+
hibernate_page_list_t *
hibernate_page_list_allocate(boolean_t log);
bzero(&consoleInfo, sizeof(consoleInfo));
IOService::getPlatform()->getConsoleInfo(&consoleInfo);
- // estimate: 5% increase in pages compressed
+ // estimate: 6% increase in pages compressed
// screen preview 2 images compressed 50%
- setFileSize = ((ptoa_64((105 * pageCount) / 100) * gIOHibernateCompression) >> 8)
+ setFileSize = ((ptoa_64((106 * pageCount) / 100) * gIOHibernateCompression) >> 8)
+ vars->page_list->list_size
- + (consoleInfo.v_width * consoleInfo.v_height * 4);
+ + (consoleInfo.v_width * consoleInfo.v_height * 8);
+ enum { setFileRound = 1024*1024ULL };
+ setFileSize = ((setFileSize + setFileRound) & ~(setFileRound - 1));
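The new rounding always leaves at least one full megabyte of slack above the estimate; a standalone restatement of the arithmetic (the helper name is illustrative):

#include <stdint.h>

/* Same round-up as above: add 1 MiB, then truncate to a 1 MiB boundary. */
static uint64_t
round_setfile_size(uint64_t estimate)
{
        const uint64_t setFileRound = 1024 * 1024ULL;

        /* e.g. an exact 300 MiB estimate becomes 301 MiB; 300 MiB + 1 byte also becomes 301 MiB */
        return (estimate + setFileRound) & ~(setFileRound - 1);
}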
HIBLOG("hibernate_page_list_setall preflight pageCount %d est comp %qd setfile %qd min %qd\n",
pageCount, (100ULL * gIOHibernateCompression) >> 8,
err = IOPolledFileOpen(gIOHibernateFilename, setFileSize, vars->ioBuffer,
&vars->fileVars, &vars->fileExtents, &data,
&vars->volumeCryptKey[0]);
+
if (KERN_SUCCESS != err)
{
HIBLOG("IOPolledFileOpen(%x)\n", err);
}
}
if (kIOReturnSuccess != err)
+ {
+ if (kIOReturnOverrun == err)
+ {
+ // update actual compression ratio on not enough space
+ gIOHibernateCompression = (compressedSize << 8) / uncompressedSize;
+ }
break;
+ }
// Header:
if (kIOHibernateStateWakingFromHibernate == gIOHibernateState)
{
gIOHibernateStats->wakeCapability = capability;
+
+ if (kIOPMSystemCapabilityGraphics & capability)
+ {
+ vm_compressor_do_warmup();
+ }
+
}
}
(kFullWakeReasonLocalUser == fullWakeReason) ?
kOSBooleanTrue : kOSBooleanFalse);
}
+#if HIBERNATION
+ IOHibernateSetWakeCapabilities(_pendingCapability);
+#endif
IOService::setAdvisoryTickleEnable( true );
tellClients(kIOMessageSystemWillPowerOn);
need_to_unlock_decompressor = FALSE;
vm_decompressor_unlock();
}
- vm_compressor_do_warmup();
+ vm_compressor_delay_trim();
}
return (KERN_SUCCESS);
}
lck_mtx_unlock_always(&c_seg->c_lock);
if (c_buffer) {
- kernel_memory_depopulate(kernel_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);
+ if (pages_populated)
+ kernel_memory_depopulate(kernel_map, (vm_offset_t) c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);
kmem_free(kernel_map, (vm_offset_t) c_buffer, C_SEG_ALLOCSIZE);
} else if (c_swap_handle)
c_seg = (c_segment_t)queue_first(&c_minor_list_head);
lck_mtx_lock_spin_always(&c_seg->c_lock);
+
+ if (c_seg->c_busy) {
+
+ lck_mtx_unlock_always(c_list_lock);
+ c_seg_wait_on_busy(c_seg);
+ lck_mtx_lock_spin_always(c_list_lock);
+
+ continue;
+ }
c_seg->c_busy = 1;
c_seg_do_minor_compaction_and_unlock(c_seg, TRUE, FALSE, TRUE);
lck_mtx_lock_spin_always(c_list_lock);
- if (!queue_empty(&c_age_list_head)) {
-
- c_seg = (c_segment_t)queue_last(&c_age_list_head);
- first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
- } else
- first_c_segment_to_warm_generation_id = 0;
- fastwake_recording_in_progress = TRUE;
+ if (first_c_segment_to_warm_generation_id == 0) {
+ if (!queue_empty(&c_age_list_head)) {
+ c_seg = (c_segment_t)queue_last(&c_age_list_head);
+ first_c_segment_to_warm_generation_id = c_seg->c_generation_id;
+ } else
+ first_c_segment_to_warm_generation_id = 0;
+ fastwake_recording_in_progress = TRUE;
+ }
lck_mtx_unlock_always(c_list_lock);
}
lck_mtx_lock_spin_always(c_list_lock);
- if (!queue_empty(&c_age_list_head)) {
- c_seg = (c_segment_t)queue_last(&c_age_list_head);
- last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
- } else
- last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
- fastwake_recording_in_progress = FALSE;
+ if (fastwake_recording_in_progress == TRUE) {
+ if (!queue_empty(&c_age_list_head)) {
+ c_seg = (c_segment_t)queue_last(&c_age_list_head);
+
+ last_c_segment_to_warm_generation_id = c_seg->c_generation_id;
+ } else
+ last_c_segment_to_warm_generation_id = first_c_segment_to_warm_generation_id;
+ fastwake_recording_in_progress = FALSE;
+ HIBLOG("vm_compressor_record_warmup (%qd - %qd)\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
+ }
lck_mtx_unlock_always(c_list_lock);
}
#define DELAY_TRIM_ON_WAKE_SECS 4
void
-vm_compressor_do_warmup(void)
+vm_compressor_delay_trim(void)
{
- clock_sec_t sec;
+ clock_sec_t sec;
clock_nsec_t nsec;
clock_get_system_nanotime(&sec, &nsec);
dont_trim_until_ts = sec + DELAY_TRIM_ON_WAKE_SECS;
+}
- if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id)
- return;
+void
+vm_compressor_do_warmup(void)
+{
lck_mtx_lock_spin_always(c_list_lock);
+ if (first_c_segment_to_warm_generation_id == last_c_segment_to_warm_generation_id) {
+ first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
+
+ lck_mtx_unlock_always(c_list_lock);
+ return;
+ }
+
if (compaction_swapper_running == 0) {
fastwake_warmup = TRUE;
{
uint64_t my_thread_id;
c_segment_t c_seg = NULL;
+ AbsoluteTime startTime, endTime;
+ uint64_t nsec;
+
+
+ HIBLOG("vm_compressor_fastwake_warmup (%qd - %qd) - starting\n", first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id);
+
+ clock_get_uptime(&startTime);
lck_mtx_unlock_always(c_list_lock);
lck_mtx_lock_spin_always(&c_seg->c_lock);
lck_mtx_unlock_always(c_list_lock);
- if (c_seg->c_busy)
+ if (c_seg->c_busy) {
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
c_seg_wait_on_busy(c_seg);
- else {
+ PAGE_REPLACEMENT_DISALLOWED(TRUE);
+ } else {
c_seg_swapin(c_seg, TRUE);
lck_mtx_unlock_always(&c_seg->c_lock);
-
c_segment_warmup_count++;
+
+ PAGE_REPLACEMENT_DISALLOWED(FALSE);
vm_pageout_io_throttle();
+ PAGE_REPLACEMENT_DISALLOWED(TRUE);
}
lck_mtx_lock_spin_always(c_list_lock);
}
proc_set_task_policy_thread(kernel_task, my_thread_id,
TASK_POLICY_INTERNAL, TASK_POLICY_IO, THROTTLE_LEVEL_COMPRESSOR_TIER0);
+ clock_get_uptime(&endTime);
+ SUB_ABSOLUTETIME(&endTime, &startTime);
+ absolutetime_to_nanoseconds(endTime, &nsec);
+
+ HIBLOG("vm_compressor_fastwake_warmup completed - took %qd msecs\n", nsec / 1000000ULL);
+
lck_mtx_lock_spin_always(c_list_lock);
+
+ first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
}
fastwake_warmup = FALSE;
}
+ /*
+ * it's possible for the c_age_list_head to be empty if we
+ * hit our limits for growing the compressor pool and we subsequently
+ * hibernated... on the next hibernation we could see the queue as
+ * empty and not proceed even though we have a bunch of segments on
+ * the swapped in queue that need to be dealt with.
+ */
+ vm_compressor_do_delayed_compactions(flush_all);
+
+ vm_compressor_age_swapped_in_segments(flush_all);
+
+
while (!queue_empty(&c_age_list_head) && compaction_swapper_abort == 0) {
if (hibernate_flushing == TRUE) {
}
if (!c_seg->c_filling) {
if (c_seg->c_bytes_used == 0) {
- if (c_seg->c_on_minorcompact_q || c_seg->c_on_swappedout_sparse_q) {
- if (c_seg_try_free(c_seg) == TRUE)
- need_unlock = FALSE;
- } else {
- c_seg_free(c_seg);
- need_unlock = FALSE;
- }
+ if (!c_seg->c_ondisk) {
+ int pages_populated;
+
+ pages_populated = (round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset))) / PAGE_SIZE;
+ c_seg->c_populated_offset = C_SEG_BYTES_TO_OFFSET(0);
+
+ if (pages_populated) {
+ assert(c_seg->c_store.c_buffer != NULL);
+
+ c_seg->c_busy = 1;
+ lck_mtx_unlock_always(&c_seg->c_lock);
+
+ kernel_memory_depopulate(kernel_map, (vm_offset_t) c_seg->c_store.c_buffer, pages_populated * PAGE_SIZE, KMA_COMPRESSOR);
+
+ lck_mtx_lock_spin_always(&c_seg->c_lock);
+ C_SEG_WAKEUP_DONE(c_seg);
+ }
+ if (!c_seg->c_on_minorcompact_q && !c_seg->c_on_swapout_q)
+ c_seg_need_delayed_compaction(c_seg);
+ } else
+ assert(c_seg->c_on_swappedout_sparse_q);
+
} else if (c_seg->c_on_minorcompact_q) {
if (C_SEG_INCORE_IS_SPARSE(c_seg)) {
void vm_decompressor_lock(void);
void vm_decompressor_unlock(void);
+void vm_compressor_delay_trim(void);
void vm_compressor_do_warmup(void);
void vm_compressor_record_warmup_start(void);
void vm_compressor_record_warmup_end(void);
assert(c_seg->c_on_swapout_q);
if (c_seg->c_busy) {
- lck_mtx_unlock_always(&c_seg->c_lock);
lck_mtx_unlock_always(c_list_lock);
- mutex_pause(2);
+ c_seg_wait_on_busy(c_seg);
lck_mtx_lock_spin_always(c_list_lock);
c_seg->c_on_swapout_q = 0;
c_swapout_count--;
- c_seg->c_busy = 1;
- c_seg->c_busy_swapping = 1;
-
vm_swapout_thread_processed_segments++;
thread_wakeup((event_t)&compaction_swapper_running);
+ size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
+
+ if (size == 0) {
+ c_seg_free_locked(c_seg);
+ goto c_seg_was_freed;
+ }
+ c_seg->c_busy = 1;
+ c_seg->c_busy_swapping = 1;
+
lck_mtx_unlock_always(c_list_lock);
addr = (vm_offset_t) c_seg->c_store.c_buffer;
- size = round_page_32(C_SEG_OFFSET_TO_BYTES(c_seg->c_populated_offset));
-
lck_mtx_unlock_always(&c_seg->c_lock);
#if CHECKSUM_THE_SWAP
kmem_free(kernel_map, (vm_offset_t) addr, C_SEG_ALLOCSIZE);
vm_pageout_io_throttle();
-
+c_seg_was_freed:
if (c_swapout_count == 0)
vm_swap_consider_defragmenting();
int cd_found_laundry;
int cd_found_dirty;
int cd_found_xpmapped;
+ int cd_skipped_xpmapped;
int cd_local_free;
int cd_total_free;
int cd_vm_page_wire_count;
} hibernate_stats;
+/*
+ * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
+ * so that we don't overrun the estimated image size, which would
+ * result in a hibernation failure.
+ */
+#define HIBERNATE_XPMAPPED_LIMIT 40000
+
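Assuming 4 KiB pages, the clamp caps the xpmapped contribution to the image at roughly 156 MiB:

/* Back-of-the-envelope for HIBERNATE_XPMAPPED_LIMIT, assuming 4 KiB pages. */
#define ASSUMED_PAGE_SIZE   4096ULL
#define XPMAPPED_BYTES_MAX  (40000ULL * ASSUMED_PAGE_SIZE)   /* 163,840,000 bytes ~= 156 MiB */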
static int
hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
if (discard == FALSE) {
if (!preflight)
hibernate_stats.cd_found_dirty++;
- } else if (m->xpmapped && m->reference) {
- if (!preflight)
- hibernate_stats.cd_found_xpmapped++;
- discard = FALSE;
+ } else if (m->xpmapped && m->reference && !object->internal) {
+ if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
+ if (!preflight)
+ hibernate_stats.cd_found_xpmapped++;
+ discard = FALSE;
+ } else {
+ if (!preflight)
+ hibernate_stats.cd_skipped_xpmapped++;
+ }
}
}
while (FALSE);
m = next;
}
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
- while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
+ m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
+ while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
{
next = (vm_page_t) m->pageq.next;
discard = FALSE;
if (m->dirty)
count_discard_purgeable++;
else
- count_discard_inactive++;
+ count_discard_cleaned++;
discard = discard_all;
}
else
- count_inactive++;
+ count_cleaned++;
count_wire--;
if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
if (discard) hibernate_discard_page(m);
m = next;
}
- m = (vm_page_t) queue_first(&vm_page_queue_cleaned);
- while (m && !queue_end(&vm_page_queue_cleaned, (queue_entry_t)m))
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
+ {
+ next = (vm_page_t) m->pageq.next;
+ discard = FALSE;
+ if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
+ && hibernate_consider_discard(m, preflight))
+ {
+ if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
+ if (m->dirty)
+ count_discard_purgeable++;
+ else
+ count_discard_active++;
+ discard = discard_all;
+ }
+ else
+ count_active++;
+ count_wire--;
+ if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
+ if (discard) hibernate_discard_page(m);
+ m = next;
+ }
+
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ while (m && !queue_end(&vm_page_queue_inactive, (queue_entry_t)m))
{
next = (vm_page_t) m->pageq.next;
discard = FALSE;
if (m->dirty)
count_discard_purgeable++;
else
- count_discard_cleaned++;
+ count_discard_inactive++;
discard = discard_all;
}
else
- count_cleaned++;
+ count_inactive++;
count_wire--;
if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
if (discard) hibernate_discard_page(m);
}
}
- m = (vm_page_t) queue_first(&vm_page_queue_active);
- while (m && !queue_end(&vm_page_queue_active, (queue_entry_t)m))
- {
- next = (vm_page_t) m->pageq.next;
- discard = FALSE;
- if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode)
- && hibernate_consider_discard(m, preflight))
- {
- if (!preflight) hibernate_page_bitset(page_list, TRUE, m->phys_page);
- if (m->dirty)
- count_discard_purgeable++;
- else
- count_discard_active++;
- discard = discard_all;
- }
- else
- count_active++;
- count_wire--;
- if (!preflight) hibernate_page_bitset(page_list_wired, TRUE, m->phys_page);
- if (discard) hibernate_discard_page(m);
- m = next;
- }
-
queue_iterate(&compressor_object->memq, m, vm_page_t, listq)
{
count_compressor++;
discard_all ? "did" : "could",
count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
+ if (hibernate_stats.cd_skipped_xpmapped)
+ HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
+
*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;