* FABN = old FABN - E.blockCount
*
* Inputs:
- * extent_info - This is the structure that contains state about
- * the current file, extent, and extent record that
- * is being relocated. This structure is shared
- * among code that traverses through all the extents
- * of the file, code that relocates extents, and
- * code that splits the extent.
+ * extent_info - This is the structure that contains state about
+ * the current file, extent, and extent record that
+ * is being relocated. This structure is shared
+ * among code that traverses through all the extents
+ * of the file, code that relocates extents, and
+ * code that splits the extent.
+ * newBlockCount - The blockCount of the extent to be split after a
+ * successful split operation.
* Output:
* Zero on success, non-zero on failure.
*/
extents = extent_info->extents;
cp = VTOC(extent_info->vp);
+ if (newBlockCount == 0) {
+ if (hfs_resize_debug) {
+ printf ("hfs_split_extent: No splitting required for newBlockCount=0\n");
+ }
+ return error;
+ }
+
if (hfs_resize_debug) {
printf ("hfs_split_extent: Split record:%u recStartBlock=%u %u:(%u,%u) for %u blocks\n", extent_info->overflow_count, extent_info->recStartBlock, index, extents[index].startBlock, extents[index].blockCount, newBlockCount);
}
goto out;
}
if (hfs_resize_debug) {
- printf ("hfs_split_extent: Deleted record with startBlock=%u\n", (is_xattr ? xattr_key->startBlock : extents_key->startBlock));
+ printf ("hfs_split_extent: Deleted extent record with startBlock=%u\n", (is_xattr ? xattr_key->startBlock : extents_key->startBlock));
}
}
printf ("hfs_split_extent: Inserted extent record with startBlock=%u\n", write_recStartBlock);
}
}
- BTFlushPath(extent_info->fcb);
+
out:
+ /*
+ * Extents overflow btree or attributes btree headers might have
+ * been modified during the split/shift operation, so flush the
+ * changes to the disk while we are inside the journal transaction.
+ * We should only be able to generate I/O that modifies the B-Tree
+ * header nodes while we're in the middle of a journal transaction.
+ * Otherwise it might result in a panic during unmount.
+ */
+ BTFlushPath(extent_info->fcb);
+
if (extents_rec) {
FREE (extents_rec, M_TEMP);
}
*/
if (oldStartBlock < allocLimit) {
newBlockCount = allocLimit - oldStartBlock;
-
+
+ if (hfs_resize_debug) {
+ int idx = extent_info->extent_index;
+ printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+ }
+
/* If the extent belongs to a btree, check and trim
* it to be multiple of the node size.
*/
if (remainder_blocks) {
newBlockCount -= remainder_blocks;
if (hfs_resize_debug) {
- printf ("hfs_reclaim_extent: Fixing extent block count, node_blks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount);
+ printf ("hfs_reclaim_extent: Round-down newBlockCount to be multiple of nodeSize, node_allocblks=%u, old=%u, new=%u\n", node_size/hfsmp->blockSize, newBlockCount + remainder_blocks, newBlockCount);
}
}
}
- }
-
- if (hfs_resize_debug) {
- int idx = extent_info->extent_index;
- printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
+ /* The newBlockCount can be zero because of the rounding-down above,
+ * which keeps btree nodes from being split across extents. In that
+ * case this extent straddling the resize boundary does not require
+ * splitting, so skip ahead to relocating the complete extent.
+ */
+ if (newBlockCount == 0) {
+ if (hfs_resize_debug) {
+ printf ("hfs_reclaim_extent: After round-down newBlockCount=0, skip split, relocate full extent\n");
+ }
+ goto relocate_full_extent;
+ }
}
/* Split the extents into two parts --- the first extent lies
}
/* Split failed, so try to relocate entire extent */
if (hfs_resize_debug) {
- printf ("hfs_reclaim_extent: Split straddling extent failed, reclocate full extent\n");
+ int idx = extent_info->extent_index;
+ printf ("hfs_reclaim_extent: Split straddling extent %u:(%u,%u) for %u blocks failed, relocate full extent\n", idx, extent_info->extents[idx].startBlock, extent_info->extents[idx].blockCount, newBlockCount);
}
}
+relocate_full_extent:
/* At this point, the current extent requires relocation.
* We will try to allocate space equal to the size of the extent
* being relocated first to try to relocate it without splitting.
tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
(void) hfs_update(tdvp, 0);
+
+ /* Update the vnode's name now that the rename has completed. */
+ vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
+ tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
+
+ /*
+ * At this point, we may have a resource fork vnode attached to the
+ * 'from' vnode. If it exists, we will want to update its name, because
+ * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
+ *
+ * Note that the only thing we need to update here is the name attached to
+ * the vnode, since a resource fork vnode does not have a separate resource
+ * cnode -- it's still 'fcp'.
+ */
+ if (fcp->c_rsrc_vp) {
+ char* rsrc_path = NULL;
+ int len;
+
+ /* Create a new temporary buffer that's going to hold the new name */
+ MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
+ len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
+ len = MIN(len, MAXPATHLEN);
+
+ /*
+ * vnode_update_identity will do the following for us:
+ * 1) release reference on the existing rsrc vnode's name.
+ * 2) copy/insert new name into the name cache
+ * 3) attach the new name to the resource vnode
+ * 4) update the vnode's vid
+ */
+ vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
+
+ /* Free the memory associated with the resource fork's name */
+ FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);
+ }
out:
if (got_cookie) {
cat_postflight(hfsmp, &cookie, p);
filt_procattach(struct knote *kn)
{
struct proc *p;
- pid_t selfpid = (pid_t)0;
assert(PID_MAX < NOTE_PDATAMASK);
return (ESRCH);
}
- if ((kn->kn_sfflags & NOTE_EXIT) != 0) {
- selfpid = proc_selfpid();
- /* check for validity of NOTE_EXISTATUS */
- if (((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) &&
- ((p->p_ppid != selfpid) && (((p->p_lflag & P_LTRACED) == 0) || (p->p_oppid != selfpid)))) {
+ const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS;
+
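+	/* Only the target's parent may ask for NOTE_EXITSTATUS; for a
+	 * traced process the original parent also qualifies.  Anyone
+	 * else is refused with EACCES below.
+	 */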
+ if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits)
+ do {
+ pid_t selfpid = proc_selfpid();
+
+ if (p->p_ppid == selfpid)
+ break; /* parent => ok */
+
+ if ((p->p_lflag & P_LTRACED) != 0 &&
+ (p->p_oppid == selfpid))
+ break; /* parent-in-waiting => ok */
+
proc_rele(p);
- return(EACCES);
- }
- }
+ return (EACCES);
+ } while (0);
proc_klist_lock();
struct uthread * uth;
pid_t pid;
int exitval;
+ int knote_hint;
uth = (struct uthread *)get_bsdthread_info(current_thread());
p->task = TASK_NULL;
set_bsdtask_info(task, NULL);
- /* exit status will be seen by parent process */
- proc_knote(p, NOTE_EXIT | (p->p_xstat & 0xffff));
+ knote_hint = NOTE_EXIT | (p->p_xstat & 0xffff);
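+	/* p_oppid is non-zero only while the process is re-parented
+	 * (its original parent is recorded there), so flag the exit
+	 * as NOTE_EXIT_REPARENTED for interested kevent listeners.
+	 */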
+ if (p->p_oppid != 0) {
+ knote_hint |= NOTE_EXIT_REPARENTED;
+ }
+
+ proc_knote(p, knote_hint);
/* mark the thread as the one that is doing proc_exit
* no need to hold proc lock in uthread_free
#endif /* LOCKF_DEBUGGING */
error = msleep(lock, &vp->v_lock, priority, lockstr, 0);
- if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
- if ((block = lf_getblock(lock, -1))) {
- lf_move_blocked(block, lock);
- }
- }
-
if (error == 0 && (lock->lf_flags & F_ABORT) != 0)
error = EBADF;
- if (error) { /* XXX */
+ if (lock->lf_next) {
/*
- * We may have been awakened by a signal and/or by a
- * debugger continuing us (in which cases we must remove
- * ourselves from the blocked list) and/or by another
- * process releasing a lock (in which case we have
- * already been removed from the blocked list and our
- * lf_next field set to NOLOCKF).
+ * lf_wakelock() always sets wakelock->lf_next to
+ * NULL before a wakeup; so we've been woken early
+ * - perhaps by a debugger, signal or other event.
+ *
+ * Remove 'lock' from the block list (avoids double-add
+ * in the spurious case, which would create a cycle)
*/
- if (lock->lf_next) {
- TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
- lock->lf_next = NOLOCKF;
+ TAILQ_REMOVE(&lock->lf_next->lf_blkhd, lock, lf_block);
+ lock->lf_next = NULL;
+
+ if (error == 0) {
+ /*
+ * If this was a spurious wakeup, retry
+ */
+ printf("%s: spurious wakeup, retrying lock\n",
+ __func__);
+ continue;
}
+ }
+
+ if (!TAILQ_EMPTY(&lock->lf_blkhd)) {
+ if ((block = lf_getblock(lock, -1)) != NULL)
+ lf_move_blocked(block, lock);
+ }
+
+ if (error) {
if (!TAILQ_EMPTY(&lock->lf_blkhd))
lf_wakelock(lock, TRUE);
-
FREE(lock, M_LOCKF);
return (error);
- } /* XXX */
+ }
}
+
/*
* No blocks!! Add the lock. Note that we will
* downgrade or upgrade any overlapping locks this
struct lockf *tlock;
TAILQ_FOREACH(tlock, &wakelock->lf_blkhd, lf_block) {
+ if (TAILQ_NEXT(tlock, lf_block) == tlock) {
+ /* See rdar://10887303 */
+ panic("cycle in wakelock list");
+ }
tlock->lf_next = wakelock;
}
}
for (i = 0; i < numpages; i++, page += NBPG) {
ppnum_t offset = ((char *)page - (char *)mbutl) / NBPG;
- ppnum_t new_page = pmap_find_phys(kernel_pmap,
- (vm_offset_t)page);
+ ppnum_t new_page = pmap_find_phys(kernel_pmap, page);
/*
* In the case of no mapper being available the physical page is
* used as is; if there is a mapper the appropriate I/O page is
* returned.
*/
VERIFY(offset < mcl_pages);
- new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+ if (mcl_paddr_base) {
+ bzero((void *)(uintptr_t) page, page_size);
+ new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+ }
mcl_paddr[offset] = new_page << PGSHIFT;
/* Pattern-fill this fresh page */
case TCP_LRO_COALESCE:
if ((payload_len != 0) && (unknown_tcpopts == 0) &&
- (tcpflags == 0) && (ecn == 0) && (to.to_flags & TOF_TS)) {
+ (tcpflags == 0) && (ecn != IPTOS_ECN_CE) && (to.to_flags & TOF_TS)) {
tcp_lro_coalesce(flow_id, lro_mb, tcp_hdr, payload_len,
drop_hdrlen, &to,
(to.to_flags & TOF_TS) ? (u_int32_t *)(void *)(optp + 4) : NULL,
u_int16_t socket_id = get_socket_id(so);
int so_options = so->so_options;
struct rtentry *rt;
- u_int32_t basertt, svc_flags = 0;
+ u_int32_t basertt, svc_flags = 0, allocated_len;
u_int32_t lro_ackmore = (tp->t_lropktlen != 0) ? 1 : 0;
struct mbuf *mnext = NULL;
int sackoptlen = 0;
goto send;
}
if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
- tp->t_flags &= ~TF_MAXSEGSNT;
+ if (len >= tp->t_maxseg)
+ tp->t_flags |= TF_MAXSEGSNT;
+ else
+ tp->t_flags &= ~TF_MAXSEGSNT;
goto send;
}
if (sack_rxmit)
}
}
-/*#ifdef DIAGNOSTIC*/
-#if INET6
if (max_linkhdr + hdrlen > MCLBYTES)
panic("tcphdr too big");
-#else
- if (max_linkhdr + hdrlen > MHLEN)
- panic("tcphdr too big");
-#endif
-/*#endif*/
/* Check if there is enough data in the send socket
* buffer to start measuring bw
tcpstat.tcps_sndrexmitpack++;
tcpstat.tcps_sndrexmitbyte += len;
if (nstat_collect) {
- nstat_route_tx(tp->t_inpcb->inp_route.ro_rt, 1, len, NSTAT_TX_FLAG_RETRANSMIT);
+ nstat_route_tx(tp->t_inpcb->inp_route.ro_rt, 1,
+ len, NSTAT_TX_FLAG_RETRANSMIT);
locked_add_64(&tp->t_inpcb->inp_stat->txpackets, 1);
locked_add_64(&tp->t_inpcb->inp_stat->txbytes, len);
tp->t_stat.txretransmitbytes += len;
locked_add_64(&tp->t_inpcb->inp_stat->txbytes, len);
}
}
-#ifdef notyet
- if ((m = m_copypack(so->so_snd.sb_mb, off,
- (int)len, max_linkhdr + hdrlen)) == 0) {
- error = ENOBUFS;
- goto out;
- }
- /*
- * m_copypack left space for our hdr; use it.
- */
- m->m_len += hdrlen;
- m->m_data -= hdrlen;
-#else
/*
* try to use the new interface that allocates all
* the necessary mbuf hdrs under 1 mbuf lock and
* data area (no cluster attached)
*/
m = NULL;
-#if INET6
+
+ /* minimum length we are going to allocate */
+ allocated_len = MHLEN;
if (MHLEN < hdrlen + max_linkhdr) {
- MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */
+ MGETHDR(m, M_DONTWAIT, MT_HEADER);
if (m == NULL) {
- error = ENOBUFS;
+ error = ENOBUFS;
goto out;
}
MCLGET(m, M_DONTWAIT);
}
m->m_data += max_linkhdr;
m->m_len = hdrlen;
+ allocated_len = MCLBYTES;
}
-#endif
- if (len <= MHLEN - hdrlen - max_linkhdr) {
+ if (len <= allocated_len - hdrlen - max_linkhdr) {
if (m == NULL) {
- MGETHDR(m, M_DONTWAIT, MT_HEADER); /* MAC-OK */
+ VERIFY(allocated_len <= MHLEN);
+ MGETHDR(m, M_DONTWAIT, MT_HEADER);
if (m == NULL) {
- error = ENOBUFS;
+ error = ENOBUFS;
goto out;
}
m->m_data += max_linkhdr;
m->m_len = hdrlen;
}
}
-#endif
/*
* If we're sending everything we've got, set PUSH.
* (This will keep happy those implementations which only
error = ENOBUFS;
goto out;
}
-#if INET6
- if (isipv6 && (MHLEN < hdrlen + max_linkhdr) &&
- MHLEN >= hdrlen) {
- MH_ALIGN(m, hdrlen);
- } else
-#endif
- m->m_data += max_linkhdr;
+ if (MHLEN < (hdrlen + max_linkhdr)) {
+ MCLGET(m, M_DONTWAIT);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_freem(m);
+ error = ENOBUFS;
+ goto out;
+ }
+ }
+ m->m_data += max_linkhdr;
m->m_len = hdrlen;
}
m->m_pkthdr.rcvif = 0;
_max_protohdr = TCP_MINPROTOHDR;
_max_protohdr = max_protohdr; /* round it up */
}
- if (max_linkhdr + max_protohdr > MHLEN)
+ if (max_linkhdr + max_protohdr > MCLBYTES)
panic("tcp_init");
#undef TCP_MINPROTOHDR
int i;
struct timeval tv;
+ _CASSERT((sizeof(struct ip6_hdr) + sizeof(struct icmp6_hdr)) <=
+ _MHLEN);
+
PE_parse_boot_argn("net.inet6.ip6.scopedroute", &ip6_doscopedroute,
sizeof (ip6_doscopedroute));
{
int i;
-
+
+ _CASSERT(PFKEY_ALIGN8(sizeof(struct sadb_msg)) <= _MHLEN);
+
sadb_mutex_grp_attr = lck_grp_attr_alloc_init();
sadb_mutex_grp = lck_grp_alloc_init("sadb", sadb_mutex_grp_attr);
sadb_mutex_attr = lck_attr_alloc_init();
#define CP_READ_ACCESS 0x1
#define CP_WRITE_ACCESS 0x2
+/*
+ * Check for this version when deciding to enable features
+ */
#define CONTENT_PROTECTION_XATTR_NAME "com.apple.system.cprotect"
#define CP_NEW_MAJOR_VERS 4
#define CP_PREV_MAJOR_VERS 2
#define NOTE_PDATAMASK 0x000fffff /* mask for pid/signal */
#define NOTE_PCTRLMASK (~NOTE_PDATAMASK)
+/*
+ * If NOTE_EXITSTATUS is present, provide additional info about the exiting process.
+ */
+#define NOTE_EXIT_REPARENTED 0x00080000 /* exited while reparented */
+
/*
* data/hint fflags for EVFILT_VM, shared with userspace.
*/
uint32_t num_dropped;
uint64_t max_event_id;
struct fsevent_handle *fseh;
+ pid_t pid;
+ char proc_name[(2 * MAXCOMLEN) + 1];
} fs_event_watcher;
// fs_event_watcher flags
#define WATCHER_CLOSING 0x0002
#define WATCHER_WANTS_COMPACT_EVENTS 0x0004
#define WATCHER_WANTS_EXTENDED_INFO 0x0008
-
+#define WATCHER_APPLE_SYSTEM_SERVICE 0x0010 // fseventsd, coreservicesd, mds
#define MAX_WATCHERS 8
static fs_event_watcher *watcher_table[MAX_WATCHERS];
int (*)(const void *, const void *));
+
+/* From kdp_udp.c + user mode Libc - this ought to be in a library */
+static char *
+strnstr(char *s, const char *find, size_t slen)
+{
+ char c, sc;
+ size_t len;
+
+ if ((c = *find++) != '\0') {
+ len = strlen(find);
+ do {
+ do {
+ if ((sc = *s++) == '\0' || slen-- < 1)
+ return (NULL);
+ } while (sc != c);
+ if (len > slen)
+ return (NULL);
+ } while (strncmp(s, find, len) != 0);
+ s--;
+ }
+ return (s);
+}
+
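+/* Suppress events under Spotlight / Time Machine private directories for
+ * watchers that are not Apple system services (see fmod_watch, radar 12034844).
+ */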
+static int
+is_ignored_directory(const char *path) {
+
+ if (!path) {
+ return 0;
+ }
+
+#define IS_TLD(x) strnstr((char *) path, x, MAXPATHLEN)
+ if (IS_TLD("/.Spotlight-V100/") ||
+ IS_TLD("/.MobileBackups/") ||
+ IS_TLD("/Backups.backupdb/")) {
+ return 1;
+ }
+#undef IS_TLD
+
+ return 0;
+}
+
static void
fsevents_internal_init(void)
{
return 1;
}
-static int
-prefix_match_len(const char *str1, const char *str2)
-{
- int len=0;
-
- while(*str1 && *str2 && *str1 == *str2) {
- len++;
- str1++;
- str2++;
- }
-
- if (*str1 == '\0' && *str2 == '\0') {
- len++;
- }
-
- return len;
-}
-
-
-struct history_item {
- kfs_event *kfse;
- kfs_event *oldest_kfse;
- int counter;
-};
-
-static int
-compare_history_items(const void *_a, const void *_b)
-{
- const struct history_item *a = (const struct history_item *)_a;
- const struct history_item *b = (const struct history_item *)_b;
-
- // we want a descending order
- return (b->counter - a->counter);
-}
#define is_throw_away(x) ((x) == FSE_STAT_CHANGED || (x) == FSE_CONTENT_MODIFIED)
#define KFSE_RECYCLED 0x0004
int num_dropped = 0;
-int num_combined_events = 0;
-int num_added_to_parent = 0;
int num_parent_switch = 0;
int num_recycled_rename = 0;
-//
-// NOTE: you must call lock_fs_event_list() before calling
-// this function.
-//
-static kfs_event *
-find_an_event(const char *str, int len, kfs_event *do_not_reuse, int *reuse_type, int *longest_match_len)
-{
- kfs_event *kfse, *best_kfse=NULL;
-
-// this seems to be enough to find most duplicate events for the same vnode
-#define MAX_HISTORY 12
- struct history_item history[MAX_HISTORY];
- int i;
-
- *longest_match_len = 0;
- *reuse_type = 0;
-
- memset(history, 0, sizeof(history));
-
- //
- // now walk the list of events and try to find the best match
- // for this event. if we have a vnode, we look for an event
- // that already references the vnode. if we don't find one
- // we'll also take the parent of this vnode (in which case it
- // will be marked as having dropped events within it).
- //
- // if we have a string we look for the longest match on the
- // path we have.
- //
-
- LIST_FOREACH(kfse, &kfse_list_head, kevent_list) {
- int match_len;
-
- //
- // don't look at events that are still in the process of being
- // created, have a null vnode ptr or rename/exchange events.
- //
- if ( (kfse->flags & KFSE_BEING_CREATED) || kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {
-
- continue;
- }
-
- if (str != NULL) {
- if (kfse->len != 0 && kfse->str != NULL) {
- match_len = prefix_match_len(str, kfse->str);
- if (match_len > *longest_match_len) {
- best_kfse = kfse;
- *longest_match_len = match_len;
- }
- }
- }
-
- if (kfse == do_not_reuse) {
- continue;
- }
-
- for(i=0; i < MAX_HISTORY; i++) {
- if (history[i].kfse == NULL) {
- break;
- }
-
- //
- // do a quick check to see if we've got two simple events
- // that we can cheaply combine. if the event we're looking
- // at and one of the events in the history table are for the
- // same path then we'll just mark the newer event as combined
- // and recyle the older event.
- //
- if (history[i].kfse->str == kfse->str) {
-
- OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &kfse->flags);
- *reuse_type = KFSE_RECYCLED;
- history[i].kfse->flags |= KFSE_RECYCLED_EVENT;
- return history[i].kfse;
- }
- }
-
- if (i < MAX_HISTORY && history[i].kfse == NULL) {
- history[i].kfse = kfse;
- history[i].counter = 1;
- } else if (i >= MAX_HISTORY) {
- qsort(history, MAX_HISTORY, sizeof(struct history_item), compare_history_items);
-
- // pluck off the lowest guy if he's only got a count of 1
- if (history[MAX_HISTORY-1].counter == 1) {
- history[MAX_HISTORY-1].kfse = kfse;
- }
- }
- }
-
-
- if (str != NULL && best_kfse) {
- if (*longest_match_len <= 1) {
- // if the best match we had was "/" then basically we're toast...
- *longest_match_len = 0;
- best_kfse = NULL;
- } else if (*longest_match_len != len) {
- OSBitOrAtomic16(KFSE_CONTAINS_DROPPED_EVENTS, &best_kfse->flags);
- *reuse_type = KFSE_COLLAPSED;
- } else {
- OSBitOrAtomic16(KFSE_COMBINED_EVENTS, &best_kfse->flags);
- *reuse_type = KFSE_COMBINED;
- }
- }
-
- return best_kfse;
-}
-
-
static struct timeval last_print;
//
add_fsevent(int type, vfs_context_t ctx, ...)
{
struct proc *p = vfs_context_proc(ctx);
- int i, arg_type, skip_init=0, longest_match_len, ret;
+ int i, arg_type, ret;
kfs_event *kfse, *kfse_dest=NULL, *cur;
fs_event_watcher *watcher;
va_list ap;
int error = 0, did_alloc=0, need_event_unlock = 0;
dev_t dev = 0;
uint64_t now, elapsed;
- int reuse_type = 0;
char *pathbuff=NULL;
int pathbuff_len;
if (kfse == NULL) { // yikes! no free events
- int len=0;
- char *str;
-
- //
- // Figure out what kind of reference we have to the
- // file in this event. This helps us find an event
- // to combine/collapse into to make room.
- //
- // If we have a rename or exchange event then we
- // don't want to go through the normal path, we
- // want to "steal" an event instead (which is what
- // find_an_event() will do if str is null).
- //
- arg_type = va_arg(ap, int32_t);
- if (type == FSE_RENAME || type == FSE_EXCHANGE) {
- str = NULL;
- } else if (arg_type == FSE_ARG_STRING) {
- len = va_arg(ap, int32_t);
- str = va_arg(ap, char *);
- } else if (arg_type == FSE_ARG_VNODE) {
- struct vnode *vp;
-
- vp = va_arg(ap, struct vnode *);
- pathbuff = get_pathbuff();
- pathbuff_len = MAXPATHLEN;
- if (vn_getpath(vp, pathbuff, &pathbuff_len) != 0 || pathbuff[0] == '\0') {
- release_pathbuff(pathbuff);
- pathbuff = NULL;
- }
- str = pathbuff;
- } else {
- str = NULL;
- }
-
- //
- // This will go through all events and find one that we
- // can combine with (hopefully), or "collapse" into (i.e
- // it has the same parent) or in the worst case we have
- // to "recycle" an event which means that it will combine
- // two other events and return us the now unused event.
- // failing all that, find_an_event() could still return
- // null and if it does then we have a catastrophic dropped
- // events scenario.
- //
- kfse = find_an_event(str, len, NULL, &reuse_type, &longest_match_len);
-
- if (kfse == NULL) {
- bail_early:
-
unlock_fs_event_list();
lock_watch_table();
continue;
}
- printf("add_fsevent: watcher %p: num dropped %d rd %4d wr %4d q_size %4d flags 0x%x\n",
- watcher_table[ii], watcher_table[ii]->num_dropped,
- watcher_table[ii]->rd, watcher_table[ii]->wr,
- watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
+ printf("add_fsevent: watcher %s %p: rd %4d wr %4d q_size %4d flags 0x%x\n",
+ watcher_table[ii]->proc_name,
+ watcher_table[ii],
+ watcher_table[ii]->rd, watcher_table[ii]->wr,
+ watcher_table[ii]->eventq_size, watcher_table[ii]->flags);
}
last_print = current_tv;
release_pathbuff(pathbuff);
pathbuff = NULL;
}
-
return ENOSPC;
}
- if ((type == FSE_RENAME || type == FSE_EXCHANGE) && reuse_type != KFSE_RECYCLED) {
- panic("add_fsevent: type == %d but reuse type == %d!\n", type, reuse_type);
- } else if ((kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) && kfse->dest == NULL) {
- panic("add_fsevent: bogus kfse %p (type %d, but dest is NULL)\n", kfse, kfse->type);
- } else if (kfse->type == FSE_RENAME || kfse->type == FSE_EXCHANGE) {
- panic("add_fsevent: we should never re-use rename events (kfse %p reuse type %d)!\n", kfse, reuse_type);
- }
-
- if (reuse_type == KFSE_COLLAPSED) {
- if (str) {
- const char *tmp_ptr, *new_str;
-
- //
- // if we collapsed and have a string we have to chop off the
- // tail component of the pathname to get the parent.
- //
- // NOTE: it is VERY IMPORTANT that we leave the trailing slash
- // on the pathname. user-level code depends on this.
- //
- if (str[0] == '\0' || longest_match_len <= 1) {
- printf("add_fsevent: strange state (str %s / longest_match_len %d)\n", str, longest_match_len);
- if (longest_match_len < 0) {
- panic("add_fsevent: longest_match_len %d\n", longest_match_len);
- }
- }
- // chop off the tail component if it's not the
- // first character...
- if (longest_match_len > 1) {
- str[longest_match_len] = '\0';
- } else if (longest_match_len == 0) {
- longest_match_len = 1;
- }
-
- new_str = vfs_addname(str, longest_match_len, 0, 0);
- if (new_str == NULL || new_str[0] == '\0') {
- panic("add_fsevent: longest match is strange (new_str %p).\n", new_str);
- }
-
- lck_rw_lock_exclusive(&event_handling_lock);
-
- kfse->len = longest_match_len;
- tmp_ptr = kfse->str;
- kfse->str = new_str;
- kfse->ino = 0;
- kfse->mode = 0;
- kfse->uid = 0;
- kfse->gid = 0;
-
- lck_rw_unlock_exclusive(&event_handling_lock);
-
- vfs_removename(tmp_ptr);
- } else {
- panic("add_fsevent: don't have a vnode or a string pointer (kfse %p)\n", kfse);
- }
- }
-
- if (reuse_type == KFSE_RECYCLED && (type == FSE_RENAME || type == FSE_EXCHANGE)) {
-
- // if we're recycling this kfse and we have a rename or
- // exchange event then we need to also get an event for
- // kfse_dest.
- //
- if (did_alloc) {
- // only happens if we allocated one but then failed
- // for kfse_dest (and thus free'd the first one we
- // allocated)
- kfse_dest = zalloc_noblock(event_zone);
- if (kfse_dest != NULL) {
- memset(kfse_dest, 0, sizeof(kfs_event));
- kfse_dest->refcount = 1;
- OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
- } else {
- did_alloc = 0;
- }
- }
-
- if (kfse_dest == NULL) {
- int dest_reuse_type, dest_match_len;
-
- kfse_dest = find_an_event(NULL, 0, kfse, &dest_reuse_type, &dest_match_len);
-
- if (kfse_dest == NULL) {
- // nothing we can do... gotta bail out
- goto bail_early;
- }
-
- if (dest_reuse_type != KFSE_RECYCLED) {
- panic("add_fsevent: type == %d but dest_reuse type == %d!\n", type, dest_reuse_type);
- }
- }
- }
-
-
- //
- // Here we check for some fast-path cases so that we can
- // jump over the normal initialization and just get on
- // with delivering the event. These cases are when we're
- // combining/collapsing an event and so basically there is
- // no more work to do (aside from a little book-keeping)
- //
- if (str && kfse->len != 0) {
- kfse->abstime = now;
- OSAddAtomic(1, &kfse->refcount);
- skip_init = 1;
-
- if (reuse_type == KFSE_COMBINED) {
- num_combined_events++;
- } else if (reuse_type == KFSE_COLLAPSED) {
- num_added_to_parent++;
- }
- } else if (reuse_type != KFSE_RECYCLED) {
- panic("add_fsevent: I'm so confused! (reuse_type %d str %p kfse->len %d)\n",
- reuse_type, str, kfse->len);
- }
-
- va_end(ap);
-
-
- if (skip_init) {
- if (kfse->refcount < 1) {
- panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
- }
-
- last_event_ptr = kfse;
- unlock_fs_event_list();
- goto normal_delivery;
-
- } else if (reuse_type == KFSE_RECYCLED || reuse_type == KFSE_COMBINED) {
-
- //
- // If we're here we have to clear out the kfs_event(s)
- // that we were given by find_an_event() and set it
- // up to be re-filled in by the normal code path.
- //
- va_start(ap, ctx);
-
- need_event_unlock = 1;
- lck_rw_lock_exclusive(&event_handling_lock);
-
- OSAddAtomic(1, &kfse->refcount);
-
- if (kfse->refcount < 1) {
- panic("add_fsevent: line %d: kfse recount %d but should be at least 1\n", __LINE__, kfse->refcount);
- }
-
- if (kfse->len == 0) {
- panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
- // vnode_rele_ext(kfse->fref.vp, O_EVTONLY, 0);
- } else {
- vfs_removename(kfse->str);
- kfse->len = 0;
- }
- kfse->str = NULL;
-
- if (kfse->kevent_list.le_prev != NULL) {
- num_events_outstanding--;
- if (kfse->type == FSE_RENAME) {
- num_pending_rename--;
- }
- LIST_REMOVE(kfse, kevent_list);
- memset(&kfse->kevent_list, 0, sizeof(kfse->kevent_list));
- }
-
- kfse->flags = 0 | KFSE_RECYCLED_EVENT;
-
- if (kfse_dest) {
- OSAddAtomic(1, &kfse_dest->refcount);
- kfse_dest->flags = 0 | KFSE_RECYCLED_EVENT;
-
- if (did_alloc == 0) {
- if (kfse_dest->len == 0) {
- panic("%s:%d: no more fref.vp\n", __FILE__, __LINE__);
- // vnode_rele_ext(kfse_dest->fref.vp, O_EVTONLY, 0);
- } else {
- vfs_removename(kfse_dest->str);
- kfse_dest->len = 0;
- }
- kfse_dest->str = NULL;
-
- if (kfse_dest->kevent_list.le_prev != NULL) {
- num_events_outstanding--;
- LIST_REMOVE(kfse_dest, kevent_list);
- memset(&kfse_dest->kevent_list, 0, sizeof(kfse_dest->kevent_list));
- }
-
- if (kfse_dest->dest) {
- panic("add_fsevent: should never recycle a rename event! kfse %p\n", kfse);
- }
- }
- }
-
- OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
- if (kfse_dest) {
- OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse_dest->flags);
- }
-
- goto process_normally;
- }
- }
-
- if (reuse_type != 0) {
- panic("fsevents: we have a reuse_type (%d) but are about to clear out kfse %p\n", reuse_type, kfse);
- }
-
- //
- // we only want to do this for brand new events, not
- // events which have been recycled.
- //
memset(kfse, 0, sizeof(kfs_event));
kfse->refcount = 1;
OSBitOrAtomic16(KFSE_BEING_CREATED, &kfse->flags);
- process_normally:
last_event_ptr = kfse;
kfse->type = type;
kfse->abstime = now;
lck_rw_unlock_shared(&event_handling_lock);
}
- normal_delivery:
// unlock this here so we don't hold it across the
// event delivery loop.
if (need_event_unlock) {
}
}
-
static int
add_watcher(int8_t *event_list, int32_t num_events, int32_t eventq_size, fs_event_watcher **watcher_out, void *fseh)
{
watcher->num_readers = 0;
watcher->max_event_id = 0;
watcher->fseh = fseh;
+ watcher->pid = proc_selfpid();
+ proc_selfname(watcher->proc_name, sizeof(watcher->proc_name));
watcher->num_dropped = 0; // XXXdbg - debugging
+ if (!strncmp(watcher->proc_name, "fseventsd", sizeof(watcher->proc_name)) ||
+ !strncmp(watcher->proc_name, "coreservicesd", sizeof(watcher->proc_name)) ||
+ !strncmp(watcher->proc_name, "mds", sizeof(watcher->proc_name))) {
+ watcher->flags |= WATCHER_APPLE_SYSTEM_SERVICE;
+ }
+
lock_watch_table();
// now update the global list of who's interested in
unlock_watch_table();
while (watcher->num_readers > 1 && counter++ < 5000) {
+ lock_watch_table();
fsevents_wakeup(watcher); // in case they're asleep
+ unlock_watch_table();
tsleep(watcher, PRIBIO, "fsevents-close", 1);
}
}
// drain the event_queue
- while(watcher->rd != watcher->wr) {
- lck_rw_lock_shared(&event_handling_lock);
+ while(watcher->rd != watcher->wr) {
+ lck_rw_lock_exclusive(&event_handling_lock);
kfse = watcher->event_queue[watcher->rd];
- if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
- panic("remove_watcher: bogus kfse %p during cleanup (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
+ if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+ lck_rw_unlock_exclusive(&event_handling_lock);
+ break;
}
-
- lck_rw_unlock_shared(&event_handling_lock);
-
+ watcher->event_queue[watcher->rd] = NULL;
watcher->rd = (watcher->rd+1) % watcher->eventq_size;
-
+ OSSynchronizeIO();
if (kfse != NULL) {
release_event_ref(kfse);
}
+ lck_rw_unlock_exclusive(&event_handling_lock);
}
+
if (watcher->event_list) {
FREE(watcher->event_list, M_TEMP);
// send any pending events if no more are received in the next
// EVENT_DELAY_IN_MS milli-seconds.
//
- if ( (watcher->rd < watcher->wr && (watcher->wr - watcher->rd) > MAX_NUM_PENDING)
- || (watcher->rd > watcher->wr && (watcher->wr + watcher->eventq_size - watcher->rd) > MAX_NUM_PENDING)) {
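+	/* Count the events queued but not yet delivered, allowing for
+	 * wrap-around of the rd/wr indices in the ring buffer.
+	 */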
+ int32_t num_pending = 0;
+ if (watcher->rd < watcher->wr) {
+ num_pending = watcher->wr - watcher->rd;
+ }
- fsevents_wakeup(watcher);
+ if (watcher->rd > watcher->wr) {
+ num_pending = watcher->wr + watcher->eventq_size - watcher->rd;
+ }
- } else if (timer_set == 0) {
+ if (num_pending > (watcher->eventq_size*3/4) && !(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE)) {
+ /* Non-Apple Service is falling behind, start dropping events for this process */
- schedule_event_wakeup();
- }
-
- return 0;
+ lck_rw_lock_exclusive(&event_handling_lock);
+ while (watcher->rd != watcher->wr) {
+ kfse = watcher->event_queue[watcher->rd];
+ if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+ lck_rw_unlock_exclusive(&event_handling_lock);
+ break;
+ }
+ watcher->event_queue[watcher->rd] = NULL;
+ watcher->rd = (watcher->rd+1) % watcher->eventq_size;
+ OSSynchronizeIO();
+ if (kfse != NULL) {
+ release_event_ref(kfse);
+ }
+ }
+ lck_rw_unlock_exclusive(&event_handling_lock);
+
+ printf("fsevents: watcher failing behind: %s (pid: %d) rd: %4d wr: %4d q_size: %4d flags: 0x%x\n",
+ watcher->proc_name, watcher->pid, watcher->rd, watcher->wr,
+ watcher->eventq_size, watcher->flags);
+
+ watcher->flags |= WATCHER_DROPPED_EVENTS;
+ fsevents_wakeup(watcher);
+ } else if (num_pending > MAX_NUM_PENDING) {
+ fsevents_wakeup(watcher);
+ } else if (timer_set == 0) {
+ schedule_event_wakeup();
+ }
+
+ return 0;
}
static int
user_ssize_t last_full_event_resid;
kfs_event *kfse;
uint16_t tmp16;
+ int skipped;
last_full_event_resid = uio_resid(uio);
return EAGAIN;
}
+ restart_watch:
if (watcher->rd == watcher->wr) {
if (watcher->flags & WATCHER_CLOSING) {
OSAddAtomic(-1, &watcher->num_readers);
watcher->flags &= ~WATCHER_DROPPED_EVENTS;
}
+ skipped = 0;
while (uio_resid(uio) > 0 && watcher->rd != watcher->wr) {
if (watcher->flags & WATCHER_CLOSING) {
break;
lck_rw_lock_shared(&event_handling_lock);
kfse = watcher->event_queue[watcher->rd];
- if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
- panic("fmod_watch: someone left me a bogus kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
+ if (!kfse || kfse->type == FSE_INVALID || kfse->refcount < 1) {
+ lck_rw_unlock_shared(&event_handling_lock);
+ break;
}
if (watcher->event_list[kfse->type] == FSE_REPORT && watcher_cares_about_dev(watcher, kfse->dev)) {
+ if (!(watcher->flags & WATCHER_APPLE_SYSTEM_SERVICE) && is_ignored_directory(kfse->str)) {
+ // If this is not an Apple System Service, skip specified directories
+ // radar://12034844
+ error = 0;
+ skipped = 1;
+ } else {
+
+ skipped = 0;
if (last_event_ptr == kfse) {
last_event_ptr = NULL;
last_event_type = -1;
}
last_full_event_resid = uio_resid(uio);
+ }
}
- lck_rw_unlock_shared(&event_handling_lock);
-
watcher->rd = (watcher->rd + 1) % watcher->eventq_size;
OSSynchronizeIO();
-
- if (kfse->type == FSE_INVALID || kfse->refcount < 1) {
- panic("fmod_watch:2: my kfse became bogus! kfse %p (type %d refcount %d rd %d wr %d)\n", kfse, kfse->type, kfse->refcount, watcher->rd, watcher->wr);
- }
-
release_event_ref(kfse);
+
+ lck_rw_unlock_shared(&event_handling_lock);
+ }
+
+ if (skipped && error == 0) {
+ goto restart_watch;
}
get_out:
// and decision to tsleep in fmod_watch... this bit of
// latency is a decent tradeoff against not having to
// take and drop a lock in fmod_watch
+ lock_watch_table();
fsevents_wakeup(fseh->watcher);
+ unlock_watch_table();
tsleep((caddr_t)fseh->watcher, PRIBIO, "watcher-close", 1);
}
int flags,
int *errorp)
{
- struct uthread *ut;
upl_page_info_t *pl;
int result = PAGER_SUCCESS;
int error = 0;
if (errorp)
*errorp = result;
- ut = get_bsdthread_info(current_thread());
-
- if (ut->uu_lowpri_window) {
- /*
- * task is marked as a low priority I/O type
- * and the I/O we issued while in this page fault
- * collided with normal I/O operations... we'll
- * delay in order to mitigate the impact of this
- * task on the normal operation of the system
- */
- throttle_lowpri_io(TRUE);
- }
return (error);
}
__ZN8IOMapper17setMapperRequiredEb
__ZN8IOMapper19copyMapperForDeviceEP9IOService
__ZN8IOMapper19waitForSystemMapperEv
+__ZN8IOMapper13iovmMapMemoryEP8OSObjectjjjP13upl_page_infoPK21IODMAMapSpecification
__ZN8IOMapper4freeEv
__ZN8IOMapper5startEP9IOService
__ZN8IOMapper7gSystemE
__ZN8IOMapper10iovmInsertEjmPjm
__ZN8IOMapper11NewARTTableEmPPvPj
__ZN8IOMapper12FreeARTTableEP6OSDatam
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
__ZN8IOMapper18_RESERVEDIOMapper4Ev
__ZN8IOMapper18_RESERVEDIOMapper5Ev
__ZN8IOMapper18_RESERVEDIOMapper6Ev
__ZN8IOMapper10iovmInsertEjjPjj
__ZN8IOMapper11NewARTTableEyPPvPj
__ZN8IOMapper12FreeARTTableEP6OSDatay
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
__ZN8IOMapper18_RESERVEDIOMapper4Ev
__ZN8IOMapper18_RESERVEDIOMapper5Ev
__ZN8IOMapper18_RESERVEDIOMapper6Ev
-12.1.0
+12.2.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
_net_del_domain
_net_del_proto
_netboot_root
-_perf_monitor_register
+_perf_monitor_register_*
_perf_monitor_unregister
_pffinddomain
_pffindproto
__ZN8IOMapper11NewARTTableEmPPvPj
__ZN8IOMapper12FreeARTTableEP6OSDatam
__ZN8IOMapper17setMapperRequiredEb
-__ZN8IOMapper18_RESERVEDIOMapper3Ev
__ZN8IOMapper18_RESERVEDIOMapper4Ev
__ZN8IOMapper18_RESERVEDIOMapper5Ev
__ZN8IOMapper18_RESERVEDIOMapper6Ev
_NDR_record
_PE_kputc
__Z22OSFlushObjectTrackListv
+__ZN11IOMemoryMap9wireRangeEjyy
__ZN15IOWatchDogTimer10gMetaClassE
__ZN15IOWatchDogTimer10superClassE
__ZN15IOWatchDogTimer13setPropertiesEP8OSObject
_mig_user_deallocate
_ml_io_map
_ml_phys_read
+_ml_phys_read_byte_64
+_ml_phys_read_double_64
+_ml_phys_read_half_64
+_ml_phys_read_word_64
_ml_phys_write
+_ml_phys_write_byte_64
+_ml_phys_write_double_64
+_ml_phys_write_half_64
+_ml_phys_write_word_64
_ml_probe_read
_ml_processor_register
_ml_thread_policy
kIOMemoryPhysicallyContiguous = 0x00000010,
kIOMemoryPageable = 0x00000020,
kIOMemoryPurgeable = 0x00000040,
+ kIOMemoryHostPhysicallyContiguous = 0x00000080,
kIOMemorySharingTypeMask = 0x000f0000,
kIOMemoryUnshared = 0x00000000,
kIOMemoryKernelUserShared = 0x00010000,
#endif
| kIOMemoryThreadSafe
| kIOMemoryClearEncrypt
+ | kIOMemoryMapperNone
};
-#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_ 1
+#define _IOBUFFERMEMORYDESCRIPTOR_INTASKWITHOPTIONS_ 1
+#define _IOBUFFERMEMORYDESCRIPTOR_HOSTPHYSICALLYCONTIGUOUS_ 1
/*!
@class IOBufferMemoryDescriptor
@abstract Provides a simple memory descriptor that allocates its own buffer memory.
IOReturn IOHibernateSystemPostWake(void);
bool IOHibernateWasScreenLocked(void);
void IOHibernateSetScreenLocked(uint32_t lockState);
+void IOHibernateSystemRestart(void);
#endif /* __cplusplus */
boolean_t * encryptedswap);
kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
- hibernate_page_list_t * page_list_wired);
+ hibernate_page_list_t * page_list_wired,
+ hibernate_page_list_t * page_list_pal);
kern_return_t
hibernate_processor_setup(IOHibernateImageHeader * header);
#define kIOHibernateRTCVariablesKey "IOHibernateRTCVariables"
#define kIOHibernateSMCVariablesKey "IOHibernateSMCVariables"
-#define kIOHibernateBootSwitchVarsKey "boot-switch-vars"
+#define kIOHibernateBootSwitchVarsKey "boot-switch-vars"
+
+#define kIOHibernateBootNoteKey "boot-note"
+
#define kIOHibernateUseKernelInterpreter 0x80000000
#include <IOKit/IOService.h>
#include <IOKit/IOMemoryDescriptor.h>
+#include <IOKit/IODMACommand.h>
class OSData;
-class IODMACommand;
extern const OSSymbol * gIOMapperIDKey;
virtual ppnum_t iovmAllocDMACommand(IODMACommand * command, IOItemCount pageCount);
virtual void iovmFreeDMACommand(IODMACommand * command, ppnum_t addr, IOItemCount pageCount);
+ virtual ppnum_t iovmMapMemory(
+ OSObject * memory, // dma command or iomd
+ ppnum_t offsetPage,
+ ppnum_t pageCount,
+ uint32_t options,
+ upl_page_info_t * pageList,
+ const IODMAMapSpecification * mapSpecification);
+
OSMetaClassDeclareReservedUsed(IOMapper, 0);
OSMetaClassDeclareReservedUsed(IOMapper, 1);
OSMetaClassDeclareReservedUsed(IOMapper, 2);
+ OSMetaClassDeclareReservedUsed(IOMapper, 3);
private:
- OSMetaClassDeclareReservedUnused(IOMapper, 3);
OSMetaClassDeclareReservedUnused(IOMapper, 4);
OSMetaClassDeclareReservedUnused(IOMapper, 5);
OSMetaClassDeclareReservedUnused(IOMapper, 6);
class IOMemoryMap;
class IOMapper;
+class IOService;
/*
* Direction of transfer, with respect to the described memory.
kIOMemoryAsReference = 0x00000100,
kIOMemoryBufferPageable = 0x00000400,
- kIOMemoryMapperNone = 0x00000800,
+ kIOMemoryMapperNone = 0x00000800, // Shared with Buffer MD
+ kIOMemoryHostOnly = 0x00001000, // Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
kIOMemoryRedirected = 0x00004000,
kIOMemoryPreparedReadOnly = 0x00008000,
#define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1
+struct IODMAMapSpecification
+{
+ uint64_t alignment;
+ IOService * device;
+ uint32_t options;
+ uint8_t numAddressBits;
+ uint8_t resvA[3];
+ uint32_t resvB[4];
+};
+
+enum
+{
+ kIODMAMapWriteAccess = 0x00000002,
+ kIODMAMapPhysicallyContiguous = 0x00000010,
+ kIODMAMapDeviceMemory = 0x00000020,
+ kIODMAMapPagingPath = 0x00000040,
+ kIODMAMapIdentityMap = 0x00000080,
+};
+
+
enum
{
kIOPreparationIDUnprepared = 0,
#ifdef XNU_KERNEL_PRIVATE
IOMemoryDescriptorReserved * getKernelReserved( void );
+ IOReturn dmaMap(
+ IOMapper * mapper,
+ const IODMAMapSpecification * mapSpec,
+ uint64_t offset,
+ uint64_t length,
+ uint64_t * address,
+ ppnum_t * mapPages);
#endif
private:
IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */
+ IOReturn wireRange(
+ uint32_t options,
+ mach_vm_size_t offset,
+ mach_vm_size_t length);
+
OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
virtual uint64_t getPreparationID( void );
+#ifdef XNU_KERNEL_PRIVATE
+ // Internal APIs may be made virtual at some time in the future.
+ IOReturn wireVirtual(IODirection forDirection);
+ IOReturn dmaMap(
+ IOMapper * mapper,
+ const IODMAMapSpecification * mapSpec,
+ uint64_t offset,
+ uint64_t length,
+ uint64_t * address,
+ ppnum_t * mapPages);
+ bool initMemoryEntries(size_t size, IOMapper * mapper);
+#endif
+
private:
#ifndef __LP64__
virtual void unmapFromKernel();
#endif /* !__LP64__ */
- // Internal APIs may be made virtual at some time in the future.
- IOReturn wireVirtual(IODirection forDirection);
void *createNamedEntry();
// Internal
* false == Retain FV key when going to standby mode
* not present == Retain FV key when going to standby mode
*/
-#define kIOPMDestroyFVKeyOnStandbyKey "DestroyFVKeyOnStandby"
+#define kIOPMDestroyFVKeyOnStandbyKey "DestroyFVKeyOnStandby"
/*******************************************************************************
*
*/
kIOPMDriverAssertionPreventDisplaySleepBit = 0x40,
- kIOPMDriverAssertionReservedBit7 = 0x80
+ /*! kIOPMDriverAssertionReservedBit7
+ * Reserved for storage family.
+ */
+ kIOPMDriverAssertionReservedBit7 = 0x80,
+
+ /*! kIOPMDriverAssertionMagicPacketWakeEnabledBit
+ * When set, driver is informing PM that magic packet wake is enabled.
+ */
+ kIOPMDriverAssertionMagicPacketWakeEnabledBit = 0x100
};
/* kIOPMAssertionsDriverKey
/* @constant kIOPMTimelineDictionaryKey
* @abstract RootDomain key for dictionary describing Timeline's info
*/
-#define kIOPMTimelineDictionaryKey "PMTimelineLogging"
+#define kIOPMTimelineDictionaryKey "PMTimelineLogging"
/* @constant kIOPMTimelineEnabledKey
* @abstract Boolean value indicating whether the system is recording PM events.
* @discussion Key may be found in the dictionary at IOPMrootDomain's property
* kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
*/
-#define kIOPMTimelineEnabledKey "TimelineEnabled"
+#define kIOPMTimelineEnabledKey "TimelineEnabled"
/* @constant kIOMPTimelineSystemNumberTrackedKey
* @abstract The maximum number of system power events the system may record.
* @discussion Key may be found in the dictionary at IOPMrootDomain's property
* kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
*/
-#define kIOPMTimelineSystemNumberTrackedKey "TimelineSystemEventsTracked"
+#define kIOPMTimelineSystemNumberTrackedKey "TimelineSystemEventsTracked"
/* @constant kIOPMTimelineSystemBufferSizeKey
* @abstract Size in bytes of buffer recording system PM events
* @discussion Key may be found in the dictionary at IOPMrootDomain's property
* kIOPMTimelineDictionaryKey. uint32_t value; may be 0.
*/
-#define kIOPMTimelineSystemBufferSizeKey "TimelineSystemBufferSize"
+#define kIOPMTimelineSystemBufferSizeKey "TimelineSystemBufferSize"
#define kIOPMSleepWakeFailureUUIDKey "UUID"
#define kIOPMSleepWakeFailureDateKey "Date"
-/******************************************************************************/
-/* System sleep policy
- * Shared between PM root domain and platform driver.
+/*****************************************************************************
+ *
+ * Root Domain private property keys
+ *
+ *****************************************************************************/
+
+/* kIOPMAutoPowerOffEnabledKey
+ * Indicates if Auto Power Off is enabled.
+ * It has a boolean value.
+ * true == Auto Power Off is enabled
+ * false == Auto Power Off is disabled
+ * not present == Auto Power Off is not supported on this hardware
*/
+#define kIOPMAutoPowerOffEnabledKey "AutoPowerOff Enabled"
-// Platform specific property added by the platform driver.
-// An OSData that describes the system sleep policy.
-#define kIOPlatformSystemSleepPolicyKey "IOPlatformSystemSleepPolicy"
+/* kIOPMAutoPowerOffDelayKey
+ * Key refers to a CFNumberRef that represents the delay in seconds before
+ * entering the Auto Power Off state. The property is not present if Auto
+ * Power Off is unsupported.
+ */
+#define kIOPMAutoPowerOffDelayKey "AutoPowerOff Delay"
-// Root domain property updated before platform sleep.
-// An OSData that describes the system sleep parameters.
-#define kIOPMSystemSleepParametersKey "IOPMSystemSleepParameters"
+/*****************************************************************************
+ *
+ * System Sleep Policy
+ *
+ *****************************************************************************/
-struct IOPMSystemSleepParameters
+#define kIOPMSystemSleepPolicySignature 0x54504c53
+#define kIOPMSystemSleepPolicyVersion 2
+
+/*!
+ * @defined kIOPMSystemSleepTypeKey
+ * @abstract Indicates the type of system sleep.
+ * @discussion An OSNumber property of root domain that describes the type
+ * of system sleep. This property is set after notifying priority sleep/wake
+ * clients, but before informing interested drivers and shutting down power
+ * plane drivers.
+ */
+#define kIOPMSystemSleepTypeKey "IOPMSystemSleepType"
+
+struct IOPMSystemSleepPolicyVariables
{
- uint32_t version;
- uint32_t sleepFlags;
- uint32_t sleepTimer;
- uint32_t wakeEvents;
+ uint32_t signature; // kIOPMSystemSleepPolicySignature
+ uint32_t version; // kIOPMSystemSleepPolicyVersion
+
+ uint64_t currentCapability; // current system capability bits
+ uint64_t highestCapability; // highest system capability bits
+
+ uint64_t sleepFactors; // sleep factor bits
+ uint32_t sleepReason; // kIOPMSleepReason*
+ uint32_t sleepPhase; // identify the sleep phase
+ uint32_t hibernateMode; // current hibernate mode
+
+ uint32_t standbyDelay; // standby delay in seconds
+ uint32_t poweroffDelay; // auto-poweroff delay in seconds
+ uint32_t scheduledAlarms; // bitmask of scheduled alarm types
+
+ uint32_t reserved[50]; // pad sizeof 256 bytes
+};
+
+enum {
+ kIOPMAlarmBitDebugWake = 0x01,
+ kIOPMAlarmBitCalendarWake = 0x02,
+ kIOPMAlarmBitMaintenanceWake = 0x04,
+ kIOPMAlarmBitSleepServiceWake = 0x08
};
-// Sleep flags
enum {
- kIOPMSleepFlagHibernate = 0x00000001,
- kIOPMSleepFlagSleepTimerEnable = 0x00000002
+ kIOPMSleepPhase1 = 1,
+ kIOPMSleepPhase2
};
+// Sleep Factor Mask / Bits
+enum {
+ kIOPMSleepFactorSleepTimerWake = 0x00000001ULL,
+ kIOPMSleepFactorLidOpen = 0x00000002ULL,
+ kIOPMSleepFactorACPower = 0x00000004ULL,
+ kIOPMSleepFactorBatteryLow = 0x00000008ULL,
+ kIOPMSleepFactorStandbyNoDelay = 0x00000010ULL,
+ kIOPMSleepFactorStandbyForced = 0x00000020ULL,
+ kIOPMSleepFactorStandbyDisabled = 0x00000040ULL,
+ kIOPMSleepFactorUSBExternalDevice = 0x00000080ULL,
+ kIOPMSleepFactorBluetoothHIDDevice = 0x00000100ULL,
+ kIOPMSleepFactorExternalMediaMounted = 0x00000200ULL,
+ kIOPMSleepFactorThunderboltDevice = 0x00000400ULL,
+ kIOPMSleepFactorRTCAlarmScheduled = 0x00000800ULL,
+ kIOPMSleepFactorMagicPacketWakeEnabled = 0x00001000ULL,
+ kIOPMSleepFactorHibernateForced = 0x00010000ULL,
+ kIOPMSleepFactorAutoPowerOffDisabled = 0x00020000ULL,
+ kIOPMSleepFactorAutoPowerOffForced = 0x00040000ULL,
+ kIOPMSleepFactorExternalDisplay = 0x00080000ULL
+};
+
+// System Sleep Types
+enum {
+ kIOPMSleepTypeInvalid = 0,
+ kIOPMSleepTypeAbortedSleep = 1,
+ kIOPMSleepTypeNormalSleep = 2,
+ kIOPMSleepTypeSafeSleep = 3,
+ kIOPMSleepTypeHibernate = 4,
+ kIOPMSleepTypeStandby = 5,
+ kIOPMSleepTypePowerOff = 6,
+ kIOPMSleepTypeLast = 7
+};
+
+// System Sleep Flags
+enum {
+ kIOPMSleepFlagDisableHibernateAbort = 0x00000001,
+ kIOPMSleepFlagDisableUSBWakeEvents = 0x00000002,
+ kIOPMSleepFlagDisableBatlowAssertion = 0x00000004
+};
+
+// System Wake Events
+enum {
+ kIOPMWakeEventLidOpen = 0x00000001,
+ kIOPMWakeEventLidClose = 0x00000002,
+ kIOPMWakeEventACAttach = 0x00000004,
+ kIOPMWakeEventACDetach = 0x00000008,
+ kIOPMWakeEventCDInsert = 0x00000010,
+ kIOPMWakeEventCDEject = 0x00000020,
+ kIOPMWakeEventHPDAttach = 0x00000040,
+ kIOPMWakeEventHPDDetach = 0x00000080,
+ kIOPMWakeEventPowerButton = 0x00000100,
+ kIOPMWakeEventG3PowerOn = 0x00000200,
+ kIOPMWakeEventUserPME = 0x00000400,
+ kIOPMWakeEventSleepTimer = 0x00000800,
+ kIOPMWakeEventBatteryLow = 0x00001000,
+ kIOPMWakeEventDarkPME = 0x00002000
+};
+
+/*!
+ * @defined kIOPMSystemSleepParametersKey
+ * @abstract Sleep parameters describing the upcoming sleep
+ * @discussion Root domain updates this OSData property before system sleep
+ * to pass sleep parameters to the platform driver. Some of the parameters
+ * are based on the chosen entry in the system sleep policy table.
+ */
+#define kIOPMSystemSleepParametersKey "IOPMSystemSleepParameters"
+#define kIOPMSystemSleepParametersVersion 2
+
+struct IOPMSystemSleepParameters
+{
+ uint16_t version;
+ uint16_t reserved1;
+ uint32_t sleepType;
+ uint32_t sleepFlags;
+ uint32_t ecWakeEvents;
+ uint32_t ecWakeTimer;
+ uint32_t ecPoweroffTimer;
+ uint32_t reserved2[10];
+} __attribute__((packed));
+
+#if defined(KERNEL) && defined(__cplusplus)
+
+/*!
+ * @defined kIOPMInstallSystemSleepPolicyHandlerKey
+ * @abstract Name of the platform function to install a sleep policy handler.
+ * @discussion Pass to IOPMrootDomain::callPlatformFunction(), with a pointer
+ * to the C-function handler at param1, and an optional target at param2, to
+ * register a sleep policy handler. Only a single sleep policy handler can
+ * be installed.
+ */
+#define kIOPMInstallSystemSleepPolicyHandlerKey \
+ "IOPMInstallSystemSleepPolicyHandler"
+
+typedef IOReturn (*IOPMSystemSleepPolicyHandler)(
+ void * target,
+ const IOPMSystemSleepPolicyVariables * vars,
+ IOPMSystemSleepParameters * params );
+
+#endif /* KERNEL */
+
#endif /* ! _IOKIT_IOPMPRIVATE_H */
IONotifier * systemCapabilityNotifier;
IOPMTimeline *timeline;
-
+
typedef struct {
uint32_t pid;
uint32_t refcount;
OSSet * preventIdleSleepList;
OSSet * preventSystemSleepList;
+ UInt32 _scheduledAlarms;
+
#if HIBERNATION
clock_sec_t _standbyTimerResetSeconds;
#endif
#if HIBERNATION
bool getSleepOption( const char * key, uint32_t * option );
- bool evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int sleepPhase );
+ bool evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int phase );
void evaluateSystemSleepPolicyEarly( void );
void evaluateSystemSleepPolicyFinal( void );
#endif /* HIBERNATION */
#include <IOKit/IOMapper.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <libkern/OSDebug.h>
+#include <mach/mach_vm.h>
#include "IOKitKernelInternal.h"
+#ifdef IOALLOCDEBUG
+#include <libkern/c++/OSCPPDebug.h>
+#endif
+#include <IOKit/IOStatisticsPrivate.h>
+
+#if IOKITSTATS
+#define IOStatisticsAlloc(type, size) \
+do { \
+ IOStatistics::countAlloc(type, size); \
+} while (0)
+#else
+#define IOStatisticsAlloc(type, size)
+#endif /* IOKITSTATS */
+
+
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);
#include <vm/pmap.h>
enum
{
- kInternalFlagPhysical = 0x00000001,
- kInternalFlagPageSized = 0x00000002
+ kInternalFlagPhysical = 0x00000001,
+ kInternalFlagPageSized = 0x00000002,
+ kInternalFlagPageAllocated = 0x00000004
+};
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#if 0
+#undef assert
+#define assert(ex) \
+ ((ex) ? (void)0 : Assert(__FILE__, __LINE__, # ex))
+#endif
+
+enum
+{
+ kIOPageAllocChunkBytes = (PAGE_SIZE / 64),
+ kIOPageAllocSignature = 'iopa'
+};
+
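+/* Per-page bookkeeping for the chunked page allocator (iopa_*): one of
+ * these lives in the last kIOPageAllocChunkBytes chunk of each page it
+ * manages, and 'avail' tracks which chunks of that page are still free.
+ */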
+struct io_pagealloc_t
+{
+ queue_chain_t link;
+ uint64_t avail;
+ uint32_t signature;
};
+typedef struct io_pagealloc_t io_pagealloc_t;
+
+typedef char io_pagealloc_t_assert[(sizeof(io_pagealloc_t) <= kIOPageAllocChunkBytes) ? 1 : -1];
+
+IOSimpleLock * gIOPageAllocLock;
+queue_head_t gIOPageAllocList;
+vm_size_t gIOPageAllocCount;
+vm_size_t gIOPageAllocBytes;
+
+static io_pagealloc_t *
+iopa_allocpage(void)
+{
+ kern_return_t kr;
+ io_pagealloc_t * pa;
+ vm_address_t vmaddr = 0;
+
+ int options = 0; // KMA_LOMEM;
+ kr = kernel_memory_allocate(kernel_map, &vmaddr,
+ page_size, 0, options);
+ if (KERN_SUCCESS != kr) return (0);
+
+ bzero((void *) vmaddr, page_size);
+ pa = (typeof(pa)) (vmaddr + page_size - kIOPageAllocChunkBytes);
+
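+ // 'avail' is a bitmap of free chunks (MSB == first chunk); mark every
+ // chunk free except the last one, which holds this bookkeeping struct.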
+ pa->signature = kIOPageAllocSignature;
+ pa->avail = -2ULL;
+
+ return (pa);
+}
+
+static void
+iopa_freepage(io_pagealloc_t * pa)
+{
+ kmem_free( kernel_map, trunc_page((uintptr_t) pa), page_size);
+}
+
+static uintptr_t
+iopa_allocinpage(io_pagealloc_t * pa, uint32_t count, uint64_t align)
+{
+ uint32_t n, s;
+ uint64_t avail = pa->avail;
+
+ assert(avail);
+
+ // find strings of count 1 bits in avail
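+ // Each pass ANDs avail with itself shifted left by s, so a bit stays
+ // set only if the s chunks following it are also free.  When the loop
+ // ends, every remaining set bit marks the start (bit 63 == chunk 0) of
+ // a run of at least 'count' free chunks.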
+ for (n = count; n > 1; n -= s)
+ {
+ s = n >> 1;
+ avail = avail & (avail << s);
+ }
+ // and aligned
+ avail &= align;
+
+ if (avail)
+ {
+ n = __builtin_clzll(avail);
+ pa->avail &= ~((-1ULL << (64 - count)) >> n);
+ if (!pa->avail && pa->link.next)
+ {
+ remque(&pa->link);
+ pa->link.next = 0;
+ }
+ return (n * kIOPageAllocChunkBytes + trunc_page((uintptr_t) pa));
+ }
+
+ return (0);
+}
+
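+/* Smallest n with (1 << n) >= size; log2up(0) == log2up(1) == 0. */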
+static uint32_t
+log2up(uint32_t size)
+{
+ if (size <= 1) size = 0;
+ else size = 32 - __builtin_clz(size - 1);
+ return (size);
+}
+
+static uintptr_t
+iopa_alloc(vm_size_t bytes, uint32_t balign)
+{
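+    /* align_masks[n] marks the chunk positions (MSB == chunk 0) at which
+     * an allocation aligned to 2^n chunks may begin; the mask is chosen
+     * below with log2up() of the requested alignment in chunks.
+     */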
+ static const uint64_t align_masks[] = {
+ 0xFFFFFFFFFFFFFFFF,
+ 0xAAAAAAAAAAAAAAAA,
+ 0x8888888888888888,
+ 0x8080808080808080,
+ 0x8000800080008000,
+ 0x8000000080000000,
+ 0x8000000000000000,
+ };
+ io_pagealloc_t * pa;
+ uintptr_t addr = 0;
+ uint32_t count;
+ uint64_t align;
+
+ if (!bytes) bytes = 1;
+ count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
+ align = align_masks[log2up((balign + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes)];
+
+ IOSimpleLockLock(gIOPageAllocLock);
+ pa = (typeof(pa)) queue_first(&gIOPageAllocList);
+ while (!queue_end(&gIOPageAllocList, &pa->link))
+ {
+ addr = iopa_allocinpage(pa, count, align);
+ if (addr)
+ {
+ gIOPageAllocBytes += bytes;
+ break;
+ }
+ pa = (typeof(pa)) queue_next(&pa->link);
+ }
+ IOSimpleLockUnlock(gIOPageAllocLock);
+ if (!addr)
+ {
+ pa = iopa_allocpage();
+ if (pa)
+ {
+ addr = iopa_allocinpage(pa, count, align);
+ IOSimpleLockLock(gIOPageAllocLock);
+ if (pa->avail) enqueue_head(&gIOPageAllocList, &pa->link);
+ gIOPageAllocCount++;
+ if (addr) gIOPageAllocBytes += bytes;
+ IOSimpleLockUnlock(gIOPageAllocLock);
+ }
+ }
+
+ if (addr)
+ {
+ assert((addr & ((1 << log2up(balign)) - 1)) == 0);
+ IOStatisticsAlloc(kIOStatisticsMallocAligned, bytes);
+#if IOALLOCDEBUG
+ debug_iomalloc_size += bytes;
+#endif
+ }
+
+ return (addr);
+}
+
+static void
+iopa_free(uintptr_t addr, vm_size_t bytes)
+{
+ io_pagealloc_t * pa;
+ uint32_t count;
+ uintptr_t chunk;
+
+ if (!bytes) bytes = 1;
+
+ chunk = (addr & page_mask);
+ assert(0 == (chunk & (kIOPageAllocChunkBytes - 1)));
+
+ pa = (typeof(pa)) (addr | (page_size - kIOPageAllocChunkBytes));
+ assert(kIOPageAllocSignature == pa->signature);
+
+ count = (bytes + kIOPageAllocChunkBytes - 1) / kIOPageAllocChunkBytes;
+ chunk /= kIOPageAllocChunkBytes;
+
+ IOSimpleLockLock(gIOPageAllocLock);
+ if (!pa->avail)
+ {
+ assert(!pa->link.next);
+ enqueue_tail(&gIOPageAllocList, &pa->link);
+ }
+ pa->avail |= ((-1ULL << (64 - count)) >> chunk);
+ if (pa->avail != -2ULL) pa = 0;
+ else
+ {
+ remque(&pa->link);
+ pa->link.next = 0;
+ pa->signature = 0;
+ gIOPageAllocCount--;
+ }
+ gIOPageAllocBytes -= bytes;
+ IOSimpleLockUnlock(gIOPageAllocLock);
+ if (pa) iopa_freepage(pa);
+
+#if IOALLOCDEBUG
+ debug_iomalloc_size -= bytes;
+#endif
+ IOStatisticsAlloc(kIOStatisticsFreeAligned, bytes);
+}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
mach_vm_address_t alignment,
mach_vm_address_t physicalMask)
{
- kern_return_t kr;
- task_t mapTask = NULL;
- vm_map_t vmmap = NULL;
- mach_vm_address_t highestMask = 0;
- IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+ kern_return_t kr;
+ task_t mapTask = NULL;
+ vm_map_t vmmap = NULL;
+ mach_vm_address_t highestMask = 0;
+ IOOptionBits iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
+ IODMAMapSpecification mapSpec;
+ bool mapped = false;
+ bool needZero;
if (!capacity)
return false;
return (false);
_ranges.v64->address = 0;
_ranges.v64->length = 0;
- // make sure super::free doesn't dealloc _ranges before super::init
- _flags = kIOMemoryAsReference;
+ // make sure super::free doesn't dealloc _ranges before super::init
+ _flags = kIOMemoryAsReference;
// Grab IOMD bits from the Buffer MD options
iomdOptions |= (options & kIOBufferDescriptorMemoryFlags);
+ if (!(kIOMemoryMapperNone & options))
+ {
+ IOMapper::checkForSystemMapper();
+ mapped = (0 != IOMapper::gSystem);
+ }
+ needZero = mapped;
+
if (physicalMask && (alignment <= 1))
{
alignment = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
return false;
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.alignment = _alignment;
+ mapSpec.numAddressBits = 64;
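+    // If the buffer will be mapped by the system mapper, translate the
+    // caller's physical mask into a DMA address-bit count for the map
+    // spec and clear the mask so the backing allocation itself is not
+    // physically restricted.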
+ if (highestMask && mapped)
+ {
+ if (highestMask <= 0xFFFFFFFF)
+ mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
+ else
+ mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
+ highestMask = 0;
+ }
+
// set flags for entry + object create
vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
/* Allocate a wired-down buffer inside kernel space. */
- if ((options & kIOMemoryPhysicallyContiguous) || highestMask || (alignment > page_size))
+ bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));
+
+ if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
+ {
+ contig |= (!mapped);
+ contig |= (0 != (kIOMemoryMapperNone & options));
+#if 0
+ // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
+ contig |= true;
+#endif
+ }
+
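+ // Physically contiguous requests, physical-address-restricted requests, and
+ // alignments larger than a page all go through the restricted physical allocator.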
+ if (contig || highestMask || (alignment > page_size))
{
_internalFlags |= kInternalFlagPhysical;
if (highestMask)
_internalFlags |= kInternalFlagPageSized;
capacity = round_page(capacity);
}
- _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(capacity, highestMask, alignment,
- (0 != (options & kIOMemoryPhysicallyContiguous)));
+ _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
+ capacity, highestMask, alignment, contig);
+ }
+ else if (needZero
+ && ((capacity + alignment) <= (page_size - kIOPageAllocChunkBytes)))
+ {
+ _internalFlags |= kInternalFlagPageAllocated;
+ needZero = false;
+ _buffer = (void *) iopa_alloc(capacity, alignment);
}
else if (alignment > 1)
{
{
_buffer = IOMalloc(capacity);
}
-
if (!_buffer)
{
return false;
}
+ if (needZero) bzero(_buffer, capacity);
}
if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
{
*startAddr;
startAddr += page_size;
- }
+ }
}
}
inTask, iomdOptions, /* System mapper */ 0))
return false;
+ // give any system mapper the allocation params
+ if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec,
+ &mapSpec, sizeof(mapSpec)))
+ return false;
+
if (mapTask)
{
if (!reserved) {
}
else if (buffer)
{
- if (internalFlags & kInternalFlagPhysical)
+ if (kInternalFlagPageSized & internalFlags) size = round_page(size);
+
+ if (kInternalFlagPhysical & internalFlags)
{
- if (kInternalFlagPageSized & internalFlags)
- size = round_page(size);
IOKernelFreePhysical((mach_vm_address_t) buffer, size);
- }
+ }
+ else if (kInternalFlagPageAllocated & internalFlags)
+ {
+ iopa_free((uintptr_t) buffer, size);
+ }
else if (alignment > 1)
+ {
IOFreeAligned(buffer, size);
+ }
else
+ {
IOFree(buffer, size);
+ }
}
if (range && (kIOMemoryAsReference & flags))
IODelete(range, IOAddressRange, 1);
#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
+#include <libkern/OSDebug.h>
#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include "IOKitKernelInternal.h"
#define MAPTYPE(type) ((UInt) (type) & kTypeMask)
-#define IS_MAPPED(type) (MAPTYPE(type) == kMapped)
+#define IS_MAPPED(type) (MAPTYPE(type) != kBypassed)
#define IS_BYPASSED(type) (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)
IOMapper *mapper,
void *refCon)
{
+ IOService * device = 0;
+
if (!super::init() || !outSegFunc)
return false;
if (!maxTransferSize)
maxTransferSize--; // Set Max transfer to -1
+
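+ // A non-IOMapper object passed in the mapper argument is treated as the client
+ // device; remember it and fall back to the system mapper below.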
+ if (mapper && !OSDynamicCast(IOMapper, mapper))
+ {
+ device = mapper;
+ mapper = 0;
+ }
if (!mapper)
{
IOMapper::checkForSystemMapper();
switch (MAPTYPE(mappingOptions))
{
case kMapped: break;
- case kNonCoherent: fMapper = 0; break;
+ case kNonCoherent: /*fMapper = 0;*/ break;
case kBypassed:
if (mapper && !mapper->getBypassMask(&fBypassMask))
return false;
bzero(reserved, sizeof(IODMACommandInternal));
fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
-
+ fInternalState->fDevice = device;
+
return true;
}
if (mem) {
bzero(&fMDSummary, sizeof(fMDSummary));
- err = mem->dmaCommandOperation(
- kIOMDGetCharacteristics,
- &fMDSummary, sizeof(fMDSummary));
+ err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
+ &fMDSummary, sizeof(fMDSummary));
if (err)
return err;
ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
if ((kMapped == MAPTYPE(fMappingOptions))
- && fMapper
- && (!fNumAddressBits || (fNumAddressBits >= 31)))
- // assuming mapped space is 2G
+ && fMapper)
fInternalState->fCheckAddressing = false;
else
fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
if (autoPrepare) {
- err = prepare();
- if (err) {
- clearMemoryDescriptor();
- }
+ err = prepare();
+ if (err) {
+ clearMemoryDescriptor();
+ }
}
}
IODMACommandInternal * state = target->reserved;
- if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
+ if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperPageAlloc || !target->fMapper))
maxPhys = (1ULL << target->fNumAddressBits);
else
maxPhys = 0;
{
if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
{
+ addr64_t cpuAddr = address;
addr64_t remapAddr;
uint64_t chunk;
+ if ((kMapped == MAPTYPE(target->fMappingOptions))
+ && target->fMapper)
+ {
+ cpuAddr = target->fMapper->mapAddr(address);
+ }
+
remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
if (!state->fDoubleBuffer)
{
if (kWalkSyncIn & op)
{ // cppvNoModSnk
- copypv(remapAddr, address, chunk,
+ copypv(remapAddr, cpuAddr, chunk,
cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
}
else
{
- copypv(address, remapAddr, chunk,
+ copypv(cpuAddr, remapAddr, chunk,
cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
}
address += chunk;
if (kWalkPreflight & op)
{
- state->fMapContig = false;
state->fMisaligned = false;
state->fDoubleBuffer = false;
state->fPrepared = false;
state->fCopyNext = NULL;
state->fCopyPageAlloc = 0;
- state->fLocalMapperPageAlloc = 0;
state->fCopyPageCount = 0;
state->fNextRemapPage = NULL;
state->fCopyMD = 0;
if (!state->fDoubleBuffer)
{
kern_return_t kr;
+
+ if (fMapper) panic("fMapper copying");
+
kr = vm_page_alloc_list(state->fCopyPageCount,
KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
if (KERN_SUCCESS != kr)
}
}
}
-
- if (state->fLocalMapper)
- {
- state->fLocalMapperPageCount = atop_64(round_page(
- state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
- state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
- if (!state->fLocalMapperPageAlloc)
- {
- DEBG("IODMACommand !iovmAlloc");
- return (kIOReturnNoResources);
- }
- state->fMapContig = true;
- }
}
if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
if (kWalkComplete & op)
{
- if (state->fLocalMapperPageAlloc)
- {
- fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
- state->fLocalMapperPageAlloc = 0;
- state->fLocalMapperPageCount = 0;
- }
if (state->fCopyPageAlloc)
{
vm_page_free_list(state->fCopyPageAlloc, FALSE);
if (!maxTransferSize)
maxTransferSize--; // Set Max transfer to -1
+ if (mapper && !OSDynamicCast(IOMapper, mapper))
+ {
+ fInternalState->fDevice = mapper;
+ mapper = 0;
+ }
if (!mapper)
{
IOMapper::checkForSystemMapper();
switch (MAPTYPE(mappingOptions))
{
case kMapped: break;
- case kNonCoherent: fMapper = 0; break;
+ case kNonCoherent: break;
case kBypassed:
if (mapper && !mapper->getBypassMask(&fBypassMask))
return kIOReturnBadArgument;
state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));
state->fSourceAlignMask = fAlignMask;
- if (state->fLocalMapper)
+ if (fMapper)
state->fSourceAlignMask &= page_mask;
state->fCursor = state->fIterateOnly
|| (!state->fCheckAddressing
- && !state->fLocalMapper
&& (!state->fSourceAlignMask
|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
-
+
if (!state->fCursor)
{
IOOptionBits op = kWalkPrepare | kWalkPreflight;
op |= kWalkSyncOut;
ret = walkAll(op);
}
+
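+ /* With a mapper present, obtain the DMA mapping now: a dedicated (local) mapper
+ * allocates IOVM space directly via iovmAllocDMACommand(), while the system
+ * mapper path asks the memory descriptor to map itself (kIOMDDMAMap) so the
+ * descriptor can cache and share the mapping. */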
+ if (fMapper)
+ {
+ if (state->fLocalMapper)
+ {
+ state->fLocalMapperPageCount = atop_64(round_page(
+ state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
+ state->fLocalMapperPageAlloc = ptoa_64(fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount));
+ if (!state->fLocalMapperPageAlloc)
+ {
+ DEBG("IODMACommand !iovmAlloc");
+ return (kIOReturnNoResources);
+ }
+ state->fMapContig = true;
+ }
+ else
+ {
+ IOMDDMAMapArgs mapArgs;
+ bzero(&mapArgs, sizeof(mapArgs));
+ mapArgs.fMapper = fMapper;
+ mapArgs.fMapSpec.device = state->fDevice;
+ mapArgs.fMapSpec.alignment = fAlignMask + 1;
+ mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
+ mapArgs.fOffset = state->fPreparedOffset;
+ mapArgs.fLength = state->fPreparedLength;
+ const IOMemoryDescriptor * md = state->fCopyMD;
+ if (!md) md = fMemory;
+ ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
+ if (kIOReturnSuccess == ret)
+ {
+ state->fLocalMapperPageAlloc = mapArgs.fAlloc;
+ state->fLocalMapperPageCount = mapArgs.fAllocCount;
+ state->fMapContig = true;
+ }
+ ret = kIOReturnSuccess;
+ }
+ }
+
+
if (kIOReturnSuccess == ret)
state->fPrepared = true;
}
op |= kWalkSyncIn;
ret = walkAll(op);
}
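+ /* Undo the mapping set up in prepare(): local-mapper allocations go back through
+ * iovmFreeDMACommand(), system-mapper allocations owned by this command through
+ * iovmFree(); a zero page count means the memory descriptor owns the mapping
+ * and keeps it. */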
+ if (state->fLocalMapperPageAlloc)
+ {
+ if (state->fLocalMapper)
+ {
+ fMapper->iovmFreeDMACommand(this, atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+ }
+ else if (state->fLocalMapperPageCount)
+ {
+ fMapper->iovmFree(atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
+ }
+ state->fLocalMapperPageAlloc = 0;
+ state->fLocalMapperPageCount = 0;
+ }
+
state->fPrepared = false;
if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
return kIOReturnBadArgument;
IOMDDMAWalkSegmentArgs *state =
- (IOMDDMAWalkSegmentArgs *) fState;
+ (IOMDDMAWalkSegmentArgs *)(void *) fState;
UInt64 offset = *offsetP + internalState->fPreparedOffset;
UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;
state->fOffset = offset;
state->fLength = memLength - offset;
- if (internalState->fMapContig && (kWalkClient & op))
+ if (internalState->fMapContig && internalState->fLocalMapperPageAlloc)
{
- ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
- state->fIOVMAddr = ptoa_64(pageNum)
- + offset - internalState->fPreparedOffset;
+ state->fIOVMAddr = internalState->fLocalMapperPageAlloc + offset;
rtn = kIOReturnSuccess;
+#if 0
+ {
+ uint64_t checkOffset;
+ IOPhysicalLength segLen;
+ for (checkOffset = 0; checkOffset < state->fLength; )
+ {
+ addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
+ if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
+ {
+ panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
+ state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
+ phys, checkOffset);
+ }
+ checkOffset += page_size - (phys & page_mask);
+ }
+ }
+#endif
}
else
{
if (target->fNumAddressBits && (target->fNumAddressBits < 64)
&& ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
- && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
+ && (target->reserved->fLocalMapperPageAlloc || !target->fMapper))
{
DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
ret = kIOReturnMessageTooLarge;
#include <machine/pal_hibernate.h>
extern "C" addr64_t kvtophys(vm_offset_t va);
+extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-
+
IOReturn
IOHibernateSystemSleep(void)
{
static IOReturn
IOHibernateDone(IOHibernateVars * vars)
{
- hibernate_teardown(vars->page_list, vars->page_list_wired);
+ hibernate_teardown(vars->page_list, vars->page_list_wired, vars->page_list_pal);
if (vars->videoMapping)
{
if (vars->ioBuffer)
vars->ioBuffer->release();
bzero(&gIOHibernateHandoffPages[0], gIOHibernateHandoffPageCount * sizeof(gIOHibernateHandoffPages[0]));
- if (vars->handoffBuffer && (kIOHibernateStateWakingFromHibernate == gIOHibernateState))
+ if (vars->handoffBuffer)
{
- IOHibernateHandoff * handoff;
- bool done = false;
- for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy();
- !done;
- handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount])
+ if (kIOHibernateStateWakingFromHibernate == gIOHibernateState)
{
- HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount);
- uint8_t * data = &handoff->data[0];
- switch (handoff->type)
+ IOHibernateHandoff * handoff;
+ bool done = false;
+ for (handoff = (IOHibernateHandoff *) vars->handoffBuffer->getBytesNoCopy();
+ !done;
+ handoff = (IOHibernateHandoff *) &handoff->data[handoff->bytecount])
{
- case kIOHibernateHandoffTypeEnd:
- done = true;
- break;
+ HIBPRINT("handoff %p, %x, %x\n", handoff, handoff->type, handoff->bytecount);
+ uint8_t * data = &handoff->data[0];
+ switch (handoff->type)
+ {
+ case kIOHibernateHandoffTypeEnd:
+ done = true;
+ break;
- case kIOHibernateHandoffTypeDeviceTree:
- MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot());
- break;
-
- case kIOHibernateHandoffTypeKeyStore:
+ case kIOHibernateHandoffTypeDeviceTree:
+ MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot());
+ break;
+
+ case kIOHibernateHandoffTypeKeyStore:
#if defined(__i386__) || defined(__x86_64__)
- {
- IOBufferMemoryDescriptor *
- md = IOBufferMemoryDescriptor::withBytes(data, handoff->bytecount, kIODirectionOutIn);
- if (md)
{
- IOSetKeyStoreData(md);
+ IOBufferMemoryDescriptor *
+ md = IOBufferMemoryDescriptor::withBytes(data, handoff->bytecount, kIODirectionOutIn);
+ if (md)
+ {
+ IOSetKeyStoreData(md);
+ }
}
- }
#endif
- break;
-
- default:
- done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000));
- break;
- }
+ break;
+
+ default:
+ done = (kIOHibernateHandoffType != (handoff->type & 0xFFFF0000));
+ break;
+ }
+ }
}
vars->handoffBuffer->release();
}
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
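+/*
+ * IOHibernateSystemRestart: copy the root domain's SMC variables into a static
+ * buffer and record their length, crc32, and physical address in the /options
+ * (NVRAM) entry under kIOHibernateBootNoteKey.
+ */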
+void IOHibernateSystemRestart(void)
+{
+ static uint8_t noteStore[32] __attribute__((aligned(32)));
+ IORegistryEntry * regEntry;
+ const OSSymbol * sym;
+ OSData * noteProp;
+ OSData * data;
+ uintptr_t * smcVars;
+ uint8_t * smcBytes;
+ size_t len;
+ addr64_t element;
+
+ data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey));
+ if (!data) return;
+
+ smcVars = (typeof(smcVars)) data->getBytesNoCopy();
+ smcBytes = (typeof(smcBytes)) smcVars[1];
+ len = smcVars[0];
+ if (len > sizeof(noteStore)) len = sizeof(noteStore);
+ noteProp = OSData::withCapacity(3 * sizeof(element));
+ if (!noteProp) return;
+ element = len;
+ noteProp->appendBytes(&element, sizeof(element));
+ element = crc32(0, smcBytes, len);
+ noteProp->appendBytes(&element, sizeof(element));
+
+ bcopy(smcBytes, noteStore, len);
+ element = (addr64_t) &noteStore[0];
+ element = (element & page_mask) | ptoa_64(pmap_find_phys(kernel_pmap, element));
+ noteProp->appendBytes(&element, sizeof(element));
+
+ if (!gIOOptionsEntry)
+ {
+ regEntry = IORegistryEntry::fromPath("/options", gIODTPlane);
+ gIOOptionsEntry = OSDynamicCast(IODTNVRAM, regEntry);
+ if (regEntry && !gIOOptionsEntry)
+ regEntry->release();
+ }
+
+ sym = OSSymbol::withCStringNoCopy(kIOHibernateBootNoteKey);
+ if (gIOOptionsEntry && sym) gIOOptionsEntry->setProperty(sym, noteProp);
+ if (noteProp) noteProp->release();
+ if (sym) sym->release();
+}
+
+
+
void IOInterruptEventSource::setWorkLoop(IOWorkLoop *inWorkLoop)
{
- super::setWorkLoop(inWorkLoop);
-
- if (!provider)
- return;
-
- if ( !inWorkLoop ) {
- if (intIndex >= 0) {
- provider->unregisterInterrupt(intIndex);
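+ // Attach to the new workloop before registering the interrupt handler; when
+ // tearing down, unregister the interrupt before detaching from the workloop.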
+ if (inWorkLoop) super::setWorkLoop(inWorkLoop);
+
+ if (provider) {
+ if (!inWorkLoop) {
+ if (intIndex >= 0) {
+ provider->unregisterInterrupt(intIndex);
+ intIndex = ~intIndex;
+ }
+ } else if ((intIndex < 0) && (kIOReturnSuccess == registerInterruptHandler(provider, ~intIndex))) {
intIndex = ~intIndex;
}
- } else if ((intIndex < 0) && (kIOReturnSuccess == registerInterruptHandler(provider, ~intIndex))) {
- intIndex = ~intIndex;
}
+
+ if (!inWorkLoop) super::setWorkLoop(inWorkLoop);
}
const IOService *IOInterruptEventSource::getProvider() const
extern ppnum_t gIOLastPage;
+extern IOSimpleLock * gIOPageAllocLock;
+extern queue_head_t gIOPageAllocList;
+
/* Physical to physical copy (ints must be disabled) */
extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t size);
// Used for dedicated communications for IODMACommand
enum {
- kIOMDWalkSegments = 0x00000001,
- kIOMDFirstSegment = 0x00000002 | kIOMDWalkSegments,
- kIOMDGetCharacteristics = 0x00000004,
- kIOMDSetDMAActive = 0x00000005,
- kIOMDSetDMAInactive = 0x00000006,
- kIOMDLastDMACommandOperation
+ kIOMDWalkSegments = 0x01000000,
+ kIOMDFirstSegment = 1 | kIOMDWalkSegments,
+ kIOMDGetCharacteristics = 0x02000000,
+ kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
+ kIOMDDMAActive = 0x03000000,
+ kIOMDSetDMAActive = 1 | kIOMDDMAActive,
+ kIOMDSetDMAInactive = kIOMDDMAActive,
+ kIOMDAddDMAMapSpec = 0x04000000,
+ kIOMDDMAMap = 0x05000000,
+ kIOMDDMACommandOperationMask = 0xFF000000,
};
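+// Operation codes now occupy the top byte (kIOMDDMACommandOperationMask); the low
+// bits carry a per-operation parameter, e.g. kIOMDFirstSegment or
+// kIOMDGetCharacteristicsMapped.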
struct IOMDDMACharacteristics {
UInt64 fLength;
UInt32 fPageAlign;
ppnum_t fHighestPage;
IODirection fDirection;
- UInt8 fIsMapped, fIsPrepared;
+ UInt8 fIsPrepared;
};
struct IOMDDMAWalkSegmentArgs {
UInt64 fOffset; // Input/Output offset
};
typedef UInt8 IOMDDMAWalkSegmentState[128];
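+// Arguments for kIOMDDMAMap: the mapper, mapping constraints, and range on input;
+// the resulting IOVM allocation (fAlloc) and its page count (fAllocCount) on output.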
+struct IOMDDMAMapArgs {
+ IOMapper * fMapper;
+ IODMAMapSpecification fMapSpec;
+ uint64_t fOffset;
+ uint64_t fLength;
+ uint64_t fAlloc;
+ ppnum_t fAllocCount;
+};
+
struct IODMACommandInternal
{
IOMDDMAWalkSegmentState fState;
ppnum_t fCopyPageCount;
- ppnum_t fLocalMapperPageAlloc;
+ addr64_t fLocalMapperPageAlloc;
ppnum_t fLocalMapperPageCount;
class IOBufferMemoryDescriptor * fCopyMD;
+ IOService * fDevice;
+
// IODMAEventSource use
IOReturn fStatus;
UInt64 fActualByteCount;
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <libkern/c++/OSData.h>
+#include <libkern/OSDebug.h>
__BEGIN_DECLS
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
OSMetaClassDefineReservedUsed(IOMapper, 0);
OSMetaClassDefineReservedUsed(IOMapper, 1);
OSMetaClassDefineReservedUsed(IOMapper, 2);
-OSMetaClassDefineReservedUnused(IOMapper, 3);
+OSMetaClassDefineReservedUsed(IOMapper, 3);
OSMetaClassDefineReservedUnused(IOMapper, 4);
OSMetaClassDefineReservedUnused(IOMapper, 5);
OSMetaClassDefineReservedUnused(IOMapper, 6);
{
sMapperLock.lock();
while ((uintptr_t) IOMapper::gSystem & kWaitMask)
+ {
+ OSReportWithBacktrace("waitForSystemMapper");
sMapperLock.sleep(&IOMapper::gSystem);
+ }
sMapperLock.unlock();
}
ppnum_t IOMapper::iovmAllocDMACommand(IODMACommand * command, IOItemCount pageCount)
{
- return (0);
+ return (0);
}
void IOMapper::iovmFreeDMACommand(IODMACommand * command,
{
}
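+// Default implementation of the new iovmMapMemory() hook: performs no mapping and
+// returns 0; mapper subclasses override it to reserve IOVM space for the memory.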
+ppnum_t IOMapper::iovmMapMemory(
+ OSObject * memory, // dma command or iomd
+ ppnum_t offsetPage,
+ ppnum_t pageCount,
+ uint32_t options,
+ upl_page_info_t * pageList,
+ const IODMAMapSpecification * mapSpecification)
+{
+ return (0);
+}
+
void IOMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
ppnum_t *pageList, IOItemCount pageCount)
{
ppnum_t IOMapperInsertPage(ppnum_t addr, unsigned offset, ppnum_t page)
{
if (IOMapper::gSystem) {
+ if (!addr) panic("!addr");
IOMapper::gSystem->iovmInsert(addr, (IOItemCount) offset, page);
return addr + offset;
}
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
+#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>
#ifndef __LP64__
#define kIOMaximumMappedIOByteCount (512*1024*1024)
+#define kIOMapperWaitSystem ((IOMapper *) 1)
+
static IOMapper * gIOSystemMapper = NULL;
static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
upl_t fIOPL;
vm_address_t fPageInfo; // Pointer to page list or index into it
uint32_t fIOMDOffset; // The offset of this iopl in descriptor
- ppnum_t fMappedBase; // Page number of first page in this iopl
+ ppnum_t fMappedPage; // Page number of first page in this iopl
unsigned int fPageOffset; // Offset within first page of iopl
unsigned int fFlags; // Flags
};
struct ioGMDData {
- IOMapper *fMapper;
+ IOMapper * fMapper;
+ uint8_t fDMAMapNumAddressBits;
+ uint64_t fDMAMapAlignment;
+ addr64_t fMappedBase;
uint64_t fPreparationID;
unsigned int fPageCnt;
#if __LP64__
};
#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
-#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
+#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d) \
(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d) (&(d->fPageList[0]))
}
// Grab the appropriate mapper
+ if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
if (kIOMemoryMapperNone & options)
mapper = 0; // No Mapper
else if (mapper == kIOMapperSystem) {
ioGMDData *dataP;
unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
- if (!_memoryEntries) {
- _memoryEntries = OSData::withCapacity(dataSize);
- if (!_memoryEntries)
- return false;
- }
- else if (!_memoryEntries->initWithCapacity(dataSize))
- return false;
-
- _memoryEntries->appendBytes(0, computeDataSize(0, 0));
+ if (!initMemoryEntries(dataSize, mapper)) return (false);
dataP = getDataP(_memoryEntries);
- dataP->fMapper = mapper;
dataP->fPageCnt = 0;
// _wireCount++; // UPLs start out life wired
if (upl_get_size(iopl.fIOPL) < (count + offset))
panic("short external upl");
- // Set the flag kIOPLOnDevice convieniently equal to 1
- iopl.fFlags = pageList->device | kIOPLExternUPL;
- iopl.fIOMDOffset = 0;
-
_highestPage = upl_get_highest_page(iopl.fIOPL);
+ // Set the flag kIOPLOnDevice conveniently equal to 1
+ iopl.fFlags = pageList->device | kIOPLExternUPL;
if (!pageList->device) {
// Pre-compute the offset into the UPL's page list
pageList = &pageList[atop_32(offset)];
offset &= PAGE_MASK;
- if (mapper) {
- iopl.fMappedBase = mapper->iovmAlloc(_pages);
- mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
- }
- else
- iopl.fMappedBase = 0;
}
- else
- iopl.fMappedBase = 0;
+ iopl.fIOMDOffset = 0;
+ iopl.fMappedPage = 0;
iopl.fPageInfo = (vm_address_t) pageList;
iopl.fPageOffset = offset;
-
_memoryEntries->appendBytes(&iopl, sizeof(iopl));
}
else {
ioGMDData *dataP;
unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
- if (!_memoryEntries) {
- _memoryEntries = OSData::withCapacity(dataSize);
- if (!_memoryEntries)
- return false;
- }
- else if (!_memoryEntries->initWithCapacity(dataSize))
- return false;
-
- _memoryEntries->appendBytes(0, computeDataSize(0, 0));
+ if (!initMemoryEntries(dataSize, mapper)) return false;
dataP = getDataP(_memoryEntries);
- dataP->fMapper = mapper;
dataP->fPageCnt = _pages;
if ( (kIOMemoryPersistent & _flags) && !_memEntry)
if (!_wireCount)
return (kIOPreparationIDUnprepared);
- if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
+ if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
+ || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
{
IOMemoryDescriptor::setPreparationID();
return (IOMemoryDescriptor::getPreparationID());
IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
+ IOReturn err = kIOReturnSuccess;
+ DMACommandOps params;
+ IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
+ ioGMDData *dataP;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
+ if (kIOMDDMAMap == op)
+ {
+ if (dataSize < sizeof(IOMDDMAMapArgs))
+ return kIOReturnUnderrun;
+
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+ if (_memoryEntries && data->fMapper)
+ {
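+ /* Reuse the cached whole-descriptor mapping when it still satisfies the caller's
+ * address-bits and alignment constraints; partial ranges or tighter constraints
+ * force a fresh dmaMap(). */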
+ bool remap = false;
+ bool whole = ((data->fOffset == 0) && (data->fLength == _length));
+ dataP = getDataP(_memoryEntries);
+ if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits)
+ {
+ dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
+ remap = ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
+ }
+ if (data->fMapSpec.alignment > dataP->fDMAMapAlignment)
+ {
+ dataP->fDMAMapAlignment = data->fMapSpec.alignment;
+ remap |= (dataP->fDMAMapAlignment > page_size);
+ }
+ remap |= (!whole);
+ if (remap || !dataP->fMappedBase)
+ {
+// if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
+ err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
+ if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
+ {
+ dataP->fMappedBase = data->fAlloc;
+ data->fAllocCount = 0; // IOMD owns the alloc now
+ }
+ }
+ else
+ {
+ data->fAlloc = dataP->fMappedBase;
+ data->fAllocCount = 0; // IOMD owns the alloc
+ }
+ }
+
+ return (err);
+ }
+
+ if (kIOMDAddDMAMapSpec == op)
+ {
+ if (dataSize < sizeof(IODMAMapSpecification))
+ return kIOReturnUnderrun;
+
+ IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
+
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+ if (_memoryEntries)
+ {
+ dataP = getDataP(_memoryEntries);
+ if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
+ dataP->fDMAMapNumAddressBits = data->numAddressBits;
+ if (data->alignment > dataP->fDMAMapAlignment)
+ dataP->fDMAMapAlignment = data->alignment;
+ }
+ return kIOReturnSuccess;
+ }
+
if (kIOMDGetCharacteristics == op) {
if (dataSize < sizeof(IOMDDMACharacteristics))
else {
data->fIsPrepared = true;
data->fHighestPage = _highestPage;
- if (_memoryEntries) {
- ioGMDData *gmdData = getDataP(_memoryEntries);
- ioPLBlock *ioplList = getIOPLList(gmdData);
- UInt count = getNumIOPL(_memoryEntries, gmdData);
-
- data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
- && ioplList[0].fMappedBase);
+ if (_memoryEntries)
+ {
+ dataP = getDataP(_memoryEntries);
+ ioPLBlock *ioplList = getIOPLList(dataP);
+ UInt count = getNumIOPL(_memoryEntries, dataP);
if (count == 1)
data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
}
- else
- data->fIsMapped = false;
}
return kIOReturnSuccess;
#if IOMD_DEBUG_DMAACTIVE
- } else if (kIOMDSetDMAActive == op) {
- IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
- OSIncrementAtomic(&md->__iomd_reservedA);
- } else if (kIOMDSetDMAInactive == op) {
- IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
- if (md->__iomd_reservedA)
- OSDecrementAtomic(&md->__iomd_reservedA);
- else
- panic("kIOMDSetDMAInactive");
+ } else if (kIOMDDMAActive == op) {
+ if (params) OSIncrementAtomic(&md->__iomd_reservedA);
+ else {
+ if (md->__iomd_reservedA)
+ OSDecrementAtomic(&md->__iomd_reservedA);
+ else
+ panic("kIOMDSetDMAInactive");
+ }
#endif /* IOMD_DEBUG_DMAACTIVE */
- } else if (!(kIOMDWalkSegments & op))
+ } else if (kIOMDWalkSegments != op)
return kIOReturnBadArgument;
// Get the next segment
UInt offset = isP->fIO.fOffset;
bool mapped = isP->fIO.fMapped;
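+ // If a mapped (IOVM) address was requested and this descriptor has no DMA mapping
+ // yet, create one lazily with the system mapper before walking segments.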
+ if (IOMapper::gSystem && mapped
+ && (!(kIOMemoryHostOnly & _flags))
+ && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
+// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
+ {
+ if (!_memoryEntries
+ && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
+
+ dataP = getDataP(_memoryEntries);
+ if (dataP->fMapper)
+ {
+ IODMAMapSpecification mapSpec;
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+ mapSpec.alignment = dataP->fDMAMapAlignment;
+ err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
+ if (kIOReturnSuccess != err) return (err);
+ }
+ }
+
if (offset >= _length)
return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
// Validate the previous offset
UInt ind, off2Ind = isP->fOffset2Index;
- if ((kIOMDFirstSegment != op)
+ if (!params
&& offset
&& (offset == isP->fNextOffset || off2Ind <= offset))
ind = isP->fIndex;
UInt length;
UInt64 address;
+
+
if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
// Physical address based memory descriptor
length = off2Ind - offset;
address = physP[ind - 1].address + len - length;
- // see how far we can coalesce ranges
- while (ind < _rangesCount && address + length == physP[ind].address) {
- len = physP[ind].length;
- length += len;
- off2Ind += len;
- ind++;
+ if (true && mapped && _memoryEntries
+ && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
+ {
+ address = dataP->fMappedBase + offset;
+ }
+ else
+ {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
}
// correct contiguous check overshoot
length = off2Ind - offset;
address = physP[ind - 1].address + len - length;
- // see how far we can coalesce ranges
- while (ind < _rangesCount && address + length == physP[ind].address) {
- len = physP[ind].length;
- length += len;
- off2Ind += len;
- ind++;
+ if (true && mapped && _memoryEntries
+ && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
+ {
+ address = dataP->fMappedBase + offset;
+ }
+ else
+ {
+ // see how far we can coalesce ranges
+ while (ind < _rangesCount && address + length == physP[ind].address) {
+ len = physP[ind].length;
+ length += len;
+ off2Ind += len;
+ ind++;
+ }
}
-
// correct contiguous check overshoot
ind--;
off2Ind -= len;
- }
+ }
#endif /* !__LP64__ */
else do {
if (!_wireCount)
assert(_memoryEntries);
- ioGMDData * dataP = getDataP(_memoryEntries);
+ dataP = getDataP(_memoryEntries);
const ioPLBlock *ioplList = getIOPLList(dataP);
UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
upl_page_info_t *pageList = getPageList(dataP);
// If a mapped address is requested and this is a pre-mapped IOPL
// then just need to compute an offset relative to the mapped base.
- if (mapped && ioplInfo.fMappedBase) {
+ if (mapped && dataP->fMappedBase) {
offset += (ioplInfo.fPageOffset & PAGE_MASK);
- address = ptoa_64(ioplInfo.fMappedBase) + offset;
+ address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
continue; // Done leave do/while(false) now
}
else
{
IOMDDMAWalkSegmentState _state;
- IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
+ IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
state->fOffset = offset;
state->fLength = _length - offset;
- state->fMapped = (0 == (options & kIOMemoryMapperNone));
+ state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
if (length > origLen)
length = origLen;
}
-#ifdef __LP64__
- else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
- {
- panic("getPhysicalSegment not mapped for I/O");
- }
-#endif /* __LP64__ */
}
}
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
+ IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
+ DMACommandOps params;
+ IOReturn err;
+
+ params = (op & ~kIOMDDMACommandOperationMask & op);
+ op &= kIOMDDMACommandOperationMask;
+
if (kIOMDGetCharacteristics == op) {
if (dataSize < sizeof(IOMDDMACharacteristics))
return kIOReturnUnderrun;
data->fLength = getLength();
data->fSGCount = 0;
data->fDirection = getDirection();
- if (IOMapper::gSystem)
- data->fIsMapped = true;
data->fIsPrepared = true; // Assume prepared - fails safe
}
- else if (kIOMDWalkSegments & op) {
+ else if (kIOMDWalkSegments == op) {
if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
return kIOReturnUnderrun;
IOByteCount offset = (IOByteCount) data->fOffset;
IOPhysicalLength length;
- IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
if (data->fMapped && IOMapper::gSystem)
- data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
+ data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
else
- data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
+ data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
data->fLength = length;
}
- else
- return kIOReturnBadArgument;
+ else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
+ else if (kIOMDDMAMap == op)
+ {
+ if (dataSize < sizeof(IOMDDMAMapArgs))
+ return kIOReturnUnderrun;
+ IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
+
+ if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
+
+ err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
+ return (err);
+ }
+ else return kIOReturnBadArgument;
return kIOReturnSuccess;
}
IOOptionBits type = _flags & kIOMemoryTypeMask;
IOReturn error = kIOReturnCannotWire;
ioGMDData *dataP;
+ upl_page_info_array_t pageInfo;
ppnum_t mapBase = 0;
- IOMapper *mapper;
ipc_port_t sharedMem = (ipc_port_t) _memEntry;
assert(!_wireCount);
return kIOReturnNoResources;
dataP = getDataP(_memoryEntries);
+ IOMapper *mapper;
mapper = dataP->fMapper;
- if (mapper && _pages)
- mapBase = mapper->iovmAlloc(_pages);
-
- // Note that appendBytes(NULL) zeros the data up to the
- // desired length.
- _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
- dataP = 0; // May no longer be valid so lets not get tempted.
+ dataP->fMappedBase = 0;
if (forDirection == kIODirectionNone)
forDirection = getDirection();
#ifdef UPL_NEED_32BIT_ADDR
if (kIODirectionPrepareToPhys32 & forDirection)
- uplFlags |= UPL_NEED_32BIT_ADDR;
+ {
+ if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
+ if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
+ }
#endif
+ // Note that appendBytes(NULL) zeros the data up to the desired length.
+ _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
+ dataP = 0;
+
// Find the appropriate vm_map for the given task
vm_map_t curMap;
if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
unsigned int pageIndex = 0;
IOByteCount mdOffset = 0;
ppnum_t highestPage = 0;
+
for (UInt range = 0; range < _rangesCount; range++) {
ioPLBlock iopl;
user_addr_t startPage;
startPage = trunc_page_64(startPage);
if (mapper)
- iopl.fMappedBase = mapBase + pageIndex;
+ iopl.fMappedPage = mapBase + pageIndex;
else
- iopl.fMappedBase = 0;
+ iopl.fMappedPage = 0;
// Iterate over the current range, creating UPLs
while (numBytes) {
- dataP = getDataP(_memoryEntries);
vm_address_t kernelStart = (vm_address_t) startPage;
vm_map_t theMap;
if (curMap)
else
theMap = NULL;
- upl_page_info_array_t pageInfo = getPageList(dataP);
int ioplFlags = uplFlags;
+ dataP = getDataP(_memoryEntries);
+ pageInfo = getPageList(dataP);
upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
vm_size_t ioplSize = round_page(numBytes);
if (baseInfo->device) {
numPageInfo = 1;
iopl.fFlags = kIOPLOnDevice;
- // Don't translate device memory at all
- if (mapper && mapBase) {
- mapper->iovmFree(mapBase, _pages);
- mapBase = 0;
- iopl.fMappedBase = 0;
- }
}
else {
iopl.fFlags = 0;
- if (mapper)
- mapper->iovmInsert(mapBase, pageIndex,
- baseInfo, numPageInfo);
}
iopl.fIOMDOffset = mdOffset;
}
goto abortExit;
}
+ dataP = 0;
// Check for a multiple iopl's in one virtual range
pageIndex += numPageInfo;
startPage += ioplSize;
mdOffset += ioplSize;
iopl.fPageOffset = 0;
- if (mapper)
- iopl.fMappedBase = mapBase + pageIndex;
+ if (mapper) iopl.fMappedPage = mapBase + pageIndex;
}
else {
mdOffset += numBytes;
}
}
(void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
-
- if (mapper && mapBase)
- mapper->iovmFree(mapBase, _pages);
}
if (error == KERN_FAILURE)
return error;
}
+bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
+{
+ ioGMDData * dataP;
+ unsigned dataSize = size;
+
+ if (!_memoryEntries) {
+ _memoryEntries = OSData::withCapacity(dataSize);
+ if (!_memoryEntries)
+ return false;
+ }
+ else if (!_memoryEntries->initWithCapacity(dataSize))
+ return false;
+
+ _memoryEntries->appendBytes(0, computeDataSize(0, 0));
+ dataP = getDataP(_memoryEntries);
+
+ if (mapper == kIOMapperWaitSystem) {
+ IOMapper::checkForSystemMapper();
+ mapper = IOMapper::gSystem;
+ }
+ dataP->fMapper = mapper;
+ dataP->fPageCnt = 0;
+ dataP->fMappedBase = 0;
+ dataP->fDMAMapNumAddressBits = 64;
+ dataP->fDMAMapAlignment = 0;
+ dataP->fPreparationID = kIOPreparationIDUnprepared;
+
+ return (true);
+}
+
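+/*
+ * Generic dmaMap: walk the descriptor's physical segments once to count the pages
+ * needed (failing if the segments cannot be laid out contiguously in IOVM space),
+ * reserve that many IOVM pages from the mapper, then walk again inserting each
+ * physical page into the reserved range.
+ */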
+IOReturn IOMemoryDescriptor::dmaMap(
+ IOMapper * mapper,
+ const IODMAMapSpecification * mapSpec,
+ uint64_t offset,
+ uint64_t length,
+ uint64_t * address,
+ ppnum_t * mapPages)
+{
+ IOMDDMAWalkSegmentState walkState;
+ IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
+ IOOptionBits mdOp;
+ IOReturn ret;
+ IOPhysicalLength segLen;
+ addr64_t phys, align, pageOffset;
+ ppnum_t base, pageIndex, pageCount;
+ uint64_t index;
+ uint32_t mapOptions = 0;
+
+ if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
+
+ walkArgs->fMapped = false;
+ mdOp = kIOMDFirstSegment;
+ pageCount = 0;
+ for (index = 0; index < length; )
+ {
+ if (index && (page_mask & (index + pageOffset))) break;
+
+ walkArgs->fOffset = offset + index;
+ ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+ mdOp = kIOMDWalkSegments;
+ if (ret != kIOReturnSuccess) break;
+ phys = walkArgs->fIOVMAddr;
+ segLen = walkArgs->fLength;
+
+ align = (phys & page_mask);
+ if (!index) pageOffset = align;
+ else if (align) break;
+ pageCount += atop_64(round_page_64(align + segLen));
+ index += segLen;
+ }
+
+ if (index < length) return (kIOReturnVMError);
+
+ base = mapper->iovmMapMemory(this, offset, pageCount,
+ mapOptions, NULL, mapSpec);
+
+ if (!base) return (kIOReturnNoResources);
+
+ mdOp = kIOMDFirstSegment;
+ for (pageIndex = 0, index = 0; index < length; )
+ {
+ walkArgs->fOffset = offset + index;
+ ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
+ mdOp = kIOMDWalkSegments;
+ if (ret != kIOReturnSuccess) break;
+ phys = walkArgs->fIOVMAddr;
+ segLen = walkArgs->fLength;
+
+ ppnum_t page = atop_64(phys);
+ ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
+ while (count--)
+ {
+ mapper->iovmInsert(base, pageIndex, page);
+ page++;
+ pageIndex++;
+ }
+ index += segLen;
+ }
+ if (pageIndex != pageCount) panic("pageIndex");
+
+ *address = ptoa_64(base) + pageOffset;
+ if (mapPages) *mapPages = pageCount;
+
+ return (kIOReturnSuccess);
+}
+
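+/*
+ * Prepared general descriptors map their entire wired page list with a single
+ * iovmMapMemory() call; physical descriptors and partial-range requests fall back
+ * to the segment-walking super::dmaMap() above.
+ */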
+IOReturn IOGeneralMemoryDescriptor::dmaMap(
+ IOMapper * mapper,
+ const IODMAMapSpecification * mapSpec,
+ uint64_t offset,
+ uint64_t length,
+ uint64_t * address,
+ ppnum_t * mapPages)
+{
+ IOReturn err = kIOReturnSuccess;
+ ioGMDData * dataP;
+ IOOptionBits type = _flags & kIOMemoryTypeMask;
+
+ *address = 0;
+ if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
+
+ if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
+ || offset || (length != _length))
+ {
+ err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
+ }
+ else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
+ {
+ const ioPLBlock * ioplList = getIOPLList(dataP);
+ upl_page_info_t * pageList;
+ uint32_t mapOptions = 0;
+ ppnum_t base;
+
+ IODMAMapSpecification mapSpec;
+ bzero(&mapSpec, sizeof(mapSpec));
+ mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
+ mapSpec.alignment = dataP->fDMAMapAlignment;
+
+ // For external UPLs the fPageInfo field points directly to
+ // the upl's upl_page_info_t array.
+ if (ioplList->fFlags & kIOPLExternUPL)
+ {
+ pageList = (upl_page_info_t *) ioplList->fPageInfo;
+ mapOptions |= kIODMAMapPagingPath;
+ }
+ else
+ pageList = getPageList(dataP);
+
+ if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
+
+ // Check for direct device non-paged memory
+ if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
+
+ base = mapper->iovmMapMemory(
+ this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
+ *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
+ if (mapPages) *mapPages = _pages;
+ }
+
+ return (err);
+}
+
/*
* prepare
*
* the memory after the I/O transfer finishes. This method needn't
* called for non-pageable memory.
*/
+
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
IOReturn error = kIOReturnSuccess;
}
if (kIOReturnSuccess == error)
- _wireCount++;
-
- if (1 == _wireCount)
{
- if (kIOMemoryClearEncrypt & _flags)
- {
- performOperation(kIOMemoryClearEncrypted, 0, _length);
- }
+ if (1 == ++_wireCount)
+ {
+ if (kIOMemoryClearEncrypt & _flags)
+ {
+ performOperation(kIOMemoryClearEncrypted, 0, _length);
+ }
+ }
}
if (_prepareLock)
if (__iomd_reservedA) panic("complete() while dma active");
#endif /* IOMD_DEBUG_DMAACTIVE */
- if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
- dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
-
+ if (dataP->fMappedBase) {
+ dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
+ dataP->fMappedBase = 0;
+ }
// Only complete iopls that we created which are for TypeVirtual
if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
for (UInt ind = 0; ind < count; ind++)
return( newMapping );
}
+IOReturn IOMemoryMap::wireRange(
+ uint32_t options,
+ mach_vm_size_t offset,
+ mach_vm_size_t length)
+{
+ IOReturn kr;
+ mach_vm_address_t start = trunc_page_64(fAddress + offset);
+ mach_vm_address_t end = round_page_64(fAddress + offset + length);
+
+ if (kIODirectionOutIn & options)
+ {
+ kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
+ }
+ else
+ {
+ kr = vm_map_unwire(fAddressMap, start, end, FALSE);
+ }
+
+ return (kr);
+}
+
+
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
ptoa_64(gIOMaximumMappedIOPageCount), 64);
gIOLastPage = IOGetLastPageNumber();
+
+ gIOPageAllocLock = IOSimpleLockAlloc();
+ queue_init(&gIOPageAllocList);
}
void IOMemoryDescriptor::free( void )
kAutoWakePostWindow = 15
};
-#define kLocalEvalClamshellCommand (1 << 15)
+#define kLocalEvalClamshellCommand (1 << 15)
+#define kIdleSleepRetryInterval (3 * 60)
enum {
OFF_STATE = 0,
static uint32_t gDarkWakeFlags = kDarkWakeFlagHIDTickleNone | kDarkWakeFlagIgnoreDiskIOAlways;
static PMStatsStruct gPMStats;
+#if HIBERNATION
+static IOPMSystemSleepPolicyHandler gSleepPolicyHandler = 0;
+static IOPMSystemSleepPolicyVariables * gSleepPolicyVars = 0;
+static void * gSleepPolicyTarget;
+#endif
+
struct timeval gIOLastSleepTime;
struct timeval gIOLastWakeTime;
static SYSCTL_INT(_debug, OID_AUTO, darkwake, CTLFLAG_RW, &gDarkWakeFlags, 0, "");
+static const OSSymbol * gIOPMSettingAutoWakeCalendarKey;
static const OSSymbol * gIOPMSettingAutoWakeSecondsKey;
static const OSSymbol * gIOPMSettingDebugWakeRelativeKey;
static const OSSymbol * gIOPMSettingMaintenanceWakeCalendarKey;
super::start(nub);
gRootDomain = this;
+ gIOPMSettingAutoWakeCalendarKey = OSSymbol::withCString(kIOPMSettingAutoWakeCalendarKey);
gIOPMSettingAutoWakeSecondsKey = OSSymbol::withCString(kIOPMSettingAutoWakeSecondsKey);
gIOPMSettingDebugWakeRelativeKey = OSSymbol::withCString(kIOPMSettingDebugWakeRelativeKey);
gIOPMSettingMaintenanceWakeCalendarKey = OSSymbol::withCString(kIOPMSettingMaintenanceWakeCalendarKey);
OSSymbol::withCString(kIOPMSettingSleepOnPowerButtonKey),
gIOPMSettingAutoWakeSecondsKey,
OSSymbol::withCString(kIOPMSettingAutoPowerSecondsKey),
- OSSymbol::withCString(kIOPMSettingAutoWakeCalendarKey),
+ gIOPMSettingAutoWakeCalendarKey,
OSSymbol::withCString(kIOPMSettingAutoPowerCalendarKey),
gIOPMSettingDebugWakeRelativeKey,
OSSymbol::withCString(kIOPMSettingDebugPowerRelativeKey),
publishFeature("DisplayDims");
}
if(psIterator) {
- psIterator->release();
+ psIterator->release();
}
OSBoolean *b;
OSNumber *n;
OSDictionary *d;
- OSSymbol *type;
+ const OSSymbol *key;
OSObject *obj;
- unsigned int i;
+ OSCollectionIterator * iter = 0;
const OSSymbol *publish_simulated_battery_string = OSSymbol::withCString("SoftwareSimulatedBatteries");
const OSSymbol *boot_complete_string = OSSymbol::withCString("System Boot Complete");
const OSSymbol *suspendPMClient_string = OSSymbol::withCString(kPMSuspendedNotificationClients);
#endif
- if (!dict)
+ if (!dict)
{
return_value = kIOReturnBadArgument;
goto exit;
}
-
- if ((b = OSDynamicCast(OSBoolean, dict->getObject(publish_simulated_battery_string))))
- {
- publishResource(publish_simulated_battery_string, kOSBooleanTrue);
- }
- if ((n = OSDynamicCast(OSNumber, dict->getObject(idle_seconds_string))))
+ iter = OSCollectionIterator::withCollection(dict);
+ if (!iter)
{
- setProperty(idle_seconds_string, n);
- idleSeconds = n->unsigned32BitValue();
+ return_value = kIOReturnNoMemory;
+ goto exit;
}
- if (boot_complete_string && dict->getObject(boot_complete_string))
- {
- pmPowerStateQueue->submitPowerEvent( kPowerEventSystemBootCompleted );
- }
-
- if( battery_warning_disabled_string && dict->getObject(battery_warning_disabled_string))
- {
- setProperty( battery_warning_disabled_string, dict->getObject(battery_warning_disabled_string));
- }
-
- if (pmTimelineLogging_string && (d = OSDynamicCast(OSDictionary, dict->getObject(pmTimelineLogging_string))))
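+ // Iterate the caller's dictionary once and dispatch on each known key, instead of
+ // probing the dictionary separately for every supported setting.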
+ while ((key = (const OSSymbol *) iter->getNextObject()) &&
+ (obj = dict->getObject(key)))
{
- if (timeline && timeline->setProperties(d))
+ if (key->isEqualTo(publish_simulated_battery_string))
{
- OSDictionary *tlInfo = timeline->copyInfoDictionary();
- if (tlInfo) {
- setProperty(kIOPMTimelineDictionaryKey, tlInfo);
- tlInfo->release();
+ if (OSDynamicCast(OSBoolean, obj))
+ publishResource(key, kOSBooleanTrue);
+ }
+ else if (key->isEqualTo(idle_seconds_string))
+ {
+ if ((n = OSDynamicCast(OSNumber, obj)))
+ {
+ setProperty(key, n);
+ idleSeconds = n->unsigned32BitValue();
+ }
+ }
+ else if (key->isEqualTo(boot_complete_string))
+ {
+ pmPowerStateQueue->submitPowerEvent(kPowerEventSystemBootCompleted);
+ }
+ else if (key->isEqualTo(sys_shutdown_string))
+ {
+ if ((b = OSDynamicCast(OSBoolean, obj)))
+ pmPowerStateQueue->submitPowerEvent(kPowerEventSystemShutdown, (void *) b);
+ }
+ else if (key->isEqualTo(battery_warning_disabled_string))
+ {
+ setProperty(key, obj);
+ }
+ else if (key->isEqualTo(pmTimelineLogging_string))
+ {
+ if ((d = OSDynamicCast(OSDictionary, obj)) &&
+ timeline && timeline->setProperties(d))
+ {
+ OSDictionary *tlInfo = timeline->copyInfoDictionary();
+ if (tlInfo) {
+ setProperty(kIOPMTimelineDictionaryKey, tlInfo);
+ tlInfo->release();
+ }
}
}
- }
-
- if( sys_shutdown_string && (b = OSDynamicCast(OSBoolean, dict->getObject(sys_shutdown_string))))
- {
- pmPowerStateQueue->submitPowerEvent(kPowerEventSystemShutdown, (void *) b);
- }
-
- if( stall_halt_string && (b = OSDynamicCast(OSBoolean, dict->getObject(stall_halt_string))) )
- {
- setProperty(stall_halt_string, b);
- }
-
#if HIBERNATION
- if ( hibernatemode_string
- && (n = OSDynamicCast(OSNumber, dict->getObject(hibernatemode_string))))
- {
- setProperty(hibernatemode_string, n);
- }
- if ( hibernatefreeratio_string
- && (n = OSDynamicCast(OSNumber, dict->getObject(hibernatefreeratio_string))))
- {
- setProperty(hibernatefreeratio_string, n);
- }
- if ( hibernatefreetime_string
- && (n = OSDynamicCast(OSNumber, dict->getObject(hibernatefreetime_string))))
- {
- setProperty(hibernatefreetime_string, n);
- }
- OSString *str;
- if ( hibernatefile_string
- && (str = OSDynamicCast(OSString, dict->getObject(hibernatefile_string))))
- {
- setProperty(hibernatefile_string, str);
- }
-#endif
-
- if( sleepdisabled_string
- && (b = OSDynamicCast(OSBoolean, dict->getObject(sleepdisabled_string))) )
- {
- setProperty(sleepdisabled_string, b);
- pmPowerStateQueue->submitPowerEvent(kPowerEventUserDisabledSleep, (void *) b);
- }
- if (ondeck_sleepwake_uuid_string
- && (obj = dict->getObject(ondeck_sleepwake_uuid_string)))
- {
- if(pmPowerStateQueue) {
+ else if (key->isEqualTo(hibernatemode_string) ||
+ key->isEqualTo(hibernatefreeratio_string) ||
+ key->isEqualTo(hibernatefreetime_string))
+ {
+ if ((n = OSDynamicCast(OSNumber, obj)))
+ setProperty(key, n);
+ }
+ else if (key->isEqualTo(hibernatefile_string))
+ {
+ OSString * str = OSDynamicCast(OSString, obj);
+ if (str) setProperty(key, str);
+ }
+#endif
+ else if (key->isEqualTo(sleepdisabled_string))
+ {
+ if ((b = OSDynamicCast(OSBoolean, obj)))
+ {
+ setProperty(key, b);
+ pmPowerStateQueue->submitPowerEvent(kPowerEventUserDisabledSleep, (void *) b);
+ }
+ }
+ else if (key->isEqualTo(ondeck_sleepwake_uuid_string))
+ {
obj->retain();
pmPowerStateQueue->submitPowerEvent(kPowerEventQueueSleepWakeUUID, (void *)obj);
}
-
- }
-
- if (loginwindow_tracepoint_string
- && (n = OSDynamicCast(OSNumber, dict->getObject(loginwindow_tracepoint_string)))
- && pmTracer)
- {
- pmTracer->traceLoginWindowPhase( n->unsigned8BitValue() );
- }
-
- if ((b = OSDynamicCast(OSBoolean, dict->getObject(kIOPMDeepSleepEnabledKey))))
- {
- setProperty(kIOPMDeepSleepEnabledKey, b);
- }
- if ((n = OSDynamicCast(OSNumber, dict->getObject(kIOPMDeepSleepDelayKey))))
- {
- setProperty(kIOPMDeepSleepDelayKey, n);
- }
-
+ else if (key->isEqualTo(loginwindow_tracepoint_string))
+ {
+ if (pmTracer && (n = OSDynamicCast(OSNumber, obj)))
+ pmTracer->traceLoginWindowPhase(n->unsigned8BitValue());
+ }
+ else if (key->isEqualTo(kIOPMDeepSleepEnabledKey) ||
+ key->isEqualTo(kIOPMDestroyFVKeyOnStandbyKey) ||
+ key->isEqualTo(kIOPMAutoPowerOffEnabledKey) ||
+ key->isEqualTo(stall_halt_string))
+ {
+ if ((b = OSDynamicCast(OSBoolean, obj)))
+ setProperty(key, b);
+ }
+ else if (key->isEqualTo(kIOPMDeepSleepDelayKey) ||
+ key->isEqualTo(kIOPMAutoPowerOffDelayKey))
+ {
+ if ((n = OSDynamicCast(OSNumber, obj)))
+ setProperty(key, n);
+ }
#if SUSPEND_PM_NOTIFICATIONS_DEBUG
- if ((n = OSDynamicCast(OSNumber, dict->getObject(suspendPMClient_string))))
- {
- // Toggle the suspended status for pid n.
- uint32_t pid_int = n->unsigned32BitValue();
- suspendPMNotificationsForPID(pid_int, !pmNotificationIsSuspended(pid_int));
- }
+ else if (key->isEqualTo(suspendPMClient_string))
+ {
+ if ((n = OSDynamicCast(OSNumber, obj)))
+ {
+ // Toggle the suspended status for pid n.
+ uint32_t pid_int = n->unsigned32BitValue();
+ suspendPMNotificationsForPID(pid_int, !pmNotificationIsSuspended(pid_int));
+ }
+ }
#endif
-
- if ((b = OSDynamicCast(OSBoolean, dict->getObject(kIOPMDestroyFVKeyOnStandbyKey))))
- {
- setProperty(kIOPMDestroyFVKeyOnStandbyKey, b);
- }
-
- // Relay our allowed PM settings onto our registered PM clients
- for(i = 0; i < allowedPMSettings->getCount(); i++) {
-
- type = (OSSymbol *)allowedPMSettings->getObject(i);
- if(!type) continue;
+ // Relay our allowed PM settings onto our registered PM clients
+ else if ((allowedPMSettings->getNextIndexOfObject(key, 0) != (unsigned int) -1))
+ {
+ if ((gIOPMSettingAutoWakeSecondsKey == key) && ((n = OSDynamicCast(OSNumber, obj))))
+ {
+ UInt32 rsecs = n->unsigned32BitValue();
+ if (!rsecs)
+ autoWakeStart = autoWakeEnd = 0;
+ else
+ {
+ AbsoluteTime deadline;
+ clock_interval_to_deadline(rsecs + kAutoWakePostWindow, kSecondScale, &deadline);
+ autoWakeEnd = AbsoluteTime_to_scalar(&deadline);
+ if (rsecs > kAutoWakePreWindow)
+ rsecs -= kAutoWakePreWindow;
+ else
+ rsecs = 0;
+ clock_interval_to_deadline(rsecs, kSecondScale, &deadline);
+ autoWakeStart = AbsoluteTime_to_scalar(&deadline);
+ }
+ }
- obj = dict->getObject(type);
- if(!obj) continue;
+ return_value = setPMSetting(key, obj);
+ if (kIOReturnSuccess != return_value)
+ break;
- if ((gIOPMSettingAutoWakeSecondsKey == type) && ((n = OSDynamicCast(OSNumber, obj))))
- {
- UInt32 rsecs = n->unsigned32BitValue();
- if (!rsecs)
- autoWakeStart = autoWakeEnd = 0;
- else
+ if (gIOPMSettingDebugWakeRelativeKey == key)
{
- AbsoluteTime deadline;
- clock_interval_to_deadline(rsecs + kAutoWakePostWindow, kSecondScale, &deadline);
- autoWakeEnd = AbsoluteTime_to_scalar(&deadline);
- if (rsecs > kAutoWakePreWindow)
- rsecs -= kAutoWakePreWindow;
- else
- rsecs = 0;
- clock_interval_to_deadline(rsecs, kSecondScale, &deadline);
- autoWakeStart = AbsoluteTime_to_scalar(&deadline);
+ if ((n = OSDynamicCast(OSNumber, obj)) &&
+ (_debugWakeSeconds = n->unsigned32BitValue()))
+ {
+ OSBitOrAtomic(kIOPMAlarmBitDebugWake, &_scheduledAlarms);
+ }
+ else
+ {
+ _debugWakeSeconds = 0;
+ OSBitAndAtomic(~kIOPMAlarmBitDebugWake, &_scheduledAlarms);
+ }
+ DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms);
+ }
+ else if (gIOPMSettingAutoWakeCalendarKey == key)
+ {
+ OSData * data;
+ if ((data = OSDynamicCast(OSData, obj)) &&
+ (data->getLength() == sizeof(IOPMCalendarStruct)))
+ {
+ const IOPMCalendarStruct * cs =
+ (const IOPMCalendarStruct *) data->getBytesNoCopy();
+
+ if (cs->year)
+ OSBitOrAtomic(kIOPMAlarmBitCalendarWake, &_scheduledAlarms);
+ else
+ OSBitAndAtomic(~kIOPMAlarmBitCalendarWake, &_scheduledAlarms);
+ DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms);
+ }
}
}
- if (gIOPMSettingDebugWakeRelativeKey == type)
+ else
{
- if ((n = OSDynamicCast(OSNumber, obj)))
- _debugWakeSeconds = n->unsigned32BitValue();
- else
- _debugWakeSeconds = 0;
+ DLOG("setProperties(%s) not handled\n", key->getCStringNoCopy());
}
-
- return_value = setPMSetting(type, obj);
-
- if(kIOReturnSuccess != return_value) goto exit;
}
exit:
#if SUSPEND_PM_NOTIFICATIONS_DEBUG
if(suspendPMClient_string) suspendPMClient_string->release();
#endif
+ if (iter) iter->release();
return return_value;
}
_lastDebugWakeSeconds = _debugWakeSeconds;
_debugWakeSeconds = 0;
+ _scheduledAlarms = 0;
// And start logging the wake event here
// TODO: Publish the wakeReason string as an integer
// Sleep canceled, clear the sleep trace point.
tracePoint(kIOPMTracePointSystemUp);
- if (idleSeconds && !wrangler)
+ if (!wrangler)
{
- // stay awake for at least idleSeconds
- startIdleSleepTimer(idleSeconds);
+ if (idleSeconds)
+ {
+ // stay awake for at least idleSeconds
+ startIdleSleepTimer(idleSeconds);
+ }
+ }
+ else if (sleepSlider && wranglerAsleep)
+ {
+ // The display wrangler is already asleep and will not trigger the
+ // next idle sleep attempt, so schedule a future attempt here (which
+ // also pushes out the next one).
+
+ startIdleSleepTimer( kIdleSleepRetryInterval );
}
+
return tellClients( kIOMessageSystemWillNotSleep );
}
// evaluateSystemSleepPolicy
//******************************************************************************
+#define kIOPlatformSystemSleepPolicyKey "IOPlatformSystemSleepPolicy"
+
+// Sleep flags
+enum {
+ kIOPMSleepFlagHibernate = 0x00000001,
+ kIOPMSleepFlagSleepTimerEnable = 0x00000002
+};
+
struct IOPMSystemSleepPolicyEntry
{
uint32_t factorMask;
uint32_t factorBits;
uint32_t sleepFlags;
uint32_t wakeEvents;
-};
+} __attribute__((packed));
struct IOPMSystemSleepPolicyTable
{
- uint8_t signature[4];
+ uint32_t signature;
uint16_t version;
uint16_t entryCount;
IOPMSystemSleepPolicyEntry entries[];
-};
-
-enum {
- kIOPMSleepFactorSleepTimerWake = 0x00000001,
- kIOPMSleepFactorLidOpen = 0x00000002,
- kIOPMSleepFactorACPower = 0x00000004,
- kIOPMSleepFactorLowBattery = 0x00000008,
- kIOPMSleepFactorDeepSleepNoDelay = 0x00000010,
- kIOPMSleepFactorDeepSleepDemand = 0x00000020,
- kIOPMSleepFactorDeepSleepDisable = 0x00000040,
- kIOPMSleepFactorUSBExternalDevice = 0x00000080,
- kIOPMSleepFactorBluetoothHIDDevice = 0x00000100,
- kIOPMSleepFactorExternalMediaMounted = 0x00000200,
- kIOPMSleepFactorDriverAssertBit5 = 0x00000400, /* Reserved for ThunderBolt */
- kIOPMSleepFactorDriverAssertBit6 = 0x00000800,
- kIOPMSleepFactorDriverAssertBit7 = 0x00001000 /* Reserved for legacy I/O */
-};
-
-enum {
- kSleepPhaseEarly, kSleepPhaseFinal
-};
+} __attribute__((packed));
-bool IOPMrootDomain::evaluateSystemSleepPolicy( IOPMSystemSleepParameters * p, int sleepPhase )
+bool IOPMrootDomain::evaluateSystemSleepPolicy(
+ IOPMSystemSleepParameters * params, int sleepPhase )
{
const IOPMSystemSleepPolicyTable * pt;
OSObject * prop = 0;
OSData * policyData;
- uint32_t currentFactors;
- uint32_t deepSleepDelay = 0;
- bool success = false;
-
- if (getProperty(kIOPMDeepSleepEnabledKey) != kOSBooleanTrue)
- return false;
-
- getSleepOption(kIOPMDeepSleepDelayKey, &deepSleepDelay);
-
- prop = getServiceRoot()->copyProperty(kIOPlatformSystemSleepPolicyKey);
- if (!prop)
- return false;
-
- policyData = OSDynamicCast(OSData, prop);
- if (!policyData ||
- (policyData->getLength() < sizeof(IOPMSystemSleepPolicyTable)))
- {
- goto done;
- }
-
- pt = (const IOPMSystemSleepPolicyTable *) policyData->getBytesNoCopy();
- if ((pt->signature[0] != 'S') ||
- (pt->signature[1] != 'L') ||
- (pt->signature[2] != 'P') ||
- (pt->signature[3] != 'T') ||
- (pt->version != 1) ||
- (pt->entryCount == 0))
+ uint64_t currentFactors = 0;
+ uint32_t standbyDelay = 0;
+ uint32_t powerOffDelay = 0;
+ uint32_t mismatch;
+ bool standbyEnabled;
+ bool powerOffEnabled;
+ bool found = false;
+
+ // Get platform's sleep policy table
+ if (!gSleepPolicyHandler)
+ {
+ prop = getServiceRoot()->copyProperty(kIOPlatformSystemSleepPolicyKey);
+ if (!prop) goto done;
+ }
+
+ // Fetch additional settings
+ standbyEnabled = (getSleepOption(kIOPMDeepSleepDelayKey, &standbyDelay)
+ && (getProperty(kIOPMDeepSleepEnabledKey) == kOSBooleanTrue));
+ powerOffEnabled = (getSleepOption(kIOPMAutoPowerOffDelayKey, &powerOffDelay)
+ && (getProperty(kIOPMAutoPowerOffEnabledKey) == kOSBooleanTrue));
+ DLOG("standby %d delay %u, powerOff %d delay %u, hibernate %u\n",
+ standbyEnabled, standbyDelay, powerOffEnabled, powerOffDelay,
+ hibernateMode);
+
+ // pmset level overrides
+ if ((hibernateMode & kIOHibernateModeOn) == 0)
{
- goto done;
+ if (!gSleepPolicyHandler)
+ {
+ standbyEnabled = false;
+ powerOffEnabled = false;
+ }
}
-
- if ((policyData->getLength() - sizeof(IOPMSystemSleepPolicyTable)) !=
- (sizeof(IOPMSystemSleepPolicyEntry) * pt->entryCount))
+ else if (!(hibernateMode & kIOHibernateModeSleep))
{
- goto done;
+ // Force hibernate (i.e. mode 25)
+ // If standby is enabled, force standby.
+ // If poweroff is enabled, force poweroff.
+ if (standbyEnabled)
+ currentFactors |= kIOPMSleepFactorStandbyForced;
+ else if (powerOffEnabled)
+ currentFactors |= kIOPMSleepFactorAutoPowerOffForced;
+ else
+ currentFactors |= kIOPMSleepFactorHibernateForced;
}
- currentFactors = 0;
+ // Current factors based on environment and assertions
+ if (sleepTimerMaintenance)
+ currentFactors |= kIOPMSleepFactorSleepTimerWake;
+ if (!clamshellClosed)
+ currentFactors |= kIOPMSleepFactorLidOpen;
+ if (acAdaptorConnected)
+ currentFactors |= kIOPMSleepFactorACPower;
+ if (lowBatteryCondition)
+ currentFactors |= kIOPMSleepFactorBatteryLow;
+ if (!standbyDelay)
+ currentFactors |= kIOPMSleepFactorStandbyNoDelay;
+ if (!standbyEnabled)
+ currentFactors |= kIOPMSleepFactorStandbyDisabled;
if (getPMAssertionLevel(kIOPMDriverAssertionUSBExternalDeviceBit) !=
kIOPMDriverAssertionLevelOff)
currentFactors |= kIOPMSleepFactorUSBExternalDevice;
if (getPMAssertionLevel(kIOPMDriverAssertionExternalMediaMountedBit) !=
kIOPMDriverAssertionLevelOff)
currentFactors |= kIOPMSleepFactorExternalMediaMounted;
- if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit5) != /* AssertionBit5 = Thunderbolt */
+ if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit5) !=
kIOPMDriverAssertionLevelOff)
- currentFactors |= kIOPMSleepFactorDriverAssertBit5;
- if (getPMAssertionLevel(kIOPMDriverAssertionReservedBit7) !=
+ currentFactors |= kIOPMSleepFactorThunderboltDevice;
+ if (_scheduledAlarms != 0)
+ currentFactors |= kIOPMSleepFactorRTCAlarmScheduled;
+ if (getPMAssertionLevel(kIOPMDriverAssertionMagicPacketWakeEnabledBit) !=
kIOPMDriverAssertionLevelOff)
- currentFactors |= kIOPMSleepFactorDriverAssertBit7;
- if (0 == deepSleepDelay)
- currentFactors |= kIOPMSleepFactorDeepSleepNoDelay;
- if (!clamshellClosed)
- currentFactors |= kIOPMSleepFactorLidOpen;
- if (acAdaptorConnected)
- currentFactors |= kIOPMSleepFactorACPower;
- if (lowBatteryCondition)
- currentFactors |= kIOPMSleepFactorLowBattery;
- if (sleepTimerMaintenance)
- currentFactors |= kIOPMSleepFactorSleepTimerWake;
+ currentFactors |= kIOPMSleepFactorMagicPacketWakeEnabled;
+ if (!powerOffEnabled)
+ currentFactors |= kIOPMSleepFactorAutoPowerOffDisabled;
+ if (desktopMode)
+ currentFactors |= kIOPMSleepFactorExternalDisplay;
- // pmset overrides
- if ((hibernateMode & kIOHibernateModeOn) == 0)
- currentFactors |= kIOPMSleepFactorDeepSleepDisable;
- else if ((hibernateMode & kIOHibernateModeSleep) == 0)
- currentFactors |= kIOPMSleepFactorDeepSleepDemand;
-
- DLOG("Sleep policy %u entries, current factors 0x%x\n",
- pt->entryCount, currentFactors);
+ DLOG("sleep factors 0x%llx\n", currentFactors);
+
+ // Clear the output params
+ bzero(params, sizeof(*params));
+
+ if (gSleepPolicyHandler)
+ {
+ if (!gSleepPolicyVars)
+ {
+ gSleepPolicyVars = IONew(IOPMSystemSleepPolicyVariables, 1);
+ if (!gSleepPolicyVars)
+ goto done;
+ bzero(gSleepPolicyVars, sizeof(*gSleepPolicyVars));
+ }
+ gSleepPolicyVars->signature = kIOPMSystemSleepPolicySignature;
+ gSleepPolicyVars->version = kIOPMSystemSleepPolicyVersion;
+ if (kIOPMSleepPhase1 == sleepPhase)
+ {
+ gSleepPolicyVars->currentCapability = _currentCapability;
+ gSleepPolicyVars->highestCapability = _highestCapability;
+ gSleepPolicyVars->sleepReason = lastSleepReason;
+ gSleepPolicyVars->hibernateMode = hibernateMode;
+ gSleepPolicyVars->standbyDelay = standbyDelay;
+ gSleepPolicyVars->poweroffDelay = powerOffDelay;
+ }
+ gSleepPolicyVars->sleepFactors = currentFactors;
+ gSleepPolicyVars->sleepPhase = sleepPhase;
+ gSleepPolicyVars->scheduledAlarms = _scheduledAlarms;
+
+ if ((gSleepPolicyHandler(gSleepPolicyTarget, gSleepPolicyVars, params) !=
+ kIOReturnSuccess) || (kIOPMSleepTypeInvalid == params->sleepType) ||
+ (params->sleepType >= kIOPMSleepTypeLast) ||
+ (kIOPMSystemSleepParametersVersion != params->version))
+ {
+ MSG("sleep policy handler error\n");
+ goto done;
+ }
+
+ if ((params->sleepType >= kIOPMSleepTypeSafeSleep) &&
+ ((hibernateMode & kIOHibernateModeOn) == 0))
+ {
+ hibernateMode |= (kIOHibernateModeOn | kIOHibernateModeSleep);
+ }
+
+ DLOG("sleep params v%u, type %u, flags 0x%x, wake 0x%x, timer %u, poweroff %u\n",
+ params->version, params->sleepType, params->sleepFlags,
+ params->ecWakeEvents, params->ecWakeTimer, params->ecPoweroffTimer);
+ found = true;
+ goto done;
+ }
+
+ // Policy table is meaningless without standby enabled
+ if (!standbyEnabled)
+ goto done;
+
+ // Validate the sleep policy table
+ policyData = OSDynamicCast(OSData, prop);
+ if (!policyData || (policyData->getLength() <= sizeof(IOPMSystemSleepPolicyTable)))
+ goto done;
+
+ pt = (const IOPMSystemSleepPolicyTable *) policyData->getBytesNoCopy();
+ if ((pt->signature != kIOPMSystemSleepPolicySignature) ||
+ (pt->version != 1) || (0 == pt->entryCount))
+ goto done;
+
+ if (((policyData->getLength() - sizeof(IOPMSystemSleepPolicyTable)) !=
+ (sizeof(IOPMSystemSleepPolicyEntry) * pt->entryCount)))
+ goto done;
for (uint32_t i = 0; i < pt->entryCount; i++)
{
- const IOPMSystemSleepPolicyEntry * policyEntry = &pt->entries[i];
+ const IOPMSystemSleepPolicyEntry * entry = &pt->entries[i];
+ mismatch = (((uint32_t)currentFactors ^ entry->factorBits) & entry->factorMask);
+
+ DLOG("mask 0x%08x, bits 0x%08x, flags 0x%08x, wake 0x%08x, mismatch 0x%08x\n",
+ entry->factorMask, entry->factorBits,
+ entry->sleepFlags, entry->wakeEvents, mismatch);
+ if (mismatch)
+ continue;
- DLOG("factor mask 0x%08x, bits 0x%08x, flags 0x%08x, wake 0x%08x\n",
- policyEntry->factorMask, policyEntry->factorBits,
- policyEntry->sleepFlags, policyEntry->wakeEvents);
+ DLOG("^ found match\n");
+ found = true;
- if ((currentFactors ^ policyEntry->factorBits) & policyEntry->factorMask)
- continue; // mismatch, try next
+ params->version = kIOPMSystemSleepParametersVersion;
+ params->reserved1 = 1;
+ if (entry->sleepFlags & kIOPMSleepFlagHibernate)
+ params->sleepType = kIOPMSleepTypeStandby;
+ else
+ params->sleepType = kIOPMSleepTypeNormalSleep;
- if (p)
+ params->ecWakeEvents = entry->wakeEvents;
+ if (entry->sleepFlags & kIOPMSleepFlagSleepTimerEnable)
{
- p->version = 1;
- p->sleepFlags = policyEntry->sleepFlags;
- p->sleepTimer = 0;
- p->wakeEvents = policyEntry->wakeEvents;
- if (p->sleepFlags & kIOPMSleepFlagSleepTimerEnable)
+ if (kIOPMSleepPhase2 == sleepPhase)
{
- if (kSleepPhaseFinal == sleepPhase)
+ clock_sec_t now_secs = gIOLastSleepTime.tv_sec;
+
+ if (!_standbyTimerResetSeconds ||
+ (now_secs <= _standbyTimerResetSeconds))
+ {
+ // Reset standby timer adjustment
+ _standbyTimerResetSeconds = now_secs;
+ DLOG("standby delay %u, reset %u\n",
+ standbyDelay, (uint32_t) _standbyTimerResetSeconds);
+ }
+ else if (standbyDelay)
{
- clock_sec_t now_secs = gIOLastSleepTime.tv_sec;
+ // Shorten the standby delay timer
+ clock_sec_t elapsed = now_secs - _standbyTimerResetSeconds;
+ if (standbyDelay > elapsed)
+ standbyDelay -= elapsed;
+ else
+ standbyDelay = 1; // must be > 0
- if (!_standbyTimerResetSeconds ||
- (now_secs <= _standbyTimerResetSeconds))
- {
- // Reset standby timer adjustment
- _standbyTimerResetSeconds = now_secs;
- DLOG("standby delay %u, reset %u\n",
- deepSleepDelay, (uint32_t) _standbyTimerResetSeconds);
- }
- else if (deepSleepDelay)
- {
- // Shorten the standby delay timer
- clock_sec_t elapsed = now_secs - _standbyTimerResetSeconds;
- if (deepSleepDelay > elapsed)
- deepSleepDelay -= elapsed;
- else
- deepSleepDelay = 1; // must be > 0
-
- DLOG("standby delay %u, elapsed %u\n",
- deepSleepDelay, (uint32_t) elapsed);
- }
+ DLOG("standby delay %u, elapsed %u\n",
+ standbyDelay, (uint32_t) elapsed);
}
- p->sleepTimer = deepSleepDelay;
}
- else if (kSleepPhaseFinal == sleepPhase)
- {
- // A sleep that does not enable the sleep timer will reset
- // the standby delay adjustment.
- _standbyTimerResetSeconds = 0;
- }
+ params->ecWakeTimer = standbyDelay;
+ }
+ else if (kIOPMSleepPhase2 == sleepPhase)
+ {
+ // A sleep that does not enable the sleep timer will reset
+ // the standby delay adjustment.
+ _standbyTimerResetSeconds = 0;
}
-
- DLOG("matched policy entry %u\n", i);
- success = true;
break;
}
if (prop)
prop->release();
- return success;
+ return found;
}
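The table walk above reduces each entry to a mask-and-compare: factorMask selects which sleep factors the entry cares about, and factorBits gives the values those factors must have; any selected bit that differs yields a non-zero mismatch and the entry is skipped. A small standalone sketch with made-up factor constants (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical factor bits, for illustration only. */
    #define FACTOR_LID_OPEN  0x02
    #define FACTOR_AC_POWER  0x04

    int main(void)
    {
        uint32_t currentFactors = FACTOR_AC_POWER;                    /* on AC, lid closed */
        uint32_t factorMask     = FACTOR_AC_POWER | FACTOR_LID_OPEN;  /* entry cares about both */
        uint32_t factorBits     = FACTOR_AC_POWER;                    /* wants AC power, lid closed */

        uint32_t mismatch = (currentFactors ^ factorBits) & factorMask;
        printf("mismatch 0x%x -> %s\n", mismatch, mismatch ? "skip entry" : "entry matches");
        return 0;
    }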
+static IOPMSystemSleepParameters gEarlySystemSleepParams;
+
void IOPMrootDomain::evaluateSystemSleepPolicyEarly( void )
{
- IOPMSystemSleepParameters params;
-
- // Evaluate sleep policy before driver sleep phase.
+ // Evaluate early (priority interest phase), before drivers sleep.
DLOG("%s\n", __FUNCTION__);
removeProperty(kIOPMSystemSleepParametersKey);
hibernateMode = 0;
getSleepOption(kIOHibernateModeKey, &hibernateMode);
- if (!hibernateNoDefeat &&
- evaluateSystemSleepPolicy(&params, kSleepPhaseEarly) &&
- ((params.sleepFlags & kIOPMSleepFlagHibernate) == 0))
+ // Save for late evaluation if sleep is aborted
+ bzero(&gEarlySystemSleepParams, sizeof(gEarlySystemSleepParams));
+
+ if (evaluateSystemSleepPolicy(&gEarlySystemSleepParams, kIOPMSleepPhase1))
{
- hibernateDisabled = true;
+ if (!hibernateNoDefeat &&
+ (gEarlySystemSleepParams.sleepType == kIOPMSleepTypeNormalSleep))
+ {
+ // Disable hibernate setup for normal sleep
+ hibernateDisabled = true;
+ }
}
+
+ // Publish IOPMSystemSleepType
+ uint32_t sleepType = gEarlySystemSleepParams.sleepType;
+ if (sleepType == kIOPMSleepTypeInvalid)
+ {
+ // no sleep policy
+ sleepType = kIOPMSleepTypeNormalSleep;
+ if (hibernateMode & kIOHibernateModeOn)
+ sleepType = (hibernateMode & kIOHibernateModeSleep) ?
+ kIOPMSleepTypeSafeSleep : kIOPMSleepTypeHibernate;
+ }
+ else if ((sleepType == kIOPMSleepTypeStandby) &&
+ (gEarlySystemSleepParams.ecPoweroffTimer))
+ {
+ // report the lowest possible sleep state
+ sleepType = kIOPMSleepTypePowerOff;
+ }
+
+ setProperty(kIOPMSystemSleepTypeKey, sleepType, 32);
}
void IOPMrootDomain::evaluateSystemSleepPolicyFinal( void )
IOPMSystemSleepParameters params;
OSData * paramsData;
- // Evaluate sleep policy after drivers but before platform sleep.
+ // Evaluate sleep policy after sleeping drivers but before platform sleep.
DLOG("%s\n", __FUNCTION__);
- if (evaluateSystemSleepPolicy(&params, kSleepPhaseFinal))
+ if (evaluateSystemSleepPolicy(&params, kIOPMSleepPhase2))
{
if ((hibernateDisabled || hibernateAborted) &&
- (params.sleepFlags & kIOPMSleepFlagHibernate))
+ (params.sleepType != kIOPMSleepTypeNormalSleep))
{
- // Should hibernate but unable to or aborted.
- // Arm timer for a short sleep and retry or wake fully.
+ // Final evaluation picked a state requiring hibernation,
+ // but hibernate setup was skipped. Retry using the early
+ // sleep parameters.
- params.sleepFlags &= ~kIOPMSleepFlagHibernate;
- params.sleepFlags |= kIOPMSleepFlagSleepTimerEnable;
- params.sleepTimer = 1;
+ bcopy(&gEarlySystemSleepParams, ¶ms, sizeof(params));
+ params.sleepType = kIOPMSleepTypeAbortedSleep;
+ params.ecWakeTimer = 1;
hibernateNoDefeat = true;
DLOG("wake in %u secs for hibernateDisabled %d, hibernateAborted %d\n",
- params.sleepTimer, hibernateDisabled, hibernateAborted);
+ params.ecWakeTimer, hibernateDisabled, hibernateAborted);
}
else
+ {
hibernateNoDefeat = false;
+ }
paramsData = OSData::withBytes(&params, sizeof(params));
if (paramsData)
paramsData->release();
}
- if (params.sleepFlags & kIOPMSleepFlagHibernate)
+ if (params.sleepType >= kIOPMSleepTypeHibernate)
{
- // Force hibernate
+ // Disable safe sleep to force the hibernate path
gIOHibernateMode &= ~kIOHibernateModeSleep;
}
}
}
bool IOPMrootDomain::getHibernateSettings(
- uint32_t * hibernateMode,
+ uint32_t * hibernateModePtr,
uint32_t * hibernateFreeRatio,
uint32_t * hibernateFreeTime )
{
- bool ok = getSleepOption(kIOHibernateModeKey, hibernateMode);
+ // Called by IOHibernateSystemSleep() after evaluateSystemSleepPolicyEarly()
+ // has updated the hibernateDisabled flag.
+
+ bool ok = getSleepOption(kIOHibernateModeKey, hibernateModePtr);
getSleepOption(kIOHibernateFreeRatioKey, hibernateFreeRatio);
getSleepOption(kIOHibernateFreeTimeKey, hibernateFreeTime);
if (hibernateDisabled)
- *hibernateMode = 0;
- DLOG("hibernateMode 0x%x\n", *hibernateMode);
+ *hibernateModePtr = 0;
+ else if (gSleepPolicyHandler)
+ *hibernateModePtr = hibernateMode;
+ DLOG("hibernateMode 0x%x\n", *hibernateModePtr);
return ok;
}
ctx.PowerState = ON_STATE;
ctx.MessageType = kIOMessageSystemPagingOff;
IOService::updateConsoleUsers(NULL, kIOMessageSystemPagingOff);
+ IOHibernateSystemRestart();
break;
default:
else
maxPowerState = 1;
}
+ else if (actions->parameter & kPMActionsFlagIsGraphicsDevice)
+ {
+ maxPowerState = 1;
+ }
}
else
{
if (kPMCalendarTypeMaintenance == calendar->selector) {
ret = setPMSetting(gIOPMSettingMaintenanceWakeCalendarKey, data);
+ if (kIOReturnSuccess == ret)
+ OSBitOrAtomic(kIOPMAlarmBitMaintenanceWake, &_scheduledAlarms);
} else
if (kPMCalendarTypeSleepService == calendar->selector)
{
ret = setPMSetting(gIOPMSettingSleepServiceWakeCalendarKey, data);
+ if (kIOReturnSuccess == ret)
+ OSBitOrAtomic(kIOPMAlarmBitSleepServiceWake, &_scheduledAlarms);
}
+ DLOG("_scheduledAlarms = 0x%x\n", (uint32_t) _scheduledAlarms);
-
data->release();
return ret;
}
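The alarm bookkeeping added here (and in the calendar-wake branch of setProperties above) is a shared bitmask updated atomically: a bit is set when an alarm of that type is scheduled, cleared when it is cancelled, and the aggregate later drives kIOPMSleepFactorRTCAlarmScheduled. A userspace sketch of the same pattern, with C11 atomics standing in for OSBitOrAtomic/OSBitAndAtomic and made-up bit names:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    enum {
        ALARM_MAINTENANCE  = 0x01,
        ALARM_SLEEPSERVICE = 0x02,
        ALARM_CALENDAR     = 0x04,
    };

    static _Atomic uint32_t scheduledAlarms;

    int main(void)
    {
        atomic_fetch_or(&scheduledAlarms, ALARM_MAINTENANCE);              /* maintenance wake scheduled */
        atomic_fetch_or(&scheduledAlarms, ALARM_CALENDAR);                 /* calendar wake scheduled */
        atomic_fetch_and(&scheduledAlarms, (uint32_t) ~ALARM_CALENDAR);    /* calendar wake cancelled */

        uint32_t alarms = atomic_load(&scheduledAlarms);
        printf("scheduledAlarms = 0x%x -> %s\n", alarms,
               alarms ? "RTC alarm factor would be set" : "no alarms pending");
        return 0;
    }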
//******************************************************************************
bool IOPMrootDomain::displayWranglerMatchPublished(
- void * target,
+ void * target,
void * refCon,
IOService * newService,
IONotifier * notifier __unused)
{
systemBooting = false;
+ if (lowBatteryCondition)
+ {
+ privateSleepSystem (kIOPMSleepReasonLowPower);
+
+ // The rest is unnecessary since the system is expected
+ // to sleep immediately. The following wake will update
+ // everything.
+ break;
+ }
+
// If lid is closed, re-send lid closed notification
// now that booting is complete.
if ( clamshellClosed )
return kIOReturnSuccess;
}
+#if HIBERNATION
+ else if (functionName &&
+ functionName->isEqualTo(kIOPMInstallSystemSleepPolicyHandlerKey))
+ {
+ if (gSleepPolicyHandler)
+ return kIOReturnExclusiveAccess;
+ if (!param1)
+ return kIOReturnBadArgument;
+ gSleepPolicyHandler = (IOPMSystemSleepPolicyHandler) param1;
+ gSleepPolicyTarget = (void *) param2;
+ setProperty("IOPMSystemSleepPolicyHandler", kOSBooleanTrue);
+ return kIOReturnSuccess;
+ }
+#endif
return super::callPlatformFunction(
functionName, waitForFunction, param1, param2, param3, param4);
// Override the mapper present flag if requested by boot arguments.
if (PE_parse_boot_argn("dart", &debugFlags, sizeof (debugFlags)) && (debugFlags == 0))
removeProperty(kIOPlatformMapperPresentKey);
-
+ if (PE_parse_boot_argn("-x", &debugFlags, sizeof (debugFlags)))
+ removeProperty(kIOPlatformMapperPresentKey);
+
// Register the presence or lack thereof a system
// PCI address mapper with the IOMapper class
IOMapper::setMapperRequired(0 != getProperty(kIOPlatformMapperPresentKey));
ok = attachToParent( getRegistryRoot(), gIOServicePlane);
}
+ if (ok && !__provider) (void) getProvider();
+
return( ok );
}
IOService * parent;
SInt32 generation;
- parent = __provider;
generation = getGenerationCount();
if( __providerGeneration == generation)
- return( parent );
+ return( __provider );
parent = (IOService *) getParentEntry( gIOServicePlane);
if( parent == IORegistryEntry::getRegistryRoot())
parent = 0;
self->__provider = parent;
- // save the count before getParentEntry()
+ OSMemoryBarrier();
+ // save the count from before the call to getParentEntry()
self->__providerGeneration = generation;
return( parent );
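The provider cache above depends on ordering: the __provider pointer must become visible before the __providerGeneration stamp that validates it, which is what the OSMemoryBarrier() provides; otherwise a concurrent reader could pair a fresh generation with a stale pointer. A standalone sketch of the same publish-then-stamp pattern (C11 release/acquire ordering plays the barrier's role; names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct provider { const char *name; };

    static struct provider *cachedProvider;          /* payload, written first */
    static _Atomic int      cachedGeneration = -1;   /* validity stamp, published last */

    static void publish(struct provider *p, int generation)
    {
        cachedProvider = p;
        /* Release ordering: the pointer store above becomes visible
         * before the generation store below (the OSMemoryBarrier role). */
        atomic_store_explicit(&cachedGeneration, generation, memory_order_release);
    }

    static struct provider *lookup(int generation)
    {
        if (atomic_load_explicit(&cachedGeneration, memory_order_acquire) == generation)
            return cachedProvider;   /* cache hit: stamp matches current generation */
        return NULL;                 /* cache miss: caller re-resolves and republishes */
    }

    int main(void)
    {
        struct provider root = { "root" };
        publish(&root, 7);
        struct provider *p = lookup(7);
        printf("lookup -> %s\n", p ? p->name : "(miss)");
        return 0;
    }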
#include <libkern/c++/OSLib.h>
#include <libkern/OSAtomic.h>
-#include <IOKit/pwr_mgt/RootDomain.h>
-#include <IOKit/IOMessage.h>
#include <IOKit/IOLib.h>
__BEGIN_DECLS
OPTIONS/hibernation optional hibernation
OPTIONS/networking optional networking
OPTIONS/crypto optional crypto
+OPTIONS/allcrypto optional allcrypto
OPTIONS/zlib optional zlib
# libkern
extern void acpi_sleep_cpu(acpi_sleep_callback, void * refcon);
extern void acpi_wake_prot(void);
#endif
+extern kern_return_t IOCPURunPlatformQuiesceActions(void);
+extern kern_return_t IOCPURunPlatformActiveActions(void);
extern void fpinit(void);
unsigned int save_kdebug_enable = 0;
static uint64_t acpi_sleep_abstime;
-
#if CONFIG_SLEEP
static void
acpi_hibernate(void *refcon)
}
kdebug_enable = 0;
+ IOCPURunPlatformQuiesceActions();
+
acpi_sleep_abstime = mach_absolute_time();
(data->func)(data->refcon);
kdebug_enable = save_kdebug_enable;
+ IOCPURunPlatformActiveActions();
+
if (did_hibernate) {
my_tsc = (now >> 32) | (now << 32);
* Allocate an object.
* Conditions:
* Nothing locked. If successful, the object is returned locked.
+ * The space is write locked on successful return.
* The caller doesn't get a reference for the object.
* Returns:
* KERN_SUCCESS The object is allocated.
ipc_entry_modified(space, *namep, entry);
io_lock(object);
- is_write_unlock(space);
object->io_references = 1; /* for entry, not caller */
object->io_bits = io_makebits(TRUE, otype, 0);
if (kr != KERN_SUCCESS)
return kr;
- /* port is locked */
-
+ /* port and space are locked */
ipc_port_init(port, space, name);
#if MACH_ASSERT
ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */
+ /* unlock space after init */
+ is_write_unlock(space);
+
#if CONFIG_MACF_MACH
task_t issuer = current_task();
tasklabel_lock2 (issuer, space->is_task);
&name, (ipc_object_t *) &pset);
if (kr != KERN_SUCCESS)
return kr;
- /* pset is locked */
+ /* pset and space are locked */
pset->ips_local_name = name;
ipc_mqueue_init(&pset->ips_messages, TRUE /* set */);
+ is_write_unlock(space);
*namep = name;
*psetp = pset;
}
iin->iin_urefs = IE_BITS_UREFS(bits);
- iin->iin_object = (natural_t)(uintptr_t)entry->ie_object;
+ iin->iin_object = (natural_t)VM_KERNEL_ADDRPERM((uintptr_t)entry->ie_object);
iin->iin_next = entry->ie_next;
iin->iin_hash = entry->ie_index;
}
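The change above stops handing the raw ie_object pointer to user space; VM_KERNEL_ADDRPERM maps it to an opaque but stable token, commonly by adding a per-boot random offset, so values stay comparable within a boot without disclosing real kernel addresses. The exact transformation is defined in the kernel's VM headers; the toy sketch below only illustrates the offset idea:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for a per-boot random offset. */
    static const uintptr_t addr_perm = 0x5a5a1000;

    static uintptr_t obfuscate(uintptr_t kaddr)
    {
        return kaddr ? kaddr + addr_perm : 0;   /* NULL stays NULL; everything else is shifted */
    }

    int main(void)
    {
        uintptr_t object = (uintptr_t) 0x12345678;
        printf("raw %#lx -> exported %#lx\n",
               (unsigned long) object, (unsigned long) obfuscate(object));
        return 0;
    }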
for (index = 0; index < tsize; index++) {
ipc_entry_t ientry = &table[index];
+ ipc_port_t port = (ipc_port_t) ientry->ie_object;
- if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
- ipc_port_t port =
- (ipc_port_t) ientry->ie_object;
-
+ if (ientry->ie_bits & MACH_PORT_TYPE_RECEIVE &&
+ port->ip_pset_count > 0) {
mach_port_gst_helper(pset, port,
maxnames, names, &actual);
}
kern_return_t
hibernate_teardown(hibernate_page_list_t * page_list,
- hibernate_page_list_t * page_list_wired)
+ hibernate_page_list_t * page_list_wired,
+ hibernate_page_list_t * page_list_pal)
{
hibernate_free_gobble_pages();
kfree(page_list, page_list->list_size);
if (page_list_wired)
kfree(page_list_wired, page_list_wired->list_size);
+ if (page_list_pal)
+ kfree(page_list_pal, page_list_pal->list_size);
return (KERN_SUCCESS);
}
#include <mach/mach_time.h>
#include <mach/mach_types.h>
+#include <libkern/version.h>
+
/****************************************************************************
* The four main object types
*
* KERN_RESOURCE_SHORTAGE if the kernel lacks the resources to register another performance monitor
* driver, KERN_INVALID_ARGUMENT if one or both of the arguments is null
*/
+
+/* Prevent older AppleProfileFamily kexts from loading on newer kernels.
+ * Alas, the C preprocessor offers no cleaner way to concatenate the version
+ * number than the two-level macro expansion below.
+ */
+#define PERF_REG_NAME1(a, b) a ## b
+#define PERF_REG_NAME(a, b) PERF_REG_NAME1(a, b)
+#define perf_monitor_register PERF_REG_NAME(perf_monitor_register_, VERSION_MAJOR)
+
kern_return_t perf_monitor_register(perf_monitor_object_t monitor, perf_monitor_methods_t *methods);
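The effect of the macros above is to rename the registration entry point per kernel major version: with VERSION_MAJOR coming from libkern/version.h, perf_monitor_register really declares perf_monitor_register_<major>, so a kext built against a different major version fails to resolve the symbol at load time. A tiny sketch of the same two-level token pasting, with a hypothetical version number:

    #include <stdio.h>

    #define VERSION_MAJOR 13                       /* hypothetical major version */
    #define REG_NAME1(a, b) a ## b
    #define REG_NAME(a, b)  REG_NAME1(a, b)        /* extra level forces VERSION_MAJOR to expand first */
    #define perf_register   REG_NAME(perf_register_, VERSION_MAJOR)

    /* The definition below is really perf_register_13(...). */
    static int perf_register(int monitor) { return monitor; }

    int main(void)
    {
        printf("registered: %d (symbol perf_register_%d)\n", perf_register(1), VERSION_MAJOR);
        return 0;
    }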
/*!fn
* delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
*/
-boolean_t thread_is_io_throttled(void);
+extern boolean_t thread_is_io_throttled(void);
+extern void throttle_lowpri_io(int);
uint64_t vm_hard_throttle_threshold;
*top_page = first_m;
if (type_of_fault)
*type_of_fault = DBG_GUARD_FAULT;
+ thread_interrupt_level(interruptible_state);
return VM_FAULT_SUCCESS;
} else {
/*
done:
thread_interrupt_level(interruptible_state);
+ throttle_lowpri_io(TRUE);
+
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
(int)((uint64_t)vaddr >> 32),
if (!mapped_needs_copy) {
if (vm_map_lock_read_to_write(map)) {
vm_map_lock_read(map);
- /* XXX FBDP: entry still valid ? */
- if(*real_map == entry->object.sub_map)
- *real_map = map;
+ *real_map = map;
goto RetryLookup;
}
vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
cow_sub_map_parent = map;
/* reset base to map before cow object */
/* this is the map which will accept */
mapped_needs_copy = TRUE;
} else {
vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
if((cow_sub_map_parent != map) &&
(*real_map != map))
vm_map_unlock(map);
}
} else {
vm_map_lock_read(entry->object.sub_map);
+ *var_map = entry->object.sub_map;
/* leave map locked if it is a target */
/* cow sub_map above otherwise, just */
/* follow the maps down to the object */
vm_map_unlock_read(map);
}
- /* XXX FBDP: map has been unlocked, what protects "entry" !? */
- *var_map = map = entry->object.sub_map;
+ map = *var_map;
/* calculate the offset in the submap for vaddr */
local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;
static unsigned long vm_object_collapse_objects = 0;
static unsigned long vm_object_collapse_do_collapse = 0;
static unsigned long vm_object_collapse_do_bypass = 0;
-static unsigned long vm_object_collapse_delays = 0;
+
__private_extern__ void
vm_object_collapse(
register vm_object_t object,
*/
size = atop(object->vo_size);
rcount = object->resident_page_count;
+
if (rcount != size) {
vm_object_offset_t offset;
vm_object_offset_t backing_offset;
unsigned int backing_rcount;
- unsigned int lookups = 0;
/*
* If the backing object has a pager but no pagemap,
continue;
}
+ backing_offset = object->vo_shadow_offset;
+ backing_rcount = backing_object->resident_page_count;
+
+ if ( (int)backing_rcount - (int)(atop(backing_object->vo_size) - size) > (int)rcount) {
+ /*
+ * we have enough pages in the backing object to guarantee that
+ * at least 1 of them must be 'uncovered' by a resident page
+ * in the object we're evaluating, so move on and
+ * try to collapse the rest of the shadow chain
+ */
+ if (object != original_object) {
+ vm_object_unlock(object);
+ }
+ object = backing_object;
+ object_lock_type = backing_object_lock_type;
+ continue;
+ }
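A worked example with made-up numbers shows why this shortcut is safe: suppose the object spans size = 100 pages with rcount = 10 resident, and the backing object spans 120 pages with backing_rcount = 35 resident. At most 120 - 100 = 20 backing pages can fall outside the shadowed range, so at least 35 - 20 = 15 lie inside it; the object can cover at most 10 of those with its own resident pages, leaving at least 5 uncovered. Since 35 - 20 > 10, the test fires, the uncovered-page condition is guaranteed without scanning any pages, and the loop descends to the backing object immediately.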
+
/*
* If all of the pages in the backing object are
* shadowed by the parent object, the parent
*
*/
- backing_offset = object->vo_shadow_offset;
- backing_rcount = backing_object->resident_page_count;
-
#if MACH_PAGEMAP
#define EXISTS_IN_OBJECT(obj, off, rc) \
(vm_external_state_get((obj)->existence_map, \
(vm_offset_t)(off)) == VM_EXTERNAL_STATE_EXISTS || \
- ((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+ ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
#else
#define EXISTS_IN_OBJECT(obj, off, rc) \
- (((rc) && ++lookups && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
+ (((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
#endif /* MACH_PAGEMAP */
/*
* pages in the backing object, it makes sense to
* walk the backing_object's resident pages first.
*
- * NOTE: Pages may be in both the existence map and
- * resident. So, we can't permanently decrement
- * the rcount here because the second loop may
- * find the same pages in the backing object'
- * existence map that we found here and we would
- * double-decrement the rcount. We also may or
- * may not have found the
+ * NOTE: Pages may be in the existence map and/or
+ * resident, so if we don't find a dependency while
+ * walking the backing object's resident page list
+ * directly, and there is an existence map, we'll have
+ * to run the offset based 2nd pass. Because we may
+ * have to run both passes, we need to be careful
+ * not to decrement 'rcount' in the 1st pass
*/
- if (backing_rcount &&
-#if MACH_PAGEMAP
- size > ((backing_object->existence_map) ?
- backing_rcount : (backing_rcount >> 1))
-#else
- size > (backing_rcount >> 1)
-#endif /* MACH_PAGEMAP */
- ) {
+ if (backing_rcount && backing_rcount < (size / 8)) {
unsigned int rc = rcount;
vm_page_t p;
backing_rcount = backing_object->resident_page_count;
p = (vm_page_t)queue_first(&backing_object->memq);
do {
- /* Until we get more than one lookup lock */
- if (lookups > 256) {
- vm_object_collapse_delays++;
- lookups = 0;
- mutex_pause(0);
- }
-
offset = (p->offset - backing_offset);
+
if (offset < object->vo_size &&
offset != hint_offset &&
!EXISTS_IN_OBJECT(object, offset, rc)) {
p = (vm_page_t) queue_next(&p->listq);
} while (--backing_rcount);
+
if (backing_rcount != 0 ) {
/* try and collapse the rest of the shadow chain */
if (object != original_object) {
(offset + PAGE_SIZE_64 < object->vo_size) ?
(offset + PAGE_SIZE_64) : 0) != hint_offset) {
- /* Until we get more than one lookup lock */
- if (lookups > 256) {
- vm_object_collapse_delays++;
- lookups = 0;
- mutex_pause(0);
- }
-
if (EXISTS_IN_OBJECT(backing_object, offset +
backing_offset, backing_rcount) &&
!EXISTS_IN_OBJECT(object, offset, rcount)) {