{
int action = state->dts_cred.dcr_action;
+#if defined(__APPLE__)
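+ /*
+ * If the current process has marked itself P_LNOATTACH (via PT_DENY_ATTACH),
+ * it is not permitted to be observed; fail the privilege check.
+ */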
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
+ goto bad;
+#endif /* __APPLE__ */
+
if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
dtrace_priv_proc_common_zone(state) == 0)
goto bad;
static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
+#if defined(__APPLE__)
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
+ goto bad;
+#endif /* __APPLE__ */
+
if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
return (1);
dtrace_priv_proc_common_nocd())
return (1);
+#if defined(__APPLE__)
+bad:
+#endif /* __APPLE__ */
cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
return (0);
static int
dtrace_priv_proc(dtrace_state_t *state)
{
+#if defined(__APPLE__)
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
+ goto bad;
+#endif /* __APPLE__ */
+
if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
return (1);
+#if defined(__APPLE__)
+bad:
+#endif /* __APPLE__ */
cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
return (0);
if (subr == DIF_SUBR_COPYIN) {
DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
+#if !defined(__APPLE__)
dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#else
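+ /*
+ * Only copy in from user space if the consumer holds proc privilege;
+ * dtrace_priv_proc() also refuses processes marked P_LNOATTACH (see above).
+ */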
+ if (dtrace_priv_proc(state))
+ dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#endif /* __APPLE__ */
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
}
}
DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
+#if !defined(__APPLE__)
dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#else
+ if (dtrace_priv_proc(state))
+ dtrace_copyin(tupregs[0].dttk_value, dest, size);
+#endif /* __APPLE__ */
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
break;
}
}
DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
+#if !defined(__APPLE__)
dtrace_copyinstr(tupregs[0].dttk_value, dest, size);
+#else
+ if (dtrace_priv_proc(state))
+ dtrace_copyinstr(tupregs[0].dttk_value, dest, size);
+#endif /* __APPLE__ */
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
((char *)dest)[size - 1] = '\0';
now = dtrace_gethrtime(); /* must not precede dtrace_calc_thread_recent_vtime() call! */
#endif /* __APPLE__ */
+#if defined(__APPLE__)
+ /*
+ * A provider may call dtrace_probe_error() in lieu of dtrace_probe() in some circumstances.
+ * See, e.g., fasttrap_isa.c. However, the provider has no access to ECB context, so it passes
+ * NULL through "arg0" and the probe_id of the overridden probe as arg1. Detect that here
+ * and cons up a viable state (from the probe_id).
+ */
+ if (dtrace_probeid_error == id && NULL == arg0) {
+ dtrace_id_t ftp_id = (dtrace_id_t)arg1;
+ dtrace_probe_t *ftp_probe = dtrace_probes[ftp_id - 1];
+ dtrace_ecb_t *ftp_ecb = ftp_probe->dtpr_ecb;
+
+ if (NULL != ftp_ecb) {
+ dtrace_state_t *ftp_state = ftp_ecb->dte_state;
+
+ arg0 = (uint64_t)(uintptr_t)ftp_state;
+ arg1 = ftp_ecb->dte_epid;
+ /*
+ * args[2-4] established by caller.
+ */
+ ftp_state->dts_arg_error_illval = -1; /* arg5 */
+ }
+ }
+#endif /* __APPLE__ */
+
mstate.dtms_probe = probe;
mstate.dtms_arg[0] = arg0;
mstate.dtms_arg[1] = arg1;
}
}
-#if defined(__APPLE__)
- /*
- * If the thread on which this probe has fired belongs to a process marked P_LNOATTACH
- * then this enabling is not permitted to observe it. Move along, nothing to see here.
- */
- if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
- continue;
- }
-#endif /* __APPLE__ */
-
if (ecb->dte_cond) {
/*
* If the dte_cond bits indicate that this
x86_saved_state_t *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());
if (is_saved_state64(tagged_regs)) {
- code = -saved_state64(tagged_regs)->rax & SYSCALL_NUMBER_MASK;
+ code = saved_state64(tagged_regs)->rax & SYSCALL_NUMBER_MASK;
} else {
code = -saved_state32(tagged_regs)->eax;
}
#include <sys/fasttrap_impl.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
+extern dtrace_id_t dtrace_probeid_error;
#include "fasttrap_regset.h"
id->fti_probe->ftp_fsize)
continue;
- if (p_model == DATAMODEL_LP64) {
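+ /*
+ * The traced process has denied attach (P_LNOATTACH); fire the error
+ * probe with a UPRIV fault instead of the real probe.
+ */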
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
+ dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id,
+ 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV);
+ } else if (p_model == DATAMODEL_LP64) {
dtrace_probe(id->fti_probe->ftp_id,
pc - id->fti_probe->ftp_faddr,
regs64->rax, regs64->rdx, 0, 0);
for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
fasttrap_probe_t *probe = id->fti_probe;
- if (id->fti_ptype == DTFTP_ENTRY) {
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
+ dtrace_probe(dtrace_probeid_error, 0 /* state */, probe->ftp_id,
+ 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV);
+ } else if (id->fti_ptype == DTFTP_ENTRY) {
/*
* We note that this was an entry
* probe to help ustack() find the
for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
fasttrap_probe_t *probe = id->fti_probe;
- if (id->fti_ptype == DTFTP_ENTRY) {
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
+ dtrace_probe(dtrace_probeid_error, 0 /* state */, probe->ftp_id,
+ 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV);
+ } else if (id->fti_ptype == DTFTP_ENTRY) {
/*
* We note that this was an entry
* probe to help ustack() find the
#include <mach/task.h>
#include <vm/pmap.h>
#include <vm/vm_map.h> /* All the bits we care about are guarded by MACH_KERNEL_PRIVATE :-( */
+extern dtrace_id_t dtrace_probeid_error;
#define proc_t struct proc
continue; /* Yeah, skip this one... */
DTRACE_CPUFLAG_SET(CPU_DTRACE_USTACK_FP);
- dtrace_probe(id->fti_probe->ftp_id,
- pc - id->fti_probe->ftp_faddr,
- sv->save_r3, sv->save_r4, 0, 0);
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
+ dtrace_probe(dtrace_probeid_error, 0 /* state */,
+ id->fti_probe->ftp_id, 1 /* ndx */, -1 /* offset */,
+ DTRACEFLT_UPRIV);
+ } else {
+ dtrace_probe(id->fti_probe->ftp_id,
+ pc - id->fti_probe->ftp_faddr,
+ sv->save_r3, sv->save_r4, 0, 0);
+ }
DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_USTACK_FP);
}
for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
fasttrap_probe_t *probe = id->fti_probe;
- if (id->fti_ptype == DTFTP_ENTRY) {
+ if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
+ dtrace_probe(dtrace_probeid_error, 0 /* state */,
+ id->fti_probe->ftp_id, 1 /* ndx */, -1 /* offset */,
+ DTRACEFLT_UPRIV);
+ } else if (id->fti_ptype == DTFTP_ENTRY) {
/*
* We note that this was an entry
* probe to help ustack() find the
u_int8_t type = DT_UNKNOWN;
u_int8_t is_mangled = 0;
u_int8_t *nameptr;
- user_addr_t uiobase = (user_addr_t)NULL;
+ user_addr_t uiobase = USER_ADDR_NULL;
size_t namelen = 0;
size_t maxnamelen;
size_t uiosize = 0;
(void) hfs_lock(cp, HFS_FORCE_LOCK);
- /*
- * Recycle named streams quickly so that the data fork vnode can
- * go inactive in a timely manner (so that it can be zero filled
- * or truncated if needed).
- */
- if (vnode_isnamedstream(vp))
- recycle = 1;
-
/*
* We should lock cnode before checking the flags in the
* condition below and should unlock the cnode before calling
*/
if ((cp->c_flag & C_MODIFIED) ||
cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
- cp->c_flag |= C_FORCEUPDATE;
+ if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime){
+ cp->c_flag |= C_FORCEUPDATE;
+ }
hfs_update(vp, 0);
}
out:
* force the update, or hfs_update will again skip the cat_update.
*/
if ((cp->c_flag & C_MODIFIED) ||
- cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
- cp->c_flag |= C_FORCEUPDATE;
+ cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime) {
+ if ((cp->c_flag & C_MODIFIED) || cp->c_touch_modtime){
+ cp->c_flag |= C_FORCEUPDATE;
+ }
hfs_update(vp, 0);
}
* occurred during the attachment, then cleanup the cnode.
*/
if ((cp->c_vp == NULL) && (cp->c_rsrc_vp == NULL)) {
- hfs_chash_abort(cp);
+ hfs_chash_abort(cp);
hfs_reclaim_cnode(cp);
- } else {
- hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
- hfs_unlock(cp);
+ }
+ else {
+ hfs_chashwakeup(cp, H_ALLOC | H_ATTACH);
+ if ((flags & GNV_SKIPLOCK) == 0){
+ hfs_unlock(cp);
+ }
}
*vpp = NULL;
return (retval);
*/
if (direction == kSwapBTNodeHostToBig) {
/*
- * Sanity check and swap the forkward and backward links.
+ * Sanity check and swap the forward and backward links.
*/
if (srcDesc->fLink >= btcb->totalNodes) {
- printf("hfs_UNswap_BTNode: invalid forward link (0x%08X)\n", srcDesc->fLink);
+ panic("hfs_UNswap_BTNode: invalid forward link (0x%08X)\n", srcDesc->fLink);
error = fsBTInvalidHeaderErr;
goto fail;
}
if (srcDesc->bLink >= btcb->totalNodes) {
- printf("hfs_UNswap_BTNode: invalid backward link (0x%08X)\n", srcDesc->bLink);
+ panic("hfs_UNswap_BTNode: invalid backward link (0x%08X)\n", srcDesc->bLink);
error = fsBTInvalidHeaderErr;
goto fail;
}
* Check srcDesc->kind. Don't swap it because it's only one byte.
*/
if (srcDesc->kind < kBTLeafNode || srcDesc->kind > kBTMapNode) {
- printf("hfs_UNswap_BTNode: invalid node kind (%d)\n", srcDesc->kind);
+ panic("hfs_UNswap_BTNode: invalid node kind (%d)\n", srcDesc->kind);
error = fsBTInvalidHeaderErr;
goto fail;
}
* Check srcDesc->height. Don't swap it because it's only one byte.
*/
if (srcDesc->height > btcb->treeDepth) {
- printf("hfs_UNswap_BTNode: invalid node height (%d)\n", srcDesc->height);
+ panic("hfs_UNswap_BTNode: invalid node height (%d)\n", srcDesc->height);
error = fsBTInvalidHeaderErr;
goto fail;
}
*/
if ((char *)srcOffs > ((char *)src->buffer + src->blockSize) ||
(char *)srcOffs < ((char *)src->buffer + sizeof(BTNodeDescriptor))) {
- printf("hfs_UNswap_BTNode: invalid record count (0x%04X)\n", srcDesc->numRecords);
+ panic("hfs_UNswap_BTNode: invalid record count (0x%04X)\n", srcDesc->numRecords);
error = fsBTInvalidHeaderErr;
goto fail;
}
* This is why we allow the record offset to be zero.
*/
if ((srcOffs[i] & 1) || (srcOffs[i] < sizeof(BTNodeDescriptor) && srcOffs[i] != 0) || (srcOffs[i] >= src->blockSize)) {
- printf("hfs_UNswap_BTNode: record #%d invalid offset (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ panic("hfs_UNswap_BTNode: record #%d invalid offset (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
error = fsBTInvalidHeaderErr;
goto fail;
}
* them backwards, hence the order in the comparison.
*/
if ((i < srcDesc->numRecords) && (srcOffs[i+1] >= srcOffs[i])) {
- printf("hfs_UNswap_BTNode: offsets %d and %d out of order (0x%04X, 0x%04X)\n",
+ panic("hfs_UNswap_BTNode: offsets %d and %d out of order (0x%04X, 0x%04X)\n",
srcDesc->numRecords-i-2, srcDesc->numRecords-i-1, srcOffs[i+1], srcOffs[i]);
error = fsBTInvalidHeaderErr;
goto fail;
* below.
*/
if ((char *)srcKey + sizeof(HFSPlusExtentKey) + recordSize > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ }
return fsBTInvalidNodeErr;
}
if (direction == kSwapBTNodeBigToHost)
srcKey->keyLength = SWAP_BE16 (srcKey->keyLength);
if (srcKey->keyLength != sizeof(*srcKey) - sizeof(srcKey->keyLength)) {
- printf("hfs_swap_HFSPlusBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ }
return fsBTInvalidNodeErr;
}
srcRec = (HFSPlusExtentDescriptor *)((char *)srcKey + srcKey->keyLength + sizeof(srcKey->keyLength));
nextRecord = (char *)src->buffer + srcOffs[i-1];
/*
- * Make sure we can safely dereference the keyLength and parentID fields. */
+ * Make sure we can safely dereference the keyLength and parentID fields.
+ */
if ((char *)srcKey + offsetof(HFSPlusCatalogKey, nodeName.unicode[0]) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ }
return fsBTInvalidNodeErr;
}
/* Sanity check the key length */
if (keyLength < kHFSPlusCatalogKeyMinimumLength || keyLength > kHFSPlusCatalogKeyMaximumLength) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, keyLength);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, keyLength);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, keyLength);
+ }
return fsBTInvalidNodeErr;
}
*/
srcPtr = (int16_t *)((char *)srcKey + keyLength + sizeof(srcKey->keyLength));
if ((char *)srcPtr + sizeof(u_int32_t) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
/* Make sure name length is consistent with key length */
if (keyLength < sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) +
srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0])) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog record #%d keyLength=%d expected=%lu\n",
- srcDesc->numRecords-i, keyLength, sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) +
- srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0]));
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog record #%d keyLength=%d expected=%lu\n",
+ srcDesc->numRecords-i, keyLength, sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) +
+ srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0]));
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog record #%d keyLength=%d expected=%lu\n",
+ srcDesc->numRecords-i, keyLength, sizeof(srcKey->parentID) + sizeof(srcKey->nodeName.length) +
+ srcKey->nodeName.length*sizeof(srcKey->nodeName.unicode[0]));
+ }
return fsBTInvalidNodeErr;
}
for (j = 0; j < srcKey->nodeName.length; j++) {
if (srcPtr[0] == kHFSPlusFolderRecord) {
HFSPlusCatalogFolder *srcRec = (HFSPlusCatalogFolder *)srcPtr;
if ((char *)srcRec + sizeof(*srcRec) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
} else if (srcPtr[0] == kHFSPlusFileRecord) {
HFSPlusCatalogFile *srcRec = (HFSPlusCatalogFile *)srcPtr;
if ((char *)srcRec + sizeof(*srcRec) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
srcRec->textEncoding = SWAP_BE32 (srcRec->textEncoding);
/* If kHFSHasLinkChainBit is set, reserved1 is hl_FirstLinkID.
- * In all other context, it is expected to be zero.
- */
+ * In all other contexts, it is expected to be zero.
+ */
srcRec->reserved1 = SWAP_BE32 (srcRec->reserved1);
/* Don't swap srcRec->userInfo */
*/
HFSPlusCatalogThread *srcRec = (HFSPlusCatalogThread *)srcPtr;
if ((char *) &srcRec->nodeName.unicode[0] > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
* Then swap the characters of the name itself.
*/
if ((char *) &srcRec->nodeName.unicode[srcRec->nodeName.length] > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
for (j = 0; j < srcRec->nodeName.length; j++) {
srcRec->nodeName.length = SWAP_BE16 (srcRec->nodeName.length);
} else {
- printf("hfs_swap_HFSPlusBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
/* Make sure there is room in the buffer for a minimal key */
if ((char *) &srcKey->attrName[1] > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: attr key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: attr key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: attr key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ }
return fsBTInvalidNodeErr;
}
*/
srcRec = (HFSPlusAttrRecord *)((char *)srcKey + keyLength + sizeof(srcKey->keyLength));
if ((char *)srcRec + sizeof(u_int32_t) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: attr key #%d too big (%d)\n", srcDesc->numRecords-i-1, keyLength);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: attr key #%d too big (%d)\n", srcDesc->numRecords-i-1, keyLength);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: attr key #%d too big (%d)\n", srcDesc->numRecords-i-1, keyLength);
+ }
return fsBTInvalidNodeErr;
}
srcKey->attrNameLen = SWAP_BE16(srcKey->attrNameLen);
/* Sanity check the attribute name length */
if (srcKey->attrNameLen > kHFSMaxAttrNameLen || keyLength < (kHFSPlusAttrKeyMinimumLength + sizeof(u_int16_t)*srcKey->attrNameLen)) {
- printf("hfs_swap_HFSPlusBTInternalNode: attr key #%d keyLength=%d attrNameLen=%d\n", srcDesc->numRecords-i-1, keyLength, srcKey->attrNameLen);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: attr key #%d keyLength=%d attrNameLen=%d\n", srcDesc->numRecords-i-1, keyLength, srcKey->attrNameLen);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: attr key #%d keyLength=%d attrNameLen=%d\n", srcDesc->numRecords-i-1, keyLength, srcKey->attrNameLen);
+ }
return fsBTInvalidNodeErr;
}
for (j = 0; j < srcKey->attrNameLen; j++)
case kHFSPlusAttrInlineData:
/* Is there room for the inline data header? */
if ((char *) &srcRec->attrData.attrData[0] > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
/* Is there room for the inline attribute data? */
if ((char *) &srcRec->attrData.attrData[attrSize] > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big (attrSize=%u)\n", srcDesc->numRecords-i-1, attrSize);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big (attrSize=%u)\n", srcDesc->numRecords-i-1, attrSize);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: attr inline #%d too big (attrSize=%u)\n", srcDesc->numRecords-i-1, attrSize);
+ }
return fsBTInvalidNodeErr;
}
case kHFSPlusAttrForkData:
/* Is there room for the fork data record? */
if ((char *)srcRec + sizeof(HFSPlusAttrForkData) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: attr fork data #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: attr fork data #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: attr fork data #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
case kHFSPlusAttrExtents:
/* Is there room for an extent record? */
if ((char *)srcRec + sizeof(HFSPlusAttrExtents) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: attr extents #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: attr extents #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: attr extents #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
/* Make sure there is room for the key (HotFileKey) and data (u_int32_t) */
if ((char *)srcKey + sizeof(HotFileKey) + sizeof(u_int32_t) > nextRecord) {
- printf("hfs_swap_HFSPlusBTInternalNode: hotfile #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: hotfile #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: hotfile #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ }
return fsBTInvalidNodeErr;
}
if (direction == kSwapBTNodeBigToHost)
srcKey->keyLength = SWAP_BE16 (srcKey->keyLength);
if (srcKey->keyLength != sizeof(*srcKey) - sizeof(srcKey->keyLength)) {
- printf("hfs_swap_HFSPlusBTInternalNode: hotfile #%d incorrect keyLength %d\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSPlusBTInternalNode: hotfile #%d incorrect keyLength %d\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ } else {
+ printf("hfs_swap_HFSPlusBTInternalNode: hotfile #%d incorrect keyLength %d\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ }
return fsBTInvalidNodeErr;
}
srcRec = (u_int32_t *)((char *)srcKey + srcKey->keyLength + sizeof(srcKey->keyLength));
* below.
*/
if ((char *)srcKey + sizeof(HFSExtentKey) + recordSize > nextRecord) {
- printf("hfs_swap_HFSBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: extents key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ }
return fsBTInvalidNodeErr;
}
/* Don't swap srcKey->keyLength (it's only one byte), but do sanity check it */
if (srcKey->keyLength != sizeof(*srcKey) - sizeof(srcKey->keyLength)) {
- printf("hfs_swap_HFSBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: extents key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ }
return fsBTInvalidNodeErr;
}
* record start to an even offset, which forms a minimal key.
*/
if ((char *)srcKey + 8 > nextRecord) {
- printf("hfs_swap_HFSBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog key #%d offset too big (0x%04X)\n", srcDesc->numRecords-i-1, srcOffs[i]);
+ }
return fsBTInvalidNodeErr;
}
/* Don't swap srcKey->keyLength (it's only one byte), but do sanity check it */
if (srcKey->keyLength < kHFSCatalogKeyMinimumLength || srcKey->keyLength > kHFSCatalogKeyMaximumLength) {
- printf("hfs_swap_HFSBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog key #%d invalid length (%d)\n", srcDesc->numRecords-i-1, srcKey->keyLength);
+ }
return fsBTInvalidNodeErr;
}
else
expectedKeyLength = srcKey->nodeName[0] + kHFSCatalogKeyMinimumLength;
if (srcKey->keyLength < expectedKeyLength) {
- printf("hfs_swap_HFSBTInternalNode: catalog record #%d keyLength=%u expected=%u\n",
- srcDesc->numRecords-i, srcKey->keyLength, expectedKeyLength);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog record #%d keyLength=%u expected=%u\n",
+ srcDesc->numRecords-i, srcKey->keyLength, expectedKeyLength);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog record #%d keyLength=%u expected=%u\n",
+ srcDesc->numRecords-i, srcKey->keyLength, expectedKeyLength);
+ }
return fsBTInvalidNodeErr;
}
* and index node's child node number.
*/
if ((char *)srcPtr + sizeof(u_int32_t) > nextRecord) {
- printf("hfs_swap_HFSBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog key #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
if (srcPtr[0] == kHFSFolderRecord) {
HFSCatalogFolder *srcRec = (HFSCatalogFolder *)srcPtr;
if ((char *)srcRec + sizeof(*srcRec) > nextRecord) {
- printf("hfs_swap_HFSBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog folder record #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
} else if (srcPtr[0] == kHFSFileRecord) {
HFSCatalogFile *srcRec = (HFSCatalogFile *)srcPtr;
if ((char *)srcRec + sizeof(*srcRec) > nextRecord) {
- printf("hfs_swap_HFSBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog file record #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
/* Make sure there is room for parentID and name length */
if ((char *) &srcRec->nodeName[1] > nextRecord) {
- printf("hfs_swap_HFSBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog thread record #%d too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
/* Make sure there is room for the name in the buffer */
if ((char *) &srcRec->nodeName[srcRec->nodeName[0]] > nextRecord) {
- printf("hfs_swap_HFSBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: catalog thread record #%d name too big\n", srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
} else {
- printf("hfs_swap_HFSBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1);
+ if (direction == kSwapBTNodeHostToBig) {
+ panic("hfs_swap_HFSBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1);
+ } else {
+ printf("hfs_swap_HFSBTInternalNode: unrecognized catalog record type (0x%04X; record #%d)\n", srcPtr[0], srcDesc->numRecords-i-1);
+ }
return fsBTInvalidNodeErr;
}
tmp_user_access.num_files = accessp->num_files;
tmp_user_access.map_size = 0;
tmp_user_access.file_ids = CAST_USER_ADDR_T(accessp->file_ids);
- tmp_user_access.bitmap = (user_addr_t)NULL;
+ tmp_user_access.bitmap = USER_ADDR_NULL;
tmp_user_access.access = CAST_USER_ADDR_T(accessp->access);
tmp_user_access.num_parents = 0;
user_access_structp = &tmp_user_access;
#include <sys/kauth.h>
#include <sys/ubc.h>
+#include <sys/ubc_internal.h>
#include <sys/vnode_internal.h>
#include <sys/mount_internal.h>
#include <sys/sysctl.h>
freeCNIDs = (u_long)0xFFFFFFFF - (u_long)vcb->vcbNxtCNID;
sbp->f_bsize = (u_int32_t)vcb->blockSize;
- sbp->f_iosize = (size_t)(MAX_UPL_TRANSFER * PAGE_SIZE);
+ sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
sbp->f_blocks = (u_int64_t)((unsigned long)vcb->totalBlocks);
sbp->f_bfree = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 0));
sbp->f_bavail = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 1));
if (error)
break;
error = hfs_relocate(rvp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc());
- vnode_put(rvp);
+ VTOC(rvp)->c_flag |= C_NEED_RVNODE_PUT;
if (error)
break;
}
VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
- VFSATTR_RETURN(fsap, f_iosize, (size_t)(MAX_UPL_TRANSFER * PAGE_SIZE));
+ VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
#include <sys/systm.h>
#include <sys/kernel.h>
+#include <sys/param.h>
#include <sys/file_internal.h>
#include <sys/dirent.h>
#include <sys/stat.h>
struct proc *p = vfs_context_proc(ap->a_context);
struct hfsmount *hfsmp;
int busy;
+ int knownrefs = 0;
+ int tooktrunclock = 0;
if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0)
return (0);
cp = VTOC(vp);
hfsmp = VTOHFS(vp);
+ /*
+ * If the rsrc fork is a named stream, it holds a usecount on the data
+ * fork, which keeps the data fork from being recycled and thus prevents
+ * the de-allocation of its extra blocks.
+ * Do the truncate-on-close checks here and purge extra extents if they
+ * exist: make sure the vp is not a directory, that it has a resource
+ * fork, and that the rsrc fork is a named stream.
+ */
+
+ if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
+ && (vnode_isnamedstream(cp->c_rsrc_vp))) {
+ uint32_t blks;
+
+ blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
+ /*
+ * If there are any extra blocks and there are only 2 refs on
+ * this vp (ourselves + rsrc fork holding ref on us), go ahead
+ * and try to truncate the extra blocks away.
+ */
+ if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
+ // Release the cnode lock; the truncate lock must be acquired BEFORE the cnode lock.
+ hfs_unlock (cp);
+
+ hfs_lock_truncate(cp, TRUE);
+ tooktrunclock = 1;
+
+ if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
+ hfs_unlock_truncate(cp, TRUE);
+ return (0);
+ }
+
+ // Now re-test to make sure it's still valid.
+ if (cp->c_rsrc_vp) {
+ knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
+ if (!vnode_isinuse(vp, knownrefs)) {
+ blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
+ if (blks < VTOF(vp)->ff_blocks) {
+ (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, ap->a_context);
+ }
+ }
+ }
+ }
+ }
+
// if we froze the fs and we're exiting, then "thaw" the fs
if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
hfsmp->hfs_freezing_proc = NULL;
} else if (vnode_issystem(vp) && !busy) {
vnode_recycle(vp);
}
-
+ if (tooktrunclock) {
+ hfs_unlock_truncate(cp, TRUE);
+ }
+
hfs_unlock(cp);
return (0);
}
hfs_unlockpair(dcp, cp);
if (recycle_rsrc && vnode_getwithvid(rvp, rvid) == 0) {
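+ /*
+ * The ref/rele pair marks the vnode so that the final vnode_put below
+ * triggers vnop_inactive (see the matching comment in the rename path).
+ */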
+ vnode_ref(rvp);
+ vnode_rele(rvp);
vnode_recycle(rvp);
vnode_put(rvp);
}
* until the call to hfs_vnop_inactive() occurs.
*/
if (cp->c_rsrc_vp) {
- defer_remove = 1;
+ defer_remove = 1;
} else {
error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
if (error)
struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
struct vnode *fdvp = ap->a_fdvp;
+ struct vnode *rvp = NULLVP;
struct componentname *tcnp = ap->a_tcnp;
struct componentname *fcnp = ap->a_fcnp;
struct proc *p = vfs_context_proc(ap->a_context);
int took_trunc_lock = 0;
int lockflags;
int error;
-
+ int rsrc_vid = 0;
+ int recycle_rsrc = 0;
+
/* When tvp exist, take the truncate lock for the hfs_removefile(). */
if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
hfs_lock_truncate(VTOC(tvp), TRUE);
if (vnode_isdir(tvp))
error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE);
else {
+ if (tcp){
+ rvp = tcp->c_rsrc_vp;
+ }
error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0);
+
+ /* If the destination file had a resource fork vnode, we couldn't do
+ * anything about it in hfs_removefile because we didn't have a reference on it.
+ * We need to take action here to prevent it from leaking blocks. If removefile
+ * succeeded, then squirrel away the vid of the resource fork vnode and force a
+ * recycle after dropping all of the locks. The vid is guaranteed not to change
+ * at this point because we still hold the cnode lock.
+ */
+ if ((error == 0) && (tcp->c_flag & C_DELETED) && rvp && !vnode_isinuse(rvp, 0)) {
+ rsrc_vid = vnode_vid(rvp);
+ recycle_rsrc = 1;
+ }
}
if (error)
hfs_unlockfour(fdcp, fcp, tdcp, tcp);
+ /* Now that we've dropped locks, see if we need to force recycle on the old
+ * destination's rsrc fork, preventing a leak of the rsrc fork's blocks. Note that
+ * the ref/rele is done to twiddle the VL_INACTIVE bit in the vnode's flags, so that
+ * the last vnode_put on this vnode will force vnop_inactive to be triggered.
+ */
+ if ((recycle_rsrc) && (vnode_getwithvid(rvp, rsrc_vid) == 0)) {
+ vnode_ref(rvp);
+ vnode_rele(rvp);
+ vnode_recycle(rvp);
+ vnode_put (rvp);
+ }
+
+
/* After tvp is removed the only acceptable error is EIO */
if (error && tvp_deleted)
error = EIO;
hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, int can_drop_lock)
{
struct vnode *rvp;
+ struct vnode *dvp = NULLVP;
struct cnode *cp = VTOC(vp);
int error;
int vid;
"%s%s", cp->c_desc.cd_nameptr,
_PATH_RSRCFORKSPEC);
}
- error = hfs_getnewvnode(hfsmp, vnode_parent(vp), cn.cn_pnbuf ? &cn : NULL,
+ dvp = vnode_getparent(vp);
+ error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
&cp->c_desc, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
&rsrcfork, &rvp);
+ if (dvp)
+ vnode_put(dvp);
if (cn.cn_pnbuf)
FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
if (error)
// offset in sectors from start of allocation block space
//
temp = (daddr64_t)((offset - (off_t)((off_t)(firstFABN) * (off_t)(allocBlockSize)))/sectorSize);
- temp += startBlock * sectorsPerBlock;
+ temp += (daddr64_t)startBlock * (daddr64_t)sectorsPerBlock;
/* Add in any volume offsets */
if (vcb->vcbSigWord == kHFSPlusSigWord)
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* cast to any desired pointer type.
*/
#define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1)
-#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES)
+#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_intptr_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES)
+
+#define __DARWIN_ALIGNBYTES32 (sizeof(__uint32_t) - 1)
+#define __DARWIN_ALIGN32(p) ((__darwin_size_t)((char *)(__darwin_intptr_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32)
#endif /* _I386__PARAM_H_ */
int boothowto = RB_DEBUG;
void lightning_bolt(void *);
-extern kern_return_t IOFindBSDRoot(char *, dev_t *, u_int32_t *);
+extern kern_return_t IOFindBSDRoot(char *, unsigned int, dev_t *, u_int32_t *);
extern void IOSecureBSDRoot(const char * rootName);
extern kern_return_t IOKitBSDInit(void );
extern void kminit(void);
* which needs to be under network funnel. Right thing to do
* here is to drop the funnel alltogether and regrab it afterwards
*/
- err = IOFindBSDRoot( rootdevice, &rootdev, &flags );
+ err = IOFindBSDRoot(rootdevice, sizeof(rootdevice), &rootdev, &flags);
if( err) {
printf("setconf: IOFindBSDRoot returned an error (%d);"
"setting rootdevice to 'sd0a'.\n", err); /* XXX DEBUG TEMP */
if (root_path == NULL)
panic("%s: M_NAMEI zone exhausted", __FUNCTION__);
- if(PE_parse_boot_arg("rp", root_path) == TRUE) {
+ if(PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == TRUE) {
/* Got it, now verify scheme */
if (strncmp(root_path, kIBFilePrefix,
if (root_path == NULL)
return (ENOMEM);
- if(PE_parse_boot_arg("rp", root_path) == FALSE) {
+ if(PE_parse_boot_argn("rp", root_path, MAXPATHLEN) == FALSE) {
error = ENOENT;
goto done;
}
lck_mtx_convert_spin(&p->p_fdmlock);
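+ /*
+ * Always run the fileops drain routine; previously it ran only when the
+ * descriptor was not in select.
+ */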
+ if (fp->f_fglob->fg_ops->fo_drain) {
+ (*fp->f_fglob->fg_ops->fo_drain)(fp, &context);
+ }
if (((fp->f_flags & FP_INSELECT)== FP_INSELECT)) {
wait_queue_wakeup_all((wait_queue_t)fp->f_waddr, &selwait, THREAD_INTERRUPTED);
- } else {
- if (fp->f_fglob->fg_ops->fo_drain) {
- (*fp->f_fglob->fg_ops->fo_drain)(fp, &context);
- }
- }
+ }
p->p_fpdrainwait = 1;
msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO, "fpdrain", NULL);
if (buf == NULL)
return(ENOMEM);
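+ /* Start from a zeroed buffer so any bytes not filled in below read back as zero rather than stale kernel memory. */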
+ bzero(buf, usize);
error = vnode_getwithvid(tvp, vid);
if (error == 0) {
goto semopout;
}
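+ /*
+ * Validate nsops up front, before the MAC checks and the copyin below
+ * (this test previously ran after the MAC hooks).
+ */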
+ if (nsops < 0 || nsops > MAX_SOPS) {
+#ifdef SEM_DEBUG
+ printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
+#endif
+ eval = E2BIG;
+ goto semopout;
+ }
+
#if CONFIG_MACF
/*
* Initial pass thru sops to see what permissions are needed.
goto semopout;
#endif
- if (nsops < 0 || nsops > MAX_SOPS) {
-#ifdef SEM_DEBUG
- printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
-#endif
- eval = E2BIG;
- goto semopout;
- }
-
/* OK for LP64, since sizeof(struct sembuf) is currently invariant */
if ((eval = copyin(uap->sops, &sops, nsops * sizeof(struct sembuf))) != 0) {
#ifdef SEM_DEBUG
/*
- * Copyright (c) 1999-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1999-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
cs_valid_range(cd, cd + 1, lower_bound, upper_bound) &&
cs_valid_range(cd, (const char *) cd + ntohl(cd->length),
lower_bound, upper_bound) &&
+ cs_valid_range(cd, (const char *) cd + ntohl(cd->hashOffset),
+ lower_bound, upper_bound) &&
+ cs_valid_range(cd, (const char *) cd +
+ ntohl(cd->hashOffset) +
+ (ntohl(cd->nCodeSlots) * SHA1_RESULTLEN),
+ lower_bound, upper_bound) &&
+
ntohl(cd->magic) == CSMAGIC_CODEDIRECTORY) {
return cd;
}
struct ubc_info *uip;
int need_rele = 0;
int need_wakeup = 0;
-#if NAMEDRSRCFORK
- int named_fork = 0;
-#endif
-
+
if (vnode_getwithref(vp))
return;
}
SET(uip->ui_flags, UI_MAPBUSY);
-#if NAMEDRSRCFORK
- if ((vp->v_flag & VISNAMEDSTREAM) &&
- (vp->v_parent != NULLVP) &&
- !(vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS)) {
- named_fork = 1;
- }
-#endif
-
if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
- CLR(uip->ui_flags, UI_ISMAPPED);
+ CLR(uip->ui_flags, UI_ISMAPPED);
need_rele = 1;
}
vnode_unlock(vp);
-
+
if (need_rele) {
- (void)VNOP_MNOMAP(vp, vfs_context_current());
-
-#if NAMEDRSRCFORK
- if (named_fork) {
- vnode_relenamedstream(vp->v_parent, vp, vfs_context_current());
- }
-#endif
-
- vnode_rele(vp);
+ (void) VNOP_MNOMAP(vp, vfs_context_current());
+ vnode_rele(vp);
}
vnode_lock_spin(vp);
vnode_unlock(vp);
if (need_wakeup)
- wakeup(&uip->ui_flags);
+ wakeup(&uip->ui_flags);
}
/*
ubc_upl_maxbufsize(
void)
{
- return(MAX_UPL_TRANSFER * PAGE_SIZE);
+ return(MAX_UPL_SIZE * PAGE_SIZE);
}
/*
kern_return_t kr;
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
- kr = upl_commit(upl, pl, MAX_UPL_TRANSFER);
+ kr = upl_commit(upl, pl, MAX_UPL_SIZE);
upl_deallocate(upl);
return kr;
}
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
kr = upl_commit_range(upl, offset, size, flags,
- pl, MAX_UPL_TRANSFER, &empty);
+ pl, MAX_UPL_SIZE, &empty);
if((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty)
upl_deallocate(upl);
blob_start_offset = blob->csb_base_offset + blob->csb_start_offset;
blob_end_offset = blob->csb_base_offset + blob->csb_end_offset;
- if (blob_start_offset >= blob_end_offset) {
+ if (blob_start_offset >= blob_end_offset ||
+ blob_start_offset < 0 ||
+ blob_end_offset <= 0) {
/* reject empty or backwards blob */
error = EINVAL;
goto out;
codeLimit = ntohl(cd->codeLimit);
hash = hashes(cd, atop(offset),
lower_bound, upper_bound);
- bcopy(hash, expected_hash, sizeof (expected_hash));
- found_hash = TRUE;
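+ /* hashes() may return NULL; only record an expected hash when one was actually found. */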
+ if (hash != NULL) {
+ bcopy(hash, expected_hash,
+ sizeof (expected_hash));
+ found_hash = TRUE;
+ }
#if !CS_BLOB_KEEP_IN_KERNEL
/* we no longer need that blob in the kernel map */
struct ifaddr *ifa = NULL;
struct ifaddr *ifa_maybe = (struct ifaddr *) 0;
u_int af = addr->sa_family;
- char *addr_data = addr->sa_data, *cplim;
+ const char *addr_data = addr->sa_data, *cplim;
ifnet_head_lock_shared();
/*
ifnet_lock_shared(ifp);
for (ifa = ifp->if_addrhead.tqh_first; ifa;
ifa = ifa->ifa_link.tqe_next) {
- char *cp, *cp2, *cp3;
+ const char *cp, *cp2, *cp3;
if (ifa->ifa_addr->sa_family != af)
next: continue;
* Grow the congestion window, if the
* connection is cwnd bound.
*/
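+ /*
+ * Byte-counting growth: once a full congestion window's worth of data
+ * has been acked, open the window by one segment. Only applies while
+ * cwnd (not the receiver's window) is the limiting factor.
+ */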
+ if (tp->snd_cwnd < tp->snd_wnd) {
+ tp->t_bytes_acked += acked;
+ if (tp->t_bytes_acked > tp->snd_cwnd) {
+ tp->t_bytes_acked -= tp->snd_cwnd;
+ tp->snd_cwnd += tp->t_maxseg;
+ }
+ }
sbdrop(&so->so_snd, acked);
if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
SEQ_LEQ(th->th_ack, tp->snd_recover))
register u_int cw = tp->snd_cwnd;
register u_int incr = tp->t_maxseg;
- if ((acked > incr) && tcp_do_rfc3465) {
- if (cw >= tp->snd_ssthresh) {
- tp->t_bytes_acked += acked;
- if (tp->t_bytes_acked >= cw) {
+ if (cw >= tp->snd_ssthresh) {
+ tp->t_bytes_acked += acked;
+ if (tp->t_bytes_acked >= cw) {
/* Time to increase the window. */
- tp->t_bytes_acked -= cw;
- } else {
- /* No need to increase yet. */
- incr = 0;
- }
+ tp->t_bytes_acked -= cw;
} else {
- /*
- * If the user explicitly enables RFC3465
- * use 2*SMSS for the "L" param. Otherwise
- * use the more conservative 1*SMSS.
- *
- * (See RFC 3465 2.3 Choosing the Limit)
- */
- u_int abc_lim;
-
- abc_lim = (tcp_do_rfc3465 == 0) ?
- incr : incr * 2;
- incr = lmin(acked, abc_lim);
+ /* No need to increase yet. */
+ incr = 0;
}
- }
- else {
+ } else {
/*
- * If the window gives us less than ssthresh packets
- * in flight, open exponentially (segsz per packet).
- * Otherwise open linearly: segsz per window
- * (segsz^2 / cwnd per packet).
+ * If the user explicitly enables RFC3465
+ * use 2*SMSS for the "L" param. Otherwise
+ * use the more conservative 1*SMSS.
+ *
+ * (See RFC 3465 2.3 Choosing the Limit)
*/
-
- if (cw >= tp->snd_ssthresh) {
- incr = incr * incr / cw;
- }
- }
+ u_int abc_lim;
+ abc_lim = (tcp_do_rfc3465 == 0) ?
+ incr : incr * 2;
+ incr = min(acked, abc_lim);
+ }
tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale);
}
tp->snd_ssthresh = max(2 * mss, rt->rt_rmx.rmx_ssthresh);
tcpstat.tcps_usedssthresh++;
}
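+ /* Otherwise default to the maximum possible slow-start threshold. */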
+ else
+ tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+
lck_mtx_unlock(rt_mtx);
}
* after the retransmission timer has been turned off. Make sure
* that the retransmission timer is set.
*/
- if (tp->sack_enable && SEQ_GT(tp->snd_max, tp->snd_una) &&
+ if (tp->sack_enable && (tp->t_state >= TCPS_ESTABLISHED) && SEQ_GT(tp->snd_max, tp->snd_una) &&
tp->t_timer[TCPT_REXMT] == 0 &&
tp->t_timer[TCPT_PERSIST] == 0) {
tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
+ tp->snd_ssthresh_prev = TCP_MAXWIN << TCP_MAX_WINSHIFT;
tp->t_rcvtime = 0;
tp->t_bw_rtttime = 0;
/*
* growth is 2 mss. We don't allow the threshhold
* to go below this.)
*/
- {
- u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
- if (win < 2)
- win = 2;
- tp->snd_cwnd = tp->t_maxseg;
- tp->snd_ssthresh = win * tp->t_maxseg;
- tp->t_bytes_acked = 0;
- tp->t_dupacks = 0;
- tp->t_unacksegs = 0;
+ if (tp->t_state >= TCPS_ESTABLISHED) {
+ u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
+ if (win < 2)
+ win = 2;
+ tp->snd_cwnd = tp->t_maxseg;
+ tp->snd_ssthresh = win * tp->t_maxseg;
+ tp->t_bytes_acked = 0;
+ tp->t_dupacks = 0;
+ tp->t_unacksegs = 0;
}
EXIT_FASTRECOVERY(tp);
(void) tcp_output(tp);
#if 1 /* DIAGNOSTIC? */
/* be careful about wrap-around */
- if (*curpps + 1 > *curpps)
+ if (*curpps + 1 > 0)
*curpps = *curpps + 1;
#else
/*
}
md = m_pulldown(m, off, sizeof(*ipcomp), NULL);
- if (!m) {
+ if (!md) {
m = NULL; /*already freed*/
ipseclog((LOG_DEBUG, "IPv4 IPComp input: assumption failed "
"(pulldown failure)\n"));
off = *offp;
md = m_pulldown(m, off, sizeof(*ipcomp), NULL);
- if (!m) {
+ if (!md) {
m = NULL; /*already freed*/
ipseclog((LOG_DEBUG, "IPv6 IPComp input: assumption failed "
"(pulldown failure)\n"));
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
u_int64_t *xidp,
int *status)
{
- int error, asyncio = req->r_callback.rcb_func ? 1 : 0;
+ int error = 0, asyncio = req->r_callback.rcb_func ? 1 : 0;
lck_mtx_lock(&req->r_mtx);
if (!asyncio)
req->r_flags |= R_ASYNCWAIT;
- while (req->r_flags & R_RESENDQ) /* wait until the request is off the resend queue */
- msleep(req, &req->r_mtx, PZERO-1, "nfsresendqwait", NULL);
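+ /*
+ * Sleep in 2-second intervals and bail out early if the request is
+ * interrupted, rather than blocking uninterruptibly.
+ */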
+ while (req->r_flags & R_RESENDQ) { /* wait until the request is off the resend queue */
+ struct timespec ts = { 2, 0 };
+ if ((error = nfs_sigintr(req->r_nmp, req, req->r_thread, 0)))
+ break;
+ msleep(req, &req->r_mtx, PZERO-1, "nfsresendqwait", &ts);
+ }
lck_mtx_unlock(&req->r_mtx);
- nfs_request_wait(req);
- error = nfs_request_finish(req, nmrepp, status);
+ if (!error) {
+ nfs_request_wait(req);
+ error = nfs_request_finish(req, nmrepp, status);
+ }
while (!error && (req->r_flags & R_RESTART)) {
if (asyncio && req->r_resendtime) { /* send later */
void
nfs_down(struct nfsmount *nmp, thread_t thd, int error, int flags, const char *msg)
{
- int ostate;
+ int ostate, do_vfs_signal;
if (nmp == NULL)
return;
nmp->nm_state |= NFSSTA_JUKEBOXTIMEO;
lck_mtx_unlock(&nmp->nm_lock);
- if (!(ostate & (NFSSTA_TIMEO|NFSSTA_LOCKTIMEO|NFSSTA_JUKEBOXTIMEO)))
+ /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
+ if ((nmp->nm_flag & NFSMNT_SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE))
+ do_vfs_signal = 0;
+ else
+ do_vfs_signal = !(ostate & (NFSSTA_TIMEO|NFSSTA_LOCKTIMEO|NFSSTA_JUKEBOXTIMEO));
+ if (do_vfs_signal)
vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 0);
nfs_msg(thd, vfs_statfs(nmp->nm_mountp)->f_mntfromname, msg, error);
void
nfs_up(struct nfsmount *nmp, thread_t thd, int flags, const char *msg)
{
- int ostate, state;
+ int ostate, state, do_vfs_signal;
if (nmp == NULL)
return;
state = nmp->nm_state;
lck_mtx_unlock(&nmp->nm_lock);
- if ((ostate & (NFSSTA_TIMEO|NFSSTA_LOCKTIMEO|NFSSTA_JUKEBOXTIMEO)) &&
- !(state & (NFSSTA_TIMEO|NFSSTA_LOCKTIMEO|NFSSTA_JUKEBOXTIMEO)))
+ /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
+ if ((nmp->nm_flag & NFSMNT_SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE))
+ do_vfs_signal = 0;
+ else
+ do_vfs_signal = (ostate & (NFSSTA_TIMEO|NFSSTA_LOCKTIMEO|NFSSTA_JUKEBOXTIMEO)) &&
+ !(state & (NFSSTA_TIMEO|NFSSTA_LOCKTIMEO|NFSSTA_JUKEBOXTIMEO));
+ if (do_vfs_signal)
vfs_event_signal(&vfs_statfs(nmp->nm_mountp)->f_fsid, VQ_NOTRESP, 1);
}
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
nfs_vfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
user_addr_t newp, size_t newlen, vfs_context_t ctx)
{
- int error = 0, val;
+ int error = 0, val, softnobrowse;
struct sysctl_req *req = NULL;
struct vfsidctl vc;
struct user_vfsidctl user_vc;
break;
case VFS_CTL_QUERY:
lck_mtx_lock(&nmp->nm_lock);
- if (nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_JUKEBOXTIMEO))
+ /* XXX don't allow users to know about/disconnect unresponsive, soft, nobrowse mounts */
+ softnobrowse = ((nmp->nm_flag & NFSMNT_SOFT) && (vfs_flags(nmp->nm_mountp) & MNT_DONTBROWSE));
+ if (!softnobrowse && (nmp->nm_state & (NFSSTA_TIMEO|NFSSTA_JUKEBOXTIMEO)))
vq.vq_flags |= VQ_NOTRESP;
- if (!(nmp->nm_flag & (NFSMNT_NOLOCKS|NFSMNT_LOCALLOCKS)) &&
+ if (!softnobrowse && !(nmp->nm_flag & (NFSMNT_NOLOCKS|NFSMNT_LOCALLOCKS)) &&
(nmp->nm_state & NFSSTA_LOCKTIMEO))
vq.vq_flags |= VQ_NOTRESP;
lck_mtx_unlock(&nmp->nm_lock);
/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004, 2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* cast to any desired pointer type.
*/
#define __DARWIN_ALIGNBYTES (sizeof(__darwin_size_t) - 1)
-#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES)
+#define __DARWIN_ALIGN(p) ((__darwin_size_t)((char *)(__darwin_intptr_t)(p) + __DARWIN_ALIGNBYTES) &~ __DARWIN_ALIGNBYTES)
+
+#define __DARWIN_ALIGNBYTES32 (sizeof(__uint32_t) - 1)
+#define __DARWIN_ALIGN32(p) ((__darwin_size_t)((char *)(__darwin_intptr_t)(p) + __DARWIN_ALIGNBYTES32) &~ __DARWIN_ALIGNBYTES32)
#endif /* _PPC__PARAM_H_ */
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
/* given pointer to struct cmsghdr, return pointer to data */
#define CMSG_DATA(cmsg) ((unsigned char *)(cmsg) + \
- __DARWIN_ALIGN(sizeof(struct cmsghdr)))
+ __DARWIN_ALIGN32(sizeof(struct cmsghdr)))
/* given pointer to struct cmsghdr, return pointer to next cmsghdr */
#define CMSG_NXTHDR(mhdr, cmsg) \
- (((unsigned char *)(cmsg) + __DARWIN_ALIGN((__darwin_intptr_t)(cmsg)->cmsg_len) + \
- __DARWIN_ALIGN(sizeof(struct cmsghdr)) > \
+ (((unsigned char *)(cmsg) + __DARWIN_ALIGN32((uint32_t)(cmsg)->cmsg_len) + \
+ __DARWIN_ALIGN32(sizeof(struct cmsghdr)) > \
(unsigned char *)(mhdr)->msg_control + (mhdr)->msg_controllen) ? \
(struct cmsghdr *)0L /* NULL */ : \
- (struct cmsghdr *)((unsigned char *)(cmsg) + __DARWIN_ALIGN((__darwin_intptr_t)(cmsg)->cmsg_len)))
+ (struct cmsghdr *)((unsigned char *)(cmsg) + __DARWIN_ALIGN32((uint32_t)(cmsg)->cmsg_len)))
#define CMSG_FIRSTHDR(mhdr) ((struct cmsghdr *)(mhdr)->msg_control)
#if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
/* RFC 2292 additions */
-#define CMSG_SPACE(l) (__DARWIN_ALIGN(sizeof(struct cmsghdr)) + __DARWIN_ALIGN(l))
-#define CMSG_LEN(l) (__DARWIN_ALIGN(sizeof(struct cmsghdr)) + (l))
+#define CMSG_SPACE(l) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + __DARWIN_ALIGN32(l))
+#define CMSG_LEN(l) (__DARWIN_ALIGN32(sizeof(struct cmsghdr)) + (l))
#ifdef KERNEL
#define CMSG_ALIGN(n) __DARWIN_ALIGN(n)
/* internal only */
__private_extern__ void cluster_release(struct ubc_info *);
-
+__private_extern__ uint32_t cluster_max_io_size(mount_t, int);
+
+
/* Flags for ubc_getobject() */
#define UBC_FLAGS_NONE 0x0000
(void *) &ux_exception_port) != MACH_MSG_SUCCESS)
panic("ux_handler: object_copyin(ux_exception_port) failed");
+ proc_list_lock();
thread_wakeup(&ux_exception_port);
+ proc_list_unlock();
/* Message handling loop. */
{
ux_exception_port = MACH_PORT_NULL;
(void) kernel_thread(kernel_task, ux_handler);
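+ /*
+ * Check ux_exception_port and sleep under proc_list_lock so the wakeup
+ * issued by ux_handler (also under proc_list_lock) cannot be missed.
+ */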
+ proc_list_lock();
if (ux_exception_port == MACH_PORT_NULL) {
- assert_wait(&ux_exception_port, THREAD_UNINT);
- thread_block(THREAD_CONTINUE_NULL);
+ (void)msleep(&ux_exception_port, proc_list_mlock, 0, "ux_handler_wait", 0);
}
+ proc_list_unlock();
}
kern_return_t
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
if (!thread_safe) {
unlock_fsnode(vp, &funnel_state);
}
+
+#if NAMEDSTREAMS
+ /* For file systems that do not support namedstreams natively, mark
+ * the shadow stream file vnode to be recycled as soon as the last
+ * reference goes away. To avoid re-entering reclaim code, do not
+ * call recycle on terminating named stream vnodes.
+ */
+ if (vnode_isnamedstream(vp) &&
+ (vp->v_parent != NULLVP) &&
+ ((vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0) &&
+ ((vp->v_lflag & VL_TERMINATE) == 0)) {
+ vnode_recycle(vp);
+ }
+#endif
+
return (_err);
}
#include <sys/kdebug.h>
#define CL_READ 0x01
-#define CL_ASYNC 0x02
-#define CL_COMMIT 0x04
+#define CL_WRITE 0x02
+#define CL_ASYNC 0x04
+#define CL_COMMIT 0x08
#define CL_PAGEOUT 0x10
#define CL_AGE 0x20
#define CL_NOZERO 0x40
* can represent it in a 32 bit int
*/
#define MAX_IO_REQUEST_SIZE (1024 * 1024 * 256)
-#define MAX_IO_CONTIG_SIZE (1024 * 1024 * 8)
-#define MAX_VECTS 16
-
-/*
- * note: MAX_CLUSTER_SIZE CANNOT be larger than MAX_UPL_TRANSFER
- */
-#define MAX_CLUSTER_SIZE (MAX_UPL_TRANSFER)
-#define MAX_PREFETCH (MAX_CLUSTER_SIZE * PAGE_SIZE * 2)
+#define MAX_IO_CONTIG_SIZE (MAX_UPL_SIZE * PAGE_SIZE)
+#define MAX_VECTS 16
#define MIN_DIRECT_WRITE_SIZE (4 * PAGE_SIZE)
+#define MAX_CLUSTER_SIZE(vp) (cluster_max_io_size(vp->v_mount, CL_WRITE))
+#define MAX_PREFETCH(vp) (cluster_max_io_size(vp->v_mount, CL_READ) * 3)
+
+
int speculative_reads_disabled = 0;
/*
}
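+/*
+ * Compute the maximum cluster I/O size for a mount based on the device's
+ * segment count and maximum transfer limits for the given direction
+ * (CL_READ, CL_WRITE, or 0 for the more restrictive of the two), clipped to
+ * the largest UPL we can create and never smaller than the old fixed
+ * MAX_UPL_TRANSFER limit.
+ */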
+uint32_t
+cluster_max_io_size(mount_t mp, int type)
+{
+ uint32_t max_io_size;
+ uint32_t segcnt;
+ uint32_t maxcnt;
+
+ switch(type) {
+
+ case CL_READ:
+ segcnt = mp->mnt_segreadcnt;
+ maxcnt = mp->mnt_maxreadcnt;
+ break;
+ case CL_WRITE:
+ segcnt = mp->mnt_segwritecnt;
+ maxcnt = mp->mnt_maxwritecnt;
+ break;
+ default:
+ segcnt = min(mp->mnt_segreadcnt, mp->mnt_segwritecnt);
+ maxcnt = min(mp->mnt_maxreadcnt, mp->mnt_maxwritecnt);
+ break;
+ }
+ if (segcnt > MAX_UPL_SIZE) {
+ /*
+ * don't allow a size beyond the max UPL size we can create
+ */
+ segcnt = MAX_UPL_SIZE;
+ }
+ max_io_size = min((segcnt * PAGE_SIZE), maxcnt);
+
+ if (max_io_size < (MAX_UPL_TRANSFER * PAGE_SIZE)) {
+ /*
+ * don't allow a size smaller than the old fixed limit
+ */
+ max_io_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
+ } else {
+ /*
+ * make sure the size specified is a multiple of PAGE_SIZE
+ */
+ max_io_size &= ~PAGE_MASK;
+ }
+ return (max_io_size);
+}
+
+
+
#define CLW_ALLOCATE 0x01
#define CLW_RETURNLOCKED 0x02
async_throttle = VNODE_ASYNC_THROTTLE;
else {
u_int max_cluster;
+ u_int max_cluster_size;
+ u_int max_prefetch;
+
+ max_cluster_size = MAX_CLUSTER_SIZE(vp);
+ max_prefetch = MAX_PREFETCH(vp);
- if (max_iosize > (MAX_CLUSTER_SIZE * PAGE_SIZE))
- max_cluster = (MAX_CLUSTER_SIZE * PAGE_SIZE);
+ if (max_iosize > max_cluster_size)
+ max_cluster = max_cluster_size;
else
max_cluster = max_iosize;
if (size < max_cluster)
max_cluster = size;
- async_throttle = min(VNODE_ASYNC_THROTTLE, (MAX_PREFETCH / max_cluster) - 1);
+ async_throttle = min(VNODE_ASYNC_THROTTLE, (max_prefetch / max_cluster) - 1);
}
}
}
daddr64_t r_addr;
off_t f_offset;
int size_of_prefetch;
+ u_int max_prefetch;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_START,
return;
}
+ max_prefetch = MAX_PREFETCH(vp);
+
if (extent->e_addr < rap->cl_maxra) {
- if ((rap->cl_maxra - extent->e_addr) > ((MAX_PREFETCH / PAGE_SIZE) / 4)) {
+ if ((rap->cl_maxra - extent->e_addr) > ((max_prefetch / PAGE_SIZE) / 4)) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 48)) | DBG_FUNC_END,
rap->cl_ralen, (int)rap->cl_maxra, (int)rap->cl_lastr, 2, 0);
if (f_offset < filesize) {
daddr64_t read_size;
- rap->cl_ralen = rap->cl_ralen ? min(MAX_PREFETCH / PAGE_SIZE, rap->cl_ralen << 1) : 1;
+ rap->cl_ralen = rap->cl_ralen ? min(max_prefetch / PAGE_SIZE, rap->cl_ralen << 1) : 1;
read_size = (extent->e_addr + 1) - extent->b_addr;
if (read_size > rap->cl_ralen) {
- if (read_size > MAX_PREFETCH / PAGE_SIZE)
- rap->cl_ralen = MAX_PREFETCH / PAGE_SIZE;
+ if (read_size > max_prefetch / PAGE_SIZE)
+ rap->cl_ralen = max_prefetch / PAGE_SIZE;
else
rap->cl_ralen = read_size;
}
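
The read-ahead window in cluster_read_ahead now grows toward the per-mount max_prefetch instead of the old global MAX_PREFETCH. A minimal stand-alone sketch of that growth rule, with hypothetical names and all sizes in pages (not part of the patch):

    /* grow_ralen: hypothetical restatement of the cl_ralen update above. */
    static unsigned int
    grow_ralen(unsigned int ralen, unsigned int read_size, unsigned int max_prefetch)
    {
        /* start at one page, then double on each sequential read, capped at max_prefetch */
        if (ralen == 0)
            ralen = 1;
        else
            ralen = (ralen << 1 < max_prefetch) ? ralen << 1 : max_prefetch;

        /* a request wider than the window widens it immediately, up to the same cap */
        if (read_size > ralen)
            ralen = (read_size > max_prefetch) ? max_prefetch : read_size;

        return ralen;
    }

    /* small sequential reads with a 16-page cap grow the window 1, 2, 4, 8, 16, 16, ... */
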
u_int32_t io_req_size;
u_int32_t offset_in_file;
u_int32_t offset_in_iovbase;
- int io_size;
+ u_int32_t io_size;
int io_flag;
int bflag;
vm_size_t upl_size;
user_addr_t iov_base;
u_int32_t mem_alignment_mask;
u_int32_t devblocksize;
+ u_int32_t max_upl_size;
+
+
+ max_upl_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
if (flags & IO_PASSIVE)
- bflag = CL_PASSIVE;
+ bflag = CL_PASSIVE;
else
- bflag = 0;
+ bflag = 0;
/*
* When we enter this routine, we know
io_size = io_req_size & ~PAGE_MASK;
iov_base = uio_curriovbase(uio);
- if (io_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
- io_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ if (io_size > max_upl_size)
+ io_size = max_upl_size;
upl_offset = (vm_offset_t)((u_int32_t)iov_base & PAGE_MASK);
upl_needed_size = (upl_offset + io_size + (PAGE_SIZE -1)) & ~PAGE_MASK;
*/
lck_mtx_lock(cl_mtxp);
- while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_UPL_TRANSFER * PAGE_SIZE)) {
+ while ((iostate.io_issued - iostate.io_completed) > (2 * max_upl_size)) {
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 2 * max_upl_size, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 2 * max_upl_size, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
lck_mtx_lock(cl_mtxp);
while (iostate.io_issued != iostate.io_completed) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_direct", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
}
lck_mtx_lock(cl_mtxp);
while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_IO_CONTIG_SIZE)) {
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 2 * MAX_IO_CONTIG_SIZE, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 2 * MAX_IO_CONTIG_SIZE, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
}
io_size -= xsize;
}
}
- if (error == 0 && iostate.io_error == 0 && tail_size == 0) {
+ if (error == 0 && iostate.io_error == 0 && tail_size == 0 && num_upl < MAX_VECTS) {
error = cluster_io_type(uio, write_type, write_length, 0);
lck_mtx_lock(cl_mtxp);
while (iostate.io_issued != iostate.io_completed) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_write_contig", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
int intersection;
struct cl_writebehind *wbp;
int bflag;
+ u_int max_cluster_pgcount;
+ u_int max_io_size;
if (flags & IO_PASSIVE)
- bflag = CL_PASSIVE;
+ bflag = CL_PASSIVE;
else
- bflag = 0;
+ bflag = 0;
if (uio) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 40)) | DBG_FUNC_START,
zero_off = 0;
zero_off1 = 0;
+ max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
+ max_io_size = cluster_max_io_size(vp->v_mount, CL_WRITE);
+
if (flags & IO_HEADZEROFILL) {
/*
* some filesystems (HFS is one) don't support unallocated holes within a file...
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 46)) | DBG_FUNC_NONE,
(int)zero_off, (int)zero_cnt, (int)zero_off1, (int)zero_cnt1, 0);
- if (total_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
- total_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ if (total_size > max_io_size)
+ total_size = max_io_size;
cl.b_addr = (daddr64_t)(upl_f_offset / PAGE_SIZE_64);
* assumption... total_size <= io_resid
* because IO_HEADZEROFILL and IO_TAILZEROFILL not set
*/
- if ((start_offset + total_size) > (MAX_UPL_TRANSFER * PAGE_SIZE))
+ if ((start_offset + total_size) > max_io_size)
total_size -= start_offset;
xfer_resid = total_size;
*/
upl_size = (start_offset + total_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
- if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
- upl_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ if (upl_size > max_io_size)
+ upl_size = max_io_size;
pages_in_upl = upl_size / PAGE_SIZE;
io_size = upl_size - start_offset;
/*
* the current write starts at or after the current cluster
*/
- if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + MAX_CLUSTER_SIZE)) {
+ if (cl.e_addr <= (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
/*
* we have a write that fits entirely
* within the existing cluster limits
wbp->cl_clusters[cl_index].e_addr = cl.e_addr;
break;
}
- if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + MAX_CLUSTER_SIZE)) {
+ if (cl.b_addr < (wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount)) {
/*
* we have a write that starts in the middle of the current cluster
* but extends beyond the cluster's limit... we know this because
* note that we'll always have a leftover tail in this case since
* full absorbtion would have occurred in the clause above
*/
- wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + MAX_CLUSTER_SIZE;
+ wbp->cl_clusters[cl_index].e_addr = wbp->cl_clusters[cl_index].b_addr + max_cluster_pgcount;
if (upl_size) {
daddr64_t start_pg_in_upl;
/*
* the current write starts in front of the cluster we're currently considering
*/
- if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= MAX_CLUSTER_SIZE) {
+ if ((wbp->cl_clusters[cl_index].e_addr - cl.b_addr) <= max_cluster_pgcount) {
/*
* we can just merge the new request into
* this cluster and leave it in the cache
/*
* the current write completely
* envelops the existing cluster and since
- * each write is limited to at most MAX_CLUSTER_SIZE pages
+ * each write is limited to at most max_cluster_pgcount pages
* we can just use the start and last blocknos of the write
* to generate the cluster limits
*/
* get an intersection with the current write
*
*/
- if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - MAX_CLUSTER_SIZE) {
+ if (cl.e_addr > wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount) {
/*
* the current write extends into the proposed cluster
* clip the length of the current write after first combining its
* tail with the newly shaped cluster
*/
- wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - MAX_CLUSTER_SIZE;
+ wbp->cl_clusters[cl_index].b_addr = wbp->cl_clusters[cl_index].e_addr - max_cluster_pgcount;
if (upl_size) {
intersection = (int)((cl.e_addr - wbp->cl_clusters[cl_index].b_addr) * PAGE_SIZE);
upl_page_info_t *pl;
upl_t upl;
vm_offset_t upl_offset;
- int upl_size;
+ u_int32_t upl_size;
off_t upl_f_offset;
int start_offset;
int start_pg;
u_int32_t size_of_prefetch;
u_int32_t xsize;
u_int32_t io_size;
- u_int32_t max_rd_size = MAX_PREFETCH;
+ u_int32_t max_rd_size;
+ u_int32_t max_io_size;
+ u_int32_t max_prefetch;
u_int rd_ahead_enabled = 1;
u_int prefetch_enabled = 1;
struct cl_readahead * rap;
take_reference = 0;
if (flags & IO_PASSIVE)
- bflag = CL_PASSIVE;
+ bflag = CL_PASSIVE;
else
- bflag = 0;
+ bflag = 0;
+
+ max_prefetch = MAX_PREFETCH(vp);
+ max_rd_size = max_prefetch;
+ max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 32)) | DBG_FUNC_START,
(int)uio->uio_offset, io_req_size, (int)filesize, flags, 0);
* we can notice that our I/O pipe is running dry and
* get the next I/O issued before it does go dry
*/
- if (last_ioread_offset && io_size > ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4))
- io_resid = ((MAX_UPL_TRANSFER * PAGE_SIZE) / 4);
+ if (last_ioread_offset && io_size > (max_io_size / 4))
+ io_resid = (max_io_size / 4);
else
io_resid = io_size;
upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
if (flags & IO_NOCACHE) {
- if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
- upl_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
+ if (upl_size > max_io_size)
+ upl_size = max_io_size;
} else {
- if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE) / 4)
- upl_size = (MAX_UPL_TRANSFER * PAGE_SIZE) / 4;
+ if (upl_size > max_io_size / 4)
+ upl_size = max_io_size / 4;
}
pages_in_upl = upl_size / PAGE_SIZE;
lck_mtx_lock(cl_mtxp);
while (iostate.io_issued != iostate.io_completed) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_copy", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
rd_ahead_enabled = 1;
prefetch_enabled = 1;
- max_rd_size = MAX_PREFETCH;
+ max_rd_size = max_prefetch;
last_ioread_offset = 0;
}
}
u_int32_t xsize;
u_int32_t devblocksize;
u_int32_t mem_alignment_mask;
- u_int32_t max_rd_size = MAX_UPL_TRANSFER * PAGE_SIZE;
- u_int32_t max_rd_ahead = MAX_PREFETCH;
+ u_int32_t max_upl_size;
+ u_int32_t max_rd_size;
+ u_int32_t max_rd_ahead;
+
+
+ max_upl_size = cluster_max_io_size(vp->v_mount, CL_READ);
+
+ max_rd_size = max_upl_size;
+ max_rd_ahead = max_rd_size * 2;
+
if (flags & IO_PASSIVE)
- bflag = CL_PASSIVE;
+ bflag = CL_PASSIVE;
else
- bflag = 0;
+ bflag = 0;
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 70)) | DBG_FUNC_START,
(int)uio->uio_offset, (int)filesize, *read_type, *read_length, 0);
max_rd_size = HARD_THROTTLE_MAXSIZE;
max_rd_ahead = HARD_THROTTLE_MAXSIZE - 1;
} else {
- max_rd_size = MAX_UPL_TRANSFER * PAGE_SIZE;
- max_rd_ahead = MAX_PREFETCH;
+ max_rd_size = max_upl_size;
+ max_rd_ahead = max_rd_size * 2;
}
io_start = io_size = io_req_size;
lck_mtx_lock(cl_mtxp);
while ((iostate.io_issued - iostate.io_completed) > max_rd_ahead) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, max_rd_ahead, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
lck_mtx_lock(cl_mtxp);
while (iostate.io_issued != iostate.io_completed) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_direct", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
}
int bflag;
if (flags & IO_PASSIVE)
- bflag = CL_PASSIVE;
+ bflag = CL_PASSIVE;
else
- bflag = 0;
+ bflag = 0;
/*
* When we enter this routine, we know
if (iostate.io_issued) {
lck_mtx_lock(cl_mtxp);
- while ((iostate.io_issued - iostate.io_completed) > (3 * MAX_IO_CONTIG_SIZE)) {
+ while ((iostate.io_issued - iostate.io_completed) > (2 * MAX_IO_CONTIG_SIZE)) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 2 * MAX_IO_CONTIG_SIZE, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 2 * MAX_IO_CONTIG_SIZE, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
}
lck_mtx_lock(cl_mtxp);
while (iostate.io_issued != iostate.io_completed) {
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_START,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
+
iostate.io_wanted = 1;
msleep((caddr_t)&iostate.io_wanted, cl_mtxp, PRIBIO + 1, "cluster_read_contig", NULL);
+
+ KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 95)) | DBG_FUNC_END,
+ iostate.io_issued, iostate.io_completed, 0, 0, 0);
}
lck_mtx_unlock(cl_mtxp);
int retval = 0;
int issued_io;
int skip_range;
-
+ uint32_t max_io_size;
+
+
if ( !UBCINFOEXISTS(vp))
return(EINVAL);
+ max_io_size = cluster_max_io_size(vp->v_mount, CL_READ);
+
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 60)) | DBG_FUNC_START,
- (int)f_offset, resid, (int)filesize, 0, 0);
+ (int)f_offset, resid, (int)filesize, 0, 0);
while (resid && f_offset < filesize && retval == 0) {
/*
io_size = max_size;
upl_size = (start_offset + io_size + (PAGE_SIZE - 1)) & ~PAGE_MASK;
- if (upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
- upl_size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ if ((uint32_t)upl_size > max_io_size)
+ upl_size = max_io_size;
skip_range = 0;
/*
int cl_len;
int cl_pushed = 0;
struct cl_wextent l_clusters[MAX_CLUSTERS];
-
+ u_int max_cluster_pgcount;
+
+
+ max_cluster_pgcount = MAX_CLUSTER_SIZE(vp) / PAGE_SIZE;
/*
* the write behind context exists and has
* already been locked...
* of order... if this occurs at the tail of the last cluster, we don't want to fall into the sparse cluster world...
*/
for (i = 0; i < MAX_CLUSTERS - 1; i++) {
- if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != MAX_CLUSTER_SIZE)
+ if ((l_clusters[i].e_addr - l_clusters[i].b_addr) != max_cluster_pgcount)
goto dont_try;
if (l_clusters[i].e_addr != l_clusters[i+1].b_addr)
goto dont_try;
// (as long as it's not an event type that can never be the
// same as a previous event)
//
- if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE) {
+ if (type != FSE_CREATE_FILE && type != FSE_DELETE && type != FSE_RENAME && type != FSE_EXCHANGE && type != FSE_CHOWN) {
void *ptr=NULL;
int vid=0, was_str=0, nlen=0;
VATTR_WANTED(&va, va_mode);
VATTR_WANTED(&va, va_uid);
VATTR_WANTED(&va, va_gid);
- if ((ret = vnode_getattr(vp, &va, ctx)) != 0) {
+ if ((ret = vnode_getattr(vp, &va, vfs_context_kernel())) != 0) {
// printf("add_fsevent: failed to getattr on vp %p (%d)\n", cur->fref.vp, ret);
cur->str = NULL;
error = EINVAL;
}
int
-get_fse_info(struct vnode *vp, fse_info *fse, vfs_context_t ctx)
+get_fse_info(struct vnode *vp, fse_info *fse, __unused vfs_context_t ctx)
{
struct vnode_attr va;
}
}
- if (vnode_getattr(vp, &va, ctx) != 0) {
+ if (vnode_getattr(vp, &va, vfs_context_kernel()) != 0) {
memset(fse, 0, sizeof(fse_info));
return -1;
}
*dst-- = '\\';
count--;
}
- *src--;
+ src--;
}
}
last_sequence_num = blhdr->binfo[0].b.sequence_num;
if (blhdr_offset >= jnl->jhdr->end && jnl->jhdr->start <= jnl->jhdr->end) {
+ if (last_sequence_num == 0) {
+ check_past_jnl_end = 0;
+ printf("jnl: %s: pre-sequence-num-enabled txn's - can not go further than end (%lld %lld).\n",
+ jnl->jdev_name, jnl->jhdr->start, jnl->jhdr->end);
+ if (jnl->jhdr->start != jnl->jhdr->end) {
+ jnl->jhdr->start = jnl->jhdr->end;
+ }
+ continue;
+ }
printf("jnl: %s: examining extra transactions starting @ %lld / 0x%llx\n", jnl->jdev_name, blhdr_offset, blhdr_offset);
}
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
static void vnode_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
+static void vnode_list_remove_locked(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
* Alias, but not in use, so flush it out.
*/
if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
+ vnode_put_locked(vp);
vnode_unlock(vp);
- vnode_put(vp);
goto loop;
}
}
if (vp != NULLVP) {
nvp->v_flag |= VALIASED;
vp->v_flag |= VALIASED;
+ vnode_put_locked(vp);
vnode_unlock(vp);
- vnode_put(vp);
}
return (NULLVP);
}
vnode_list_unlock();
}
+
+/*
+ * remove the vnode from appropriate free list.
+ * called with vnode LOCKED and
+ * the list lock held
+ */
+static void
+vnode_list_remove_locked(vnode_t vp)
+{
+ if (VONLIST(vp)) {
+ /*
+ * the v_listflag field is
+ * protected by the vnode_list_lock
+ */
+ if (vp->v_listflag & VLIST_RAGE)
+ VREMRAGE("vnode_list_remove", vp);
+ else if (vp->v_listflag & VLIST_DEAD)
+ VREMDEAD("vnode_list_remove", vp);
+ else
+ VREMFREE("vnode_list_remove", vp);
+ }
+}
+
+
/*
* remove the vnode from appropriate free list.
+ * called with vnode LOCKED
*/
static void
vnode_list_remove(vnode_t vp)
/*
* however, we're not guaranteed that
* we won't go from the on-list state
- * to the non-on-list state until we
+ * to the not-on-list state until we
* hold the vnode_list_lock... this
- * is due to new_vnode removing vnodes
+ * is due to "new_vnode" removing vnodes
* from the free list under the list_lock
* w/o the vnode lock... so we need to
* check again whether we're currently
* on the free list
*/
- if (VONLIST(vp)) {
- if (vp->v_listflag & VLIST_RAGE)
- VREMRAGE("vnode_list_remove", vp);
- else if (vp->v_listflag & VLIST_DEAD)
- VREMDEAD("vnode_list_remove", vp);
- else
- VREMFREE("vnode_list_remove", vp);
+ vnode_list_remove_locked(vp);
- VLISTNONE(vp);
- }
vnode_list_unlock();
}
}
goto defer_reclaim;
}
vnode_lock_convert(vp);
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
}
vnode_dropiocount(vp);
vnode_list_add(vp);
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
vnode_dropiocount(vp);
vnode_list_add(vp);
-
vnode_unlock(vp);
+
reclaimed++;
mount_lock(mp);
continue;
#ifdef JOE_DEBUG
record_vp(vp, 1);
#endif
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
vnode_dropiocount(vp);
vnode_list_add(vp);
vnode_unlock(vp);
int already_terminating;
int clflags = 0;
+#if NAMEDSTREAMS
+ int is_namedstream;
+#endif
+
/*
* Check to see if the vnode is in use.
* If so we have to reference it before we clean it out
*/
insmntque(vp, (struct mount *)0);
+#if NAMEDSTREAMS
+ is_namedstream = vnode_isnamedstream(vp);
+#endif
+
vnode_unlock(vp);
OSAddAtomic(1, &num_recycledvnodes);
if (active || need_inactive)
VNOP_INACTIVE(vp, ctx);
+#if NAMEDSTREAMS
+ /* Delete the shadow stream file before we reclaim its vnode */
+ if ((is_namedstream != 0) &&
+ (vp->v_parent != NULLVP) &&
+ ((vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0)) {
+ vnode_relenamedstream(vp->v_parent, vp, ctx);
+ }
+#endif
+
/*
* Destroy ubc named reference
* cluster_release is done on this path
SPECHASH_LOCK();
break;
}
- vnode_reclaim_internal(vq, 0, 0, 0);
+ vnode_reclaim_internal(vq, 0, 1, 0);
vnode_put(vq);
SPECHASH_LOCK();
break;
return(0);
}
vnode_reclaim_internal(vp, 1, 0, 0);
+
vnode_unlock(vp);
return (1);
/*
* Alias, but not in use, so flush it out.
*/
- vnode_reclaim_internal(vq, 1, 0, 0);
+ vnode_reclaim_internal(vq, 1, 1, 0);
+ vnode_put_locked(vq);
vnode_unlock(vq);
- vnode_put(vq);
goto loop;
}
count += (vq->v_usecount - vq->v_kusecount);
mac_vnode_label_init(vp);
#endif /* MAC */
+ vp->v_iocount = 1;
goto done;
}
steal_this_vp:
vpid = vp->v_id;
- /*
- * the v_listflag field is
- * protected by the vnode_list_lock
- */
- if (vp->v_listflag & VLIST_DEAD)
- VREMDEAD("new_vnode", vp);
- else if (vp->v_listflag & VLIST_RAGE)
- VREMRAGE("new_vnode", vp);
- else
- VREMFREE("new_vnode", vp);
- VLISTNONE(vp);
+ vnode_list_remove_locked(vp);
vnode_list_unlock();
vnode_lock_spin(vp);
if (vp->v_lflag & VL_DEAD)
panic("new_vnode: the vnode is VL_DEAD but not VBAD");
vnode_lock_convert(vp);
-
(void)vnode_reclaim_internal(vp, 1, 1, 0);
if ((VONLIST(vp)))
}
#endif /* MAC */
+ vp->v_iocount = 1;
vp->v_lflag = 0;
vp->v_writecount = 0;
vp->v_references = 0;
if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
vnode_lock_convert(vp);
- vnode_reclaim_internal(vp, 1, 0, 0);
+ vnode_reclaim_internal(vp, 1, 1, 0);
}
vnode_dropiocount(vp);
vnode_list_add(vp);
* once new_vnode drops the list_lock, it will block trying to take
* the vnode lock until we release it... at that point it will evaluate
* whether the v_vid has changed
+ * also need to make sure that the vnode isn't on a list where "new_vnode"
+ * can find it after the v_id has been bumped until we are completely done
+ * with the vnode (i.e. putting it back on a list has to be the very last
+ * thing we do to this vnode... many of the callers of vnode_reclaim_internal
+ * are holding an io_count on the vnode... they need to drop the io_count
+ * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
+ * they are completely done with the vnode
*/
vnode_list_lock();
+
+ vnode_list_remove_locked(vp);
vp->v_id++;
+
vnode_list_unlock();
if (isfifo) {
if (vp->v_data)
panic("vnode_reclaim_internal: cleaned vnode isn't");
if (vp->v_numoutput)
- panic("vnode_reclaim_internal: Clean vnode has pending I/O's");
+ panic("vnode_reclaim_internal: clean vnode has pending I/O's");
if (UBCINFOEXISTS(vp))
panic("vnode_reclaim_internal: ubcinfo not cleaned");
if (vp->v_parent)
vp->v_lflag &= ~VL_TERMWANT;
wakeup(&vp->v_lflag);
}
- if (!reuse && vp->v_usecount == 0) {
+ if (!reuse) {
/*
* make sure we get on the
- * dead list
+ * dead list if appropriate
*/
- vnode_list_remove(vp);
vnode_list_add(vp);
}
if (!locked)
}
/* USAGE:
- * The following api creates a vnode and associates all the parameter specified in vnode_fsparam
- * structure and returns a vnode handle with a reference. device aliasing is handled here so checkalias
- * is obsoleted by this.
* vnode_create(int flavor, size_t size, void * param, vnode_t *vp)
*/
int
vp->v_op = param->vnfs_vops;
vp->v_type = param->vnfs_vtype;
vp->v_data = param->vnfs_fsnode;
- vp->v_iocount = 1;
if (param->vnfs_markroot)
vp->v_flag |= VROOT;
cpos += dp->d_reclen;
dp = (struct dirent*)cpos;
}
+
+ /*
+ * workaround for HFS/NFS setting eofflag before end of file
+ */
+ if (vp->v_tag == VT_HFS && nentries > 2)
+ eofflag = 0;
+
+ if (vp->v_tag == VT_NFS) {
+ if (eofflag && !full_erase_flag) {
+ full_erase_flag = 1;
+ eofflag = 0;
+ uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
+ }
+ else if (!eofflag && full_erase_flag)
+ full_erase_flag = 0;
+ }
} while (!eofflag);
/*
uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
eofflag = 0;
+ full_erase_flag = 0;
do {
siz = UIO_BUFF_SIZE;
/*
- * Copyright (c) 1995-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1995-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
int niopts;
struct vfs_context context;
+#if NAMEDRSRCFORK
+ int is_namedstream = 0;
+#endif
+
/*
* Access is defined as checking against the process'
* real identity, even if operations are checking the
if (error)
goto out;
+#if NAMEDRSRCFORK
+ /* Grab reference on the shadow stream file vnode to
+ * force an inactive on release which will mark it for
+ * recycle
+ */
+ if (vnode_isnamedstream(nd.ni_vp) &&
+ (nd.ni_vp->v_parent != NULLVP) &&
+ ((nd.ni_vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0)) {
+ is_namedstream = 1;
+ vnode_ref(nd.ni_vp);
+ }
+#endif
+
error = access1(nd.ni_vp, nd.ni_dvp, uap->flags, &context);
+#if NAMEDRSRCFORK
+ if (is_namedstream) {
+ vnode_rele(nd.ni_vp);
+ }
+#endif
+
vnode_put(nd.ni_vp);
if (uap->flags & _DELETE_OK)
vnode_put(nd.ni_dvp);
void * statptr;
#if NAMEDRSRCFORK
+ int is_namedstream = 0;
/* stat calls are allowed for resource forks. */
ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK;
#endif
statptr = (void *)&sb64;
else
statptr = (void *)&sb;
+
+#if NAMEDRSRCFORK
+ /* Grab reference on the shadow stream file vnode to
+ * force an inactive on release which will mark it for
+ * recycle.
+ */
+ if (vnode_isnamedstream(ndp->ni_vp) &&
+ (ndp->ni_vp->v_parent != NULLVP) &&
+ ((ndp->ni_vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS) == 0)) {
+ is_namedstream = 1;
+ vnode_ref (ndp->ni_vp);
+ }
+#endif
+
error = vn_stat(ndp->ni_vp, statptr, (xsecurity != USER_ADDR_NULL ? &fsec : NULL), isstat64, ctx);
#if NAMEDRSRCFORK
- /* Clean up resource fork shadow file if needed. */
- if ((ndp->ni_vp->v_flag & VISNAMEDSTREAM) &&
- (ndp->ni_vp->v_parent != NULLVP) &&
- !(ndp->ni_vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS)) {
- (void) vnode_relenamedstream(ndp->ni_vp->v_parent, ndp->ni_vp, ctx);
+ if (is_namedstream) {
+ vnode_rele (ndp->ni_vp);
}
#endif
+
vnode_put(ndp->ni_vp);
nameidone(ndp);
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#endif
#if NAMEDRSRCFORK
- /* Clean up resource fork shadow file if needed. */
+ /* Sync data from resource fork shadow file if needed. */
if ((vp->v_flag & VISNAMEDSTREAM) &&
(vp->v_parent != NULLVP) &&
!(vp->v_parent->v_mount->mnt_kern_flag & MNTK_NAMED_STREAMS)) {
if (flags & FWASWRITTEN) {
(void) vnode_flushnamedstream(vp->v_parent, vp, ctx);
}
- /* XXX failure ignored */
- vnode_relenamedstream(vp->v_parent, vp, ctx);
}
#endif
error = VNOP_CLOSE(vp, flags, ctx);
/*
- * Copyright (c) 2004-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2004-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
vnode_lock(svp);
svp->v_flag |= VISNAMEDSTREAM;
vnode_unlock(svp);
- /* Make the file it's parent. */
+ /* Make the file its parent.
+ * Note: This parent link helps us distinguish vnodes for
+ * shadow stream files from vnodes for the resource fork on file
+ * systems that support named streams natively (both have
+ * VISNAMEDSTREAM set) by allowing access to the mount structure
+ * for checking the MNTK_NAMED_STREAMS bit at many places in the code.
+ */
vnode_update_identity(svp, vp, NULL, 0, 0, VNODE_UPDATE_PARENT);
}
/* Tag the vnode. */
vnode_lock(svp);
svp->v_flag |= VISNAMEDSTREAM;
- vnode_unlock(svp);
- /* Make the file it's parent. */
+ vnode_unlock(svp);
+ /* Make the file its parent.
+ * Note: This parent link helps us distinguish vnodes for
+ * shadow stream files from vnodes for the resource fork on file
+ * systems that support named streams natively (both have
+ * VISNAMEDSTREAM set) by allowing access to the mount structure
+ * for checking the MNTK_NAMED_STREAMS bit at many places in the code.
+ */
vnode_update_identity(svp, vp, NULL, 0, 0, VNODE_UPDATE_PARENT);
}
return (error);
/*
* Release a named stream shadow file.
+ *
+ * Note: This function is called from two places where we do not need
+ * to check if the vnode has any references held before deleting the
+ * shadow file. Once from vclean() when the vnode is being reclaimed
+ * and we do not hold any references on the vnode. Second time from
+ * default_getnamedstream() when we get an error during shadow stream
+ * file initialization so that other processes who are waiting for the
+ * shadow stream file initialization by the creator will get opportunity
+ * to create and initialize the file again.
*/
errno_t
vnode_relenamedstream(vnode_t vp, vnode_t svp, vfs_context_t context)
char tmpname[48];
errno_t err;
- if (vnode_isinuse(svp, 1)) {
- return (EBUSY);
- }
cache_purge(svp);
vnode_lock(svp);
if (err != 0) {
return err;
}
- /* Check for busy svp one last time. */
- if (vnode_isinuse(svp, 1) == 0) {
- (void) VNOP_REMOVE(dvp, svp, &cn, 0, context);
- (void) vnode_recycle(svp);
- }
+
+ (void) VNOP_REMOVE(dvp, svp, &cn, 0, context);
vnode_put(dvp);
return (0);
wakeup((caddr_t)&svp->v_parent);
vnode_unlock(svp);
} else {
- /* On post create errors, get rid of shadow file. */
- (void)vnode_relenamedstream(vp, svp, context);
-
+ /* On post create errors, get rid of the shadow file. This
+ * way, if there is another process waiting for initialization
+ * of the shadow file by the current process, it will wake up
+ * and retry by creating and initializing the shadow file again.
+ */
+ (void) vnode_relenamedstream(vp, svp, context);
wakeup((caddr_t)&svp->v_parent);
}
}
if (p->task != TASK_NULL) {
/* If we aren't root and target's task access port is set... */
if (!kauth_cred_issuser(kauth_cred_get()) &&
+ p != current_proc() &&
(task_get_task_access_port(p->task, &tfpport) == 0) &&
(tfpport != IPC_PORT_NULL)) {
goto out;
}
if (upl == (upl_t)NULL) {
- if (size > (MAX_UPL_TRANSFER * PAGE_SIZE)) {
+ if (size > (MAX_UPL_SIZE * PAGE_SIZE)) {
- panic("vnode_pagein: size = %x\n", size);
+ panic("vnode_pagein: size = %x\n", size);
result = PAGER_ERROR;
error = PAGER_ERROR;
if (upl == (upl_t)NULL) {
- panic("vnode_pagein: ubc_create_upl failed\n");
+ panic("vnode_pagein: ubc_create_upl failed\n");
result = PAGER_ABSENT;
error = PAGER_ABSENT;
-9.2.0
+9.3.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
Segment64 segment,
void *segments,
UInt32 segmentIndex);
- IOReturn IODMACommand::walkAll(UInt8 op);
+ IOReturn walkAll(UInt8 op);
public:
#include <IOKit/IODMAEventSource.h>
#include <IOKit/IOService.h>
+class IODMAEventSource;
+
class IODMAController : public IOService
{
OSDeclareAbstractStructors(IODMAController);
#include <IOKit/IODMAController.h>
#include <IOKit/IOEventSource.h>
+class IODMAController;
+
class IODMAEventSource : public IOEventSource
{
OSDeclareDefaultStructors(IODMAEventSource);
enum {
kOFVariablePermRootOnly = 0,
kOFVariablePermUserRead,
- kOFVariablePermUserWrite
+ kOFVariablePermUserWrite,
+ kOFVariablePermKernelOnly
};
class IODTNVRAM : public IOService
#define kIOPMPSLegacyBatteryInfoKey "LegacyBatteryInfo"
#define kIOPMPSBatteryHealthKey "BatteryHealth"
#define kIOPMPSHealthConfidenceKey "HealthConfidence"
-#define kIOPMPSCapacityEstimatedKey "CapacityEstimated"
+#define kIOPMPSCapacityEstimatedKey "CapacityEstimated"
+#define kIOPMPSBatteryChargeStatusKey "ChargeStatus"
+
+// kIOPMPSBatteryChargeStatusKey may have one of the following values, or may have
+// no value at all. A NULL or absent value means the charge is proceeding normally.
+// If one of these battery charge status reasons is listed, the charge may have been
+// interrupted.
+#define kIOPMBatteryChargeStatusTooHot "HighTemperature"
+#define kIOPMBatteryChargeStatusTooCold "LowTemperature"
+#define kIOPMBatteryChargeStatusGradient "BatteryTemperatureGradient"
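
A hypothetical consumer-side check of the new key (a sketch, not part of the patch): an absent or NULL ChargeStatus value means the charge is proceeding normally, while any of the three strings above flags an interrupted charge.

    #include <string.h>

    /* charge_interrupted: "status" is whatever C string (or NULL) was read back
     * for kIOPMPSBatteryChargeStatusKey; how it was fetched is out of scope here. */
    static int
    charge_interrupted(const char *status)
    {
        if (status == NULL)
            return 0;   /* no value: charging normally */

        return strcmp(status, kIOPMBatteryChargeStatusTooHot) == 0 ||
               strcmp(status, kIOPMBatteryChargeStatusTooCold) == 0 ||
               strcmp(status, kIOPMBatteryChargeStatusGradient) == 0;
    }
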
// Definitions for battery location, in case of multiple batteries.
// A location of 0 is unspecified
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+#define kBootCPUNumber 0
static iocpu_platform_action_entry_t * gIOAllActionsQueue;
static queue_head_t gIOSleepActionQueue;
{
long cnt, numCPUs;
IOCPU *target;
+ IOCPU *bootCPU = NULL;
kprintf("IOCPUSleepKernel\n");
numCPUs = gIOCPUs->getCount();
// Sleep the CPUs.
cnt = numCPUs;
- while (cnt--) {
- target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
- if (target->getCPUState() == kIOCPUStateRunning) {
- target->haltCPU();
- }
+ while (cnt--)
+ {
+ target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
+
+ // We make certain that the bootCPU is the last to sleep
+ // We'll skip it for now, and halt it after finishing the
+ // non-boot CPUs.
+ if (target->getCPUNumber() == kBootCPUNumber)
+ {
+ bootCPU = target;
+ } else if (target->getCPUState() == kIOCPUStateRunning)
+ {
+ target->haltCPU();
+ }
}
+ // Now sleep the boot CPU.
+ if (bootCPU)
+ bootCPU->haltCPU();
+
iocpu_run_platform_actions(&gIOWakeActionQueue, 0, 0UL-1,
NULL, NULL, NULL);
IOPanic("gIOWakeActionQueue");
// Wake the other CPUs.
- for (cnt = 1; cnt < numCPUs; cnt++) {
- target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
- if (target->getCPUState() == kIOCPUStateStopped) {
- processor_start(target->getMachProcessor());
- }
+ for (cnt = 0; cnt < numCPUs; cnt++)
+ {
+ target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));
+
+ // Skip the already-woken boot CPU.
+ if ((target->getCPUNumber() != kBootCPUNumber)
+ && (target->getCPUState() == kIOCPUStateStopped))
+ {
+ processor_start(target->getMachProcessor());
+ }
}
}
// Check if the interrupt source can/should be shared.
canBeShared = vectorCanBeShared(vectorNumber, vector);
IODTGetInterruptOptions(nub, source, &options);
+#if defined(__i386__) || defined(__x86_64__)
+ int interruptType;
+ if (OSDynamicCast(IOPlatformDevice, getProvider()) &&
+ (getInterruptType(nub, source, &interruptType) == kIOReturnSuccess) &&
+ (kIOInterruptTypeLevel & interruptType))
+ {
+ options |= kIODTInterruptShared;
+ }
+#endif
shouldBeShared = canBeShared && (options & kIODTInterruptShared);
wasAlreadyRegisterd = vector->interruptRegistered;
IOReturn err;
IOMemoryDescriptorMapAllocRef ref;
- ref.sharedMem = entry;
- ref.sourceOffset = offset;
- ref.options = options;
+ ref.sharedMem = entry;
+ ref.sourceOffset = trunc_page_64(offset);
+ ref.options = options;
ref.size = length;
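
The map-alloc reference now carries a page-aligned source offset. For reference, a tiny sketch of the rounding trunc_page_64 performs, assuming 4 KB pages (the mask here is an assumption, not taken from this patch):

    #include <stdint.h>

    #define SK_PAGE_MASK            0xFFFull        /* 4 KB pages assumed */
    #define sk_trunc_page_64(x)     ((uint64_t)(x) & ~SK_PAGE_MASK)

    /* sk_trunc_page_64(0x12345) == 0x12000; the dropped 0x345 is the offset
     * of the original address within its first mapped page. */
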
bool IODTNVRAM::serializeProperties(OSSerialize *s) const
{
- bool result;
+ bool result, hasPrivilege;
UInt32 variablePerm;
const OSSymbol *key;
- OSDictionary *dict, *tmpDict = 0;
+ OSDictionary *dict = 0, *tmpDict = 0;
OSCollectionIterator *iter = 0;
if (_ofDict == 0) return false;
// Verify permissions.
- result = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege);
- if (result != kIOReturnSuccess) {
- tmpDict = OSDictionary::withCapacity(1);
- if (tmpDict == 0) return false;
+ hasPrivilege = (kIOReturnSuccess == IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege));
+
+ tmpDict = OSDictionary::withCapacity(1);
+ if (tmpDict == 0) return false;
- iter = OSCollectionIterator::withCollection(_ofDict);
- if (iter == 0) return false;
+ iter = OSCollectionIterator::withCollection(_ofDict);
+ if (iter == 0) return false;
- while (1) {
- key = OSDynamicCast(OSSymbol, iter->getNextObject());
- if (key == 0) break;
+ while (1) {
+ key = OSDynamicCast(OSSymbol, iter->getNextObject());
+ if (key == 0) break;
- variablePerm = getOFVariablePerm(key);
- if (variablePerm != kOFVariablePermRootOnly) {
- tmpDict->setObject(key, _ofDict->getObject(key));
- }
+ variablePerm = getOFVariablePerm(key);
+ if ((hasPrivilege || (variablePerm != kOFVariablePermRootOnly)) &&
+ ( ! (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) )) {
+ tmpDict->setObject(key, _ofDict->getObject(key));
}
dict = tmpDict;
- } else {
- dict = _ofDict;
}
-
+
result = dict->serialize(s);
if (tmpDict != 0) tmpDict->release();
if (_ofDict == 0) return 0;
// Verify permissions.
+ variablePerm = getOFVariablePerm(aKey);
result = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege);
if (result != kIOReturnSuccess) {
- variablePerm = getOFVariablePerm(aKey);
if (variablePerm == kOFVariablePermRootOnly) return 0;
}
+ if (variablePerm == kOFVariablePermKernelOnly && current_task() != kernel_task) return 0;
return _ofDict->getObject(aKey);
}
if (_ofDict == 0) return false;
// Verify permissions.
+ propPerm = getOFVariablePerm(aKey);
result = IOUserClient::clientHasPrivilege(current_task(), kIONVRAMPrivilege);
if (result != kIOReturnSuccess) {
- propPerm = getOFVariablePerm(aKey);
if (propPerm != kOFVariablePermUserWrite) return false;
}
-
+ if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) return 0;
+
// Don't allow creation of new properties on old world machines.
if (getPlatform()->getBootROMType() == 0) {
if (_ofDict->getObject(aKey) == 0) return false;
if (_ofDict == 0) return;
// Verify permissions.
+ propPerm = getOFVariablePerm(aKey);
result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
if (result != kIOReturnSuccess) {
- propPerm = getOFVariablePerm(aKey);
if (propPerm != kOFVariablePermUserWrite) return;
}
+ if (propPerm == kOFVariablePermKernelOnly && current_task() != kernel_task) return;
// Don't allow removal of properties on old world machines.
if (getPlatform()->getBootROMType() == 0) return;
{"security-mode", kOFVariableTypeString, kOFVariablePermUserRead, -1},
{"security-password", kOFVariableTypeData, kOFVariablePermRootOnly, -1},
{"boot-image", kOFVariableTypeData, kOFVariablePermUserWrite, -1},
+ {"com.apple.System.fp-state", kOFVariableTypeData, kOFVariablePermKernelOnly, -1},
{0, kOFVariableTypeData, kOFVariablePermUserRead, -1}
};
fDeviceOverrides = false;
fMachineState = kIOPM_Finished;
fIdleTimerEventSource = NULL;
+ fIdleTimerMinPowerState = 0;
fActivityLock = IOLockAlloc();
fClampOn = false;
fStrictTreeOrder = false;
PM_ASSERT_IN_GATE();
OUR_PMLog(kPMLogWillChange, newPowerFlags, 0);
- if (!inPlane(gIOPowerPlane))
+ if (!inPlane(gIOPowerPlane) || !whichParent || !whichParent->getAwaitingAck())
{
PM_DEBUG("[%s] %s: not in power tree\n", getName(), __FUNCTION__);
- return;
+ goto exit_no_ack;
}
savedParentsKnowState = fParentsKnowState;
getName());
ask_parent( fDesiredPowerState );
}
+
+exit_no_ack:
+ // Drop the retain from notifyChild().
+ if (whichParent) whichParent->release();
}
//*********************************************************************************
PM_ASSERT_IN_GATE();
OUR_PMLog(kPMLogDidChange, newPowerFlags, 0);
- if (!inPlane(gIOPowerPlane))
+ if (!inPlane(gIOPowerPlane) || !whichParent || !whichParent->getAwaitingAck())
{
PM_DEBUG("[%s] %s: not in power tree\n", getName(), __FUNCTION__);
- return;
+ goto exit_no_ack;
}
savedParentsKnowState = fParentsKnowState;
getName());
ask_parent( fDesiredPowerState );
}
+
+exit_no_ack:
+ // Drop the retain from notifyChild().
+ if (whichParent) whichParent->release();
}
//*********************************************************************************
childRequest = acquirePMRequest( theChild, requestType );
if (childRequest)
{
+ theNub->retain();
childRequest->fArg0 = (void *) fHeadNoteOutputFlags;
childRequest->fArg1 = (void *) theNub;
childRequest->fArg2 = (void *) (fHeadNoteState < fCurrentPowerState);
if (fCurrentCapabilityFlags & kIOPMStaticPowerValid)
fCurrentPowerConsumption = powerStatePtr->staticPower;
}
+
+ // When power rises enough to satisfy the tickle's desire for more power,
+ // the condition preventing idle-timer from dropping power is removed.
+
+ if (fCurrentPowerState >= fIdleTimerMinPowerState)
+ {
+ fIdleTimerMinPowerState = 0;
+ }
}
//*********************************************************************************
if (request->fArg1)
{
- // power rise
- if (fDeviceDesire < (unsigned long) request->fArg0)
+ // Power rise from activity tickle.
+ unsigned long ticklePowerState = (unsigned long) request->fArg0;
+ if ((fDeviceDesire < ticklePowerState) &&
+ (ticklePowerState < fNumberOfPowerStates))
+ {
setDeviceDesire = true;
- }
- else if (fDeviceDesire)
+ fIdleTimerMinPowerState = ticklePowerState;
+ }
+ }
+ else if (fDeviceDesire > fIdleTimerMinPowerState)
{
- // power drop and deviceDesire is not zero
+ // Power drop from idle timer expiration.
request->fArg0 = (void *) (fDeviceDesire - 1);
setDeviceDesire = true;
}
Idle timer's period in seconds.
*/
unsigned long IdleTimerPeriod;
+ unsigned long IdleTimerMinPowerState;
/*! @var DriverDesire
Power state desired by our controlling driver.
#define fActivityLock pwrMgt->ActivityLock
#define fIdleTimerEventSource pwrMgt->IdleTimerEventSource
#define fIdleTimerPeriod pwrMgt->IdleTimerPeriod
+#define fIdleTimerMinPowerState pwrMgt->IdleTimerMinPowerState
#define fDeviceActive pwrMgt->DeviceActive
#define fDeviceActiveTimestamp pwrMgt->DeviceActiveTimestamp
#define fDriverDesire pwrMgt->DriverDesire
len = strlen( kIODeviceTreePlane ":" );
maxLen -= len;
- if( maxLen < 0)
+ if( maxLen <= 0)
continue;
- strcpy( buf, kIODeviceTreePlane ":" );
+ strlcpy( buf, kIODeviceTreePlane ":", len + 1 );
comp = buf + len;
// remove parameters following ':' from the path
len = skip - path;
maxLen -= len;
- if( maxLen < 0)
+ if( maxLen <= 0)
continue;
- strncpy( comp, path, len );
- comp[ len ] = 0;
+ strlcpy( comp, path, len + 1 );
matching = IOService::serviceMatching( "IONetworkInterface" );
if( !matching)
long partition = -1;
long lun = -1;
char c;
+ int len;
// scan the tail of the path for "@unit:partition"
do {
if( c || unit == -1 || partition == -1)
continue;
- maxLen -= strlen( "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" );
- maxLen -= ( alias ? strlen( alias ) : 0 ) + (look - path);
- maxLen -= strlen( "/@hhhhhhhh,hhhhhhhh:dddddddddd';}" );
+ len = strlen( "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" );
+ maxLen -= len;
+ if( maxLen <= 0)
+ continue;
- if( maxLen > 0) {
- sprintf( buf, "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" );
- comp = buf + strlen( buf );
-
- if( alias) {
- strcpy( comp, alias );
- comp += strlen( alias );
- }
-
- if ( (look - path)) {
- strncpy( comp, path, look - path);
- comp += look - path;
- }
+ snprintf( buf, len + 1, "{" kIOPathMatchKey "='" kIODeviceTreePlane ":" );
+ comp = buf + len;
+
+ if( alias) {
+ len = strlen( alias );
+ maxLen -= len;
+ if( maxLen <= 0)
+ continue;
+
+ strlcpy( comp, alias, len + 1 );
+ comp += len;
+ }
+
+ if ( (look - path)) {
+ len = (look - path);
+ maxLen -= len;
+ if( maxLen <= 0)
+ continue;
+
+ strlcpy( comp, path, len + 1 );
+ comp += len;
+ }
- if ( lun != -1 )
- {
- sprintf ( comp, "/@%lx,%lx:%ld';}", unit, lun, partition );
- }
- else
- {
- sprintf( comp, "/@%lx:%ld';}", unit, partition );
- }
- } else
- continue;
+ if ( lun != -1 )
+ {
+ len = strlen( "/@hhhhhhhh,hhhhhhhh:dddddddddd';}" );
+ maxLen -= len;
+ if( maxLen <= 0)
+ continue;
+
+ snprintf( comp, len + 1, "/@%lx,%lx:%ld';}", unit, lun, partition );
+ }
+ else
+ {
+ len = strlen( "/@hhhhhhhh:dddddddddd';}" );
+ maxLen -= len;
+ if( maxLen <= 0)
+ continue;
+
+ snprintf( comp, len + 1, "/@%lx:%ld';}", unit, partition );
+ }
return( OSDynamicCast(OSDictionary, OSUnserialize( buf, 0 )) );
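
These hunks replace unbounded strcpy/strncpy/sprintf calls with strlcpy/snprintf and bail out (continue) as soon as the remaining buffer space is exhausted. As a reminder of the semantics relied on, a sketch (not part of the patch): strlcpy always NUL-terminates and returns the length of the string it tried to create, so a return value greater than or equal to the buffer size signals truncation.

    #include <string.h>         /* strlcpy is available in Darwin's libc and libkern */

    /* copy_component: hypothetical helper showing the truncation check. */
    static int
    copy_component(char *dst, const char *src, size_t dstsize)
    {
        return (strlcpy(dst, src, dstsize) < dstsize) ? 0 : -1;    /* -1 == truncated */
    }
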
len = strlen( kIODeviceTreePlane ":" );
maxLen -= len;
- if( maxLen < 0)
+ if( maxLen <= 0)
continue;
- strcpy( buf, kIODeviceTreePlane ":" );
+ strlcpy( buf, kIODeviceTreePlane ":", len + 1 );
comp = buf + len;
len = strlen( path );
maxLen -= len;
- if( maxLen < 0)
+ if( maxLen <= 0)
continue;
- strncpy( comp, path, len );
- comp[ len ] = 0;
+ strlcpy( comp, path, len + 1 );
matching = OSDictionary::withCapacity( 1 );
if( !matching)
static int didRam = 0;
-kern_return_t IOFindBSDRoot( char * rootName,
+kern_return_t IOFindBSDRoot( char * rootName, unsigned int rootNameSize,
dev_t * root, u_int32_t * oflags )
{
mach_timespec_t t;
iostr = (OSString *) service->getProperty( kIOBSDNameKey );
if( iostr)
- strcpy( rootName, iostr->getCStringNoCopy() );
+ strlcpy( rootName, iostr->getCStringNoCopy(), rootNameSize );
off = (OSNumber *) service->getProperty( kIOBSDMajorKey );
if( off)
mjr = off->unsigned32BitValue();
} else {
IOLog( "Wait for root failed\n" );
- strcpy( rootName, "en0");
+ strlcpy( rootName, "en0", rootNameSize );
flags |= 1;
}
OSData * moduleInfo = 0; // must release
MkextEntryInfo module_info;
+ IORegistryEntry * root;
+ OSData * checksumObj;
+
if (vaddr) {
// addExtensionsFromArchive passes a kernel virtual address
mkext_data = (mkext_header *)mkext_file_info->paddr;
goto finish;
}
- IORegistryEntry * root = IORegistryEntry::getRegistryRoot();
+ root = IORegistryEntry::getRegistryRoot();
assert(root);
- OSData * checksumObj = OSData::withBytes((void *)&checksum,
+ checksumObj = OSData::withBytes((void *)&checksum,
sizeof(checksum));
assert(checksumObj);
if (checksumObj) {
# default architecture configuration = system architecture where you are running make.
# default machine configuration for ppc = none at this time.
# default machine configuration for i386 = none at this time.
-# default machine configuration for arm = "S5L8900XRB".
+# default machine configuration for arm = "S5L8900X".
#
ifndef TARGET_CONFIGS_UC
ifdef TARGET_CONFIGS
# We must not use -fno-keep-inline-functions, or it will remove the dtrace
# probes from the kernel.
#
-export CFLAGS_GEN = -static $(DEBUG_CFLAGS) -nostdinc -nostdlib -no-cpp-precomp \
+export CFLAGS_GEN = -static $(DEBUG_CFLAGS) -nostdinc -nostdlib \
-fno-builtin -finline -msoft-float \
-fsigned-bitfields $(OTHER_CFLAGS)
/* Convert xnu-####.###.obj~### into ####.###~### */
- if (version) {
+ if (version[0]) {
const char *versionpos = strnstr(version, "xnu-", VERSIONBUF_LEN);
if (versionpos) {
valid_symbol = TRUE;
}
if (db_allow_unprefixed_hexa && db_radix == 16 &&
- db_tok_string) {
+ db_tok_string[0]) {
char *cp;
db_expr_t value;
/*
- * Copyright (c) 2003-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
core->pcore_num = cpup->cpu_phys_number / lcpus_per_core;
core->lcore_num = core->pcore_num % cpuid_info()->cpuid_cores_per_package;
- core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY;
+ core->flags = X86CORE_FL_PRESENT | X86CORE_FL_READY
+ | X86CORE_FL_HALTED | X86CORE_FL_IDLE;
return(core);
}
#define cpu_to_logical_cpu(cpu) (cpu_to_lapic[cpu] & CPU_THREAD_MASK)
#define cpu_is_core_cpu(cpu) (cpu_to_logical_cpu(cpu) == 0)
-#define cpu_to_lcpu(cpu) (&cpu_datap(cpu)->lcpu)
-#define cpu_to_core(cpu) (cpu_to_lcpu(cpu)->core)
-#define cpu_to_package(cpu) (cpu_to_core(cpu)->package)
+#define _cpu_to_lcpu(cpu) (&cpu_datap(cpu)->lcpu)
+#define _cpu_to_core(cpu) (_cpu_to_lcpu(cpu)->core)
+#define _cpu_to_package(cpu) (_cpu_to_core(cpu)->package)
+
+#define cpu_to_lcpu(cpu) ((cpu_datap(cpu) != NULL) ? _cpu_to_lcpu(cpu) : NULL)
+#define cpu_to_core(cpu) ((cpu_to_lcpu(cpu) != NULL) ? _cpu_to_lcpu(cpu)->core : NULL)
+#define cpu_to_package(cpu) ((cpu_to_core(cpu) != NULL) ? _cpu_to_core(cpu)->package : NULL)
/* Fast access: */
#define x86_lcpu() (¤t_cpu_datap()->lcpu)
return(-1);
}
+ /*
+ * Deal with the case where the CPU # passed in is past the
+ * value specified in cpus=n in boot-args.
+ */
+ if (cpu >= real_ncpus) {
+ enabled = ml_set_interrupts_enabled(FALSE);
+ lcpu = cpu_to_lcpu(cpu);
+ if (lcpu != NULL) {
+ core = lcpu->core;
+ pkg = core->package;
+
+ if (lcpu->primary) {
+ pkg->flags |= X86PKG_FL_HAS_HPET;
+ }
+ }
+
+ ml_set_interrupts_enabled(enabled);
+ return(0);
+ }
+
rc = (*hpet_req)(ml_get_apicid(cpu), hpet_arg, &hpetReq);
if (rc != 0) {
return(rc);
* Nanotime returned in %edx:%eax.
* Computed from tsc based on the scale factor
* and an implicit 32 bit shift.
+ * This code must match what _rtc_nanotime_read does in
+ * i386/machine_routines_asm.s. Failure to do so can
+ * result in "weird" timing results.
*
* Uses %eax, %ebx, %ecx, %edx, %esi, %edi.
*/
#define RNT_INFO _rtc_nanotime_info
-#define NANOTIME \
-0: movl RNT_INFO+RNT_TSC_BASE,%esi ; \
- movl RNT_INFO+RNT_TSC_BASE+4,%edi ; \
- rdtsc ; \
- subl %esi,%eax /* tsc - tsc_base */ ; \
- sbbl %edi,%edx ; \
- movl RNT_INFO+RNT_SCALE,%ecx ; \
- movl %edx,%ebx /* delta * scale */ ; \
- mull %ecx ; \
- movl %ebx,%eax ; \
- movl %edx,%ebx ; \
- mull %ecx ; \
- addl %ebx,%eax ; \
- adcl $0,%edx /* add carry into hi */ ; \
- addl RNT_INFO+RNT_NS_BASE,%eax /* add ns_base lo */ ; \
- adcl RNT_INFO+RNT_NS_BASE+4,%edx /* add ns_base hi */ ; \
- cmpl RNT_INFO+RNT_TSC_BASE,%esi ; \
- jne 0b /* repeat if changed */ ; \
- cmpl RNT_INFO+RNT_TSC_BASE+4,%edi ; \
- jne 0b
+#define NANOTIME \
+ lea RNT_INFO,%edi ; \
+0: ; \
+ movl RNT_GENERATION(%edi),%esi /* being updated? */ ; \
+ testl %esi,%esi ; \
+ jz 0b /* wait until done */ ; \
+ rdtsc ; \
+ subl RNT_TSC_BASE(%edi),%eax ; \
+ sbbl RNT_TSC_BASE+4(%edi),%edx /* tsc - tsc_base */ ; \
+ movl RNT_SCALE(%edi),%ecx /* * scale factor */ ; \
+ movl %edx,%ebx ; \
+ mull %ecx ; \
+ movl %ebx,%eax ; \
+ movl %edx,%ebx ; \
+ mull %ecx ; \
+ addl %ebx,%eax ; \
+ adcl $0,%edx ; \
+ addl RNT_NS_BASE(%edi),%eax /* + ns_base */ ; \
+ adcl RNT_NS_BASE+4(%edi),%edx ; \
+ cmpl RNT_GENERATION(%edi),%esi /* check for update */ ; \
+ jne 0b /* do it all again */
+
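
For readers more comfortable in C, an equivalent sketch of what the rewritten macro computes (the structure layout and names are illustrative; the real rtc_nanotime_info layout is what the RNT_* offsets refer to). For simplicity the sketch assumes the TSC delta fits in 32 bits, whereas the assembly keeps a 96-bit intermediate:

    #include <stdint.h>

    struct sk_rtc_nanotime {
        uint64_t    tsc_base;       /* TSC value captured at the last update */
        uint32_t    scale;          /* fixed-point nanoseconds per TSC tick, << 32 */
        uint32_t    generation;     /* 0 while an update is in progress */
        uint64_t    ns_base;        /* nanoseconds corresponding to tsc_base */
    };

    extern uint64_t sk_rdtsc(void); /* hypothetical stand-in for the rdtsc instruction */

    static uint64_t
    sk_nanotime(const volatile struct sk_rtc_nanotime *rnt)
    {
        uint64_t ns;
        uint32_t gen;

        do {
            while ((gen = rnt->generation) == 0)
                ;                   /* writer in progress: wait it out */

            /* ns = ns_base + ((tsc - tsc_base) * scale) >> 32 */
            ns = rnt->ns_base +
                (((sk_rdtsc() - rnt->tsc_base) * (uint64_t)rnt->scale) >> 32);
        } while (gen != rnt->generation);   /* changed underneath us: go around again */

        return ns;
    }
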
/*
* Add 64-bit delta in register dreg : areg to timer pointed to by register treg.
kdb_printf(" IA32_MCG_MISC: 0x%08x\n", rdmsr32(IA32_MCG_MISC));
}
+static void
+mca_report_cpu_info(void)
+{
+ uint64_t microcode;
+ i386_cpu_info_t *infop = cpuid_info();
+
+ // microcode revision is top 32 bits of MSR_IA32_UCODE_REV
+ microcode = rdmsr64(MSR_IA32_UCODE_REV) >> 32;
+ kdb_printf("family: %d model: %d stepping: %d microcode revision %d\n",
+ infop->cpuid_family,
+ infop->cpuid_model,
+ infop->cpuid_stepping,
+ (uint32_t) microcode);
+ kdb_printf("%s\n", infop->cpuid_brand_string);
+}
+
+
static const char *mca_threshold_status[] = {
[THRESHOLD_STATUS_NO_TRACKING] "No tracking",
[THRESHOLD_STATUS_GREEN] "Green",
kdb_printf(
"Machine-check capabilities (cpu %d) 0x%016qx:\n",
cpu_number(), ia32_mcg_cap.u64);
+
+ mca_report_cpu_info();
+
kdb_printf(
" %d error-reporting banks\n%s%s", mca_error_bank_count,
IF(mca_control_MSR_present,
*
* This is the same as the commpage nanotime routine, except that it uses the
* kernel internal "rtc_nanotime_info" data instead of the commpage data. The two copies
- * of data (one in the kernel and one in user space) are kept in sync by rtc_nanotime_update().
+ * of data (one in the kernel and one in user space) are kept in sync by rtc_clock_napped().
+ *
+ * Warning! There is another copy of this code in osfmk/i386/locore.s. The
+ * two versions must be kept in sync with each other!
*
* There are actually two versions of the algorithm, one each for "slow" and "fast"
* processors. The more common "fast" algorithm is:
};
#define FAKE_UBER64(addr32) { (uint32_t) (addr32), KERNEL_UBER_BASE_HI32 }
#define FAKE_COMPAT(addr32) { (uint32_t) (addr32), 0x0 }
-#define UBER64(addr32) ((addr64_t) addr32 + KERNEL_UBER_BASE)
+#define UBER64(addr32) ((addr64_t) (uintptr_t)addr32 + KERNEL_UBER_BASE)
/*
* Boot-time data for master (or only) CPU
extern int ipc_mqueue_full;
extern int ipc_mqueue_rcv;
-#define IPC_MQUEUE_FULL (event64_t)&ipc_mqueue_full
-#define IPC_MQUEUE_RECEIVE (event64_t)&ipc_mqueue_rcv
+#define IPC_MQUEUE_FULL CAST_EVENT64_T(&ipc_mqueue_full)
+#define IPC_MQUEUE_RECEIVE CAST_EVENT64_T(&ipc_mqueue_rcv)
/*
* Exported interfaces
unsigned short exception_port;
unsigned char exception_seq;
boolean_t exception_ack_needed;
+ int kdp_cpu;
+ thread_t kdp_thread;
} kdp_glob_t;
extern kdp_glob_t kdp;
* do this. I think the client and the host can get out of sync.
*/
kdp.saved_state = saved_state;
-
+ kdp.kdp_cpu = cpu_number();
+ kdp.kdp_thread = current_thread();
+
if (pkt.input)
kdp_panic("kdp_raise_exception");
char *vptr;
strlcpy(vstr, "custom", 10);
- if (version) {
- if (kdp_vm_read(version, versionbuf, 95)) {
- versionbuf[94] = '\0';
- versionpos = strnstr(versionbuf, "xnu-", 90);
- if (versionpos) {
- strncpy(vstr, versionpos, sizeof(vstr));
- vstr[sizeof(vstr)-1] = '\0';
- vptr = vstr + 4; /* Begin after "xnu-" */
- while (*vptr && (isdigit(*vptr) || *vptr == '.'))
- vptr++;
+ if (strlcpy(versionbuf, version, 95) < 95) {
+ versionpos = strnstr(versionbuf, "xnu-", 90);
+ if (versionpos) {
+ strncpy(vstr, versionpos, sizeof(vstr));
+ vstr[sizeof(vstr)-1] = '\0';
+ vptr = vstr + 4; /* Begin after "xnu-" */
+ while (*vptr && (isdigit(*vptr) || *vptr == '.'))
+ vptr++;
+ *vptr = '\0';
+ /* Remove trailing period, if any */
+ if (*(--vptr) == '.')
*vptr = '\0';
- /* Remove trailing period, if any */
- if (*(--vptr) == '.')
- *vptr = '\0';
- retval = 0;
- }
+ retval = 0;
}
}
strlcpy(versionbuf, vstr, KDP_MAXPACKET);
state->esi = saved_state->esi;
state->ebp = saved_state->ebp;
- if ((saved_state->cs & 0x3) == 0){ /* Kernel State */
- state->esp = (unsigned int) &saved_state->uesp;
+ if ((saved_state->cs & SEL_PL) == SEL_PL_K) { /* Kernel state? */
+ if (cpu_mode_is64bit())
+ state->esp = (uint32_t) saved_state->uesp;
+ else
+ state->esp = ((uint32_t)saved_state) + offsetof(x86_saved_state_t, ss_32) + sizeof(x86_saved_state32_t);
state->ss = KERNEL_DS;
} else {
state->esp = saved_state->uesp;
saved_state->frame.eflags |= ( EFL_IF | EFL_SET );
#endif
saved_state->eip = state->eip;
- saved_state->fs = state->fs;
- saved_state->gs = state->gs;
}
{
return 0xcc;
}
+
extern pmap_t kdp_pmap;
extern uint32_t kdp_src_high32;
/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
ast_check(
processor_t processor)
{
+ thread_t thread = processor->active_thread;
+
+ processor->current_pri = thread->sched_pri;
if ( processor->state == PROCESSOR_RUNNING ||
processor->state == PROCESSOR_SHUTDOWN ) {
- thread_t thread = processor->active_thread;
- ast_t preempt;
+ ast_t preempt;
/*
* Propagate thread ast to processor.
typedef uint64_t event64_t; /* 64 bit wait event */
#define NO_EVENT64 ((event64_t) 0)
+#define CAST_EVENT64_T(a_ptr) ((event64_t)((uintptr_t)(a_ptr)))
/*
* Possible wait_result_t values.
/* Get the bundle id, if provided, and discard the buffer sent down.
*/
if (*data && *dataCount) {
- (char *)(kmem_alloc(kernel_map, (vm_offset_t *)&kext_id, id_length));
+ kmem_alloc(kernel_map, (vm_offset_t *)&kext_id, id_length);
if (!kext_id) {
return KERN_FAILURE;
}
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
init_ast_check(processor);
pset = processor->processor_set;
pset_lock(pset);
- pset->processor_count++;
- enqueue_head(&pset->active_queue, (queue_entry_t)processor);
+ if (++pset->processor_count == 1)
+ pset->low_pri = processor;
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
processor->state = PROCESSOR_RUNNING;
(void)hw_atomic_add(&processor_avail_count, 1);
pset_unlock(pset);
pset = processor->processor_set;
pset_lock(pset);
- pset->processor_count--;
processor->state = PROCESSOR_OFF_LINE;
- if (processor == pset->low_hint)
- pset->low_hint = PROCESSOR_NULL;
+ if (--pset->processor_count == 0)
+ pset->low_pri = PROCESSOR_NULL;
(void)hw_atomic_sub(&processor_avail_count, 1);
pset_unlock(pset);
ml_cpu_down();
n = 0;
- while (*p != '\0') {
- if (++n > prec || (length > 0 && n > length))
- break;
-
- (*putc)(*p++, arg);
- nprinted++;
+ while ((n < prec) && (!(length > 0 && n >= length))) {
+ if (*p == '\0') {
+ break;
+ }
+ (*putc)(*p++, arg);
+ nprinted++;
+ n++;
}
if (n < length && ladjust) {
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
pset_lock(pset);
- pset_hint_low(pset, processor);
- pset_hint_high(pset, processor);
+ pset_pri_hint(pset, processor, processor->current_pri);
pset_unlock(pset);
}
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
queue_init(&pset->idle_queue);
pset->idle_count = 0;
pset->processor_count = 0;
- pset->high_hint = PROCESSOR_NULL;
- pset->low_hint = PROCESSOR_NULL;
+ pset->low_pri = PROCESSOR_NULL;
pset_lock_init(pset);
pset->pset_self = IP_NULL;
pset->pset_name_self = IP_NULL;
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
queue_head_t idle_queue; /* idle processors */
int idle_count;
- processor_t low_hint;
- processor_t high_hint;
+ processor_t low_pri;
int processor_count;
/* Update hints */
-#define pset_hint_low(ps, p) \
-MACRO_BEGIN \
- if ((ps)->low_hint != PROCESSOR_NULL) { \
- if ((p) != (ps)->low_hint) { \
- if ((p)->runq.count < (ps)->low_hint->runq.count) \
- (ps)->low_hint = (p); \
- } \
- } \
- else \
- (ps)->low_hint = (p); \
-MACRO_END
-
-#define pset_hint_high(ps, p) \
-MACRO_BEGIN \
- if ((ps)->high_hint != PROCESSOR_NULL) { \
- if ((p) != (ps)->high_hint) { \
- if ((p)->runq.count > (ps)->high_hint->runq.count) \
- (ps)->high_hint = (p); \
- } \
- } \
- else \
- (ps)->high_hint = (p); \
+#define pset_pri_hint(ps, p, pri) \
+MACRO_BEGIN \
+ if ((p) != (ps)->low_pri) { \
+ if ((pri) < (ps)->low_pri->current_pri) \
+ (ps)->low_pri = (p); \
+ } \
MACRO_END
extern void processor_bootstrap(void) __attribute__((section("__TEXT, initcode")));
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
uint32_t sched_run_count, sched_share_count;
uint32_t sched_load_average, sched_mach_factor;
-void (*pm_tick_callout)(void) = NULL;
-
/* Forwards */
void wait_queues_init(void) __attribute__((section("__TEXT, initcode")));
processor_t processor);
static thread_t steal_thread(
+ processor_set_t pset);
+
+static thread_t steal_processor_thread(
processor_t processor);
static void thread_update_scan(void);
/* scheduler tick interval */
clock_interval_to_absolutetime_interval(USEC_PER_SEC >> SCHED_TICK_SHIFT,
NSEC_PER_USEC, &abstime);
+ assert((abstime >> 32) == 0 && (uint32_t)abstime != 0);
sched_tick_interval = abstime;
-#if DEBUG
- printf("Quantum: %d. Smallest quantum: %d. Min Rt/Max Rt: %d/%d."
- " Tick: %d.\n",
- std_quantum, min_std_quantum, min_rt_quantum, max_rt_quantum,
- sched_tick_interval);
-#endif
-
/*
* Compute conversion factor from usage to
* timesharing priorities with 5/8 ** n aging.
processor_t processor)
{
processor_set_t pset = processor->processor_set;
- thread_t new_thread;
+ thread_t new_thread = THREAD_NULL;
boolean_t other_runnable;
do {
/* I am the highest priority runnable (non-idle) thread */
- pset_hint_low(pset, processor);
- pset_hint_high(pset, processor);
+ pset_pri_hint(pset, processor, processor->current_pri);
processor->deadline = UINT64_MAX;
* No runnable threads, attempt to steal
* from other processors.
*/
- if (pset->high_hint != PROCESSOR_NULL && pset->high_hint->runq.count > 0) {
- new_thread = steal_thread(pset->high_hint);
- if (new_thread != THREAD_NULL) {
- pset_unlock(pset);
+ new_thread = steal_thread(pset);
+ if (new_thread != THREAD_NULL)
+ return (new_thread);
- return (new_thread);
- }
- }
+ /*
+ * If other threads have appeared, shortcut
+ * around again.
+ */
+ if (processor->runq.count > 0 || rt_runq.count > 0)
+ continue;
+
+ pset_lock(pset);
/*
* Nothing is runnable, so set this processor idle if it
processor->state = PROCESSOR_IDLE;
enqueue_head(&pset->idle_queue, (queue_entry_t)processor);
- pset->low_hint = processor;
+ pset->low_pri = processor;
pset->idle_count++;
}
*/
spllo(); new_thread = processor_idle(thread, processor);
+ /*
+ * Return at splsched.
+ */
(*thread->sched_call)(SCHED_CALL_UNBLOCK, thread);
thread_lock(thread);
if (processor->state == PROCESSOR_IDLE) {
remqueue(&pset->idle_queue, (queue_entry_t)processor);
pset->idle_count--;
- enqueue_head(&pset->active_queue, (queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
processor->next_thread = thread;
processor->deadline = thread->realtime.deadline;
if (processor->state == PROCESSOR_IDLE) {
remqueue(&pset->idle_queue, (queue_entry_t)processor);
pset->idle_count--;
- enqueue_head(&pset->active_queue, (queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
processor->next_thread = thread;
processor->deadline = UINT64_MAX;
if (!processor_enqueue(processor, thread, options))
preempt = AST_NONE;
- pset_hint_high(pset, processor);
-
if (preempt != AST_NONE) {
if (processor == current_processor()) {
thread_t self = processor->active_thread;
nset = next_pset(nset);
} while (nset->processor_count < 1 && nset != pset);
- return ((nset != pset)? nset: pset);
+ return (nset);
}
/*
thread_t thread)
{
processor_set_t nset, cset = pset;
- processor_t processor;
+ processor_t processor = thread->last_processor;
+
+ /*
+ * Prefer the last processor, when appropriate.
+ */
+ if (processor != PROCESSOR_NULL) {
+ if (processor->processor_set != pset ||
+ processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
+ processor = PROCESSOR_NULL;
+ else
+ if (processor->state == PROCESSOR_IDLE || processor->current_pri < thread->sched_pri)
+ return (processor);
+ }
/*
* Iterate through the processor sets to locate
processor = (processor_t)queue_next((queue_entry_t)processor);
}
+
+ processor = PROCESSOR_NULL;
}
else {
/*
- * Choose the low hint processor in the processor set if available.
+ * Check the low hint processor in the processor set if available.
*/
- processor = cset->low_hint;
- if (processor != PROCESSOR_NULL &&
- processor->state != PROCESSOR_SHUTDOWN && processor->state != PROCESSOR_OFF_LINE)
- return (processor);
+ if (cset->low_pri != PROCESSOR_NULL &&
+ cset->low_pri->state != PROCESSOR_SHUTDOWN && cset->low_pri->state != PROCESSOR_OFF_LINE) {
+ if (processor == PROCESSOR_NULL || cset->low_pri->current_pri < thread->sched_pri)
+ processor = cset->low_pri;
+ }
/*
- * Choose any active processor if the hint was invalid.
+ * Otherwise, choose an available processor in the set.
*/
- processor = (processor_t)dequeue_head(&cset->active_queue);
- if (processor != PROCESSOR_NULL) {
- enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
- return (processor);
+ if (processor == PROCESSOR_NULL) {
+ processor = (processor_t)dequeue_head(&cset->active_queue);
+ if (processor != PROCESSOR_NULL)
+ enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
}
}
} while (nset != pset);
/*
- * If all else fails choose the current processor,
- * this routine must return a running processor.
+ * Make sure that we pick a running processor,
+ * and that the correct processor set is locked.
*/
- processor = current_processor();
- if (cset != processor->processor_set) {
- pset_unlock(cset);
+ do {
+ /*
+ * If we haven't been able to choose a processor,
+ * pick the current one and return it.
+ */
+ if (processor == PROCESSOR_NULL) {
+ processor = current_processor();
- cset = processor->processor_set;
- pset_lock(cset);
- }
+ /*
+ * Check that the correct processor set is
+ * returned locked.
+ */
+ if (cset != processor->processor_set) {
+ pset_unlock(cset);
+
+ cset = processor->processor_set;
+ pset_lock(cset);
+ }
+
+ return (processor);
+ }
+
+ /*
+ * Check that the processor set for the chosen
+ * processor is locked.
+ */
+ if (cset != processor->processor_set) {
+ pset_unlock(cset);
+
+ cset = processor->processor_set;
+ pset_lock(cset);
+ }
+
+ /*
+ * We must verify that the chosen processor is still available.
+ */
+ if (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
+ processor = PROCESSOR_NULL;
+ } while (processor == PROCESSOR_NULL);
return (processor);
}
/*
* Choose a different processor in certain cases.
*/
- if (processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
- processor = choose_processor(pset, thread);
- else
if (thread->sched_pri >= BASEPRI_RTQUEUES) {
/*
* If the processor is executing an RT thread with
processor = choose_processor(pset, thread);
}
else
- if (processor->state != PROCESSOR_IDLE && pset->idle_count > 0) {
processor = choose_processor(pset, thread);
- }
- else {
- processor_set_t nset = choose_next_pset(pset);
-
- /*
- * Bump into a lesser loaded processor set if appropriate.
- */
- if (pset != nset && (nset->low_hint == PROCESSOR_NULL ||
- (pset->idle_count == 0 && nset->idle_count > 0) ||
- processor->runq.count > nset->low_hint->runq.count)) {
- pset_unlock(pset);
-
- pset = nset;
- pset_lock(pset);
-
- processor = choose_processor(pset, thread);
- }
- }
}
else {
/*
* No Affinity case:
*
- * Choose a processor from the current processor set.
+ * Utilize a per task hint to spread threads
+ * among the available processor sets.
*/
- processor = current_processor();
- pset = processor->processor_set;
+ task_t task = thread->task;
+
+ pset = task->pset_hint;
+ if (pset == PROCESSOR_SET_NULL)
+ pset = current_processor()->processor_set;
+
+ pset = choose_next_pset(pset);
pset_lock(pset);
processor = choose_processor(pset, thread);
+ task->pset_hint = processor->processor_set;
}
}
else {
* choose_thread:
*
* Choose a thread to execute from the run queues
- * and return it. May steal a thread from another
- * processor.
+ * and return it.
*
* Called with pset scheduling lock and rt lock held,
* released on return.
if (processor->runq.count > 0 && processor->runq.highq >= rt_runq.highq) {
simple_unlock(&rt_lock);
- pset_hint_low(pset, processor);
-
- if (pset->high_hint != PROCESSOR_NULL) {
- if (processor != pset->high_hint) {
- if (processor->runq.count >= pset->high_hint->runq.count)
- pset->high_hint = processor;
- else
- if (pset->high_hint->runq.highq > processor->runq.highq) {
- thread = steal_thread(pset->high_hint);
- if (thread != THREAD_NULL) {
- processor->deadline = UINT64_MAX;
- pset_unlock(pset);
-
- return (thread);
- }
- }
- }
- }
- else
- pset->high_hint = processor;
-
thread = run_queue_dequeue(&processor->runq, SCHED_HEADQ);
+ pset_pri_hint(pset, processor, thread->sched_pri);
+
processor->deadline = UINT64_MAX;
pset_unlock(pset);
}
/*
- * steal_thread:
+ * steal_processor_thread:
*
- * Steal a thread from a processor and return it.
+ * Locate a thread to steal from the processor and
+ * return it.
*
* Associated pset must be locked. Returns THREAD_NULL
* on failure.
*/
static thread_t
-steal_thread(
+steal_processor_thread(
processor_t processor)
{
run_queue_t rq = &processor->runq;
queue_t queue = rq->queues + rq->highq;
int pri = rq->highq, count = rq->count;
- thread_t thread = THREAD_NULL;
+ thread_t thread;
while (count > 0) {
thread = (thread_t)queue_first(queue);
return (THREAD_NULL);
}
+/*
+ * steal_thread:
+ *
+ * Locate and steal a thread, beginning
+ * at the pset.
+ *
+ * The pset must be locked, and is returned
+ * unlocked.
+ *
+ * Returns the stolen thread, or THREAD_NULL on
+ * failure.
+ */
+static thread_t
+steal_thread(
+ processor_set_t pset)
+{
+ processor_set_t nset, cset = pset;
+ processor_t processor;
+ thread_t thread;
+
+ do {
+ processor = (processor_t)queue_first(&cset->active_queue);
+ while (!queue_end(&cset->active_queue, (queue_entry_t)processor)) {
+ if (processor->runq.count > 0) {
+ thread = steal_processor_thread(processor);
+ if (thread != THREAD_NULL) {
+ remqueue(&cset->active_queue, (queue_entry_t)processor);
+ enqueue_tail(&cset->active_queue, (queue_entry_t)processor);
+
+ processor->deadline = UINT64_MAX;
+ pset_unlock(cset);
+
+ return (thread);
+ }
+ }
+
+ processor = (processor_t)queue_next((queue_entry_t)processor);
+ }
+
+ nset = next_pset(cset);
+
+ if (nset != pset) {
+ pset_unlock(cset);
+
+ cset = nset;
+ pset_lock(cset);
+ }
+ } while (nset != pset);
+
+ pset_unlock(cset);
+
+ return (THREAD_NULL);
+}
+
/*
* This is the processor idle loop, which just looks for other threads
* to execute. Processor idle threads invoke this without supplying a
mach_absolute_time(), &PROCESSOR_DATA(processor, idle_state));
PROCESSOR_DATA(processor, current_state) = &PROCESSOR_DATA(processor, idle_state);
- while (processor->next_thread == THREAD_NULL && processor->runq.count == 0 &&
+ while (processor->next_thread == THREAD_NULL && processor->runq.count == 0 && rt_runq.count == 0 &&
(thread == THREAD_NULL || ((thread->state & (TH_WAIT|TH_SUSP)) == TH_WAIT && !thread->wake_active))) {
- volatile processor_t hint;
-
machine_idle();
(void)splsched();
-
- if (pset->low_hint == PROCESSOR_NULL)
- break;
-
- hint = pset->high_hint;
- if (hint != PROCESSOR_NULL && hint->runq.count > 0)
- break;
}
timer_switch(&PROCESSOR_DATA(processor, idle_state),
pset->idle_count--;
processor->state = PROCESSOR_RUNNING;
- enqueue_head(&pset->active_queue, (queue_entry_t)processor);
+ enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
}
else
if (state == PROCESSOR_SHUTDOWN) {
return (THREAD_NULL);
}
+/*
+ * Each processor has a dedicated thread which
+ * executes the idle loop when there is no suitable
+ * previous context.
+ */
void
idle_thread(void)
{
*/
thread_update_scan();
- if (pm_tick_callout != NULL)
- (*pm_tick_callout)();
-
clock_deadline_for_periodic_event(sched_tick_interval, abstime,
&sched_tick_deadline);
MACRO_END
unsigned int lock_set_event;
-#define LOCK_SET_EVENT ((event64_t)&lock_set_event)
+#define LOCK_SET_EVENT CAST_EVENT64_T(&lock_set_event)
unsigned int lock_set_handoff;
-#define LOCK_SET_HANDOFF ((event64_t)&lock_set_handoff)
+#define LOCK_SET_HANDOFF CAST_EVENT64_T(&lock_set_handoff)
/*
* ROUTINE: lock_set_init [private]
#include <kern/mach_param.h>
static unsigned int semaphore_event;
-#define SEMAPHORE_EVENT ((event64_t)&semaphore_event)
+#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)
zone_t semaphore_zone;
unsigned int semaphore_max = SEMAPHORE_MAX;
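Like the (uintptr_t) casts added to the pmap and savearea code below, these CAST_EVENT64_T conversions avoid casting a kernel pointer directly to the 64-bit event type. The macro itself is not shown in this diff; a plausible shape, assuming it simply widens the address through uintptr_t first, would be:

	#define CAST_EVENT64_T(a_ptr)	((event64_t)((uintptr_t)(a_ptr)))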
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
new_task->thread_count = 0;
new_task->active_thread_count = 0;
new_task->user_stop_count = 0;
+ new_task->pset_hint = PROCESSOR_SET_NULL;
new_task->role = TASK_UNSPECIFIED;
new_task->active = TRUE;
new_task->user_data = NULL;
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
queue_head_t threads;
int thread_count;
uint32_t active_thread_count;
+ processor_set_t pset_hint;
struct affinity_space *affinity_space;
/* User-visible scheduling information */
if (!nxprbufs)
return;
- n = nxprbufs;
-
s = splhigh();
simple_lock(&xprlock);
prev = db_recover;
- if (_setjmp(db_recover = &db_jmpbuf) == 0)
+ if (_setjmp(db_recover = &db_jmpbuf) == 0) {
+ n = nxprbufs;
+
for (x = *(struct xprbuf **)xprlast ; n--; ) {
if (--x < xprbase)
x = xprlast - 1;
x->cpuinfo, x->timestamp);
db_printf(x->msg, x->arg1,x->arg2,x->arg3,x->arg4,x->arg5);
}
+ }
db_recover = prev;
simple_unlock(&xprlock);
*/
#ifdef PRIVATE
#define MAX_UPL_TRANSFER 256
+#define MAX_UPL_SIZE 4096
struct upl_page_info {
ppnum_t phys_addr; /* physical page index number */
* Initialize kernel pmap
*/
kernel_pmap = &kernel_pmap_store;
- kernel_pmap_phys = (addr64_t)&kernel_pmap_store;
+ kernel_pmap_phys = (addr64_t)(uintptr_t)&kernel_pmap_store;
cursor_pmap = &kernel_pmap_store;
kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */
BootProcInfo.VMX_owner = NULL;
BootProcInfo.pp_cbfr = console_per_proc_alloc(TRUE);
BootProcInfo.rtcPop = EndOfAllTime;
- BootProcInfo.pp2ndPage = (addr64_t)&BootProcInfo; /* Initial physical address of the second page */
+ BootProcInfo.pp2ndPage = (addr64_t)(uintptr_t)&BootProcInfo; /* Initial physical address of the second page */
BootProcInfo.pms.pmsStamp = 0; /* Dummy transition time */
BootProcInfo.pms.pmsPop = EndOfAllTime; /* Set the pop way into the future */
}
- pmap_enter(kernel_pmap, (vm_map_offset_t)&sharedPage,
+ pmap_enter(kernel_pmap, (vm_map_offset_t)(uintptr_t)&sharedPage,
(ppnum_t)&sharedPage >> 12, /* Make sure the sharedPage is mapped */
VM_PROT_READ|VM_PROT_WRITE,
VM_WIMG_USE_DEFAULT, TRUE);
- pmap_enter(kernel_pmap, (vm_map_offset_t)&lowGlo.lgVerCode,
+ pmap_enter(kernel_pmap, (vm_map_offset_t)(uintptr_t)&lowGlo.lgVerCode,
(ppnum_t)&lowGlo.lgVerCode >> 12, /* Make sure the low memory globals are mapped */
VM_PROT_READ|VM_PROT_WRITE,
VM_WIMG_USE_DEFAULT, TRUE);
saveanchor.savefree = 0; /* Remember the start of the free chain */
saveanchor.savefreecnt = 0; /* Remember the length */
- saveanchor.savepoolfwd = (addr64_t)&saveanchor; /* Remember pool forward */
- saveanchor.savepoolbwd = (addr64_t)&saveanchor; /* Remember pool backward */
+ saveanchor.savepoolfwd = (addr64_t)(uintptr_t)&saveanchor; /* Remember pool forward */
+ saveanchor.savepoolbwd = (addr64_t)(uintptr_t)&saveanchor; /* Remember pool backward */
save = addr; /* Point to the whole block of blocks */
saveanchor.savefree = 0; /* Remember the start of the free chain */
saveanchor.savefreecnt = 0; /* Remember the length */
saveanchor.saveadjust = 0; /* Set none needed yet */
- saveanchor.savepoolfwd = (addr64_t)&saveanchor; /* Remember pool forward */
- saveanchor.savepoolbwd = (addr64_t)&saveanchor; /* Remember pool backward */
+ saveanchor.savepoolfwd = (addr64_t)(uintptr_t)&saveanchor; /* Remember pool forward */
+ saveanchor.savepoolbwd = (addr64_t)(uintptr_t)&saveanchor; /* Remember pool backward */
for(i=0; i < InitialSaveBloks; i++) { /* Initialize the saveareas */
if (!IP_VALID(memory_manager_default) &&
(m->object->purgable == VM_PURGABLE_DENY ||
- m->object->purgable == VM_PURGABLE_NONVOLATILE)) {
+ m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m->object->purgable == VM_PURGABLE_VOLATILE )) {
vm_page_lock_queues();
queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
if (range) {
if (offset > offset_end)
offset = offset_end;
- *range = offset - offset_beg;
+ if (offset > offset_beg)
+ *range = offset - offset_beg;
+ else
+ *range = 0;
}
return KERN_SUCCESS;
}
mem->inactive = FALSE; \
if (!mem->fictitious) { \
vm_page_inactive_count--; \
- vm_purgeable_q_advance_all(1); \
+ vm_purgeable_q_advance_all(); \
} else { \
assert(mem->phys_page == \
vm_page_fictitious_addr); \
percent_avail =
(vm_page_active_count + vm_page_inactive_count +
vm_page_speculative_count + vm_page_free_count +
- vm_page_purgeable_count ) * 100 /
+ (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
atop_64(max_mem);
if (percent_avail >= (kern_memorystatus_level + 5) ||
percent_avail <= (kern_memorystatus_level - 5)) {
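To see what the (IP_VALID(memory_manager_default) ? 0 : vm_page_purgeable_count) term changes, here is a worked example with hypothetical counts, assuming 4 KiB pages and max_mem of 1 GiB (so atop_64(max_mem) is 262144 pages): 50000 active, 30000 inactive, 5000 speculative, 15000 free and 10000 purgeable pages.

	no default pager:     (50000 + 30000 + 5000 + 15000 + 10000) * 100 / 262144 = 41
	default pager valid:  (50000 + 30000 + 5000 + 15000 +     0) * 100 / 262144 = 38

Purgeable pages now count as available only while no default pager is registered; at this call site kern_memorystatus_level is then refreshed only when the new figure is at least five points away from the current level.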
vm_page_t, pageq);
m->throttled = FALSE;
vm_page_throttled_count--;
-
+
/*
* not throttled any more, so can stick
* it on the inactive queue.
#if MACH_ASSERT
vm_page_inactive_count--; /* balance for purgeable queue asserts */
#endif
- vm_purgeable_q_advance_all(1);
+ vm_purgeable_q_advance_all();
queue_enter(&vm_page_queue_inactive, m,
vm_page_t, pageq);
#if MACH_ASSERT
vm_page_inactive_count--; /* balance for purgeable queue asserts */
#endif
- vm_purgeable_q_advance_all(1);
+ vm_purgeable_q_advance_all();
queue_enter(&vm_page_queue_inactive, m,
vm_page_t, pageq);
m->inactive = FALSE;
if (!m->fictitious)
vm_page_inactive_count--;
- vm_purgeable_q_advance_all(1);
+ vm_purgeable_q_advance_all();
}
/* If the object is empty, the page must be reclaimed even if dirty or used. */
if (!IP_VALID(memory_manager_default) &&
object->internal &&
(object->purgable == VM_PURGABLE_DENY ||
- object->purgable == VM_PURGABLE_NONVOLATILE)) {
+ object->purgable == VM_PURGABLE_NONVOLATILE ||
+ object->purgable == VM_PURGABLE_VOLATILE )) {
queue_enter(&vm_page_queue_throttled, m,
vm_page_t, pageq);
m->throttled = TRUE;
panic("vm_object_upl_request: contiguous object specified\n");
- if ((size / PAGE_SIZE) > MAX_UPL_TRANSFER)
- size = MAX_UPL_TRANSFER * PAGE_SIZE;
+ if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
+ size = MAX_UPL_SIZE * PAGE_SIZE;
if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
- *page_list_count = MAX_UPL_TRANSFER;
+ *page_list_count = MAX_UPL_SIZE;
if (cntrl_flags & UPL_SET_INTERNAL) {
if (cntrl_flags & UPL_SET_LITE) {
return KERN_SUCCESS;
}
if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
- if ((*upl_size/page_size) > MAX_UPL_TRANSFER)
- *upl_size = MAX_UPL_TRANSFER * page_size;
+ if ((*upl_size/page_size) > MAX_UPL_SIZE)
+ *upl_size = MAX_UPL_SIZE * page_size;
}
/*
* Create an object if necessary.
else
prot = VM_PROT_READ | VM_PROT_WRITE;
- if (((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous)
- size = MAX_UPL_TRANSFER * page_size;
+ if (((size/page_size) > MAX_UPL_SIZE) && !object->phys_contiguous)
+ size = MAX_UPL_SIZE * page_size;
if (cntrl_flags & UPL_SET_INTERNAL) {
if (page_list_count != NULL)
- *page_list_count = MAX_UPL_TRANSFER;
+ *page_list_count = MAX_UPL_SIZE;
}
if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
#include <mach/mach_types.h>
#include <vm/vm_page.h>
+#include <vm/vm_kern.h> /* kmem_alloc */
#include <vm/vm_purgeable_internal.h>
#include <sys/kdebug.h>
+#include <kern/sched_prim.h>
struct token {
token_cnt_t count;
token_idx_t next;
};
-struct token tokens[MAX_VOLATILE];
+struct token *tokens;
+token_idx_t token_q_max_cnt = 0;
+vm_size_t token_q_cur_size = 0;
token_idx_t token_free_idx = 0; /* head of free queue */
token_idx_t token_init_idx = 1; /* token 0 is reserved!! */
* token removed protect with
* page_queue_lock */
+static int token_q_allocating = 0; /* flag to singlethread allocator */
+
struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
#define TOKEN_ADD 0x40/* 0x100 */
#define OBJECT_ADDED 0x50/* 0x140 */
#define OBJECT_REMOVED 0x51/* 0x144 */
-static void vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue);
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
#if MACH_ASSERT
token_idx_t token;
enum purgeable_q_type i;
- if (token_init_idx < MAX_VOLATILE) { /* lazy token array init */
- token = token_init_idx;
- token_init_idx++;
- } else if (token_free_idx) {
+find_available_token:
+
+ if (token_free_idx) { /* unused tokens available */
token = token_free_idx;
token_free_idx = tokens[token_free_idx].next;
- } else {
- return KERN_FAILURE;
+ } else if (token_init_idx < token_q_max_cnt) { /* lazy token array init */
+ token = token_init_idx;
+ token_init_idx++;
+ } else { /* allocate more memory */
+ /* Wait if another thread is inside the memory alloc section */
+ while(token_q_allocating) {
+ wait_result_t res = thread_sleep_mutex((event_t)&token_q_allocating,
+ &vm_page_queue_lock,
+ THREAD_UNINT);
+ if(res != THREAD_AWAKENED) return KERN_ABORTED;
+ };
+
+ /* Check whether memory is still maxed out */
+ if(token_init_idx < token_q_max_cnt)
+ goto find_available_token;
+
+ /* Still no memory. Allocate some. */
+ token_q_allocating = 1;
+
+ /* Drop page queue lock so we can allocate */
+ vm_page_unlock_queues();
+
+ struct token *new_loc;
+ vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
+ kern_return_t result;
+
+ if (token_q_cur_size) {
+ result=kmem_realloc(kernel_map, (vm_offset_t)tokens, token_q_cur_size,
+ (vm_offset_t*)&new_loc, alloc_size);
+ } else {
+ result=kmem_alloc(kernel_map, (vm_offset_t*)&new_loc, alloc_size);
+ }
+
+ vm_page_lock_queues();
+
+ if (result) {
+ /* Unblock waiting threads */
+ token_q_allocating = 0;
+ thread_wakeup((event_t)&token_q_allocating);
+ return result;
+ }
+
+ /* If we get here, we allocated new memory. Update pointers and
+ * dealloc old range */
+ struct token *old_tokens=tokens;
+ tokens=new_loc;
+ vm_size_t old_token_q_cur_size=token_q_cur_size;
+ token_q_cur_size=alloc_size;
+ token_q_max_cnt = token_q_cur_size / sizeof(struct token);
+ assert (token_init_idx < token_q_max_cnt); /* We must have a free token now */
+
+ if (old_token_q_cur_size) { /* clean up old mapping */
+ vm_page_unlock_queues();
+ /* kmem_realloc leaves the old region mapped. Get rid of it. */
+ kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
+ vm_page_lock_queues();
+ }
+
+ /* Unblock waiting threads */
+ token_q_allocating = 0;
+ thread_wakeup((event_t)&token_q_allocating);
+
+ goto find_available_token;
}
-
+
+ assert (token);
+
/*
* the new pagecount we got need to be applied to all queues except
* obsolete
void
-vm_purgeable_q_advance_all(uint32_t num_pages)
+vm_purgeable_q_advance_all()
{
/* check queue counters - if they get really large, scale them back.
* They tend to get that large when there is no purgeable queue action */
int i;
- if(token_new_pagecount > (INT32_MAX >> 1)) /* a system idling years might get there */
+ if(token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1)) /* a system idling years might get there */
{
for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
}
/*
- * don't need to advance obsolete queue - all items are ripe there,
+ * Decrement token counters. A token counter can be zero; this means the
+ * object is ripe to be purged. It is not purged immediately, because that
+ * could cause several objects to be purged even if purging one would satisfy
+ * the memory needs. Instead, the pageout thread purges one after the other
+ * by calling vm_purgeable_object_purge_one and then rechecking the memory
+ * balance.
+ *
+ * No need to advance obsolete queue - all items are ripe there,
* always
*/
- vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
- vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
-}
+ for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
+ purgeable_q_t queue = &purgeable_queues[i];
+ uint32_t num_pages = 1;
+
+ /* Iterate over tokens as long as there are unripe tokens. */
+ while (queue->token_q_unripe) {
+ if (tokens[queue->token_q_unripe].count && num_pages)
+ {
+ tokens[queue->token_q_unripe].count -= 1;
+ num_pages -= 1;
+ }
-/*
- * Decrements token counters. A token counter can be zero, this means the
- * object is ripe to be purged. It is not purged immediately, because that
- * could cause several objects to be purged even if purging one would satisfy
- * the memory needs. Instead, the pageout thread purges one after the other
- * by calling vm_purgeable_object_purge_one and then rechecking the memory
- * balance.
- */
-static void
-vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue)
-{
- /* Iterate over tokens as long as there are unripe tokens. */
- while (queue->token_q_unripe) {
- int min = (tokens[queue->token_q_unripe].count < num_pages) ?
- tokens[queue->token_q_unripe].count : num_pages;
- tokens[queue->token_q_unripe].count -= min;
- num_pages -= min;
-
- if (tokens[queue->token_q_unripe].count == 0) {
- queue->token_q_unripe = tokens[queue->token_q_unripe].next;
- available_for_purge++;
- KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
- queue->type,
- tokens[queue->token_q_head].count, /* num pages on new
- * first token */
- 0,
- available_for_purge,
- 0);
- continue; /* One token ripened. Make sure to
- * check the next. */
+ if (tokens[queue->token_q_unripe].count == 0) {
+ queue->token_q_unripe = tokens[queue->token_q_unripe].next;
+ available_for_purge++;
+ KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
+ queue->type,
+ tokens[queue->token_q_head].count, /* num pages on new
+ * first token */
+ 0,
+ available_for_purge,
+ 0);
+ continue; /* One token ripened. Make sure to
+ * check the next. */
+ }
+ if (num_pages == 0)
+ break; /* Current token not ripe and no more pages.
+ * Work done. */
}
- if (num_pages == 0)
- break; /* Current token not ripe and no more pages.
- * Work done. */
- }
- /*
- * if there are no unripe tokens in the queue, decrement the
- * new_pages counter instead new_pages can be negative, but must be
- * canceled out by token_new_pagecount -- since inactive queue as a
- * whole always contains a nonnegative number of pages
- */
- if (!queue->token_q_unripe) {
- queue->new_pages -= num_pages;
- assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
- }
+ /*
+ * if there are no unripe tokens in the queue, decrement the
+ * new_pages counter instead. new_pages can be negative, but must be
+ * canceled out by token_new_pagecount -- since inactive queue as a
+ * whole always contains a nonnegative number of pages
+ */
+ if (!queue->token_q_unripe) {
+ queue->new_pages -= num_pages;
+ assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
+ }
#if MACH_ASSERT
- vm_purgeable_token_check_queue(queue);
+ vm_purgeable_token_check_queue(queue);
#endif
+ }
}
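With the per-queue loop folded into vm_purgeable_q_advance_all(), each call, made wherever a page is pulled off the inactive queue in the hunks above, charges exactly one page against the first unripe token of each non-obsolete queue. So once a token reaches the head of the unripe list, its count is the number of further inactive-queue departures before it ripens; a worked sequence:

	head unripe token with count = 3
	advance #1: count 3 -> 2, still unripe
	advance #2: count 2 -> 1, still unripe
	advance #3: count 1 -> 0, token ripens, available_for_purge++
	no unripe tokens left: each further advance decrements queue->new_pages instead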
/*
#if (CONFIG_TOKEN_QUEUE_SMALL == 1)
typedef uint16_t token_idx_t;
typedef uint16_t token_cnt_t;
-#define MAX_VOLATILE 0x01000
#define TOKEN_COUNT_MAX UINT16_MAX
#else
typedef uint32_t token_idx_t;
typedef uint32_t token_cnt_t;
-#define MAX_VOLATILE 0x10000
#define TOKEN_COUNT_MAX UINT32_MAX
#endif
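Since the token table now grows one page at a time (token_q_cur_size + PAGE_SIZE in the allocation path above) and token_q_max_cnt is recomputed as token_q_cur_size / sizeof(struct token), each growth step adds a fixed number of slots; assuming 4 KiB pages and no structure padding:

	CONFIG_TOKEN_QUEUE_SMALL: struct token is 2 x uint16_t = 4 bytes, so 1024 tokens per page
	otherwise:                struct token is 2 x uint32_t = 8 bytes, so  512 tokens per page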
extern struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
extern int32_t token_new_pagecount;
+#define TOKEN_NEW_PAGECOUNT_MAX INT32_MAX
extern int available_for_purge;
* the token counters are protected by the vm_page_queue_lock, since they're
* mostly used in that context and we don't want to do a lot of extra locking
* the purgeable page queues are protected by a separate lock since they're
- * mostly user on a user context and we don't want any contention with the
+ * mostly used on a user context and we don't want any contention with the
* pageout daemon.
*/
-
decl_mutex_data(,vm_purgeable_queue_lock)
/* add a new token to queue. called by vm_object_purgeable_control */
void vm_purgeable_token_delete_first(purgeable_q_t queue);
/*
- * decrement token counters. the function will call the object purger if a
- * token expires.
+ * decrement token counters.
+ * enter with page queue locked
*/
-/* enter with page queue locked */
-void vm_purgeable_q_advance_all(uint32_t num_pages);
+void vm_purgeable_q_advance_all(void);
-/* the object purger. purges the specified number of objects from memory. */
+/* the object purger. purges the next eligible object from memory. */
void vm_purgeable_object_purge_one(void);
/* insert purgeable object into queue */
void vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group);
-/* Look for page belonging to object. If found, put on inactive queue. */
+/* look for object. If found, remove from purgeable queue. */
purgeable_q_t vm_purgeable_object_remove(vm_object_t object);
#endif /* __VM_PURGEABLE_INTERNAL__ */
percent_avail =
(vm_page_active_count + vm_page_inactive_count +
vm_page_speculative_count + vm_page_free_count +
- vm_page_purgeable_count ) * 100 /
+ (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
atop_64(max_mem);
if (percent_avail <= (kern_memorystatus_level - 5)) {
kern_memorystatus_level = percent_avail;
percent_avail =
(vm_page_active_count + vm_page_inactive_count +
vm_page_speculative_count + vm_page_free_count +
- vm_page_purgeable_count ) * 100 /
+ (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
atop_64(max_mem);
if (percent_avail >= (kern_memorystatus_level + 5)) {
kern_memorystatus_level = percent_avail;
percent_avail =
(vm_page_active_count + vm_page_inactive_count +
vm_page_speculative_count + vm_page_free_count +
- vm_page_purgeable_count ) * 100 /
+ (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
atop_64(max_mem);
if (percent_avail >= (kern_memorystatus_level + 5)) {
kern_memorystatus_level = percent_avail;
if (!IP_VALID(memory_manager_default) &&
mem->dirty && mem->object->internal &&
(mem->object->purgable == VM_PURGABLE_DENY ||
- mem->object->purgable == VM_PURGABLE_NONVOLATILE)) {
+ mem->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ mem->object->purgable == VM_PURGABLE_VOLATILE)) {
queue_enter(&vm_page_queue_throttled, mem, vm_page_t, pageq);
vm_page_throttled_count++;
mem->throttled = TRUE;
if (!IP_VALID(memory_manager_default) &&
m->dirty && m->object->internal &&
(m->object->purgable == VM_PURGABLE_DENY ||
- m->object->purgable == VM_PURGABLE_NONVOLATILE)) {
+ m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m->object->purgable == VM_PURGABLE_VOLATILE )) {
queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
m->throttled = TRUE;
vm_page_throttled_count++;
if (!IP_VALID(memory_manager_default) &&
!m->fictitious && m->dirty && m->object->internal &&
(m->object->purgable == VM_PURGABLE_DENY ||
- m->object->purgable == VM_PURGABLE_NONVOLATILE)) {
+ m->object->purgable == VM_PURGABLE_NONVOLATILE ||
+ m->object->purgable == VM_PURGABLE_VOLATILE )) {
queue_enter(&vm_page_queue_throttled, m, vm_page_t, pageq);
m->throttled = TRUE;
vm_page_throttled_count++;
percent_avail =
(vm_page_active_count + vm_page_inactive_count +
vm_page_speculative_count + vm_page_free_count +
- vm_page_purgeable_count ) * 100 /
+ (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
atop_64(max_mem);
if (percent_avail <= (kern_memorystatus_level - 5)) {
kern_memorystatus_level = percent_avail;
extern int argstrcpy(char *from, char *to);
extern int getval(char *s, int *val);
-int argstrcpy2(char *from,char *to, unsigned maxlen);
+static int argstrcpy2(char *from,char *to, unsigned maxlen);
#define NUM 0
#define STR 1
return(i);
}
-int
+static int
argstrcpy2(
char *from,
char *to,
/* unrecognized type */
return -1;
}
-
#include <sys/types.h>
#include <sys/ucred.h>
#include <sys/uio.h>
+#include "tests.h"
+
+#if !TARGET_OS_EMBEDDED
#include <XILog/XILog.h>
+#endif
-#include "tests.h"
/* our table of tests to run */
struct stat my_stat_buf;
char my_buffer[64];
/* vars for XILog */
+#if !TARGET_OS_EMBEDDED
XILogRef logRef;
char *logPath = "";
char *config = NULL;
int echo = 0;
int xml = 0;
-
+#endif
sranddev( ); /* set up seed for our random name generator */
g_cmd_namep = argv[0];
g_skip_setuid_tests = 1;
continue;
}
+#if !TARGET_OS_EMBEDDED
if ( strcmp( argv[i], "-x" ) == 0 ||
strcmp( argv[i], "-xilog" ) == 0 ) {
g_xilog_active = 1;
continue;
}
-
+#endif
printf( "invalid argument \"%s\" \n", argv[i] );
usage( );
}
list_all_tests( );
return 0;
}
-
+#if !TARGET_OS_EMBEDDED
if (g_xilog_active == 1) {
logRef = XILogOpenLogExtended( logPath, "xnu_quick_test", "com.apple.coreos",
config, xml, echo, NULL, "ResultOwner",
exit(-1);
}
}
+#endif
/* build a test target directory that we use as our path to create any test
* files and directories.
my_testp = &g_tests[i];
if ( my_testp->test_run_it == 0 || my_testp->test_routine == NULL )
continue;
+#if !TARGET_OS_EMBEDDED
if (g_xilog_active == 1) {
XILogBeginTestCase( logRef, my_testp->test_infop, my_testp->test_infop );
XILogMsg( "test #%d - %s \n", (i + 1), my_testp->test_infop );
}
+#endif
printf( "test #%d - %s \n", (i + 1), my_testp->test_infop );
my_err = my_testp->test_routine( my_testp->test_input );
if ( my_err != 0 ) {
printf("\t--> FAILED \n");
+#if !TARGET_OS_EMBEDDED
if (g_xilog_active == 1) {
XILogMsg("SysCall %s failed", my_testp->test_infop);
XILogErr("Result %d", my_err);
}
+#endif
my_failures++;
if ( my_failures > g_max_failures ) {
+#if !TARGET_OS_EMBEDDED
if (g_xilog_active == 1) {
XILogEndTestCase( logRef, kXILogTestPassOnErrorLevel );
}
+#endif
printf( "\n too many failures - test aborted \n" );
goto exit_this_routine;
}
}
+#if !TARGET_OS_EMBEDDED
if (g_xilog_active == 1) {
XILogEndTestCase(logRef, kXILogTestPassOnErrorLevel);
}
+#endif
}
exit_this_routine:
/* clean up our test directory */
rmdir( &g_target_path[0] );
+#if !TARGET_OS_EMBEDDED
if (g_xilog_active == 1) {
XILogCloseLog(logRef);
}
+#endif
return 0;
} /* main */
printf( "\t -r[un] 1, 3, 10 - 19 # run specific tests. enter individual test numbers and/or range of numbers. use -list to list tests. \n" );
printf( "\t -s[kip] # skip setuid tests \n" );
printf( "\t -t[arget] TARGET_PATH # path to directory where tool will create test files. defaults to \"/tmp/\" \n" );
+#if !TARGET_OS_EMBEDDED
printf( "\t -x[ilog] # use XILog\n");
+#endif
printf( "\nexamples: \n" );
printf( "--- Place all test files and directories at the root of volume \"test_vol\" --- \n" );
printf( "%s -t /Volumes/test_vol/ \n", (my_ptr != NULL) ? my_ptr : g_cmd_namep );
+Product=$(shell tconf --product)
+Embedded=$(shell tconf --test TARGET_OS_EMBEDDED)
+
+ifeq "$(Embedded)" "YES"
+XILogFLAG =
+else
+XILogFLAG = -framework XILog
+endif
+
+CC=gcc $(SYSROOT)
+
ifdef RC_BUILDIT
DOING_BUILDIT=yes
endif
ifndef ARCH
ARCH=i386 x86_64 ppc ppc64
+ # this hack should be removed once tconf gets
+ # <rdar://problem/5667139>
+ ifeq "$(Product)" "iPhone"
+ ARCH=armv6
+ endif
+ ifeq "$(Product)" "AppleTV"
+ ARCH=i386
+ endif
endif
ifdef ARCH
endif
CFLAGS += -g -I /System/Library/Frameworks/System.framework/Versions/B/PrivateHeaders/ -F/AppleInternal/Library/Frameworks/ $(MORECFLAGS)
-LIBFLAGS = -I /System/Library/Frameworks/System.framework/Versions/B/PrivateHeaders/ -F/AppleInternal/Library/Frameworks/ -framework XILog
+LIBFLAGS = -I /System/Library/Frameworks/System.framework/Versions/B/PrivateHeaders -F/AppleInternal/Library/Frameworks/ $(XILogFLAG)
+
#CFLAGS+= $(MY_ARCH) -g -D_POSIX_C_SOURCE=200112L
MY_OBJECTS = $(OBJROOT)/main.o $(OBJROOT)/memory_tests.o $(OBJROOT)/misc.o \
xnu_quick_test : $(OBJROOT) $(DSTROOT) $(MY_OBJECTS) helpers
sudo rm -rf $(DSTROOT)/xnu_quick_test
- cc $(MY_ARCH) $(LIBFLAGS) -o $(DSTROOT)/xnu_quick_test $(MY_OBJECTS)
+ $(CC) $(MY_ARCH) $(LIBFLAGS) -o $(DSTROOT)/xnu_quick_test $(MY_OBJECTS)
sudo chown root $(DSTROOT)/xnu_quick_test
sudo chmod 4755 $(DSTROOT)/xnu_quick_test
# helper processes for the 64-bit version of xnu_quick_test to test the conversion
# from a 32-bit process to a 64-bit process.
helpers : helpers/sleep.c helpers/launch.c helpers/arch.c helperdir $(OBJROOT)/misc.o
- gcc -arch ppc helpers/sleep.c -o $(DSTROOT)/helpers/sleep-ppc32
- gcc -arch i386 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-i386
- gcc -arch x86_64 -pagezero_size 0x100000000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-x86_64-4G
- gcc -arch x86_64 -pagezero_size 0x1000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-x86_64-4K
- gcc -arch ppc64 -pagezero_size 0x100000000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-ppc64-4G
- gcc -arch ppc64 -pagezero_size 0x1000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-ppc64-4K
- gcc $(LIBFLAGS) -arch i386 $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-i386
- gcc $(LIBFLAGS) -arch x86_64 $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-x86_64
- gcc $(LIBFLAGS) -arch ppc $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-ppc
- gcc $(LIBFLAGS) -arch ppc64 $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-ppc64
- gcc -arch ppc -arch ppc64 -arch i386 -arch x86_64 helpers/arch.c -o $(DSTROOT)/helpers/arch
+ifneq "$(Product)" "iPhone"
+ $(CC) -arch i386 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-i386
+endif
+ifeq "$(Product)" "MacOSX"
+ $(CC) -arch x86_64 -pagezero_size 0x100000000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-x86_64-4G
+ $(CC) -arch x86_64 -pagezero_size 0x1000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-x86_64-4K
+ $(CC) -arch ppc helpers/sleep.c -o $(DSTROOT)/helpers/sleep-ppc32
+ $(CC) -arch ppc64 -pagezero_size 0x100000000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-ppc64-4G
+ $(CC) -arch ppc64 -pagezero_size 0x1000 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-ppc64-4K
+endif
+ifneq "$(Product)" "iPhone"
+ $(CC) $(LIBFLAGS) -arch i386 $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-i386
+endif
+ifeq "$(Product)" "MacOS"
+ $(CC) $(LIBFLAGS) -arch x86_64 $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-x86_64
+ $(CC) $(LIBFLAGS) -arch ppc $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-ppc
+ $(CC) $(LIBFLAGS) -arch ppc64 $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-ppc64
+ $(CC) $(MY_ARCH) helpers/arch.c -o $(DSTROOT)/helpers/arch
+endif
+ifeq "$(Product)" "iPhone"
+ $(CC) -arch armv6 helpers/sleep.c -o $(DSTROOT)/helpers/sleep-arm
+ $(CC) $(LIBFLAGS) -arch armv6 $(OBJROOT)/misc.o helpers/launch.c -o $(DSTROOT)/helpers/launch-arm
+endif
+
helperdir :
mkdir -p $(DSTROOT)/helpers
mkdir -p $(DSTROOT);
INCLUDES = /Developer/SDKs/Purple/System/Library/Frameworks/System.framework/Versions/B/PrivateHeaders/
-
$(OBJROOT)/main.o : main.c tests.h
- cc $(CFLAGS) -c main.c -o $@
+ $(CC) $(CFLAGS) -c main.c -o $@
$(OBJROOT)/memory_tests.o : memory_tests.c tests.h
- cc $(CFLAGS) -c memory_tests.c -o $@
+ $(CC) $(CFLAGS) -c memory_tests.c -o $@
# misc.o has to be built 4-way for the helpers to link
$(OBJROOT)/misc.o : misc.c tests.h
- cc -arch i386 -arch x86_64 -arch ppc -arch ppc64 $(CFLAGS) -c misc.c -o $@
+ifeq "$(Product)" "iPhone"
+ $(CC) -arch armv6 $(CFLAGS) -c misc.c -o $@
+else
+ $(CC) -arch i386 -arch x86_64 -arch ppc -arch ppc64 $(CFLAGS) -c misc.c -o $@
+endif
$(OBJROOT)/sema_tests.o : sema_tests.c tests.h
- cc $(CFLAGS) -c sema_tests.c -o $@
+ $(CC) $(CFLAGS) -c sema_tests.c -o $@
$(OBJROOT)/shared_memory_tests.o : shared_memory_tests.c tests.h
- cc $(CFLAGS) -c shared_memory_tests.c -o $@
+ $(CC) $(CFLAGS) -c shared_memory_tests.c -o $@
$(OBJROOT)/socket_tests.o : socket_tests.c tests.h
- cc $(CFLAGS) -c socket_tests.c -o $@
+ $(CC) $(CFLAGS) -c socket_tests.c -o $@
$(OBJROOT)/tests.o : tests.c tests.h
- cc $(CFLAGS) -c tests.c -o $@
+ $(CC) $(CFLAGS) -c tests.c -o $@
$(OBJROOT)/xattr_tests.o : xattr_tests.c tests.h
- cc $(CFLAGS) -c xattr_tests.c -o $@
+ $(CC) $(CFLAGS) -c xattr_tests.c -o $@
ifndef DOING_BUILDIT
sudo rm -f $(DSTROOT)/helpers/*
rm -f $(OBJROOT)/*.o
endif
-
posix_spawnattr_t attr;
char * args[] = {"helpers/arch", NULL};
-
+
my_err = posix_spawnattr_init(&attr);
if (my_err != 0) {
printf("posix_spawnattr_init failed\n");
case CPU_TYPE_POWERPC64:
rval = POWERPC;
break;
+ case CPU_TYPE_ARM:
+ rval = ARM;
+ break;
}
finished:
*/
int sema_tests( void * the_argp )
{
+#if !TARGET_OS_EMBEDDED
int my_err, i;
int my_sem_id = -1;
union semun my_sem_union;
semctl( my_sem_id, 0, IPC_RMID, my_sem_union );
}
return( my_err );
+#else
+ printf( "\t--> Not supported on EMBEDDED TARGET\n" );
+ return 0;
+#endif
}
* **************************************************************************************************************
*/
int shm_tests( void * the_argp )
-{
+{
+#if !TARGET_OS_EMBEDDED
int my_err;
int my_shm_id;
void * my_shm_addr = NULL;
shmdt( my_shm_addr );
}
return( my_err );
+#else
+ printf( "\t--> Not supported on EMBEDDED TARGET\n" );
+ return 0;
+#endif
}
#include <AvailabilityMacros.h> /* for determination of Mac OS X version (tiger, leopard, etc.) */
#include <libkern/OSByteOrder.h> /* for OSSwap32() */
+
extern char g_target_path[ PATH_MAX ];
extern int g_skip_setuid_tests;
extern int g_is_under_rosetta;
goto test_failed_exit;
}
else if ( my_err == -1 ) {
+ int tmp = 0;
+ tmp = getuid( );
+
/* special case when running as root - we get back EPERM when running as root */
my_err = errno;
- if ( (getuid( ) == 0 && my_err != EPERM) || (getuid( ) != 0 && my_err != EACCES) ) {
+#if !TARGET_OS_EMBEDDED
+ if ( ( tmp == 0 && my_err != EPERM) || (tmp != 0 && my_err != EACCES) ) {
+ printf( "access failed with errno %d - %s. \n", my_err, strerror( my_err ) );
+ goto test_failed_exit;
+ }
+#else
+ if ( ( tmp == 0 && my_err != EACCES) || (tmp != 0 && my_err != EACCES) ) {
printf( "access failed with errno %d - %s. \n", my_err, strerror( my_err ) );
goto test_failed_exit;
}
+#endif
}
/* verify correct modes are set */
*/
int chown_fchown_lchown_lstat_symlink_test( void * the_argp )
{
+#if !TARGET_OS_EMBEDDED
int my_err, my_group_count, i;
int my_fd = -1;
char * my_pathp = NULL;
/* set up by getting a list of groups */
my_group_count = getgroups( NGROUPS_MAX, &my_groups[0] );
+ printf("my_group_count: %d\n", my_group_count);
+
if ( my_group_count == -1 || my_group_count < 1 ) {
printf( "getgroups call failed. got errno %d - %s. \n", errno, strerror( errno ) );
goto test_failed_exit;
/* now change group owner to something other than current value */
my_orig_gid = my_sb.st_gid;
my_orig_uid = my_sb.st_uid;
+
+ printf( "st_gid: %d, st_uid: %d, my_group_count: %d\n" );
+
for ( i = 0; i < my_group_count; i++ ) {
if ( my_orig_gid != my_groups[ i ] ) {
if ( my_new_gid1 == 0 ) {
free( my_link_pathp );
}
return( my_err );
+#else
+ printf( "\t--> Test not designed for EMBEDDED TARGET\n" );
+ return 0;
+#endif
}
/* **************************************************************************************************************
printf( "statfs and getattrlist results do not match for volume block size \n" );
goto test_failed_exit;
}
-
+
my_err = 0;
goto test_passed_exit;
if ( my_buffer64p != NULL ) {
free( my_buffer64p );
}
+
return( my_err );
}
char * errmsg = NULL;
char * argvs[2] = {"", NULL};
int bits = get_bits(); /* Gets actual processor bit-ness. */
-
-
+
if (bits != 32 && bits != 64) {
printf("Determination of processor bit-ness failed, get_bits() returned %d.\n", get_bits());
return(-1);
errmsg = "get_architecture() could not determine the CPU architecture.\n";
goto test_failed_exit;
}
+
if (get_architecture() == INTEL) {
if (bits == 64 && sizeof(long) == 8) {
/*
goto test_failed_exit;
}
}
+ else if(get_architecture() == ARM) {
+ if (bits == 32) {
+
+ /* Running on arm hardware. Check case 2. */
+ errmsg = "execve failed: from arm forking and exec()ing 32-bit arm process.\n";
+ argvs[0] = "sleep-arm";
+ if (do_execve_test("helpers/sleep-arm", argvs, NULL, 1))
+ goto test_failed_exit;
+
+ /* Test posix_spawn for arm (should succeed) */
+ errmsg = NULL;
+ if (do_spawn_test(CPU_TYPE_ARM, 0))
+ goto test_failed_exit;
+ }
+ }
else {
/* Just in case someone decides we need more architectures in the future */
printf("get_architecture() returned unknown architecture");
*/
int groups_test( void * the_argp )
{
+#if !TARGET_OS_EMBEDDED
int my_err, i;
int my_group_count, my_orig_group_count;
gid_t my_real_gid;
/* start by getting list of groups the current user belongs to */
my_orig_group_count = getgroups( NGROUPS_MAX, &my_groups[0] );
+ printf("my_orig_group_count: %d\n", my_orig_group_count);
+
if ( my_orig_group_count == -1 || my_orig_group_count < 1 ) {
printf( "getgroups call failed. got errno %d - %s. \n", errno, strerror( errno ) );
goto test_failed_exit;
}
my_group_count = getgroups( NGROUPS_MAX, &my_groups[0] );
+ printf("my_group_count: %d\n", my_group_count);
+
if ( my_group_count == -1 || my_group_count < 1 ) {
printf( "getgroups call failed. got errno %d - %s. \n", errno, strerror( errno ) );
goto test_failed_exit;
test_passed_exit:
return( my_err );
+#else
+ printf( "\t--> Test not designed for EMBEDDED TARGET\n" );
+ return 0;
+#endif
}
my_pid = getpid( );
my_process_group = getpgrp( );
-
/* test getpgrp and getpgid - they should return the same results when 0 is passed to getpgid */
if ( my_process_group != getpgid( 0 ) ) {
printf( "getpgrp and getpgid did not return the same process group ID \n" );
+ printf( "getpgid: %d, my_process_group: %d\n", getpgid( 0 ), my_process_group );
goto test_failed_exit;
}
*/
int quotactl_test( void * the_argp )
{
+#if !TARGET_OS_EMBEDDED
int my_err;
int is_quotas_on = 0;
struct dqblk my_quota_blk;
test_passed_exit:
return( my_err );
+#else
+ printf( "\t--> Not supported on EMBEDDED TARGET\n" );
+ return 0;
+#endif
}
/* **************************************************************************************************************
my_attrlist.bitmapcount = ATTR_BIT_MAP_COUNT;
my_attrlist.commonattr = (ATTR_CMN_OBJTYPE | ATTR_CMN_OBJID | ATTR_CMN_BKUPTIME);
my_err = getattrlist( my_pathp, &my_attrlist, &my_attr_buf[0], sizeof(my_attr_buf[0]), 0 );
+
if ( my_err != 0 ) {
if ( errno == ENOTSUP && is_ufs ) {
/* getattr calls not supported on ufs */
goto test_passed_exit;
test_failed_exit:
- my_err = -1;
+ if(my_err != 0)
+ my_err = -1;
test_passed_exit:
if ( my_fd != -1 )
/* create a test file */
my_err = create_random_name( my_file1_pathp, 1 );
if ( my_err != 0 ) {
+ printf( "create_random_name my_err: %d\n", my_err );
goto test_failed_exit;
}
my_fd1 = open( my_file1_pathp, O_RDWR, 0 );
/* create a test file */
my_err = create_random_name( my_file2_pathp, 1 );
if ( my_err != 0 ) {
+ printf( "create_random_name my_err: %d\n", my_err );
goto test_failed_exit;
}
my_fd2 = open( my_file2_pathp, O_RDWR, 0 );
*/
int aio_tests( void * the_argp )
{
+#if !TARGET_OS_EMBEDDED
int my_err, i;
char * my_pathp;
struct aiocb * my_aiocbp;
}
}
return( my_err );
+#else
+ printf( "\t--> Not supported on EMBEDDED TARGET\n" );
+ return 0;
+#endif
}
*/
int message_queue_tests( void * the_argp )
{
+#if !TARGET_OS_EMBEDDED
int my_err;
int my_msg_queue_id = -1;
ssize_t my_result;
msgctl( my_msg_queue_id, IPC_RMID, NULL );
}
return( my_err );
+#else
+ printf( "\t--> Not supported on EMBEDDED TARGET \n" );
+ return 0;
+#endif
}
#include <sys/ucred.h>
#include <sys/vnode.h>
#include <sys/wait.h>
-
+#include <TargetConditionals.h> /* for TARGET_OS_EMBEDDED */
+
#define MY_BUFFER_SIZE (1024 * 10)
+#define ARM 100 /* arbitrary distinct value, like POWERPC and INTEL below */
#define POWERPC 238947
#define INTEL 38947 /*
* Random values used by execve tests to