goto ErrorExit;
};
- /* do we have permission to change the dates? */
-// if (alist->commonattr & (ATTR_CMN_CRTIME | ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME | ATTR_CMN_BKUPTIME)) {
- if (alist->commonattr & (ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME)) {
- if ((error = hfs_owner_rights(vp, cred, p, true)) != 0) {
- goto ErrorExit;
- };
- };
+ /*
+ * If we are going to change the times:
+	 * 1. Do we have permission to change the dates?
+	 * 2. Is there another fork?  If so, clear any time-related flags on it.
+ */
+ if (alist->commonattr & (ATTR_CMN_MODTIME | ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME)) {
+ if (alist->commonattr & (ATTR_CMN_CHGTIME | ATTR_CMN_ACCTIME)) {
+ if ((error = hfs_owner_rights(vp, cred, p, true)) != 0)
+ goto ErrorExit;
+ }
+
+ /* If there is another fork, clear the flags */
+ if ((hp->h_meta->h_usecount > 1) && (H_FORKTYPE(hp) == kDataFork)) {
+ struct vnode *sib_vp = NULL;
+ struct hfsnode *nhp;
+
+ /* Loop through all siblings, skipping ourselves */
+ simple_lock(&hp->h_meta->h_siblinglock);
+ CIRCLEQ_FOREACH(nhp, &hp->h_meta->h_siblinghead, h_sibling) {
+ if (nhp == hp) /* skip ourselves */
+ continue;
+ sib_vp = HTOV(nhp);
+ }
+ simple_unlock(&hp->h_meta->h_siblinglock);
+
+		/* vget() fails only when the vnode is going away, so just skip the sibling in that case */
+ if (sib_vp && vget(sib_vp, LK_EXCLUSIVE | LK_RETRY, p) == 0) {
+ if (VTOH(sib_vp)->h_nodeflags & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) {
+ if (alist->commonattr & ATTR_CMN_MODTIME)
+ VTOH(sib_vp)->h_nodeflags &= ~IN_UPDATE;
+ if (alist->commonattr & ATTR_CMN_CHGTIME)
+ VTOH(sib_vp)->h_nodeflags &= ~IN_CHANGE;
+ if (alist->commonattr & ATTR_CMN_ACCTIME)
+ VTOH(sib_vp)->h_nodeflags &= ~IN_ACCESS;
+ }
+ vput(sib_vp);
+ } /* vget() */
+	} /* h_usecount > 1 */
+ }
/* save these in case hfs_chown() or hfs_chmod() fail */
saved_uid = hp->h_meta->h_uid;
if ((error = hfs_chflags(vp, flags, cred, p)))
goto ErrorExit;
};
-
+
+
+ /* Update Catalog Tree */
if (alist->volattr == 0) {
error = MacToVFSError( UpdateCatalogNode(HTOVCB(hp), pid, filename, H_HINT(hp), &catInfo.nodeData));
}
- if (alist->volattr & ATTR_VOL_NAME) {
- ExtendedVCB *vcb = VTOVCB(vp);
- int namelen = strlen(vcb->vcbVN);
-
+ /* Volume Rename */
+ if (alist->volattr & ATTR_VOL_NAME) {
+ ExtendedVCB *vcb = VTOVCB(vp);
+ int namelen = strlen(vcb->vcbVN);
+
if (vcb->vcbVN[0] == 0) {
/*
- Ignore attempts to rename a volume to a zero-length name:
- restore the original name from the metadata.
+ * Ignore attempts to rename a volume to a zero-length name:
+ * restore the original name from the metadata.
*/
copystr(H_NAME(hp), vcb->vcbVN, sizeof(vcb->vcbVN), NULL);
} else {
- error = MoveRenameCatalogNode(vcb, kRootParID, H_NAME(hp), H_HINT(hp), kRootParID, vcb->vcbVN, &H_HINT(hp));
+ error = MoveRenameCatalogNode(vcb, kRootParID, H_NAME(hp), H_HINT(hp),
+ kRootParID, vcb->vcbVN, &H_HINT(hp));
if (error) {
- VCB_LOCK(vcb);
- copystr(H_NAME(hp), vcb->vcbVN, sizeof(vcb->vcbVN), NULL); /* Restore the old name in the VCB */
- vcb->vcbFlags |= 0xFF00; // Mark the VCB dirty
- VCB_UNLOCK(vcb);
- goto ErrorExit;
+ VCB_LOCK(vcb);
+ copystr(H_NAME(hp), vcb->vcbVN, sizeof(vcb->vcbVN), NULL); /* Restore the old name in the VCB */
+ vcb->vcbFlags |= 0xFF00; // Mark the VCB dirty
+ VCB_UNLOCK(vcb);
+ goto ErrorExit;
};
-
+
hfs_set_metaname(vcb->vcbVN, hp->h_meta, HTOHFS(hp));
hp->h_nodeflags |= IN_CHANGE;
-#if 0
- /* if hfs wrapper exists, update its name too */
- if (vcb->vcbSigWord == kHFSPlusSigWord && vcb->vcbAlBlSt != 0) {
- HFSMasterDirectoryBlock *mdb;
- struct buf *bp = NULL;
- int size = kMDBSize; /* 512 */
- int volnamelen = MIN(sizeof(Str27), namelen);
-
- if ( bread(VTOHFS(vp)->hfs_devvp, IOBLKNOFORBLK(kMasterDirectoryBlock, size),
- IOBYTECCNTFORBLK(kMasterDirectoryBlock, kMDBSize, size), NOCRED, &bp) == 0) {
-
- mdb = (HFSMasterDirectoryBlock *)((char *)bp->b_data + IOBYTEOFFSETFORBLK(kMasterDirectoryBlock, size));
- if (SWAP_BE16 (mdb->drSigWord) == kHFSSigWord) {
- /* Convert the string to MacRoman, ignoring any errors, */
- (void) utf8_to_hfs(vcb, volnamelen, vcb->vcbVN, Str31 mdb->drVN)
- bawrite(bp);
- bp = NULL;
- }
- }
-
- if (bp) brelse(bp);
- }
-#endif
- }; /* vcb->vcbVN[0] == 0 ... else ... */
- }; /* alist->volattr & ATTR_VOL_NAME */
+ } /* vcb->vcbVN[0] == 0 ... else ... */
+ } /* alist->volattr & ATTR_VOL_NAME */
ErrorExit:
/* unlock catalog b-tree */
switch (which) {
case FREAD:
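+		/* Note select interest in the receive buffer up front (SI_SBSEL replaces SB_SEL); cleared below if data is already available */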
+ so->so_rcv.sb_sel.si_flags |= SI_SBSEL;
if (soreadable(so)) {
splx(s);
retnum = 1;
+ so->so_rcv.sb_sel.si_flags &= ~SI_SBSEL;
goto done;
}
selrecord(p, &so->so_rcv.sb_sel);
- so->so_rcv.sb_flags |= SB_SEL;
break;
case FWRITE:
+ so->so_snd.sb_sel.si_flags |= SI_SBSEL;
if (sowriteable(so)) {
splx(s);
retnum = 1;
+ so->so_snd.sb_sel.si_flags &= ~SI_SBSEL;
goto done;
}
selrecord(p, &so->so_snd.sb_sel);
- so->so_snd.sb_flags |= SB_SEL;
break;
case 0:
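+		/* Exceptional conditions: out-of-band data pending or receive is at the mark */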
+ so->so_rcv.sb_sel.si_flags |= SI_SBSEL;
if (so->so_oobmark || (so->so_state & SS_RCVATMARK)) {
splx(s);
retnum = 1;
+ so->so_rcv.sb_sel.si_flags &= ~SI_SBSEL;
goto done;
}
selrecord(p, &so->so_rcv.sb_sel);
- so->so_rcv.sb_flags |= SB_SEL;
break;
}
splx(s);
splx(s);
return (error);
}
- if (so->so_comp.tqh_first == NULL)
+ if (TAILQ_EMPTY(&so->so_comp))
so->so_options |= SO_ACCEPTCONN;
if (backlog < 0 || backlog > somaxconn)
backlog = somaxconn;
if (so->so_pcb || (so->so_state & SS_NOFDREF) == 0)
return;
- if (head != NULL) {
- if (so->so_state & SS_INCOMP) {
- TAILQ_REMOVE(&head->so_incomp, so, so_list);
- head->so_incqlen--;
- } else if (so->so_state & SS_COMP) {
- TAILQ_REMOVE(&head->so_comp, so, so_list);
- } else {
- panic("sofree: not queued");
- }
+ if (head != NULL) {
+ if (so->so_state & SS_INCOMP) {
+ TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ head->so_incqlen--;
+ } else if (so->so_state & SS_COMP) {
+ /*
+ * We must not decommission a socket that's
+ * on the accept(2) queue. If we do, then
+ * accept(2) may hang after select(2) indicated
+ * that the listening socket was ready.
+ */
+ return;
+ } else {
+ panic("sofree: not queued");
+ }
head->so_qlen--;
so->so_state &= ~(SS_INCOMP|SS_COMP);
so->so_head = NULL;
if (so->so_options & SO_ACCEPTCONN) {
struct socket *sp, *sonext;
- for (sp = so->so_incomp.tqh_first; sp != NULL; sp = sonext) {
- sonext = sp->so_list.tqe_next;
- (void) soabort(sp);
- }
- for (sp = so->so_comp.tqh_first; sp != NULL; sp = sonext) {
- sonext = sp->so_list.tqe_next;
- (void) soabort(sp);
- }
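+		/* Abort connections still being established; sofree() dequeues them from so_incomp */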
+ sp = TAILQ_FIRST(&so->so_incomp);
+ for (; sp != NULL; sp = sonext) {
+ sonext = TAILQ_NEXT(sp, so_list);
+ (void) soabort(sp);
+ }
+ for (sp = TAILQ_FIRST(&so->so_comp); sp != NULL; sp = sonext) {
+ sonext = TAILQ_NEXT(sp, so_list);
+ /* Dequeue from so_comp since sofree() won't do it */
+ TAILQ_REMOVE(&so->so_comp, sp, so_list);
+ so->so_qlen--;
+ sp->so_state &= ~SS_COMP;
+ sp->so_head = NULL;
+ (void) soabort(sp);
+ }
+
}
if (so->so_pcb == 0)
goto discard;
error = error2;
}
discard:
- if (so->so_state & SS_NOFDREF)
+ if (so->so_pcb && so->so_state & SS_NOFDREF)
panic("soclose: NOFDREF");
so->so_state |= SS_NOFDREF;
so->so_proto->pr_domain->dom_refs--;
if (revents == 0) {
if (events & (POLLIN | POLLPRI | POLLRDNORM | POLLRDBAND)) {
selrecord(p, &so->so_rcv.sb_sel);
- so->so_rcv.sb_flags |= SB_SEL;
+ so->so_rcv.sb_sel.si_flags |= SI_SBSEL;
}
if (events & (POLLOUT | POLLWRNORM)) {
selrecord(p, &so->so_snd.sb_sel);
- so->so_snd.sb_flags |= SB_SEL;
+ so->so_snd.sb_sel.si_flags |= SI_SBSEL;
}
}
* called during processing of connect() call,
* resulting in an eventual call to soisconnected() if/when the
* connection is established. When the connection is torn down
- * soisdisconnecting() is called during processing of disconnect() call,
+ * soisdisconnecting() is called during processing of disconnect() call,
* and soisdisconnected() is called when the connection to the peer
* is totally severed. The semantics of these routines are such that
* connectionless protocols can call soisconnected() and soisdisconnected()
* takes no time.
*
* From the passive side, a socket is created with
- * two queues of sockets: so_q0 for connections in progress
- * and so_q for connections already made and awaiting user acceptance.
- * As a protocol is preparing incoming connections, it creates a socket
- * structure queued on so_q0 by calling sonewconn(). When the connection
+ * two queues of sockets: so_incomp for connections in progress
+ * and so_comp for connections already made and awaiting user acceptance.
+ * As a protocol is preparing incoming connections, it creates a socket
+ * structure queued on so_incomp by calling sonewconn(). When the connection
* is established, soisconnected() is called, and transfers the
- * socket structure to so_q, making it available to accept().
- *
- * If a socket is closed with sockets on either
- * so_q0 or so_q, these sockets are dropped.
+ * socket structure to so_comp, making it available to accept().
*
+ * If a socket is closed with sockets on either
+ * so_incomp or so_comp, these sockets are dropped.
+ *
* If higher level protocols are implemented in
* the kernel, the wakeups done here will sometimes
* cause software-interrupt process scheduling.
struct proc *p = current_proc();
- sb->sb_flags &= ~SB_SEL;
thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
+ sb->sb_sel.si_flags &= ~SI_SBSEL;
selwakeup(&sb->sb_sel);
- thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
if (sb->sb_flags & SB_WAIT) {
sb->sb_flags &= ~SB_WAIT;
splx(s);
return (EWOULDBLOCK);
}
- while (head->so_comp.tqh_first == NULL && head->so_error == 0) {
+ while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
if (head->so_state & SS_CANTRCVMORE) {
head->so_error = ECONNABORTED;
break;
* block allowing another process to accept the connection
* instead.
*/
- so = head->so_comp.tqh_first;
+ so = TAILQ_FIRST(&head->so_comp);
TAILQ_REMOVE(&head->so_comp, so, so_list);
head->so_qlen--;
so->so_upcall = aurp_wakeup;
so->so_upcallarg = (caddr_t)AE_UDPIP; /* Yuck */
so->so_state |= SS_NBIO;
- so->so_rcv.sb_flags |=(SB_SEL|SB_NOINTR);
- so->so_snd.sb_flags |=(SB_SEL|SB_NOINTR);
+ so->so_rcv.sb_flags |=SB_NOINTR;
+ so->so_rcv.sb_sel.si_flags |=SI_SBSEL;
+ so->so_snd.sb_flags |=SB_NOINTR;
+ so->so_snd.sb_sel.si_flags |=SI_SBSEL;
out:
sbunlock(&so->so_snd);
lcp -> lcd_window_condition = FALSE;
if (so && ((so -> so_snd.sb_flags & SB_WAIT) ||
- (so -> so_snd.sb_flags & SB_NOTIFY)))
+	    (so -> so_snd.sb_flags & SB_NOTIFY) ||
+	    (so -> so_snd.sb_sel.si_flags & SI_SBSEL)))
sowwakeup (so);
return (PACKET_OK);
printf("tp_sbdroping %d pkts %d bytes on %x at 0x%x\n",
oldi, oldcc - sb->sb_cc, tpcb, seq);
ENDDEBUG
- if (sb->sb_flags & SB_NOTIFY)
+ if ((sb->sb_flags & SB_NOTIFY) || (sb->sb_sel.si_flags & SI_SBSEL))
sowwakeup(tpcb->tp_sock);
return (oldcc - sb->sb_cc);
}
* and internal data.
*/
-#ifdef DIAGNOSE
-#define MCHECK(m) if ((m)->m_type != MT_FREE) panic("mget")
+#if 1
+#define MCHECK(m) if ((m)->m_type != MT_FREE) panic("mget MCHECK: m_type=%x m=%x", m->m_type, m)
#else
#define MCHECK(m)
#endif
void *si_thread; /* thread to be notified */
short si_flags; /* see below */
};
-#define SI_COLL 0x0001 /* collision occurred */
+#define SI_COLL 0x0001 /* collision occurred */
+#define	SI_SBSEL	0x0002		/* socket buffer select wanted (replaces SB_SEL) */
#ifdef KERNEL
struct proc;
#define SB_LOCK 0x01 /* lock on data queue */
#define SB_WANT 0x02 /* someone is waiting to lock */
#define SB_WAIT 0x04 /* someone is waiting for data/space */
-#define SB_SEL 0x08 /* someone is selecting */
+#define	SB_SEL_XXX	0x08		/* don't use; replaced by SI_SBSEL in selinfo */
#define SB_ASYNC 0x10 /* ASYNC I/O, need signals */
-#define SB_NOTIFY (SB_WAIT|SB_SEL|SB_ASYNC)
+#define SB_NOTIFY (SB_WAIT|SB_ASYNC)
#define SB_UPCALL 0x20 /* someone wants an upcall */
#define SB_NOINTR 0x40 /* operations not interruptible */
#define SB_RECV 0x8000 /* this is rcv sb */
/*
* Do we need to notify the other side when I/O is possible?
*/
-#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT|SB_SEL|SB_ASYNC|SB_UPCALL)) != 0)
+#define sb_notify(sb) (((sb)->sb_flags & (SB_WAIT|SB_ASYNC|SB_UPCALL)) != 0 || ((sb)->sb_sel.si_flags & SI_SBSEL) != 0)
/*
* How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
else
turnLEDon |= ADBKS_LED_CAPSLOCK;
- thread_call_func(asyncSetLEDFunc, (thread_call_param_t)this, true);
+ if ( ! isInactive() ) {
+ thread_call_func(asyncSetLEDFunc, (thread_call_param_t)this, true);
+ }
}
void AppleADBKeyboard::setNumLockFeedback ( bool to )
else
turnLEDon |= ADBKS_LED_NUMLOCK;
- thread_call_func(asyncSetLEDFunc, (thread_call_param_t)this, true);
+ if ( ! isInactive() ) {
+ thread_call_func(asyncSetLEDFunc, (thread_call_param_t)this, true);
+ }
}
sleepInfo.powerFlags = 0;
sleepInfo.powerReserved1 = 0;
sleepInfo.powerReserved2 = 0;
+
+#if 1
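+    /* Going to sleep: pull client mappings of the VRAM range (see IOMemoryDescriptor::redirect) */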
+ if( newState == kHardwareSleep) {
+ IOMemoryDescriptor * vram;
+ if( (vram = getVRAMRange())) {
+ vram->redirect( kernel_task, true );
+ vram->release();
+ }
+ }
+#endif
ignore_zero_fault( true );
boolean_t ints = ml_set_interrupts_enabled( false );
ml_set_interrupts_enabled( ints );
ignore_zero_fault( false );
+#if 1
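+    /* Waking: re-establish the client VRAM mappings that were redirected at sleep */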
+ if( newState == kHardwareWake) {
+ IOMemoryDescriptor * vram;
+ if( (vram = getVRAMRange())) {
+ vram->redirect( kernel_task, false );
+ vram->release();
+ }
+ }
+#endif
+
if( powerStateOrdinal) {
powerState = powerStateOrdinal;
if( kAVPowerOn == newState) {
IOVirtualAddress mapAddress,
IOOptionBits options = 0 );
+ // Following methods are private implementation
+
+ // make virtual
+ IOReturn redirect( task_t safeTask, bool redirect );
+
protected:
virtual IOMemoryMap * makeMapping(
IOMemoryDescriptor * owner,
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+// The following classes are private implementations of IOMemoryDescriptor; they
+// should not be referenced directly, only through the public APIs in the
+// IOMemoryDescriptor class.
+
enum {
kIOMemoryRequiresWire = 0x00000001
};
virtual IOReturn complete(IODirection forDirection = kIODirectionNone);
+ // make virtual
+ IOReturn redirect( task_t safeTask, bool redirect );
+
protected:
virtual IOMemoryMap * makeMapping(
IOMemoryDescriptor * owner,
#define _IOKIT_IOSHAREDLOCKIMP_H
#include <architecture/ppc/asm_help.h>
+#ifdef KERNEL
+#undef END
+#include <mach/ppc/asm.h>
+#endif
-// 'Till we're building in kernel
.macro DISABLE_PREEMPTION
#ifdef KERNEL
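+	/* Build a small stack frame, preserve r3 and LR, call the kernel's _disable_preemption, then restore */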
+ stwu r1,-(FM_SIZE)(r1)
+ mflr r0
+ stw r3,FM_ARG0(r1)
+ stw r0,(FM_SIZE+FM_LR_SAVE)(r1)
+ bl EXT(_disable_preemption)
+ lwz r3,FM_ARG0(r1)
+ lwz r1,0(r1)
+ lwz r0,FM_LR_SAVE(r1)
+ mtlr r0
#endif
.endmacro
.macro ENABLE_PREEMPTION
#ifdef KERNEL
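+	/* Mirror image of DISABLE_PREEMPTION: preserve r3 and LR, call _enable_preemption, then restore */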
+ stwu r1,-(FM_SIZE)(r1)
+ mflr r0
+ stw r3,FM_ARG0(r1)
+ stw r0,(FM_SIZE+FM_LR_SAVE)(r1)
+ bl EXT(_enable_preemption)
+ lwz r3,FM_ARG0(r1)
+ lwz r1,0(r1)
+ lwz r0,FM_LR_SAVE(r1)
+ mtlr r0
#endif
.endmacro
if(forDirection == kIODirectionNone)
forDirection = _direction;
- vm_prot_t access = VM_PROT_DEFAULT; // Could be cleverer using direction
+ vm_prot_t access = VM_PROT_DEFAULT; // Could be cleverer using direction
//
// Check user read/write access to the data buffer.
// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
vm_size_t length, unsigned int mapFlags);
+extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOOptionBits options,
IOByteCount offset,
IOByteCount length );
+
+ IOReturn redirect(
+ task_t intoTask, bool redirect );
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
options, offset, length ));
if( !ok) {
logical = 0;
- _memory->release();
+ memory->release();
+ memory = 0;
vm_map_deallocate(addressMap);
addressMap = 0;
}
return( err );
}
+IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
+{
+ IOReturn err;
+ _IOMemoryMap * mapping = 0;
+ OSIterator * iter;
+
+ LOCK;
+
+ do {
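+	/* Apply the redirect to every existing mapping of this descriptor */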
+ if( (iter = OSCollectionIterator::withCollection( _mappings))) {
+ while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
+ mapping->redirect( safeTask, redirect );
+
+ iter->release();
+ }
+ } while( false );
+
+ UNLOCK;
+
+ // temporary binary compatibility
+ IOSubMemoryDescriptor * subMem;
+ if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
+ err = subMem->redirect( safeTask, redirect );
+ else
+ err = kIOReturnSuccess;
+
+ return( err );
+}
+
+IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
+{
+//  temporary binary compatibility: IOMemoryDescriptor::redirect( safeTask, redirect );
+ return( _parent->redirect( safeTask, redirect ));
+}
+
+IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
+{
+ IOReturn err = kIOReturnSuccess;
+
+ if( superMap) {
+// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
+ } else {
+
+ LOCK;
+ if( logical && addressMap
+ && (get_task_map( safeTask) != addressMap)
+ && (0 == (options & kIOMapStatic))) {
+
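+	    /*
+	     * redirect == true: just pull the translations so the mapping
+	     * task faults on its next access.  redirect == false: tear the
+	     * region down and rebuild the mapping with doMap().
+	     */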
+ IOUnmapPages( addressMap, logical, length );
+ if( !redirect) {
+ err = vm_deallocate( addressMap, logical, length );
+ err = memory->doMap( addressMap, &logical,
+ (options & ~kIOMapAnywhere) /*| kIOMapReserve*/ );
+ } else
+ err = kIOReturnSuccess;
+#ifdef DEBUG
+ IOLog("IOMemoryMap::redirect(%d, %x) %x from %lx\n", redirect, err, logical, addressMap);
+#endif
+ }
+ UNLOCK;
+ }
+
+ return( err );
+}
+
IOReturn _IOMemoryMap::unmap( void )
{
IOReturn err;
*/
const char * gIOKernelKmods =
"{
- 'com.apple.kernel' = '1.3.3';
- 'com.apple.kernel.bsd' = '1.0.3';
- 'com.apple.kernel.iokit' = '1.0.3';
- 'com.apple.kernel.libkern' = '1.0.3';
- 'com.apple.kernel.mach' = '1.0.3';
- 'com.apple.iokit.IOADBFamily' = '1.0.3';
- 'com.apple.iokit.IOCDStorageFamily' = '1.0.3';
- 'com.apple.iokit.IODVDStorageFamily' = '1.0.3';
- 'com.apple.iokit.IOGraphicsFamily' = '1.0.3';
- 'com.apple.iokit.IOHIDSystem' = '1.0.3';
- 'com.apple.iokit.IONDRVSupport' = '1.0.3';
- 'com.apple.iokit.IONetworkingFamily' = '1.0.3';
- 'com.apple.iokit.IOPCIFamily' = '1.0.3';
- 'com.apple.iokit.IOStorageFamily' = '1.0.3';
- 'com.apple.iokit.IOSystemManagementFamily' = '1.0.3';
+ 'com.apple.kernel' = '1.3.7';
+ 'com.apple.kernel.bsd' = '1.0.7';
+ 'com.apple.kernel.iokit' = '1.0.7';
+ 'com.apple.kernel.libkern' = '1.0.7';
+ 'com.apple.kernel.mach' = '1.0.7';
+ 'com.apple.iokit.IOADBFamily' = '1.0.7';
+ 'com.apple.iokit.IOCDStorageFamily' = '1.0.7';
+ 'com.apple.iokit.IODVDStorageFamily' = '1.0.7';
+ 'com.apple.iokit.IOGraphicsFamily' = '1.0.7';
+ 'com.apple.iokit.IOHIDSystem' = '1.0.7';
+ 'com.apple.iokit.IONDRVSupport' = '1.0.7';
+ 'com.apple.iokit.IONetworkingFamily' = '1.0.7';
+ 'com.apple.iokit.IOPCIFamily' = '1.0.7';
+ 'com.apple.iokit.IOStorageFamily' = '1.0.7';
+ 'com.apple.iokit.IOSystemManagementFamily' = '1.0.7';
}";
if( (IKOT_IOKIT_OBJECT == type)
|| (IKOT_IOKIT_CONNECT == type))
iokit_add_reference( obj );
- else
+ else
obj = NULL;
}
ip_unlock(port);
return( KERN_SUCCESS );
}
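+
+/* Invalidate the existing translation(s) at va so subsequent accesses fault */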
+kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length)
+{
+ pmap_t pmap = map->pmap;
+ vm_size_t off;
+ boolean_t b;
+
+#if __ppc__
+ b = mapping_remove(pmap, va);
+#else
+ pmap_remove(pmap, va, va + length);
+ b = TRUE;
+#endif
+
+ return( b ? KERN_SUCCESS : KERN_INVALID_ADDRESS );
+}
+
void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
* zero-fill the page in dst_object.
*/
src_page = VM_PAGE_NULL;
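+			/* No source page: leave result_page NULL so the copy loop below zero-fills */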
+ result_page = VM_PAGE_NULL;
} else {
vm_object_lock(src_object);
src_page = vm_page_lookup(src_object,
trunc_page_64(src_offset));
- if (src_page == dst_page)
+ if (src_page == dst_page) {
src_prot = dst_prot;
- else {
+ result_page = VM_PAGE_NULL;
+ } else {
src_prot = VM_PROT_READ;
vm_object_paging_begin(src_object);
return(KERN_MEMORY_ERROR);
}
- src_page = result_page;
assert((src_top_page == VM_PAGE_NULL) ==
- (src_page->object == src_object));
+ (result_page->object == src_object));
}
assert ((src_prot & VM_PROT_READ) != VM_PROT_NONE);
- vm_object_unlock(src_page->object);
+ vm_object_unlock(result_page->object);
}
if (!vm_map_verify(dst_map, dst_version)) {
- if (src_page != VM_PAGE_NULL && src_page != dst_page)
- vm_fault_copy_cleanup(src_page, src_top_page);
+ if (result_page != VM_PAGE_NULL && src_page != dst_page)
+ vm_fault_copy_cleanup(result_page, src_top_page);
vm_fault_copy_dst_cleanup(dst_page);
break;
}
if (dst_page->object->copy != old_copy_object) {
vm_object_unlock(dst_page->object);
vm_map_verify_done(dst_map, dst_version);
- if (src_page != VM_PAGE_NULL && src_page != dst_page)
- vm_fault_copy_cleanup(src_page, src_top_page);
+ if (result_page != VM_PAGE_NULL && src_page != dst_page)
+ vm_fault_copy_cleanup(result_page, src_top_page);
vm_fault_copy_dst_cleanup(dst_page);
break;
}
part_size = amount_left;
}
- if (src_page == VM_PAGE_NULL) {
+ if (result_page == VM_PAGE_NULL) {
vm_page_part_zero_fill(dst_page,
dst_po, part_size);
} else {
- vm_page_part_copy(src_page, src_po,
+ vm_page_part_copy(result_page, src_po,
dst_page, dst_po, part_size);
if(!dst_page->dirty){
vm_object_lock(dst_object);
} else {
part_size = PAGE_SIZE;
- if (src_page == VM_PAGE_NULL)
+ if (result_page == VM_PAGE_NULL)
vm_page_zero_fill(dst_page);
else{
- vm_page_copy(src_page, dst_page);
+ vm_page_copy(result_page, dst_page);
if(!dst_page->dirty){
vm_object_lock(dst_object);
dst_page->dirty = TRUE;
vm_map_verify_done(dst_map, dst_version);
- if (src_page != VM_PAGE_NULL && src_page != dst_page)
- vm_fault_copy_cleanup(src_page, src_top_page);
+ if (result_page != VM_PAGE_NULL && src_page != dst_page)
+ vm_fault_copy_cleanup(result_page, src_top_page);
vm_fault_copy_dst_cleanup(dst_page);
amount_left -= part_size;
if (tmp_entry->is_shared ||
tmp_entry->object.vm_object->true_share ||
map_share) {
- /* dec ref gained in copy_quickly */
- vm_object_lock(src_object);
- src_object->ref_count--;
- vm_object_res_deallocate(src_object);
- vm_object_unlock(src_object);
+ vm_map_unlock(src_map);
new_entry->object.vm_object =
vm_object_copy_delayed(
src_object,
src_offset,
src_size);
+ /* dec ref gained in copy_quickly */
+ vm_object_lock(src_object);
+ src_object->ref_count--;
+ vm_object_res_deallocate(src_object);
+ vm_object_unlock(src_object);
+ vm_map_lock(src_map);
+			/*
+			 * Our copy is finished at this point, so relock
+			 * the map regardless of its state; if there is
+			 * additional data to copy, that will be detected
+			 * at the top of the loop.
+			 *
+			 * Don't do a timestamp check.
+			 */
+
} else {
vm_object_pmap_protect(
src_object,