HFSPlusForkData cnd_datafork;
HFSPlusForkData cnd_rsrcfork;
u_int32_t cnd_iNodeNumCopy;
- u_int8_t cnd_extra[268]; /* make struct at least 520 bytes long */
+ u_int32_t cnd_linkCNID; /* for hard links only */
+ u_int8_t cnd_extra[264]; /* make struct at least 520 bytes long */
struct CatalogNameSpecifier cnd_namespecifier;
};
typedef struct CatalogNodeData CatalogNodeData;
rovingVariableBuffer = variableBuffer;
INIT_CATALOGDATA(&catalogInfo.nodeData, 0);
+ catalogInfo.nodeData.cnd_iNodeNumCopy = 0;
/* The packing call below expects a struct hfsCatalogInfo */
bcopy(cnp, &catalogInfo.nodeData, (cnp->cnd_type == kCatalogFileNode) ?
char iNodeName[32];
UInt32 hint;
UInt32 indlinkno;
+ UInt32 linkparid, linkcnid;
OSErr result;
fip = (struct FInfo *) &cndp->cnd_finderInfo;
/*
* Get nodeData from the data node file.
* Flag the node data to NOT copy the name (ie preserve the original)
+ * Also preserve the parent directory ID.
*/
+ linkparid = cndp->cnm_parID;
+ linkcnid = cndp->cnd_nodeID;
cndp->cnm_flags |= kCatNameNoCopyName;
result = GetCatalogNode(vcb, VCBTOHFS(vcb)->hfs_private_metadata_dir,
iNodeName, 0, 0, cndp, &hint);
/* Keep a copy of iNodeNum to put into h_indnodeno */
cndp->cnd_iNodeNumCopy = indlinkno;
+ cndp->cnm_parID = linkparid;
+ cndp->cnd_linkCNID = linkcnid;
}
}
}
*vpp = NULL;
return (EPERM);
}
- DBG_UTILS(("\thfs_vcreate: On '%s' with forktype of %d, nodeType of 0x%08lX\n", catInfo->nodeData.cnm_nameptr, forkType, (unsigned long)catInfo->nodeData.cnd_type));
-
+
+ /*
+ * If this is a hard link then check if the
+ * data node already exists in our hash.
+ */
+ if ((forkType == kDataFork)
+ && (catInfo->nodeData.cnd_type == kCatalogFileNode)
+ && ((catInfo->nodeData.cnd_mode & IFMT) == IFREG)
+ && (catInfo->nodeData.cnd_linkCount > 0)) {
+ vp = hfs_vhashget(dev, catInfo->nodeData.cnd_nodeID, kDataFork);
+ if (vp != NULL) {
+ /* Use the name of the link and it's parent ID. */
+ hp = VTOH(vp);
+ H_DIRID(hp) = catInfo->nodeData.cnm_parID;
+ hfs_set_metaname(catInfo->nodeData.cnm_nameptr, hp->h_meta, hfsmp);
+ *vpp = vp;
+ return (0);
+ }
+ }
+
/* Must malloc() here, since getnewvnode() can sleep */
MALLOC_ZONE(hp, struct hfsnode *, sizeof(struct hfsnode), M_HFSNODE, M_WAITOK);
bzero((caddr_t)hp, sizeof(struct hfsnode));
};
}
if (a & ATTR_CMN_OBJTAG) *((fsobj_tag_t *)attrbufptr)++ = root_vp->v_tag;
- if (a & ATTR_CMN_OBJID)
- {
- ((fsobj_id_t *)attrbufptr)->fid_objno = catalogInfo->nodeData.cnd_nodeID;
+ if (a & ATTR_CMN_OBJID) {
+ u_int32_t cnid;
+
+ /* For hard links use the link's cnid */
+ if (catalogInfo->nodeData.cnd_iNodeNumCopy != 0)
+ cnid = catalogInfo->nodeData.cnd_linkCNID;
+ else
+ cnid = catalogInfo->nodeData.cnd_nodeID;
+ ((fsobj_id_t *)attrbufptr)->fid_objno = cnid;
((fsobj_id_t *)attrbufptr)->fid_generation = 0;
++((fsobj_id_t *)attrbufptr);
};
- if (a & ATTR_CMN_OBJPERMANENTID)
- {
- ((fsobj_id_t *)attrbufptr)->fid_objno = catalogInfo->nodeData.cnd_nodeID;
+ if (a & ATTR_CMN_OBJPERMANENTID) {
+ u_int32_t cnid;
+
+ /* For hard links use the link's cnid */
+ if (catalogInfo->nodeData.cnd_iNodeNumCopy != 0)
+ cnid = catalogInfo->nodeData.cnd_linkCNID;
+ else
+ cnid = catalogInfo->nodeData.cnd_nodeID;
+ ((fsobj_id_t *)attrbufptr)->fid_objno = cnid;
((fsobj_id_t *)attrbufptr)->fid_generation = 0;
++((fsobj_id_t *)attrbufptr);
};
(char *)varbufptr += attrlength + ((4 - (attrlength & 3)) & 3);
++((struct attrreference *)attrbufptr);
};
- if (a & ATTR_CMN_FLAGS) {
- if (catalogInfo->nodeData.cnd_mode & IFMT) {
- if (catalogInfo->nodeData.cnd_flags & kHFSFileLockedMask) {
- *((u_long *)attrbufptr)++ =
- (u_long) (catalogInfo->nodeData.cnd_ownerFlags |
- (catalogInfo->nodeData.cnd_adminFlags << 16)) |
- UF_IMMUTABLE;
- } else {
- *((u_long *)attrbufptr)++ =
- (u_long) (catalogInfo->nodeData.cnd_ownerFlags |
- (catalogInfo->nodeData.cnd_adminFlags << 16)) & ~UF_IMMUTABLE;
- }
- } else {
- /* The information in the node flag fields is not valid: */
- *((u_long *)attrbufptr)++ =
- (catalogInfo->nodeData.cnd_flags & kHFSFileLockedMask) ? UF_IMMUTABLE : 0;
- };
- };
+ if (a & ATTR_CMN_FLAGS) {
+ u_long flags;
+
+ if (catalogInfo->nodeData.cnd_mode & IFMT)
+ flags = catalogInfo->nodeData.cnd_ownerFlags |
+ catalogInfo->nodeData.cnd_adminFlags << 16;
+ else
+ flags = 0;
+
+ if (catalogInfo->nodeData.cnd_type == kCatalogFileNode) {
+ if (catalogInfo->nodeData.cnd_flags & kHFSFileLockedMask)
+ flags |= UF_IMMUTABLE;
+ else
+ flags &= ~UF_IMMUTABLE;
+ };
+ *((u_long *)attrbufptr)++ = flags;
+ };
if (a & ATTR_CMN_USERACCESS) {
if ((VTOVFS(root_vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) ||
((catalogInfo->nodeData.cnd_mode & IFMT) == 0)) {
if (a & ATTR_CMN_OBJTYPE) *((fsobj_type_t *)attrbufptr)++ = vp->v_type;
if (a & ATTR_CMN_OBJTAG) *((fsobj_tag_t *)attrbufptr)++ = vp->v_tag;
if (a & ATTR_CMN_OBJID) {
- ((fsobj_id_t *)attrbufptr)->fid_objno = H_FILEID(hp);
+ u_int32_t cnid;
+
+ /* For hard links use the link's cnid */
+ if (hp->h_meta->h_metaflags & IN_DATANODE)
+ cnid = catInfo->nodeData.cnd_linkCNID;
+ else
+ cnid = H_FILEID(hp);
+ ((fsobj_id_t *)attrbufptr)->fid_objno = cnid;
((fsobj_id_t *)attrbufptr)->fid_generation = 0;
++((fsobj_id_t *)attrbufptr);
};
if (a & ATTR_CMN_OBJPERMANENTID) {
- ((fsobj_id_t *)attrbufptr)->fid_objno = H_FILEID(hp);
+ u_int32_t cnid;
+
+ /* For hard links use the link's cnid */
+ if (hp->h_meta->h_metaflags & IN_DATANODE)
+ cnid = catInfo->nodeData.cnd_linkCNID;
+ else
+ cnid = H_FILEID(hp);
+ ((fsobj_id_t *)attrbufptr)->fid_objno = cnid;
((fsobj_id_t *)attrbufptr)->fid_generation = 0;
++((fsobj_id_t *)attrbufptr);
};
if (((alist->volattr == 0) && ((alist->commonattr & HFS_ATTR_CMN_LOOKUPMASK) != 0)) ||
((alist->dirattr & HFS_ATTR_DIR_LOOKUPMASK) != 0) ||
- ((alist->fileattr & HFS_ATTR_FILE_LOOKUPMASK) != 0)) {
+ ((alist->fileattr & HFS_ATTR_FILE_LOOKUPMASK) != 0) ||
+ ((alist->commonattr & (ATTR_CMN_OBJID | ATTR_CMN_OBJPERMANENTID))
+ && (hp->h_meta->h_metaflags & IN_DATANODE))) {
/* lock catalog b-tree */
error = hfs_metafilelocking(VTOHFS(vp), kHFSCatalogFileID, LK_SHARED, ap->a_p);
#include <dev/busvar.h>
#include <sys/kdebug.h>
+#include <mach/mach_types.h>
+#include <mach/vm_prot.h>
+#include <mach/semaphore.h>
+#include <mach/sync_policy.h>
+#include <kern/clock.h>
+#include <mach/kern_return.h>
+
extern shared_region_mapping_t system_shared_region;
char copyright[] =
struct vnode *rootvp;
int boothowto = RB_DEBUG;
+#define BSD_PAGABLE_MAP_SIZE (4 * 512 * 1024)
vm_map_t bsd_pageable_map;
vm_map_t mb_map;
+semaphore_t execve_semaphore;
int cmask = CMASK;
/*
* Sets the name for the given task.
*/
-void proc_name(s, p)
+void
+proc_name(s, p)
char *s;
struct proc *p;
{
kernel_flock = funnel_alloc(KERNEL_FUNNEL);
if (kernel_flock == (funnel_t *)0 ) {
- panic("fail to allocate kernel mutex lock\n");
+ panic("bsd_init: Fail to allocate kernel mutex lock");
}
if (!disable_funnel) {
network_flock = funnel_alloc(NETWORK_FUNNEL);
if (network_flock == (funnel_t *)0 ) {
- panic("fail to allocate network mutex lock\n");
+			panic("bsd_init: Fail to allocate network mutex lock");
}
} else {
network_flock = kernel_flock;
/*
* Allocate a kernel submap for pageable memory
- * for temporary copying (table(), execve()).
+ * for temporary copying (execve()).
*/
{
vm_offset_t min;
ret = kmem_suballoc(kernel_map,
&min,
- (vm_size_t)512*1024,
+ (vm_size_t)BSD_PAGABLE_MAP_SIZE,
TRUE,
TRUE,
&bsd_pageable_map);
if (ret != KERN_SUCCESS)
- panic("Failed to allocare bsd pageable map\n");
+		panic("bsd_init: Failed to allocate bsd pageable map");
+ }
+
+ /* Initialize the execve() semaphore */
+ {
+ kern_return_t kret;
+ int value;
+
+ value = BSD_PAGABLE_MAP_SIZE / NCARGS;
+
+ kret = semaphore_create(kernel_task, &execve_semaphore,
+ SYNC_POLICY_FIFO, value);
+ if (kret != KERN_SUCCESS)
+ panic("bsd_init: Failed to create execve semaphore");
}
/*
/* Get the vnode for '/'. Set fdp->fd_fd.fd_cdir to reference it. */
if (VFS_ROOT(mountlist.cqh_first, &rootvnode))
- panic("cannot find root vnode");
+ panic("bsd_init: cannot find root vnode");
filedesc0.fd_cdir = rootvnode;
VREF(rootvnode);
VOP_UNLOCK(rootvnode, 0, p);
#include <mach-o/loader.h>
#include <mach/vm_region.h>
+#include <mach/vm_statistics.h>
#include <vm/vm_kern.h>
* Note: if we can't read, then we end up with
* a hole in the file.
*/
- if ((maxprot & VM_PROT_READ) == VM_PROT_READ) {
+ if ((maxprot & VM_PROT_READ) == VM_PROT_READ && vbr.user_tag != VM_MEMORY_IOKIT) {
error = vn_rdwr(UIO_WRITE, vp, (caddr_t)vmoffset, size, foffset,
UIO_USERSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) 0, p);
}
static int load_return_to_errno(load_return_t lrtn);
int execve(struct proc *p, struct execve_args *uap, register_t *retval);
+static int execargs_alloc(vm_offset_t *addrp);
+static int execargs_free(vm_offset_t addr);
int
execv(p, args, retval)
}
}
- ret = kmem_alloc_pageable(bsd_pageable_map, &execargs, NCARGS);
- if (ret != KERN_SUCCESS)
- return(ENOMEM);
+ error = execargs_alloc(&execargs);
+ if (error)
+ return(error);
savedpath = execargs;
vput(vp);
bad1:
if (execargs)
- kmem_free(bsd_pageable_map, execargs, NCARGS);
+ execargs_free(execargs);
if (!error && vfexec) {
vfork_return(current_act(), p->p_pptr, p, retval);
return(0);
return (0);
}
+#include <mach/mach_types.h>
+#include <mach/vm_prot.h>
+#include <mach/semaphore.h>
+#include <mach/sync_policy.h>
+#include <kern/clock.h>
+#include <mach/kern_return.h>
+
+extern semaphore_t execve_semaphore;
+
+/*
+ * execargs_alloc - allocate the NCARGS-byte pageable buffer used by
+ * execve() for copying argument/environment strings.
+ *
+ * Waits on execve_semaphore first, which throttles the number of
+ * concurrent execve()s to the number of NCARGS-sized buffers that fit
+ * in bsd_pageable_map (the semaphore's initial value is computed as
+ * BSD_PAGABLE_MAP_SIZE / NCARGS in bsd_init).
+ *
+ * On success, stores the buffer address through addrp and returns 0.
+ * On failure, returns a BSD errno; no memory or semaphore count is held.
+ */
+static int
+execargs_alloc(addrp)
+	vm_offset_t *addrp;
+{
+	kern_return_t kret;
+
+	/* Block until a buffer slot is available. */
+	kret = semaphore_wait(execve_semaphore);
+	if (kret != KERN_SUCCESS)
+		/* Map Mach wait failures onto BSD error codes. */
+		switch (kret) {
+		default:
+			return (EINVAL);
+		case KERN_INVALID_ADDRESS:
+		case KERN_PROTECTION_FAILURE:
+			return (EACCES);
+		case KERN_ABORTED:
+		case KERN_OPERATION_TIMED_OUT:
+			return (EINTR);
+		}
+
+	kret = kmem_alloc_pageable(bsd_pageable_map, addrp, NCARGS);
+	if (kret != KERN_SUCCESS)
+		return (ENOMEM);
+
+	return (0);
+}
+
+/*
+ * execargs_free - release an argument buffer obtained from
+ * execargs_alloc() and signal execve_semaphore so a waiting
+ * execve() may proceed.
+ *
+ * The memory is always freed; the return value reflects only the
+ * semaphore signal: 0 on success, otherwise a BSD errno mapped from
+ * the Mach status.
+ */
+static int
+execargs_free(addr)
+	vm_offset_t addr;
+{
+	kern_return_t kret;
+
+	kmem_free(bsd_pageable_map, addr, NCARGS);
+
+	/* Wake one waiter; map Mach status onto a BSD errno. */
+	kret = semaphore_signal(execve_semaphore);
+	switch (kret) {
+	case KERN_INVALID_ADDRESS:
+	case KERN_PROTECTION_FAILURE:
+		return (EINVAL);
+	case KERN_ABORTED:
+	case KERN_OPERATION_TIMED_OUT:
+		return (EINTR);
+	case KERN_SUCCESS:
+		return(0);
+	default:
+		return (EINVAL);
+	}
+}
+
int
signal_lock(struct proc *p)
{
+int error = 0;
#if SIGNAL_DEBUG
#ifdef __ppc__
{
#endif /* __ppc__ */
#endif /* SIGNAL_DEBUG */
- return(lockmgr(&p->signal_lock, LK_EXCLUSIVE, 0, (struct proc *)0));
+siglock_retry:
+ error = lockmgr(&p->signal_lock, LK_EXCLUSIVE, 0, (struct proc *)0);
+ if (error == EINTR)
+ goto siglock_retry;
+ return(error);
}
int
if (lo_dlt) {
if ((*m)->m_flags & M_BCAST) {
struct mbuf *n = m_copy(*m, 0, (int)M_COPYALL);
- dlil_output(lo_dlt, n, 0, ndest, 0);
+ if (n != NULL)
+ dlil_output(lo_dlt, n, 0, ndest, 0);
}
else
{
#endif
#define errno nbperrno
-#define NBP_DEBUG 0
/* externs */
extern at_ifaddr_t *ifID_table[];
mlen = m->m_len;
w = mtod(m, u_short *);
+skip_start:
if (len < mlen)
mlen = len;
-skip_start:
sum = xsum_assym(w, mlen, sum, starting_on_odd);
len -= mlen;
if (mlen & 0x1)
} else {
mlen = m->m_len;
}
- if (len < mlen)
- mlen = len;
skip_start:
+ if (len < mlen)
+ mlen = len;
len -= mlen;
/*
goto bad;
}
if (m->m_pkthdr.len > ip->ip_len) {
+ /* Invalidate hwcksuming */
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+
if (m->m_len == m->m_pkthdr.len) {
m->m_len = ip->ip_len;
m->m_pkthdr.len = ip->ip_len;
#define B_WANTED 0x00800000 /* Process wants this buffer. */
#define B_WRITE 0x00000000 /* Write buffer (pseudo flag). */
#define B_WRITEINPROG 0x01000000 /* Write in progress. */
-#define B_UNUSED0 0x02000000 /* Unused bit */
+#define B_HDRALLOC 0x02000000 /* zone allocated buffer header */
#define B_UNUSED1 0x04000000 /* Unused bit */
#define B_NEED_IODONE 0x08000000
/* need to do a biodone on the */
/*
* Definitions for the buffer free lists.
*/
-#define BQUEUES 5 /* number of free buffer queues */
+#define BQUEUES 6 /* number of free buffer queues */
#define BQ_LOCKED 0 /* super-blocks &c */
#define BQ_LRU 1 /* lru, useful buffers */
#define BQ_AGE 2 /* rubbish */
#define BQ_EMPTY 3 /* buffer headers with no memory */
#define BQ_META 4 /* buffer containing metadata */
+#define BQ_LAUNDRY 5 /* buffers that need cleaning */
__BEGIN_DECLS
int allocbuf __P((struct buf *, int));
static struct buf *getnewbuf(int slpflag, int slptimeo, int *queue);
extern int niobuf; /* The number of IO buffer headers for cluster IO */
+int blaundrycnt;
#if TRACE
struct proc *traceproc;
#define BHASHENTCHECK(bp) \
if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
- panic("%x: b_hash.le_prev is deadb", (bp));
+ panic("%x: b_hash.le_prev is not deadbeef", (bp));
#define BLISTNONE(bp) \
(bp)->b_hash.le_next = (struct buf *)0; \
bp->b_timestamp = 0;
}
+/*
+ * bufhdrinit - initialize a buffer header to a clean, unattached state.
+ *
+ * Zeroes the whole header, then sets the fields whose "empty" value is
+ * not all-bits-zero: no device, no credentials, not on any vnode
+ * buffer list, and marked B_INVAL until real data is attached.
+ * Shared by bufinit()'s two setup loops and by the BQ_EMPTY header
+ * allocation path in getnewbuf().
+ */
+static __inline__ void
+bufhdrinit(struct buf *bp)
+{
+	bzero((char *)bp, sizeof *bp);
+	bp->b_dev = NODEV;
+	bp->b_rcred = NOCRED;
+	bp->b_wcred = NOCRED;
+	bp->b_vnbufs.le_next = NOLIST;
+	bp->b_flags = B_INVAL;
+
+	return;
+}
+
/*
* Initialize buffers and hash links for buffers.
*/
register int i;
int metabuf;
long whichq;
-#if ZALLOC_METADATA
static void bufzoneinit();
-#endif /* ZALLOC_METADATA */
+ static void bcleanbuf_thread_init();
/* Initialize the buffer queues ('freelists') and the hash table */
for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
/* Initialize the buffer headers */
for (i = 0; i < nbuf; i++) {
bp = &buf[i];
- bzero((char *)bp, sizeof *bp);
- bp->b_dev = NODEV;
- bp->b_rcred = NOCRED;
- bp->b_wcred = NOCRED;
- bp->b_vnbufs.le_next = NOLIST;
- bp->b_flags = B_INVAL;
+ bufhdrinit(bp);
+
/*
* metabuf buffer headers on the meta-data list and
* rest of the buffer headers on the empty list
*/
- if (--metabuf )
+ if (--metabuf)
whichq = BQ_META;
else
whichq = BQ_EMPTY;
for (; i < nbuf + niobuf; i++) {
bp = &buf[i];
- bzero((char *)bp, sizeof *bp);
- bp->b_dev = NODEV;
- bp->b_rcred = NOCRED;
- bp->b_wcred = NOCRED;
- bp->b_vnbufs.le_next = NOLIST;
- bp->b_flags = B_INVAL;
+ bufhdrinit(bp);
binsheadfree(bp, &iobufqueue, -1);
}
printf("using %d buffer headers and %d cluster IO buffer headers\n",
nbuf, niobuf);
-#if ZALLOC_METADATA
- /* Set up zones for meta-data */
+ /* Set up zones used by the buffer cache */
bufzoneinit();
-#endif
-#if XXX
+ /* start the bcleanbuf() thread */
+ bcleanbuf_thread_init();
+
+#if 0 /* notyet */
/* create a thread to do dynamic buffer queue balancing */
bufq_balance_thread_init();
#endif /* XXX */
{NULL, (MINMETA * 8), 512 * (MINMETA * 8), "buf.4096" },
{NULL, 0, 0, "" } /* End */
};
+#endif /* ZALLOC_METADATA */
+
+zone_t buf_hdr_zone;
+int buf_hdr_count;
/*
* Initialize the meta data zones
static void
bufzoneinit(void)
{
+#if ZALLOC_METADATA
int i;
for (i = 0; meta_zones[i].mz_size != 0; i++) {
PAGE_SIZE,
meta_zones[i].mz_name);
}
+#endif /* ZALLOC_METADATA */
+ buf_hdr_zone = zinit(sizeof(struct buf), 32, PAGE_SIZE, "buf headers");
}
+#if ZALLOC_METADATA
static zone_t
getbufzone(size_t size)
{
s = splbio();
/* invalid request gets empty queue */
- if ((*queue > BQUEUES) || (*queue < 0))
+ if ((*queue > BQUEUES) || (*queue < 0)
+ || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED))
*queue = BQ_EMPTY;
/* (*queue == BQUEUES) means no preference */
*queue = BQ_EMPTY;
goto found;
}
-#if DIAGNOSTIC
- /* with UBC this is a fatal condition */
- panic("getnewbuf: No useful buffers");
-#else
+
+	/* Create a new temporary buffer header */
+ bp = (struct buf *)zalloc(buf_hdr_zone);
+
+ if (bp) {
+ bufhdrinit(bp);
+ BLISTNONE(bp);
+ binshash(bp, &invalhash);
+ SET(bp->b_flags, B_HDRALLOC);
+ *queue = BQ_EMPTY;
+ binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
+ buf_hdr_count++;
+ goto found;
+ }
+
/* Log this error condition */
printf("getnewbuf: No useful buffers");
-#endif /* DIAGNOSTIC */
-
+
/* wait for a free buffer of any kind */
needbuffer = 1;
bufstats.bufs_sleeps++;
if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef)
panic("bcleanbuf: le_prev is deadbeef");
- /* If buffer was a delayed write, start it, and return 1 */
+ /*
+ * If buffer was a delayed write, start the IO by queuing
+ * it on the LAUNDRY queue, and return 1
+ */
if (ISSET(bp->b_flags, B_DELWRI)) {
splx(s);
- bawrite (bp);
+ binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
+ blaundrycnt++;
+ wakeup(&blaundrycnt);
return (1);
}
register struct buf *bp;
register struct bqueues *dp;
int counts[MAXBSIZE/CLBYTES+1];
- static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY", "META" };
+ static char *bname[BQUEUES] =
+ { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
count = 0;
bufqlim[BQ_META].bl_target = nbuftarget/4;
bufqlim[BQ_META].bl_stale = META_IS_STALE;
+	/*
+	 * LAUNDRY queue.
+	 * Fixed: these limits were mistakenly stored through BQ_LOCKED,
+	 * which clobbered the LOCKED queue's settings above and left
+	 * bufqlim[BQ_LAUNDRY] uninitialized.
+	 */
+	bufqlim[BQ_LAUNDRY].bl_nlow = 0;
+	bufqlim[BQ_LAUNDRY].bl_nlhigh = 32;
+	bufqlim[BQ_LAUNDRY].bl_target = 0;
+	bufqlim[BQ_LAUNDRY].bl_stale = 30;
+
buqlimprt(1);
}
if ((q < 0) || (q >= BQUEUES))
goto out;
- /* LOCKED queue MUST not be balanced */
- if (q == BQ_LOCKED)
+ /* LOCKED or LAUNDRY queue MUST not be balanced */
+ if ((q == BQ_LOCKED) || (q == BQ_LAUNDRY))
goto out;
n = (bufqlim[q].bl_num - bufqlim[q].bl_target);
buqlimprt(int all)
{
int i;
- static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY", "META" };
+ static char *bname[BQUEUES] =
+ { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
if (all)
for (i = 0; i < BQUEUES; i++) {
printf("cur = %d, ", (long)bufqlim[i].bl_num);
}
}
+
+/*
+ * If the getnewbuf() calls bcleanbuf() on the same thread
+ * there is a potential for stack overrun and deadlocks.
+ * So we always handoff the work to worker thread for completion
+ */
+
+/*
+ * bcleanbuf_thread_init - spawn the worker thread that flushes
+ * delayed-write buffers queued on BQ_LAUNDRY (see bcleanbuf_thread).
+ * Called once from bufinit().
+ */
+static void
+bcleanbuf_thread_init()
+{
+	static void bcleanbuf_thread();
+
+	/* create worker thread */
+	kernel_thread(kernel_task, bcleanbuf_thread);
+}
+
+/*
+ * bcleanbuf_thread - worker loop that writes out dirty buffers placed
+ * on the BQ_LAUNDRY queue (queued by bcleanbuf, which also bumps
+ * blaundrycnt and wakes us), keeping that IO off the caller's stack.
+ * This function loops forever and never returns.
+ */
+static void
+bcleanbuf_thread()
+{
+	boolean_t funnel_state;
+	struct buf *bp;
+
+	/* Buffer-cache work must run under the kernel funnel. */
+	funnel_state = thread_funnel_set(kernel_flock, TRUE);
+
+doit:
+	/* Sleep until work arrives; re-check after each wakeup/timeout. */
+	while (blaundrycnt == 0)
+		(void)tsleep((void *)&blaundrycnt, PRIBIO, "blaundry", 60 * hz);
+	bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY]);
+	/* Remove from the queue */
+	bremfree(bp);
+	blaundrycnt--;
+	/* do the IO */
+	bawrite(bp);
+	/* start again */
+	goto doit;
+
+	/* NOTE(review): unreachable - the loop above never exits. */
+	(void) thread_funnel_set(kernel_flock, funnel_state);
+}
+
*
* @APPLE_LICENSE_HEADER_END@
*/
-
#include <sys/param.h>
#include <sys/utfconv.h>
#include <sys/errno.h>
#include <architecture/byte_order.h>
-
/*
- * UTF-8 (UCS Transformation Format)
+ * UTF-8 (Unicode Transformation Format)
*
- * The following subset of UTF-8 is used to encode UCS-2 filenames. It
- * requires a maximum of three 3 bytes per UCS-2 character. Only the
- * shortest encoding required to represent the significant UCS-2 bits
- * is legal.
+ * UTF-8 is the Unicode Transformation Format that serializes a Unicode
+ * character as a sequence of one to four bytes. Only the shortest form
+ * required to represent the significant Unicode bits is legal.
*
* UTF-8 Multibyte Codes
*
- * Bytes Bits UCS-2 Min UCS-2 Max UTF-8 Byte Sequence (binary)
- * -------------------------------------------------------------------
- * 1 7 0x0000 0x007F 0xxxxxxx
- * 2 11 0x0080 0x07FF 110xxxxx 10xxxxxx
- * 3 16 0x0800 0xFFFF 1110xxxx 10xxxxxx 10xxxxxx
- * -------------------------------------------------------------------
+ * Bytes Bits Unicode Min Unicode Max UTF-8 Byte Sequence (binary)
+ * -----------------------------------------------------------------------------
+ * 1 7 0x0000 0x007F 0xxxxxxx
+ * 2 11 0x0080 0x07FF 110xxxxx 10xxxxxx
+ * 3 16 0x0800 0xFFFF 1110xxxx 10xxxxxx 10xxxxxx
+ * 4 21 0x10000 0x10FFFF 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ * -----------------------------------------------------------------------------
*/
-#define UCS_TO_UTF8_LEN(c) ((c) < 0x0080 ? 1 : ((c) < 0x0800 ? 2 : 3))
+#define UNICODE_TO_UTF8_LEN(c) \
+ ((c) < 0x0080 ? 1 : ((c) < 0x0800 ? 2 : (((c) & 0xf800) == 0xd800 ? 2 : 3)))
#define UCS_ALT_NULL 0x2400
+/* Surrogate Pair Constants */
+#define SP_HALF_SHIFT 10
+#define SP_HALF_BASE 0x0010000UL
+#define SP_HALF_MASK 0x3FFUL
+
+#define SP_HIGH_FIRST 0xD800UL
+#define SP_HIGH_LAST 0xDBFFUL
+#define SP_LOW_FIRST 0xDC00UL
+#define SP_LOW_LAST 0xDFFFUL
-static u_int16_t ucs_decompose __P((u_int16_t, u_int16_t *));
+
+static u_int16_t ucs_decompose(u_int16_t, u_int16_t *);
static u_int16_t ucs_combine(u_int16_t base, u_int16_t comb);
+/*
+ * Number of UTF-8 continuation bytes implied by a lead byte, indexed
+ * by the byte's top five bits (utf_extrabytes[byte >> 3]):
+ *   0x00-0x7F -> 0,  0xC0-0xDF -> 1,  0xE0-0xEF -> 2,  0xF0-0xF7 -> 3;
+ *   -1 marks an invalid lead byte (a bare continuation byte
+ *   0x80-0xBF, or 0xF8 and above).
+ * NOTE(review): the -1 entries rely on plain 'char' being signed;
+ * on an unsigned-char target they become 255 - confirm the intended
+ * behavior of the utf8len comparison in utf8_decodestr.
+ */
+char utf_extrabytes[32] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	-1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 2, 2, 3, -1
+};
+
+
/*
- * utf8_encodelen - Calculates the UTF-8 encoding length for a UCS-2 filename
+ * utf8_encodelen - Calculates the UTF-8 encoding length for a Unicode filename
*
* NOTES:
* If '/' chars are allowed on disk then an alternate
* (replacement) char must be provided in altslash.
*
* input flags:
- * UTF_REVERSE_ENDIAN: UCS-2 byteorder is opposite current runtime
+ * UTF_REVERSE_ENDIAN: Unicode byteorder is opposite current runtime
*/
size_t
-utf8_encodelen(ucsp, ucslen, altslash, flags)
- const u_int16_t * ucsp;
- size_t ucslen;
- u_int16_t altslash;
- int flags;
+utf8_encodelen(const u_int16_t * ucsp, size_t ucslen, u_int16_t altslash,
+ int flags)
{
u_int16_t ucs_ch;
int charcnt;
else if (ucs_ch == '\0')
ucs_ch = UCS_ALT_NULL;
- len += UCS_TO_UTF8_LEN(ucs_ch);
+ len += UNICODE_TO_UTF8_LEN(ucs_ch);
}
return (len);
/*
- * utf8_encodestr - Encodes a UCS-2 (Unicode) string to UTF-8
+ * utf8_encodestr - Encodes a Unicode string to UTF-8
*
* NOTES:
* The resulting UTF-8 string is NULL terminated.
* (replacement) char must be provided in altslash.
*
* input flags:
- * UTF_REVERSE_ENDIAN: UCS-2 byteorder is opposite current runtime
+ * UTF_REVERSE_ENDIAN: Unicode byteorder is opposite current runtime
* UTF_NO_NULL_TERM: don't add NULL termination to UTF-8 output
*
* result:
* ENAMETOOLONG: Name didn't fit; only buflen bytes were encoded
* EINVAL: Illegal char found; char was replaced by an '_'.
*/
-int utf8_encodestr(ucsp, ucslen, utf8p, utf8len, buflen, altslash, flags)
- const u_int16_t * ucsp;
- size_t ucslen;
- u_int8_t * utf8p;
- size_t * utf8len;
- size_t buflen;
- u_int16_t altslash;
- int flags;
+int
+utf8_encodestr(const u_int16_t * ucsp, size_t ucslen, u_int8_t * utf8p,
+ size_t * utf8len, size_t buflen, u_int16_t altslash, int flags)
{
u_int8_t * bufstart;
u_int8_t * bufend;
if (utf8p >= bufend) {
result = ENAMETOOLONG;
break;
- }
+ }
*utf8p++ = ucs_ch;
} else if (ucs_ch < 0x800) {
result = ENAMETOOLONG;
break;
}
- *utf8p++ = (ucs_ch >> 6) | 0xc0;
- *utf8p++ = (ucs_ch & 0x3f) | 0x80;
+ *utf8p++ = 0xc0 | (ucs_ch >> 6);
+ *utf8p++ = 0x80 | (0x3f & ucs_ch);
} else {
+ /* Combine valid surrogate pairs */
+ if (ucs_ch >= SP_HIGH_FIRST && ucs_ch <= SP_HIGH_LAST
+ && charcnt > 0) {
+ u_int16_t ch2;
+ u_int32_t pair;
+
+ ch2 = swapbytes ? NXSwapShort(*ucsp) : *ucsp;
+ if (ch2 >= SP_LOW_FIRST && ch2 <= SP_LOW_LAST) {
+ pair = ((ucs_ch - SP_HIGH_FIRST) << SP_HALF_SHIFT)
+ + (ch2 - SP_LOW_FIRST) + SP_HALF_BASE;
+ if ((utf8p + 3) >= bufend) {
+ result = ENAMETOOLONG;
+ break;
+ }
+ --charcnt;
+ ++ucsp;
+ *utf8p++ = 0xf0 | (pair >> 18);
+ *utf8p++ = 0x80 | (0x3f & (pair >> 12));
+ *utf8p++ = 0x80 | (0x3f & (pair >> 6));
+ *utf8p++ = 0x80 | (0x3f & pair);
+ continue;
+ }
+ }
if ((utf8p + 2) >= bufend) {
result = ENAMETOOLONG;
break;
}
- *utf8p++ = (ucs_ch >> 12) | 0xe0;
- *utf8p++ = ((ucs_ch >> 6) & 0x3f) | 0x80;
- *utf8p++ = ((ucs_ch) & 0x3f) | 0x80;
+ *utf8p++ = 0xe0 | (ucs_ch >> 12);
+ *utf8p++ = 0x80 | (0x3f & (ucs_ch >> 6));
+ *utf8p++ = 0x80 | (0x3f & ucs_ch);
}
}
/*
- * utf8_decodestr - Decodes a UTF-8 string back to UCS-2 (Unicode)
+ * utf8_decodestr - Decodes a UTF-8 string back to Unicode
*
* NOTES:
* The input UTF-8 string does not need to be null terminated
* (replacement) char must be provided in altslash.
*
* input flags:
- * UTF_REV_ENDIAN: UCS-2 byteorder is oposite current runtime
- * UTF_DECOMPOSED: UCS-2 output string must be fully decompsed
+ * UTF_REV_ENDIAN: Unicode byteorder is oposite current runtime
+ * UTF_DECOMPOSED: Unicode output string must be fully decompsed
*
* result:
* ENAMETOOLONG: Name didn't fit; only ucslen chars were decoded.
* EINVAL: Illegal UTF-8 sequence found.
*/
int
-utf8_decodestr(utf8p, utf8len, ucsp, ucslen, buflen, altslash, flags)
- const u_int8_t* utf8p;
- size_t utf8len;
- u_int16_t* ucsp;
- size_t *ucslen;
- size_t buflen;
- u_int16_t altslash;
- int flags;
+utf8_decodestr(const u_int8_t* utf8p, size_t utf8len, u_int16_t* ucsp,
+ size_t *ucslen, size_t buflen, u_int16_t altslash, int flags)
{
u_int16_t* bufstart;
u_int16_t* bufend;
bufend = (u_int16_t *)((u_int8_t *)ucsp + buflen);
while (utf8len-- > 0 && (byte = *utf8p++) != '\0') {
- if (ucsp >= bufend) {
- result = ENAMETOOLONG;
- goto stop;
- }
+ if (ucsp >= bufend)
+ goto toolong;
/* check for ascii */
if (byte < 0x80) {
- ucs_ch = byte;
+ ucs_ch = byte; /* 1st byte */
} else {
- switch (byte & 0xf0) {
- /* 2 byte sequence */
- case 0xc0:
- case 0xd0:
- /* extract bits 6 - 10 from first byte */
- ucs_ch = (byte & 0x1F) << 6;
- if (ucs_ch < 0x0080) {
- result = EINVAL; /* seq not minimal */
- goto stop;
- }
- break;
- /* 3 byte sequence */
- case 0xe0:
- /* extract bits 12 - 15 from first byte */
- ucs_ch = (byte & 0x0F) << 6;
-
- /* extract bits 6 - 11 from second byte */
- if (((byte = *utf8p++) & 0xc0) != 0x80) {
- result = EINVAL;
- goto stop;
- }
- utf8len--;
-
- ucs_ch += (byte & 0x3F);
- ucs_ch <<= 6;
+ u_int32_t ch;
+ int extrabytes = utf_extrabytes[byte >> 3];
+
+ if (utf8len < extrabytes)
+ goto invalid;
+ utf8len -= extrabytes;
+
+ switch (extrabytes) {
+ case 1: ch = byte; /* 1st byte */
+ ch <<= 6;
+ ch += *utf8p++; /* 2nd byte */
+ ch -= 0x00003080UL;
+ if (ch < 0x0080)
+ goto invalid;
+ ucs_ch = ch;
+ break;
+
+ case 2: ch = byte; /* 1st byte */
+ ch <<= 6;
+ ch += *utf8p++; /* 2nd byte */
+ ch <<= 6;
+ ch += *utf8p++; /* 3rd byte */
+ ch -= 0x000E2080UL;
+ if (ch < 0x0800)
+ goto invalid;
+ ucs_ch = ch;
+ break;
+
+ case 3: ch = byte; /* 1st byte */
+ ch <<= 6;
+ ch += *utf8p++; /* 2nd byte */
+ ch <<= 6;
+ ch += *utf8p++; /* 3rd byte */
+ ch <<= 6;
+ ch += *utf8p++; /* 4th byte */
+ ch -= 0x03C82080UL + SP_HALF_BASE;
+ ucs_ch = (ch >> SP_HALF_SHIFT) + SP_HIGH_FIRST;
+ *ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch;
+ if (ucsp >= bufend)
+ goto toolong;
+ ucs_ch = (ch & SP_HALF_MASK) + SP_LOW_FIRST;
+ *ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch;
+ continue;
- if (ucs_ch < 0x0800) {
- result = EINVAL; /* sequence not minimal */
- goto stop;
- }
- break;
default:
- result = EINVAL;
- goto stop;
+ goto invalid;
}
-
- /* extract bits 0 - 5 from final byte */
- if (((byte = *utf8p++) & 0xc0) != 0x80) {
- result = EINVAL;
- goto stop;
- }
- utf8len--;
- ucs_ch += (byte & 0x3F);
-
if (decompose) {
u_int16_t comb_ch[2];
if (comb_ch[0]) {
*ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch;
- if (ucsp >= bufend) {
- result = ENAMETOOLONG;
- goto stop;
- }
+ if (ucsp >= bufend)
+ goto toolong;
ucs_ch = comb_ch[0];
if (comb_ch[1]) {
*ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch;
- if (ucsp >= bufend) {
- result = ENAMETOOLONG;
- goto stop;
- }
+ if (ucsp >= bufend)
+ goto toolong;
ucs_ch = comb_ch[1];
}
}
if (ucs_ch == UCS_ALT_NULL)
ucs_ch = '\0';
}
-
if (ucs_ch == altslash)
ucs_ch = '/';
- if (swapbytes)
- ucs_ch = NXSwapShort(ucs_ch);
- *ucsp++ = ucs_ch;
+ *ucsp++ = swapbytes ? NXSwapShort(ucs_ch) : ucs_ch;
}
-stop:
+
+exit:
*ucslen = (u_int8_t*)ucsp - (u_int8_t*)bufstart;
return (result);
+
+invalid:
+ result = EINVAL;
+ goto exit;
+
+toolong:
+ result = ENAMETOOLONG;
+ goto exit;
}
/* CYRILLIC codepoints 0x0400 ~ 0x04FF */
static const unsigned long __CyrillicDecompBitmap[] = {
- 0x40000040, 0x00000040, 0x00004000, 0x00000000, /* 0x0400 */
+ 0x510A0040, 0x00000040, 0x0000510A, 0x00000000, /* 0x0400 */
0x00000000, 0x00000000, 0x00000000, 0x00000000, /* 0x0480 */
};
(table[(unicodeVal) / 32] & (1 << (31 - ((unicodeVal) % 32))))
/*
- * ucs_decompose - decompose a composed UCS-2 char
+ * ucs_decompose - decompose a composed Unicode char
*
* Composed Unicode characters are forbidden on
* HFS Plus volumes. ucs_decompose will convert a
/* Handle CYRILLIC LETTERs */
switch(ch) {
case 0x0401: base = 0x0415; cmb[0] = 0x0308; break; /* */
+ case 0x0403: base = 0x0413; cmb[0] = 0x0301; break; /* */
+ case 0x0407: base = 0x0406; cmb[0] = 0x0308; break; /* */
+ case 0x040C: base = 0x041A; cmb[0] = 0x0301; break; /* */
+ case 0x040E: base = 0x0423; cmb[0] = 0x0306; break; /* */
case 0x0419: base = 0x0418; cmb[0] = 0x0306; break; /* */
case 0x0439: base = 0x0438; cmb[0] = 0x0306; break; /* */
case 0x0451: base = 0x0435; cmb[0] = 0x0308; break; /* */
+ case 0x0453: base = 0x0433; cmb[0] = 0x0301; break; /* */
+ case 0x0457: base = 0x0456; cmb[0] = 0x0308; break; /* */
+ case 0x045C: base = 0x043A; cmb[0] = 0x0301; break; /* */
+ case 0x045E: base = 0x0443; cmb[0] = 0x0306; break; /* */
default:
/* Should not be hit from bit map table */
/*
- * ucs_combine - generate a precomposed UCS-2 char
+ * ucs_combine - generate a precomposed Unicode char
*
* Precomposed Unicode characters are required for some volume
* formats and network protocols. ucs_combine will combine a
switch (base) {
case 0x00DC: return (0x01D7);
case 0x00FC: return (0x01D8);
+ case 0x0413: return (0x0403);
+ case 0x041A: return (0x040C);
+ case 0x0433: return (0x0453);
+ case 0x043A: return (0x045C);
} break;
case 0x0304:
switch (base) {
case 0x0306:
switch (base) {
case 0x0418: return (0x0419);
+ case 0x0423: return (0x040E);
case 0x0438: return (0x0439);
+ case 0x0443: return (0x045E);
} break;
case 0x0308:
switch (base) {
+ case 0x0406: return (0x0407);
case 0x0415: return (0x0401);
case 0x0435: return (0x0451);
+ case 0x0456: return (0x0457);
} break;
case 0x030C:
switch (base) {
*/
const char * gIOKernelKmods =
"{
- 'com.apple.kernel' = '1.4';
- 'com.apple.kernel.bsd' = '1.1';
- 'com.apple.kernel.iokit' = '1.1';
- 'com.apple.kernel.libkern' = '1.1';
- 'com.apple.kernel.mach' = '1.1';
+ 'com.apple.kernel' = '5.1';
+ 'com.apple.kernel.bsd' = '5.1';
+ 'com.apple.kernel.iokit' = '5.1';
+ 'com.apple.kernel.libkern' = '5.1';
+ 'com.apple.kernel.mach' = '5.1';
'com.apple.iokit.IOADBFamily' = '1.1';
'com.apple.iokit.IOSystemManagementFamily' = '1.1';
}";
#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define pmap_attribute(pmap,addr,size,attr,value) \
(KERN_INVALID_ADDRESS)
+#define pmap_sync_caches_phys(pa) \
+ (KERN_INVALID_ADDRESS)
+
#endif /* ASSEMBLER */
#endif /* _PMAP_MACHINE_ */
return KERN_NO_SPACE;
}
}
- nits = its + 1;
- nsize = nits->its_size;
if (osize == size) {
is_write_unlock(space);
return KERN_NO_SPACE;
}
+ nits = its + 1;
+ nsize = nits->its_size;
+
assert((osize < size) && (size <= nsize));
/*
assert(ipc_table_entries != ITS_NULL);
ipc_table_fill(ipc_table_entries, ipc_table_entries_size - 1,
- 4, sizeof(struct ipc_entry) +
- sizeof(ipc_entry_bits_t) +
- sizeof(ipc_table_index_t));
+ 16, sizeof(struct ipc_entry));
/* the last two elements should have the same size */
vm_size_t size,
vm_offset_t table);
+#define it_entries_reallocable(its) \
+ ((its)->its_size * sizeof(struct ipc_entry) >= PAGE_SIZE)
+
#define it_entries_alloc(its) \
((ipc_entry_t) \
- ipc_table_alloc(round_page( \
- (its)->its_size * sizeof(struct ipc_entry))))
-
-#define it_entries_reallocable(its) \
- ((its)->its_size * sizeof(struct ipc_entry) \
- >= PAGE_SIZE)
+ ipc_table_alloc(it_entries_reallocable(its) ? \
+ round_page((its)->its_size * sizeof(struct ipc_entry)) : \
+ (its)->its_size * sizeof(struct ipc_entry) \
+ ))
#define it_entries_realloc(its, table, nits) \
((ipc_entry_t) \
))
#define it_entries_free(its, table) \
- ipc_table_free( \
- round_page((its)->its_size * sizeof(struct ipc_entry)), \
+ ipc_table_free(it_entries_reallocable(its) ? \
+ round_page((its)->its_size * sizeof(struct ipc_entry)) : \
+ (its)->its_size * sizeof(struct ipc_entry), \
(vm_offset_t)(table) \
)
ret = kmem_suballoc(kernel_map, /* Suballocate from the kernel map */
&stack,
- (stack_alloc_bndry * (THREAD_MAX + 64)), /* Allocate enough for all of it */
+ (stack_alloc_bndry * (2*THREAD_MAX + 64)), /* Allocate enough for all of it */
FALSE, /* Say not pageable so that it is wired */
TRUE, /* Allocate from anywhere */
&stack_map); /* Allocate a submap */
* @APPLE_LICENSE_HEADER_START@
*
* The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
+ * are subject to the Apple Public Source License Version 1.1 (the
* "License"). You may not use this file except in compliance with the
* License. Please obtain a copy of the License at
* http://www.apple.com/publicsource and read it before using this file.
/* loop to flush the data cache */
.L_sync_data_loop:
subic r4, r4, CACHE_LINE_SIZE
- dcbst r3, r4
+ dcbf r3, r4
bdnz .L_sync_data_loop
sync
blr
.L_sync_one_line:
- dcbst 0,r3
+ dcbf 0,r3
sync
icbi 0,r3
b .L_sync_cache_done
proc_info = &per_proc_info[cpu];
+ if(proc_info->FPU_thread) fpu_save(proc_info->FPU_thread); /* If anyone owns FPU, save it */
+ proc_info->FPU_thread = 0; /* Set no fpu owner now */
+
+ if(proc_info->VMX_thread) vec_save(proc_info->VMX_thread); /* If anyone owns vectors, save it */
+ proc_info->VMX_thread = 0; /* Set no vector owner now */
+
if (proc_info->cpu_number == 0) {
proc_info->cpu_flags &= BootDone;
proc_info->istackptr = (vm_offset_t)&intstack + (INTSTACK_SIZE*(cpu+1)) - sizeof (struct ppc_saved_state);
proc_info->debstack_top_ss = proc_info->debstackptr;
#endif /* MACH_KDP || MACH_KDB */
proc_info->interrupts_enabled = 0;
- proc_info->FPU_thread = 0;
- if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
+ if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
extern void _start_cpu(void);
-
+
resethandler_target.type = RESET_HANDLER_START;
resethandler_target.call_paddr = kvtophys((vm_offset_t)_start_cpu);
resethandler_target.arg__paddr = kvtophys((vm_offset_t)proc_info);
-
+
ml_phys_write((vm_offset_t)&ResetHandler + 0,
- resethandler_target.type);
+ resethandler_target.type);
ml_phys_write((vm_offset_t)&ResetHandler + 4,
- resethandler_target.call_paddr);
+ resethandler_target.call_paddr);
ml_phys_write((vm_offset_t)&ResetHandler + 8,
- resethandler_target.arg__paddr);
+ resethandler_target.arg__paddr);
__asm__ volatile("sync");
__asm__ volatile("isync");
- }
+ }
}
PE_cpu_machine_quiesce(proc_info->cpu_id);
bne- mpwait2 /* (TEST/DEBUG) */
isync /* (TEST/DEBUG) */
- lwz r4,0xD80(br0) /* (TEST/DEBUG) */
+ lwz r4,0xE80(br0) /* (TEST/DEBUG) */
mr. r4,r4 /* (TEST/DEBUG) */
li r4,1 /* (TEST/DEBUG) */
bne- doncheksv /* (TEST/DEBUG) */
lis r8,HIGH_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */
ori r8,r8,LOW_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */
- stw r4,0xD80(br0) /* (TEST/DEBUG) */
+ stw r4,0xE80(br0) /* (TEST/DEBUG) */
lwarx r4,0,r8 ; ?
isync /* (TEST/DEBUG) */
-#if 0
rlwinm r4,r13,0,0,19 /* (TEST/DEBUG) */
lwz r21,SACflags(r4) /* (TEST/DEBUG) */
rlwinm r22,r21,24,24,31 /* (TEST/DEBUG) */
cmplwi r22,0x00EE /* (TEST/DEBUG) */
lwz r22,SACvrswap(r4) /* (TEST/DEBUG) */
- bne- currbad /* (TEST/DEBUG) */
+ bnel- currbad /* (TEST/DEBUG) */
andis. r21,r21,hi16(sac_perm) /* (TEST/DEBUG) */
bne- currnotbad /* (TEST/DEBUG) */
mr. r22,r22 /* (TEST/DEBUG) */
stw r26,SACalloc(r23) /* (TEST/DEBUG) */
sync /* (TEST/DEBUG) */
- li r28,0 /* (TEST/DEBUG) */
- stw r28,0x20(br0) /* (TEST/DEBUG) */
- stw r28,0(r8) /* (TEST/DEBUG) */
- BREAKPOINT_TRAP /* (TEST/DEBUG) */
+
+ li r3,0 /* (TEST/DEBUG) */
+ stw r3,0x20(br0) /* (TEST/DEBUG) */
+ stw r3,0(r8) /* (TEST/DEBUG) */
+ lis r0,hi16(Choke) ; (TEST/DEBUG)
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG)
+ sc ; System ABEND
currnotbad:
-#endif
-
lwz r28,SVcount(r8) /* (TEST/DEBUG) */
lwz r21,SVinuse(r8) /* (TEST/DEBUG) */
lwz r23,SVmin(r8) /* (TEST/DEBUG) */
cmpw r22,r23 /* (TEST/DEBUG) */
bge+ cksave0 /* (TEST/DEBUG) */
- li r4,0 /* (TEST/DEBUG) */
- stw r4,0x20(br0) /* (TEST/DEBUG) */
- stw r4,0(r8) /* (TEST/DEBUG) */
- BREAKPOINT_TRAP /* (TEST/DEBUG) */
+ bl currbad ; (TEST/DEBUG)
cksave0: lwz r28,SVfree(r8) /* (TEST/DEBUG) */
li r24,0 /* (TEST/DEBUG) */
rlwinm. r21,r28,0,4,19 /* (TEST/DEBUG) */
bne+ cksave1 /* (TEST/DEBUG) */
- li r4,0 /* (TEST/DEBUG) */
- stw r4,0x20(br0) /* (TEST/DEBUG) */
- stw r4,0(r8) /* (TEST/DEBUG) */
- BREAKPOINT_TRAP /* (TEST/DEBUG) */
+ bl currbad ; (TEST/DEBUG)
cksave1: rlwinm. r21,r28,0,21,3 /* (TEST/DEBUG) */
beq+ cksave2 /* (TEST/DEBUG) */
- li r4,0 /* (TEST/DEBUG) */
- stw r4,0x20(br0) /* (TEST/DEBUG) */
- stw r4,0(r8) /* (TEST/DEBUG) */
- BREAKPOINT_TRAP /* (TEST/DEBUG) */
+ bl currbad ; (TEST/DEBUG)
cksave2: lwz r25,SACalloc(r28) /* (TEST/DEBUG) */
lbz r26,SACflags+2(r28) /* (TEST/DEBUG) */
stb r29,SACflags+3(r28) /* (TEST/DEBUG) */
beq+ cksave2z
- li r4,0 /* (TEST/DEBUG) */
- stw r4,0x20(br0) /* (TEST/DEBUG) */
- stw r4,0(r8) /* (TEST/DEBUG) */
- BREAKPOINT_TRAP /* (TEST/DEBUG) */
+ bl currbad ; (TEST/DEBUG)
cksave2z: mr. r21,r21 /* (TEST/DEBUG) */
beq+ cksave2a /* (TEST/DEBUG) */
- li r4,0 /* (TEST/DEBUG) */
- stw r4,0x20(br0) /* (TEST/DEBUG) */
- stw r4,0(r8) /* (TEST/DEBUG) */
- BREAKPOINT_TRAP /* (TEST/DEBUG) */
+ bl currbad ; (TEST/DEBUG)
cksave2a: rlwinm r26,r25,1,31,31 /* (TEST/DEBUG) */
rlwinm r27,r25,2,31,31 /* (TEST/DEBUG) */
cksave3: cmplw r24,r22 /* (TEST/DEBUG) */
beq+ cksave4 /* (TEST/DEBUG) */
- li r4,0 /* (TEST/DEBUG) */
- stw r4,0x20(br0) /* (TEST/DEBUG) */
- stw r4,0(r8) /* (TEST/DEBUG) */
- BREAKPOINT_TRAP /* (TEST/DEBUG) */
+ bl currbad ; (TEST/DEBUG)
cksave4: lwz r28,SVfree(r8) /* (TEST/DEBUG) */
li r24,0 /* (TEST/DEBUG) */
cksave6:
li r4,0 /* (TEST/DEBUG) */
- stw r4,0xD80(br0) /* (TEST/DEBUG) */
+ stw r4,0xE80(br0) /* (TEST/DEBUG) */
stw r4,0(r8) /* (TEST/DEBUG) */
doncheksv:
;
; Lock gotten, toss the saveareas
;
-fretagain:
+fretagain: isync ; Toss those prefetches
#if TRCSAVE
beq- cr5,trkill1 ; (TEST/DEBUG) Do not trace this type
lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask
#include <ppc/asm.h>
#include <ppc/thread_act.h>
#include <ppc/vmachmon.h>
+#include <ppc/low_trace.h>
#include <sys/kdebug.h>
#endif
if (branch_tracing_enabled())
per_proc_info[cpu_number()].cpu_flags |= traceBE;
+
+ if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old->top_act, (unsigned int)new->top_act); /* Cut trace entry if tracing */
+
return;
}
return ret;
}
+/*
+ * pmap_sync_caches_phys(vm_offset_t pa)
+ *
+ * Invalidates all of the instruction cache on a physical page and
+ * pushes any dirty data from the data cache for the same physical page
+ *
+ * pa - physical address within the page to sync; trunc_page() rounds it
+ *      down so the entire PAGE_SIZE region is operated on.
+ *
+ * Runs at splhigh so the sync cannot be interrupted partway through.
+ */
+
+void pmap_sync_caches_phys(vm_offset_t pa) {
+
+	spl_t s;
+
+	s = splhigh();						/* No interruptions here */
+	sync_cache(trunc_page(pa), PAGE_SIZE);			/* Sync up dem caches */
+	splx(s);						/* Allow interruptions */
+	return;
+}
+
/*
* pmap_collect
*
extern void flush_dcache(vm_offset_t va, unsigned length, boolean_t phys);
extern void invalidate_dcache(vm_offset_t va, unsigned length, boolean_t phys);
extern void invalidate_icache(vm_offset_t va, unsigned length, boolean_t phys);
+extern void pmap_sync_caches_phys(vm_offset_t pa);
extern void invalidate_cache_for_io(vm_offset_t va, unsigned length, boolean_t phys);
extern void pmap_map_block(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t size,
vm_prot_t prot, int attr, unsigned int flags); /* Map a block */
b sdqlcks /* Sniff away... */
-sdqlckd: lwz r3,SVfree(r10) /* Get the free save area list anchor */
+sdqlckd: isync ; Clean out the prefetches
+ lwz r3,SVfree(r10) /* Get the free save area list anchor */
la r5,SVfree(r10) /* Remember that the we're just starting out */
lwz r6,SVcount(r10) /* Get the total count of saveareas for later */
lis r8,sac_empty>>16 /* Get the empty block indication */
; 750CX
-init750CX: bflr firstBoot ; No init for wakeup....
+init750CX:
+ bf firstBoot, init750 ; No init for wakeup....
mfspr r13,hid1 ; Get HID1
li r14,lo16(0xFD5F) ; Get valid
rlwinm r13,r13,4,28,31 ; Isolate
slw r14,r14,r13 ; Position
rlwimi r17,r14,15-pfCanNapb,pfCanNapb,pfCanNapb ; Set it
- b init750com ; Join common...
+ b init750 ; Join common...
; 750
-init750: bflr firstBoot ; No init for wakeup....
+init750:
+ bf firstBoot, init750nb ; No init for wakeup....
-init750com: mfspr r13,l2cr ; Get the L2CR
+ mfspr r13,l2cr ; Get the L2CR
rlwinm. r0,r13,0,l2e,l2e ; Any L2?
bne+ i750hl2 ; Yes...
rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No L2, turn off feature
-i750hl2: lis r14,hi16(256*1024) ; Base L2 size
- addis r15,r13,0x3000 ; Hah... Figure this one out...
- rlwinm r15,r15,4,30,31 ; Isolate
- rlwinm. r8,r13,0,l2siz,l2sizf ; Was size valid?
- slw r14,r14,r15 ; Set 256KB, 512KB, or 1MB
- beq- init750none ; Not a valid setting...
+i750hl2:
+ lis r14,hi16(256*1024) ; Base L2 size
+ addis r15,r13,0x3000 ; Hah... Figure this one out...
+ rlwinm r15,r15,4,30,31 ; Isolate
+ rlwinm. r8,r13,0,l2siz,l2sizf ; Was size valid?
+ slw r14,r14,r15 ; Set 256KB, 512KB, or 1MB
+ beq- init750l2none ; Not a valid setting...
- stw r13,pfl2cr(r30) ; Shadow the L2CR
- stw r14,pfl2Size(r30) ; Set the L2 size
- blr ; Return....
+ stw r13,pfl2cr(r30) ; Shadow the L2CR
+ stw r14,pfl2Size(r30) ; Set the L2 size
+ b init750l2done ; Done with L2
-init750none:
- rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No level 2 cache
- blr ; Return...
-
+init750l2none:
+ rlwinm r17,r17,0,pfL2b+1,pfL2b-1 ; No level 2 cache
+
+init750l2done:
+ mfspr r11,hid0 ; Get the current HID0
+ stw r11,pfHID0(r30) ; Save the HID0 value
+ blr ; Return...
+
+init750nb:
+ lwz r11,pfHID0(r30) ; Get HID0
+ sync
+ mtspr hid0,r11 ; Set the HID
+ isync
+ sync
+ blr
init7400: bf firstBoot,i7400nb ; Do different if not initial boot...
mfspr r13,l2cr ; Get the L2CR
stw r11,pfMSSCR1(r30) ; Save the MSSCR1 value
blr ; Return...
-i7400nb: lwz r11,pfHID0(r30) ; Get HID0
+i7400nb:
+ lwz r11,pfHID0(r30) ; Get HID0
sync
mtspr hid0,r11 ; Set the HID
+ isync
+ sync
lwz r11,pfMSSCR0(r30) ; Get MSSCR0
isync
sync
* does not have any data.
*/
- if (m->absent || m->error || m->restart)
- return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+ if (m->absent || m->error || m->restart) {
+ if(m->error && should_flush) {
+ /* dump the page, pager wants us to */
+ /* clean it up and there is no */
+ /* relevant data to return */
+ if(m->wire_count == 0) {
+ VM_PAGE_FREE(m);
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+ }
+ } else {
+ return(MEMORY_OBJECT_LOCK_RESULT_DONE);
+ }
+ }
assert(!m->fictitious);
int pre_heat_size;
int age_of_cache;
- if(object->private)
+ if((object->private) || !(object->pager))
return;
if (!object->internal) {
object->pager,
&object_size);
} else {
- object_size = 0xFFFFFFFFFFFFFFFF;
+ object_size = object->size;
}
/*
* determine age of cache in seconds
while ((length < max_length) &&
(object_size >=
- (object->paging_offset + after + PAGE_SIZE_64))) {
+ (after + PAGE_SIZE_64))) {
if(length >= pre_heat_size)
{
int vm_fault_debug = 0;
boolean_t vm_page_deactivate_behind = TRUE;
-vm_machine_attribute_val_t mv_cache_sync = MATTR_VAL_CACHE_SYNC;
#if !VM_FAULT_STATIC_CONFIG
boolean_t vm_fault_dirty_handling = FALSE;
prot &= ~VM_PROT_WRITE;
#endif /* MACH_KDB */
#endif /* STATIC_CONFIG */
- PMAP_ENTER(pmap, vaddr, m, prot, wired);
+ if (m->no_isync == TRUE)
+ pmap_sync_caches_phys(m->phys_addr);
- if (m->no_isync) {
- pmap_attribute(pmap,
- vaddr,
- PAGE_SIZE,
- MATTR_CACHE,
- &mv_cache_sync);
-
- }
+ PMAP_ENTER(pmap, vaddr, m, prot, wired);
{
tws_hash_line_t line;
task_t task;
}
}
}
-
- if (m->clustered) {
- vm_pagein_cluster_used++;
- m->clustered = FALSE;
- }
/*
* Grab the object lock to manipulate
* the page queues. Change wiring
*/
vm_object_lock(object);
vm_page_lock_queues();
+
+ if (m->clustered) {
+ vm_pagein_cluster_used++;
+ m->clustered = FALSE;
+ }
/*
- * we did the isync above... we're clearing
+ * we did the isync above (if needed)... we're clearing
* the flag here to avoid holding a lock
* while calling pmap functions, however
* we need hold the object lock before
* the pageout queues. If the pageout daemon comes
* across the page, it will remove it from the queues.
*/
- if(m != VM_PAGE_NULL) {
- if (m->no_isync) {
- m->no_isync = FALSE;
-
- vm_object_unlock(m->object);
-
- PMAP_ENTER(pmap, vaddr, m, prot, wired);
+ if (m != VM_PAGE_NULL) {
+ if (m->no_isync == TRUE) {
+ pmap_sync_caches_phys(m->phys_addr);
- /*
- * It's critically important that a wired-down page be faulted
- * only once in each map for which it is wired.
- */
- /* Sync I & D caches for new mapping */
- pmap_attribute(pmap,
- vaddr,
- PAGE_SIZE,
- MATTR_CACHE,
- &mv_cache_sync);
- } else {
+ m->no_isync = FALSE;
+ }
vm_object_unlock(m->object);
- PMAP_ENTER(pmap, vaddr, m, prot, wired);
- }
+ PMAP_ENTER(pmap, vaddr, m, prot, wired);
{
tws_hash_line_t line;
task_t task;
* We have to unlock the object because pmap_enter
* may cause other faults.
*/
- if (m->no_isync) {
- m->no_isync = FALSE;
-
- vm_object_unlock(object);
-
- PMAP_ENTER(pmap, va, m, prot, TRUE);
-
- /* Sync I & D caches for new mapping */
- pmap_attribute(pmap,
- va,
- PAGE_SIZE,
- MATTR_CACHE,
- &mv_cache_sync);
-
- } else {
- vm_object_unlock(object);
+ if (m->no_isync == TRUE) {
+ pmap_sync_caches_phys(m->phys_addr);
- PMAP_ENTER(pmap, va, m, prot, TRUE);
+ m->no_isync = FALSE;
}
+ vm_object_unlock(object);
+
+ PMAP_ENTER(pmap, va, m, prot, TRUE);
/*
* Must relock object so that paging_in_progress can be cleared.
#include <vm/vm_init.h>
#define ZONE_MAP_MIN (12 * 1024 * 1024)
-#define ZONE_MAP_MAX (128 * 1024 * 1024)
+#define ZONE_MAP_MAX (256 * 1024 * 1024)
/*
* vm_mem_bootstrap initializes the virtual memory system.
vm_prot_t protection)
{
- vm_machine_attribute_val_t mv_cache_sync = MATTR_VAL_CACHE_SYNC;
-
while (addr < end_addr) {
register vm_page_t m;
printf("map: %x, addr: %x, object: %x, offset: %x\n",
map, addr, object, offset);
}
-
m->busy = TRUE;
+
+ if (m->no_isync == TRUE) {
+ pmap_sync_caches_phys(m->phys_addr);
+
+ m->no_isync = FALSE;
+ }
vm_object_unlock(object);
PMAP_ENTER(map->pmap, addr, m,
protection, FALSE);
- if (m->no_isync) {
- pmap_attribute(map->pmap,
- addr,
- PAGE_SIZE,
- MATTR_CACHE,
- &mv_cache_sync);
- }
vm_object_lock(object);
- m->no_isync = FALSE;
-
PAGE_WAKEUP_DONE(m);
vm_page_lock_queues();
if (!m->active && !m->inactive)
m->active = FALSE;
m->laundry = FALSE;
m->free = FALSE;
+ m->no_isync = TRUE;
m->reference = FALSE;
m->pageout = FALSE;
m->dump_cleaning = FALSE;
m = (vm_page_t)zget(vm_page_zone);
if (m) {
- m->free = FALSE;
vm_page_init(m, vm_page_fictitious_addr);
m->fictitious = TRUE;
}
m->phys_addr = real_m->phys_addr;
m->fictitious = FALSE;
+ m->no_isync = TRUE;
vm_page_lock_queues();
- m->no_isync = TRUE;
- real_m->no_isync = FALSE;
if (m->active)
vm_page_active_count++;
else if (m->inactive)
assert(m->free);
assert(!m->wanted);
m->free = FALSE;
+ m->no_isync = TRUE;
m->gobbled = TRUE;
}
vm_page_free_count -= npages;