/* get total blocks (both forks) */
attrp->ca_blocks = crp->dataFork.totalBlocks + crp->resourceFork.totalBlocks;
attrp->ca_attrblks = crp->attrBlocks;
+ /* On HFS+ the ThreadExists flag must always be set. */
+ if ((hfsmp->hfs_flags & HFS_STANDARD) == 0)
+ attrp->ca_recflags |= kHFSThreadExistsMask;
}
attrp->ca_fileid = crp->fileID;
*
*/
struct directoryhint {
- SLIST_ENTRY(directoryhint) dh_link; /* chain */
+ TAILQ_ENTRY(directoryhint) dh_link; /* chain */
int dh_index; /* index into directory (zero relative) */
u_int32_t dh_time;
struct cat_desc dh_desc; /* entry's descriptor */
};
typedef struct directoryhint directoryhint_t;
+/*
+ * HFS_MAXDIRHINTS cannot be larger than 63 without reducing
+ * HFS_INDEX_BITS, because the 6-bit tag admits at most 63 distinct
+ * values. If HFS_MAXDIRHINTS were larger than 63, one list could
+ * contain two dirhints with the same tag, and a stale dirhint could
+ * be returned.
+ */
#define HFS_MAXDIRHINTS 32
#define HFS_DIRHINT_TTL 45
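
/*
 * A minimal sketch of the tag/index packing the comment above refers to;
 * the widths and names below are assumed for illustration only.  With a
 * 6-bit tag stored above the index bits of a 32-bit readdir cookie, only
 * 63 distinct non-zero tags can exist.
 */
#if 0	/* illustrative sketch */
#define SKETCH_INDEX_BITS	26	/* assumed width of the directory index */
#define SKETCH_INDEX_MASK	((1u << SKETCH_INDEX_BITS) - 1)

static inline u_int32_t
sketch_pack_cookie(u_int32_t tag, u_int32_t index)
{
	/* tag occupies the top 6 bits, index the low bits */
	return ((tag << SKETCH_INDEX_BITS) | (index & SKETCH_INDEX_MASK));
}
#endif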
SET(ncp->c_hflag, H_ALLOC);
ncp->c_fileid = inum;
ncp->c_dev = dev;
+ TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */
lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr);
if (!skiplock)
lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr);
/* Make sure it's still valid (i.e., it exists on disk). */
- if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) {
+ if (!hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? NULL : cnp), cp->c_fileid)) {
hfs_chash_abort(cp);
hfs_reclaim_cnode(cp);
*vpp = NULL;
u_long c_childhint; /* catalog hint for children */
struct cat_desc c_desc; /* cnode's descriptor */
struct cat_attr c_attr; /* cnode's attributes */
- SLIST_HEAD(hfs_hinthead, directoryhint) c_hintlist; /* directory hint list */
+ TAILQ_HEAD(hfs_hinthead, directoryhint) c_hintlist; /* directory hint list */
int16_t c_dirhinttag; /* directory hint tag */
union {
int16_t cu_dirhintcnt; /* directory hint count */
hfs_unlock(VTOC(vp));
if (vnode_vtype(vp) == VDIR) {
- myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, &my_context);
- if (myErr) {
- // try again with just read-access
- myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, &my_context);
- }
+ myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context);
} else {
myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, &my_context);
}
myNodeID = cp->c_parentcnid; /* move up the hierarchy */
hfs_unlock(VTOC(vp));
- myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH), &my_context);
- //myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context);
+ if (vp->v_type == VDIR) {
+ myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context);
+ } else {
+ myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH), &my_context);
+ }
vnode_put(vp);
vp = NULL;
if ( myErr ) {
hfs_getdirhint(struct cnode *dcp, int index)
{
struct timeval tv;
- directoryhint_t *hint, *next, *oldest;
+ directoryhint_t *hint;
+ boolean_t need_remove, need_init;
char * name;
- oldest = NULL;
microuptime(&tv);
- /* Look for an existing hint first */
- for(hint = dcp->c_hintlist.slh_first; hint != NULL; hint = next) {
- next = hint->dh_link.sle_next;
- if (hint->dh_index == index) {
- goto out;
- } else if (oldest == NULL || (hint->dh_time < oldest->dh_time)) {
- oldest = hint;
- }
+ /*
+ * Look for an existing hint first. If not found, create a new one (when
+ * the list is not full) or recycle the oldest hint. Since any hint that
+ * is returned is moved to the head of the list, the last hint is always
+ * the oldest.
+ */
+ TAILQ_FOREACH(hint, &dcp->c_hintlist, dh_link) {
+ if (hint->dh_index == index)
+ break;
}
- /* Recycle one if we have too many already. */
- if ((dcp->c_dirhintcnt >= HFS_MAXDIRHINTS) && (oldest != NULL)) {
- hint = oldest;
- if ((name = hint->dh_desc.cd_nameptr)) {
- hint->dh_desc.cd_nameptr = NULL;
- vfs_removename(name);
+ if (hint != NULL) { /* found an existing hint */
+ need_init = false;
+ need_remove = true;
+ } else { /* cannot find an existing hint */
+ need_init = true;
+ if (dcp->c_dirhintcnt < HFS_MAXDIRHINTS) { /* we don't need recycling */
+ /* Create a default directory hint */
+ MALLOC_ZONE(hint, directoryhint_t *, sizeof(directoryhint_t), M_HFSDIRHINT, M_WAITOK);
+ ++dcp->c_dirhintcnt;
+ need_remove = false;
+ } else { /* recycle the last (i.e., the oldest) hint */
+ hint = TAILQ_LAST(&dcp->c_hintlist, hfs_hinthead);
+ if ((name = hint->dh_desc.cd_nameptr)) {
+ hint->dh_desc.cd_nameptr = NULL;
+ vfs_removename(name);
+ }
+ need_remove = true;
}
- goto init;
- }
-
- /* Create a default directory hint */
- MALLOC_ZONE(hint, directoryhint_t *, sizeof(directoryhint_t), M_HFSDIRHINT, M_WAITOK);
- SLIST_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link);
- ++dcp->c_dirhintcnt;
-init:
- hint->dh_index = index;
- hint->dh_desc.cd_flags = 0;
- hint->dh_desc.cd_encoding = 0;
- hint->dh_desc.cd_namelen = 0;
- hint->dh_desc.cd_nameptr = NULL;
- hint->dh_desc.cd_parentcnid = dcp->c_cnid;
- hint->dh_desc.cd_hint = dcp->c_childhint;
- hint->dh_desc.cd_cnid = 0;
-out:
+ }
+
+ if (need_remove)
+ TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link);
+
+ TAILQ_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link);
+
+ if (need_init) {
+ hint->dh_index = index;
+ hint->dh_desc.cd_flags = 0;
+ hint->dh_desc.cd_encoding = 0;
+ hint->dh_desc.cd_namelen = 0;
+ hint->dh_desc.cd_nameptr = NULL;
+ hint->dh_desc.cd_parentcnid = dcp->c_cnid;
+ hint->dh_desc.cd_hint = dcp->c_childhint;
+ hint->dh_desc.cd_cnid = 0;
+ }
hint->dh_time = tv.tv_sec;
return (hint);
}
void
hfs_reldirhint(struct cnode *dcp, directoryhint_t * relhint)
{
- directoryhint_t *hint;
char * name;
- SLIST_FOREACH(hint, &dcp->c_hintlist, dh_link) {
- if (hint == relhint) {
- SLIST_REMOVE(&dcp->c_hintlist, hint, directoryhint, dh_link);
- name = hint->dh_desc.cd_nameptr;
- if (name != NULL) {
- hint->dh_desc.cd_nameptr = NULL;
- vfs_removename(name);
- }
- FREE_ZONE(hint, sizeof(directoryhint_t), M_HFSDIRHINT);
- --dcp->c_dirhintcnt;
- break;
- }
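+ /*
+ * relhint is assumed to be on dcp's hint list; given the element itself,
+ * TAILQ_REMOVE unlinks it in constant time, so the list search the old
+ * SLIST code needed is gone.
+ */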
+ TAILQ_REMOVE(&dcp->c_hintlist, relhint, dh_link);
+ name = relhint->dh_desc.cd_nameptr;
+ if (name != NULL) {
+ relhint->dh_desc.cd_nameptr = NULL;
+ vfs_removename(name);
}
+ FREE_ZONE(relhint, sizeof(directoryhint_t), M_HFSDIRHINT);
+ --dcp->c_dirhintcnt;
}
/*
hfs_reldirhints(struct cnode *dcp, int stale_hints_only)
{
struct timeval tv;
- directoryhint_t *hint, *next;
+ directoryhint_t *hint, *prev;
char * name;
if (stale_hints_only)
microuptime(&tv);
- else
- tv.tv_sec = 0;
-
- for (hint = dcp->c_hintlist.slh_first; hint != NULL; hint = next) {
- next = hint->dh_link.sle_next;
- if (stale_hints_only) {
- /* Skip over newer entries. */
- if ((tv.tv_sec - hint->dh_time) < HFS_DIRHINT_TTL)
- continue;
- SLIST_REMOVE(&dcp->c_hintlist, hint, directoryhint, dh_link);
- }
+
+ /* searching from the oldest to the newest, so we can stop early when releasing stale hints only */
+ for (hint = TAILQ_LAST(&dcp->c_hintlist, hfs_hinthead); hint != NULL; hint = prev) {
+ if (stale_hints_only && (tv.tv_sec - hint->dh_time) < HFS_DIRHINT_TTL)
+ break; /* stop here if this entry is too new */
name = hint->dh_desc.cd_nameptr;
if (name != NULL) {
hint->dh_desc.cd_nameptr = NULL;
vfs_removename(name);
}
+ prev = TAILQ_PREV(hint, hfs_hinthead, dh_link); /* must save this pointer before calling FREE_ZONE on this node */
+ TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link);
FREE_ZONE(hint, sizeof(directoryhint_t), M_HFSDIRHINT);
--dcp->c_dirhintcnt;
}
- if (!stale_hints_only)
- dcp->c_hintlist.slh_first = NULL;
}
if ( localhint.dh_desc.cd_parentcnid == cp->c_cnid) {
localhint.dh_index = index - 1;
localhint.dh_time = 0;
- localhint.dh_link.sle_next = 0;
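+ /* dh_link is now a TAILQ_ENTRY with both next and prev pointers, so clear the whole thing */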
+ bzero(&localhint.dh_link, sizeof(localhint.dh_link));
dirhint = &localhint; /* don't forget to release the descriptor */
} else {
cat_releasedesc(&localhint.dh_desc);
}
attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime;
attr.ca_atimeondisk = attr.ca_atime;
+ /* On HFS+ the ThreadExists flag must always be set for files. */
+ if (vnodetype != VDIR && (hfsmp->hfs_flags & HFS_STANDARD) == 0)
+ attr.ca_recflags = kHFSThreadExistsMask;
attr.ca_uid = vap->va_uid;
attr.ca_gid = vap->va_gid;
#include <sys/pipe.h>
#include <kern/kern_types.h>
#include <kern/kalloc.h>
+#include <libkern/OSAtomic.h>
struct psemnode;
struct pshmnode;
error = EBADF;
goto outdrop;
}
- p->p_flag |= P_ADVLOCK;
+ OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag);
error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context);
(void)vnode_put(vp);
goto outdrop;
error = EBADF;
goto outdrop;
}
- p->p_flag |= P_ADVLOCK;
+ OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag);
error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context);
(void)vnode_put(vp);
goto outdrop;
* If the descriptor was in a message, POSIX-style locks
* aren't passed with the descriptor.
*/
- if (p && (p->p_flag & P_ADVLOCK) && fg->fg_type == DTYPE_VNODE) {
+ if (p && (p->p_ladvflag & P_LADVLOCK) && fg->fg_type == DTYPE_VNODE) {
proc_fdunlock(p);
lf.l_whence = SEEK_SET;
leavepgrp(child);
LIST_REMOVE(child, p_list); /* off zombproc */
LIST_REMOVE(child, p_sibling);
- child->p_flag &= ~P_WAITING;
+ child->p_lflag &= ~P_LWAITING;
+ wakeup(&child->p_stat);
lck_mtx_destroy(&child->p_mlock, proc_lck_grp);
lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp);
/* XXX This is racy because we don't get the lock!!!! */
- if (p->p_flag & P_WAITING) {
+ if (p->p_lflag & P_LWAITING) {
(void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
goto loop;
}
- p->p_flag |= P_WAITING; /* only allow single thread to wait() */
+ p->p_lflag |= P_LWAITING; /* only allow single thread to wait() */
if (p->p_stat == SZOMB) {
retval[0] = p->p_pid;
uap->status,
sizeof(status));
if (error) {
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (error);
}
}
/* information unavailable? */
if (error) {
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (error);
}
}
/* Clean up */
- if (!reap_child_process(q, p))
- p->p_flag &= ~P_WAITING;
-
- /* Wake other wait'ers, if any */
- wakeup(&p->p_stat);
+ if (!reap_child_process(q, p)) {
+ p->p_lflag &= ~P_LWAITING;
+ wakeup(&p->p_stat);
+ }
return (0);
}
sizeof(status));
} else
error = 0;
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (error);
}
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
}
if (nfound == 0)
* Wait collision; go to sleep and restart; used to maintain
* the single return for waited process guarantee.
*/
- if (p->p_flag & P_WAITING) {
+ if (p->p_lflag & P_LWAITING) {
(void)tsleep(&p->p_stat, PWAIT, "waitidcoll", 0);
goto loop;
}
- p->p_flag |= P_WAITING; /* mark busy */
+ p->p_lflag |= P_LWAITING; /* mark busy */
nfound++;
}
/* information unavailable? */
if (error) {
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (error);
}
/* Prevent other processes from waiting for this event? */
if (!(uap->options & WNOWAIT)) {
/* Clean up */
- if (!reap_child_process(q, p))
- p->p_flag &= ~P_WAITING;
-
- /* Wake other wait'ers, if any */
- wakeup(&p->p_stat);
+ if (!reap_child_process(q, p)) {
+ p->p_lflag &= ~P_LWAITING;
+ wakeup(&p->p_stat);
+ }
}
return (0);
}
/* information unavailable? */
if (error) {
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (error);
}
p->p_flag |= P_WAITED;
}
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (0);
}
/* information unavailable? */
if (error) {
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (error);
}
p->p_flag &= ~P_CONTINUED;
}
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
return (0);
/* Not a process we are interested in; go on to next child */
- p->p_flag &= ~P_WAITING;
+ p->p_lflag &= ~P_LWAITING;
wakeup(&p->p_stat);
}
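
/*
 * Sketch of the single-waiter protocol used above (names as in this file);
 * a waiter claims a child with P_LWAITING, and colliding waiters sleep on
 * the child's p_stat until the claim is dropped:
 *
 *	while (p->p_lflag & P_LWAITING)
 *		tsleep(&p->p_stat, PWAIT, "waitcoll", 0);
 *	p->p_lflag |= P_LWAITING;
 *	... examine or reap the child ...
 *	p->p_lflag &= ~P_LWAITING;
 *	wakeup(&p->p_stat);
 */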
p2->p_vforkcnt = 0;
p2->p_vforkact = 0;
p2->p_lflag = 0;
+ p2->p_ladvflag = 0;
TAILQ_INIT(&p2->p_uthlist);
TAILQ_INIT(&p2->aio_activeq);
TAILQ_INIT(&p2->aio_doneq);
int filtered = 0;
int error = 0;
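+ /* Offer the data to each filter in order, but stop once a filter returns an error. */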
- for (filter = so->so_filt; filter;
+ for (filter = so->so_filt; filter && (error == 0);
filter = filter->sfe_next_onsocket) {
if (filter->sfe_filter->sf_filter.sf_data_in) {
if (filtered == 0) {
int dlil_expand_mcl;
+extern u_int32_t inject_buckets;
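+/*
+ * inject_buckets is non-zero while proto_input() still has injected
+ * packets queued; the input thread checks it below so that it does not
+ * block while injected work is pending.
+ */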
+
static const u_int32_t dlil_writer_waiting = 0x80000000;
static __inline__ void*
}
proto_input_run();
-
+
if (dlil_input_mbuf_head == NULL &&
- dlil_input_loop_head == NULL) {
+ dlil_input_loop_head == NULL && inject_buckets == 0) {
assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT);
(void) thread_block(dlil_input_thread_continue);
/* NOTREACHED */
static struct proto_input_entry *proto_hash[PROTO_HASH_SLOTS];
static struct proto_input_entry *proto_input_add_list;
static lck_mtx_t *proto_input_lock = 0;
-static u_int32_t inject_buckets = 0;
+__private_extern__ u_int32_t inject_buckets = 0;
extern thread_t dlil_input_thread_ptr;
extern int dlil_input_thread_wakeup;
}
}
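+ /* args.fhsize is signed; reject negative sizes too, since the value is passed to copyin() below */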
- if (args.fhsize > NFSX_V3FHMAX)
+ if (args.fhsize < 0 || args.fhsize > NFSX_V3FHMAX)
return (EINVAL);
error = copyin(args.fh, (caddr_t)nfh, args.fhsize);
if (error)
unsigned int p_fdlock_pc[4];
unsigned int p_fdunlock_pc[4];
int p_fpdrainwait;
- int p_lflag; /* local flags */
+ unsigned int p_lflag; /* local flags */
+ unsigned int p_ladvflag; /* local advisory flags */
#if DIAGNOSTIC
#if SIGNAL_DEBUG
unsigned int lockpc[8];
};
+/* local flags */
#define P_LDELAYTERM 0x1 /* */
#define P_LNOZOMB 0x2 /* */
#define P_LLOW_PRI_IO 0x4
#define P_LPEXIT 0x8
#define P_LBACKGROUND_IO 0x10
+#define P_LWAITING 0x20
+
+/* advisory flags in the proc */
+#define P_LADVLOCK 0x01
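+
+/*
+ * P_LADVLOCK lives in its own word because p->p_flag is updated from
+ * multiple threads: a plain "p->p_flag |= P_ADVLOCK" is a non-atomic
+ * read-modify-write that can lose a concurrent bit update.  Callers
+ * set the new flag atomically instead:
+ *
+ *	OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag);
+ */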
// LP64todo - should this move?
/* LP64 version of extern_proc. all pointers
-8.0.0
+8.1.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
#include <libkern/c++/OSContainers.h>
#include <iokit/IOLib.h>
-char *testBuffer = "
-{ string = \"this is a 'string' with spaces\";
- string2 = 'this is also a \"string\" with spaces';
- offset = 16384:32;
- true = .true.;
- false = .false.;
- data = <0123 4567 89abcdef>;
- array = (1:8, 2:16, 3:32, 4:64 );
- set = [ one, two, three, four ];
- emptydict = { }@1;
- emptyarray = ( )@2;
- emptyset = [ ]@3;
- emptydata = < >@4;
- emptydict2 = @1;
- emptyarray2 = @2;
- emptyset2 = @3;
- emptydata2 = @4;
- dict2 = { string = asdfasdf; };
- dict3 = { string = asdfasdf; };
-}@0";
-
-kern_return_t
-test1_start(struct kmod_info *ki, void *data)
-{
- IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer);
-
- // test unserialize
- OSString *errmsg;
- OSObject *d = OSUnserialize(testBuffer, &errmsg);
- if (!d) {
- IOLog("%s\n", errmsg->getCStringNoCopy());
- return KMOD_RETURN_SUCCESS;
- }
-
- // test serialize
- OSSerialize *s = OSSerialize::withCapacity(5);
- if (!d->serialize(s)) {
- IOLog("serialization failed\n");
- return KMOD_RETURN_SUCCESS;
- }
-
- IOLog("serialized object's length = %d, capacity = %d\n", s->getLength(), s->getCapacity());
- IOLog("object unformatted = %s\n", s->text());
-
- // try second time
- OSObject *d2 = OSUnserializeXML(s->text(), &errmsg);
- if (!d2) {
- IOLog("%s\n", errmsg->getCStringNoCopy());
- return KMOD_RETURN_SUCCESS;
- }
-
- IOLog("\nserialized objects compared %ssuccessfully objectwise\n\n",
- d->isEqualTo(d2) ? "":"un");
-
- if (d2) d2->release();
- s->release();
- if (d) d->release();
-
- return KMOD_RETURN_SUCCESS;
-}
-
-kern_return_t
-test1_stop(struct kmod_info *ki, void *data)
-{
- return KMOD_RETURN_SUCCESS;
-}
trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;
#ifdef ppc
- if(trcWork.traceMask) dbgTrace((unsigned int)kmsg->ikm_header->msgh_id,
+ if(trcWork.traceMask) dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id,
(unsigned int)kmsg->ikm_header->msgh_remote_port,
(unsigned int)kmsg->ikm_header->msgh_local_port, 0);
#endif
void fwSCOM(scomcomm *); /* Read/Write SCOM */
void setPmon(unsigned int, unsigned int); /* Set perf mon stuff */
-extern void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4);
+extern void dbgTrace(unsigned int id, unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4);
#if 0 /* (TEST/DEBUG) - eliminate inline */
-extern __inline__ void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4) {
+extern __inline__ void dbgTrace(unsigned int id, unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4) {
+ __asm__ volatile("mr r2,%0" : : "r" (id) : "r2");
__asm__ volatile("mr r3,%0" : : "r" (item1) : "r3");
__asm__ volatile("mr r4,%0" : : "r" (item2) : "r4");
__asm__ volatile("mr r5,%0" : : "r" (item3) : "r5");
/*
* This is the glue to call the CutTrace firmware call
+ * dbgTrace(id, p1, p2, p3, p4)
*/
.align 5
LEXT(dbgTrace)
+ mr r2,r3 /* Move the trace ID into R2 for the firmware call */
+ mr r3,r4 /* Slide data word 1 down into R3 */
lis r0,HIGH_ADDR(CutTrace) /* Top half of CreateFakeIO firmware call number */
+ mr r4,r5 /* Slide data word 2 down into R4 */
+ mr r5,r6 /* Slide data word 3 down into R5 */
ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half */
+ mr r6,r7 /* Slide data word 4 down into R6 */
sc /* Do it to it */
blr /* Bye bye, Birdie... */
#include <ppc/savearea.h>
#define FPVECDBG 0
-#define GDDBG 0
.text
* Note that interrupts must be disabled before we get here (i.e., splsched)
*/
-/* Context switches are double jumps. We pass the following to the
+/*
+ * Switch_context(old, continuation, new)
+ *
+ * Context switches are double jumps. We pass the following to the
* context switch firmware call:
*
* R3 = switchee's savearea, virtual if continuation, low order physical for full switch
lwz r11,SAVprev+4(r8) ; Get the previous of the switchee savearea
ori r0,r0,lo16(CutTrace) ; Trace FW call
beq++ cswNoTrc ; No trace today, dude...
- mr r10,r3 ; Save across trace
- mr r2,r3 ; Trace old activation
- mr r3,r11 ; Trace prev savearea
+
+ li r2,0x4400 ; Trace ID
+ mr r6,r11 ; Trace prev savearea
sc ; Cut trace entry of context switch
- mr r3,r10 ; Restore
cswNoTrc: lwz r2,curctx(r5) ; Grab our current context pointer
lwz r10,FPUowner(r12) ; Grab the owner of the FPU
bne++ cswnofloat ; Level not the same, this is not live...
cmplw r5,r0 ; Still owned by this cpu?
- lwz r10,FPUsave(r2) ; Get the level
+ lwz r10,FPUsave(r2) ; Get the pointer to next saved context
bne++ cswnofloat ; CPU claimed by someone else...
mr. r10,r10 ; Is there a savearea here?
rlwinm r11,r8,0,0,19 ; Switch to savearea base
lis r9,hi16(EXT(switch_in)) ; Get top of switch in routine
lwz r5,savesrr0+4(r8) ; Set up the new SRR0
+;
+; Note that the low-level code requires the R7 contain the high order half of the savearea's
+; physical address. This is hack city, but it is the way it is.
+;
lwz r7,SACvrswap(r11) ; Get the high order V to R translation
lwz r11,SACvrswap+4(r11) ; Get the low order V to R translation
ori r9,r9,lo16(EXT(switch_in)) ; Bottom half of switch in
#endif
mflr r2 ; Save the return address
-fsretry: mr. r12,r12 ; Anyone own the FPU?
+ cmplw r3,r12 ; Is the specified context live?
lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number
- beq-- fsret ; Nobody owns the FPU, no save required...
-
- cmplw cr1,r3,r12 ; Is the specified context live?
-
- isync ; Force owner check first
-
- lwz r9,FPUcpu(r12) ; Get the cpu that context was last on
- bne-- cr1,fsret ; No, it is not...
+ lwz r9,FPUcpu(r3) ; Get the cpu that context was last on
+ bne-- fsret ; Specified context is not live, no save required...
- cmplw cr1,r9,r11 ; Was the context for this processor?
- beq-- cr1,fsgoodcpu ; Facility last used on this processor...
+ cmplw r9,r11 ; Was the context for this processor?
+ la r5,FPUsync(r3) ; Point to the sync word
+ bne-- fsret ; Facility not last used on this processor...
- b fsret ; Someone else claimed it...
+;
+; It looks like we need to save this one.
+;
+; First, make sure that the live context block is not mucked with while
+; we are trying to save it on out. Then we will give it the final check.
+;
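+; The lock protocol here (and in the vector path below) is the usual one:
+; sniff the word with plain loads so we do not keep taking a reservation,
+; kill any stale reservation between sniffs, bound the spin by comparing
+; elapsed timebase ticks against LockTimeOut, and attempt the lwarx/stwcx.
+; pair only once the word reads unlocked.  A hung lock panics rather than
+; spinning forever.
+;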
+
+ lis r9,ha16(EXT(LockTimeOut)) ; Get the high part
+ mftb r8 ; Get the time now
+ lwz r9,lo16(EXT(LockTimeOut))(r9) ; Get the timeout value
+ b fssync0a ; Jump to the lock...
.align 5
-fsgoodcpu: lwz r3,FPUsave(r12) ; Get the current FPU savearea for the thread
+fssync0: li r7,lgKillResv ; Get killing field
+ stwcx. r7,0,r7 ; Kill reservation
+
+fssync0a: lwz r7,0(r5) ; Sniff the lock
+ mftb r10 ; Is it time yet?
+ cmplwi cr1,r7,0 ; Is it locked?
+ sub r10,r10,r8 ; How long have we been spinning?
+ cmplw r10,r9 ; Has it been too long?
+ bgt-- fstimeout ; Way too long, panic...
+ bne-- cr1,fssync0a ; Yea, still locked so sniff harder...
+
+fssync1: lwarx r7,0,r5 ; Get the sync word
+ li r12,1 ; Get the lock
+ mr. r7,r7 ; Is it unlocked?
+ bne-- fssync0 ; Unfortunately, it is locked...
+ stwcx. r12,0,r5 ; Store lock and test reservation
+ bne-- fssync1 ; Try again if lost reservation...
+
+ isync ; Toss speculation
+
+ lwz r12,FPUowner(r6) ; Get the context ID for owner
+ cmplw r3,r12 ; Check again if we own the FPU?
+ bne-- fsretlk ; Go unlock and return since we no longer own context
+
+ lwz r5,FPUcpu(r12) ; Get the cpu that context was last on
+ lwz r7,FPUsave(r12) ; Get the current FPU savearea for the thread
+ cmplw r5,r11 ; Is this for the same processor?
lwz r9,FPUlevel(r12) ; Get our current level indicator
+ bne-- fsretlk ; Not the same processor, skip any save...
- cmplwi cr1,r3,0 ; Have we ever saved this facility context?
- beq- cr1,fsneedone ; Never saved it, so go do it...
+ cmplwi r7,0 ; Have we ever saved this facility context?
+ beq-- fsneedone ; Never saved it, so go do it...
- lwz r8,SAVlevel(r3) ; Get the level this savearea is for
- cmplw cr1,r9,r8 ; Correct level?
- beq-- cr1,fsret ; The current level is already saved, bail out...
+ lwz r8,SAVlevel(r7) ; Get the level of this savearea
+ cmplw r9,r8 ; Correct level?
+ beq-- fsretlk ; The current level is already saved, bail out...
fsneedone: bl EXT(save_get) ; Get a savearea for the context
li r4,SAVfloat ; Get floating point tag
lwz r12,FPUowner(r6) ; Get back our thread
stb r4,SAVflags+2(r3) ; Mark this savearea as a float
- mr. r12,r12 ; See if we were disowned while away. Very, very small chance of it...
- beq-- fsbackout ; If disowned, just toss savearea...
lwz r4,facAct(r12) ; Get the activation associated with live context
lwz r8,FPUsave(r12) ; Get the current top floating point savearea
stw r4,SAVact(r3) ; Indicate the right activation for this context
bl fp_store ; save all 32 FPRs in the save area at r3
mtlr r2 ; Restore return
-
+
+fsretlk: li r7,0 ; Get the unlock value
+ eieio ; Make sure that these updates make it out
+ stw r7,FPUsync(r12) ; Unlock it
+
fsret: mtmsr r0 ; Put interrupts on if they were and floating point off
isync
blr
-fsbackout: mr r4,r0 ; restore the original MSR
- b EXT(save_ret_wMSR) ; Toss savearea and return from there...
+fstimeout: mr r4,r5 ; Set the lock address
+ mr r5,r7 ; Set the lock word data
+ lis r3,hi16(fstimeout_str) ; Get the failed lck message
+ ori r3,r3,lo16(fstimeout_str) ; Get the failed lck message
+ bl EXT(panic)
+ BREAKPOINT_TRAP ; We die here anyway
+
+ .data
+fstimeout_str:
+ STRINGD "fpu_save: timeout on sync lock (0x%08X), value = 0x%08X\n\000"
+ .text
+
/*
* fpu_switch()
lhz r16,PP_CPU_NUMBER(r26) ; Get the current CPU number
-fswretry: mr. r22,r22 ; See if there is any live FP status
-
- beq- fsnosave ; No live context, so nothing to save...
+ mr. r22,r22 ; See if there is any live FP status
+ la r15,FPUsync(r22) ; Point to the sync word
- isync ; Make sure we see this in the right order
+ beq-- fsnosave ; No live context, so nothing to save...
- lwz r30,FPUsave(r22) ; Get the top savearea
- cmplw cr2,r22,r29 ; Are both old and new the same context?
lwz r18,FPUcpu(r22) ; Get the last CPU we ran on
- cmplwi cr1,r30,0 ; Anything saved yet?
+ cmplw cr2,r22,r29 ; Are both old and new the same context?
+ lwz r30,FPUsave(r22) ; Get the top savearea
cmplw r18,r16 ; Make sure we are on the right processor
lwz r31,FPUlevel(r22) ; Get the context level
+ cmplwi cr1,r30,0 ; Anything saved yet?
- bne- fsnosave ; No, not on the same processor...
+ bne-- fsnosave ; No, not on the same processor...
;
; Check to see if the live context has already been saved.
cmplw r31,r27 ; See if the current and active levels are the same
crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same
- li r3,0 ; Clear this
- beq- fsthesame ; New and old are the same, just go enable...
+ beq-- fsthesame ; New and old are the same, just go enable...
+
+
+;
+; Note it turns out that on a G5, the following load has about a 50-50 chance of
+; taking a segment exception in a system that is doing heavy file I/O. We
+; make a dummy access right now in order to get that resolved before we take the lock.
+; We do not use the data returned because it may change while we take the lock
+;
+
+ beq-- cr1,fswsync ; Nothing saved, skip the probe attempt...
+ lwz r11,SAVlevel(r30) ; Touch the context in order to fault in the segment
+
+;
+; Make sure that the live context block is not mucked with while
+; we are trying to save it on out
+;
+
+fswsync: lis r11,ha16(EXT(LockTimeOut)) ; Get the high part
+ mftb r3 ; Get the time now
+ lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value
+ b fswsync0a ; Jump to the lock...
+
+ .align 5
+
+fswsync0: li r19,lgKillResv ; Get killing field
+ stwcx. r19,0,r19 ; Kill reservation
+
+fswsync0a: lwz r19,0(r15) ; Sniff the lock
+ mftb r18 ; Is it time yet?
+ cmplwi cr1,r19,0 ; Is it locked?
+ sub r18,r18,r3 ; How long have we been spinning?
+ cmplw r18,r11 ; Has it been too long?
+ bgt-- fswtimeout ; Way too long, panic...
+ bne-- cr1,fswsync0a ; Yea, still locked so sniff harder...
+
+fswsync1: lwarx r19,0,r15 ; Get the sync word
+ li r0,1 ; Get the lock
+ mr. r19,r19 ; Is it unlocked?
+ bne-- fswsync0 ; Unfortunately, it is locked...
+ stwcx. r0,0,r15 ; Store lock and test reservation
+ bne-- fswsync1 ; Try again if lost reservation...
+
+ isync ; Toss speculation
- beq- cr1,fsmstsave ; Not saved yet, go do it...
+;
+; Note that now that we have the lock, we need to check if anything changed.
+; Also note that the possible changes are limited. The context owner can
+; never change to a different thread or level although it can be invalidated.
+; A new context can not be pushed on top of us, but it can be popped. The
+; cpu indicator will always change if another processor mucked with any
+; contexts.
+;
+; It should be very rare that any of the context stuff changes across the lock.
+;
+
+ lwz r0,FPUowner(r26) ; Get the thread that owns the FPU again
+ lwz r11,FPUsave(r22) ; Get the top savearea again
+ lwz r18,FPUcpu(r22) ; Get the last CPU we ran on again
+ sub r0,r0,r22 ; Non-zero if we lost ownership, 0 if not
+ xor r11,r11,r30 ; Non-zero if saved context changed, 0 if not
+ xor r18,r18,r16 ; Non-zero if cpu changed, 0 if not
+ cmplwi cr1,r30,0 ; Is anything saved?
+ or r0,r0,r11 ; Zero only if both owner and context are unchanged
+ or. r0,r0,r18 ; Zero only if nothing has changed
+ li r3,0 ; Clear this
+ bne-- fsnosavelk ; Something has changed, so this is not ours to save...
+ beq-- cr1,fsmstsave ; There is no context saved yet...
+
lwz r11,SAVlevel(r30) ; Get the level of top saved context
cmplw r31,r11 ; Are live and saved the same?
#if FPVECDBG
lis r0,hi16(CutTrace) ; (TEST/DEBUG)
li r2,0x7F02 ; (TEST/DEBUG)
- mr r3,r30 ; (TEST/DEBUG)
+ mr r3,r11 ; (TEST/DEBUG)
mr r5,r31 ; (TEST/DEBUG)
oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
sc ; (TEST/DEBUG)
li r3,0 ; (TEST/DEBUG)
#endif
- beq+ fsnosave ; Same level, so already saved...
-
+ beq++ fsnosavelk ; Same level, so already saved...
fsmstsave: stw r3,FPUowner(r26) ; Kill the context now
eieio ; Make sure everyone sees it
bl EXT(save_get) ; Go get a savearea
-
- mr. r31,r31 ; Are we saving the user state?
- la r15,FPUsync(r22) ; Point to the sync word
- beq++ fswusave ; Yeah, no need for lock...
-;
-; Here we make sure that the live context is not tossed while we are
-; trying to push it. This can happen only for kernel context and
-; then only by a race with act_machine_sv_free.
-;
-; We only need to hold this for a very short time, so no sniffing needed.
-; If we find any change to the level, we just abandon.
-;
-fswsync: lwarx r19,0,r15 ; Get the sync word
- li r0,1 ; Get the lock
- cmplwi cr1,r19,0 ; Is it unlocked?
- stwcx. r0,0,r15 ; Store lock and test reservation
- crand cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked
- bne-- fswsync ; Try again if lost reservation or locked...
-
- isync ; Toss speculation
- lwz r0,FPUlevel(r22) ; Pick up the level again
- li r7,0 ; Get unlock value
- cmplw r0,r31 ; Same level?
- beq++ fswusave ; Yeah, we expect it to be...
-
- stw r7,FPUsync(r22) ; Unlock lock. No need to sync here
-
- bl EXT(save_ret) ; Toss save area because we are abandoning save
- b fsnosave ; Skip the save...
-
- .align 5
-
-fswusave: lwz r12,facAct(r22) ; Get the activation associated with the context
- stw r3,FPUsave(r22) ; Set this as the latest context savearea for the thread
- mr. r31,r31 ; Check again if we were user level
+ lwz r12,facAct(r22) ; Get the activation associated with the context
stw r30,SAVprev+4(r3) ; Point us to the old context
stw r31,SAVlevel(r3) ; Tag our level
li r7,SAVfloat ; Get the floating point ID
stw r12,SAVact(r3) ; Make sure we point to the right guy
stb r7,SAVflags+2(r3) ; Set that we have a floating point save area
-
- li r7,0 ; Get the unlock value
-
- beq-- fswnulock ; Skip unlock if user (we did not lock it)...
- eieio ; Make sure that these updates make it out
- stw r7,FPUsync(r22) ; Unlock it.
+ stw r3,FPUsave(r22) ; Set this as the latest context savearea for the thread
-fswnulock:
-
#if FPVECDBG
lis r0,hi16(CutTrace) ; (TEST/DEBUG)
li r2,0x7F03 ; (TEST/DEBUG)
bl fp_store ; store all 32 FPRs
+fsnosavelk: li r7,0 ; Get the unlock value
+ eieio ; Make sure that these updates make it out
+ stw r7,FPUsync(r22) ; Unlock it.
+
;
; The context is all saved now and the facility is free.
;
-; If we do not we need to fill the registers with junk, because this level has
+; Check if we need to fill the registers with junk, because this level has
; never used them before and some thieving bastard could hack the old values
; of some thread! Just imagine what would happen if they could! Why, nothing
; would be safe! My God! It is terrifying!
;
+; Make sure that the live context block is not mucked with while
+; we are trying to load it up
+;
+
+fsnosave: la r15,FPUsync(r29) ; Point to the sync word
+ lis r11,ha16(EXT(LockTimeOut)) ; Get the high part
+ mftb r3 ; Get the time now
+ lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value
+ b fsnsync0a ; Jump to the lock...
+
+ .align 5
+
+fsnsync0: li r19,lgKillResv ; Get killing field
+ stwcx. r19,0,r19 ; Kill reservation
+
+fsnsync0a: lwz r19,0(r15) ; Sniff the lock
+ mftb r18 ; Is it time yet?
+ cmplwi cr1,r19,0 ; Is it locked?
+ sub r18,r18,r3 ; How long have we been spinning?
+ cmplw r18,r11 ; Has it been too long?
+ bgt-- fsntimeout ; Way too long, panic...
+ bne-- cr1,fsnsync0a ; Yea, still locked so sniff harder...
+fsnsync1: lwarx r19,0,r15 ; Get the sync word
+ li r0,1 ; Get the lock
+ mr. r19,r19 ; Is it unlocked?
+ bne-- fsnsync0 ; Unfortunately, it is locked...
+ stwcx. r0,0,r15 ; Store lock and test reservation
+ bne-- fsnsync1 ; Try again if lost reservation...
+
+ isync ; Toss speculation
-fsnosave: lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one
+ lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one
lwz r19,FPUcpu(r29) ; Get the last CPU we ran on
lwz r14,FPUsave(r29) ; Point to the top of the "new" context stack
dcbt 0,r11 ; Touch line in
- lwz r3,SAVprev+4(r14) ; Get the previous context
lwz r0,SAVlevel(r14) ; Get the level of first facility savearea
+ lwz r3,SAVprev+4(r14) ; Get the previous context
cmplw r0,r15 ; Top level correct to load?
+ li r7,0 ; Get the unlock value
bne-- MakeSureThatNoTerroristsCanHurtUsByGod ; No, go initialize...
stw r3,FPUsave(r29) ; Pop the context (we will toss the savearea later)
sc ; (TEST/DEBUG)
#endif
+ eieio ; Make sure that these updates make it out
+ stw r7,FPUsync(r29) ; Unlock context now that the context save has been removed
+
// Note this code is used both by 32- and 128-byte processors. This means six extra DCBTs
// are executed on a 128-byte machine, but that is better than a mispredicted branch.
sc ; (TEST/DEBUG)
#endif
lis r5,hi16(EXT(FloatInit)) ; Get top secret floating point init value address
+ li r7,0 ; Get the unlock value
ori r5,r5,lo16(EXT(FloatInit)) ; Slam bottom
+ eieio ; Make sure that these updates make it out
+ stw r7,FPUsync(r29) ; Unlock it now that the context has been removed
+
lfd f0,0(r5) ; Initialize FP0
fmr f1,f0 ; Do them all
fmr f2,f0
;
; We get here when we are switching to the same context at the same level and the context
-; is still live. Essentially, all we are doing is turning on the faility. It may have
+; is still live. Essentially, all we are doing is turning on the facility. It may have
; gotten turned off due to doing a context save for the current level or a context switch
; back to the live guy.
;
.align 5
+
+fsthesamel: li r7,0 ; Get the unlock value
+ eieio ; Make sure that these updates make it out
+ stw r7,FPUsync(r22) ; Unlock it.
+
fsthesame:
#if FPVECDBG
cmplw r11,r31 ; Are live and saved the same?
- bne+ fsenable ; Level not the same, nothing to pop, go enable and exit...
+ bne++ fsenable ; Level not the same, nothing to pop, go enable and exit...
mr r3,r30 ; Get the old savearea (we popped it before)
stw r14,FPUsave(r22) ; Pop the savearea from the stack
bl EXT(save_ret) ; Toss it
b fsenable ; Go enable and exit...
+;
+; Note that we need to choke in this code rather than panic because there is no
+; stack.
+;
+
+fswtimeout: lis r0,hi16(Choke) ; Choke code
+ ori r0,r0,lo16(Choke) ; and the rest
+ li r3,failTimeout ; Timeout code
+ sc ; System ABEND
+
+fsntimeout: lis r0,hi16(Choke) ; Choke code
+ ori r0,r0,lo16(Choke) ; and the rest
+ li r3,failTimeout ; Timeout code
+ sc ; System ABEND
+
+vswtimeout0:
+ lis r0,hi16(Choke) ; Choke code
+ ori r0,r0,lo16(Choke) ; and the rest
+ li r3,failTimeout ; Timeout code
+ sc ; System ABEND
+
+vswtimeout1:
+ lis r0,hi16(Choke) ; Choke code
+ ori r0,r0,lo16(Choke) ; and the rest
+ li r3,failTimeout ; Timeout code
+ sc ; System ABEND
;
; This function invalidates any live floating point context for the passed in facility_context.
lwz r12,VMXowner(r6) ; Get the context ID for owner
#if FPVECDBG
+ mr r11,r6 ; (TEST/DEBUG)
mr r7,r0 ; (TEST/DEBUG)
li r4,0 ; (TEST/DEBUG)
mr r10,r3 ; (TEST/DEBUG)
mr. r3,r12 ; (TEST/DEBUG)
li r2,0x5F00 ; (TEST/DEBUG)
li r5,0 ; (TEST/DEBUG)
- beq- noowneryeu ; (TEST/DEBUG)
+ lwz r6,liveVRS(r6) ; (TEST/DEBUG)
+ beq-- noowneryeu ; (TEST/DEBUG)
lwz r4,VMXlevel(r12) ; (TEST/DEBUG)
lwz r5,VMXsave(r12) ; (TEST/DEBUG)
sc ; (TEST/DEBUG)
mr r0,r7 ; (TEST/DEBUG)
mr r3,r10 ; (TEST/DEBUG)
+ mr r6,r11 ; (TEST/DEBUG)
#endif
mflr r2 ; Save the return address
-vsretry: mr. r12,r12 ; Anyone own the vector?
+ cmplw r3,r12 ; Is the specified context live?
lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number
- beq- vsret ; Nobody owns the vector, no save required...
-
- cmplw cr1,r3,r12 ; Is the specified context live?
-
- isync ; Force owner check first
-
+ bne-- vsret ; We do not own the vector, no save required...
lwz r9,VMXcpu(r12) ; Get the cpu that context was last on
- bne- cr1,vsret ; Specified context is not live
- cmplw cr1,r9,r11 ; Was the context for this processor?
- beq+ cr1,vsgoodcpu ; Facility last used on this processor...
+ cmplw r9,r11 ; Was the context for this processor?
+ la r5,VMXsync(r3) ; Point to the sync word
+ bne-- vsret ; Specified context is not live
- b vsret ; Someone else claimed this...
+;
+; It looks like we need to save this one. Or possibly toss a saved one if
+; the VRSAVE is 0.
+;
+; First, make sure that the live context block is not mucked with while
+; we are trying to save it on out. Then we will give it the final check.
+;
+
+ lis r9,ha16(EXT(LockTimeOut)) ; Get the high part
+ mftb r8 ; Get the time now
+ lwz r9,lo16(EXT(LockTimeOut))(r9) ; Get the timeout value
+ b vssync0a ; Jump to the lock...
.align 5
-vsgoodcpu: lwz r3,VMXsave(r12) ; Get the current vector savearea for the thread
+vssync0: li r7,lgKillResv ; Get killing field
+ stwcx. r7,0,r7 ; Kill reservation
+
+vssync0a: lwz r7,0(r5) ; Sniff the lock
+ mftb r10 ; Is it time yet?
+ cmplwi cr1,r7,0 ; Is it locked?
+ sub r10,r10,r8 ; How long have we been spinning?
+ cmplw r10,r9 ; Has it been too long?
+ bgt-- vswtimeout0 ; Way too long, panic...
+ bne-- cr1,vssync0a ; Yea, still locked so sniff harder...
+
+vssync1: lwarx r7,0,r5 ; Get the sync word
+ li r12,1 ; Get the lock
+ mr. r7,r7 ; Is it unlocked?
+ bne-- vssync0 ; Unfortunately, it is locked...
+ stwcx. r12,0,r5 ; Store lock and test reservation
+ bne-- vssync1 ; Try again if lost reservation...
+
+ isync ; Toss speculation
+
+ lwz r12,VMXowner(r6) ; Get the context ID for owner
+ cmplw r3,r12 ; Check again if we own VMX?
lwz r10,liveVRS(r6) ; Get the right VRSave register
- lwz r9,VMXlevel(r12) ; Get our current level indicator
+ bne-- vsretlk ; Go unlock and return since we no longer own context
+ lwz r5,VMXcpu(r12) ; Get the cpu that context was last on
+ lwz r7,VMXsave(r12) ; Get the current vector savearea for the thread
+ cmplwi cr1,r10,0 ; Is VRsave set to 0?
+ cmplw r5,r11 ; Is this for the same processor?
+ lwz r9,VMXlevel(r12) ; Get our current level indicator
+ bne-- vsretlk ; Not the same processor, skip any save...
- cmplwi cr1,r3,0 ; Have we ever saved this facility context?
- beq- cr1,vsneedone ; Never saved it, so we need an area...
+ cmplwi r7,0 ; Have we ever saved this facility context?
+ beq-- vsneedone ; Never saved it, so we need an area...
- lwz r8,SAVlevel(r3) ; Get the level this savearea is for
- mr. r10,r10 ; Is VRsave set to 0?
- cmplw cr1,r9,r8 ; Correct level?
- bne- cr1,vsneedone ; Different level, so we need to save...
+ lwz r8,SAVlevel(r7) ; Get the level this savearea is for
+ cmplw r9,r8 ; Correct level?
+ bne-- vsneedone ; Different level, so we need to save...
- bne+ vsret ; VRsave is non-zero so we need to keep what is saved...
+ bne++ cr1,vsretlk ; VRsave is non-zero so we need to keep what is saved...
- lwz r4,SAVprev+4(r3) ; Pick up the previous area
- lwz r5,SAVlevel(r4) ; Get the level associated with save
+ lwz r4,SAVprev+4(r7) ; Pick up the previous area
+ li r5,0 ; Assume we just dumped the last
+ mr. r4,r4 ; Is there one?
stw r4,VMXsave(r12) ; Dequeue this savearea
- li r4,0 ; Clear
- stw r5,VMXlevel(r12) ; Save the level
-
- stw r4,VMXowner(r12) ; Show no live context here
- eieio
+ beq-- vsnomore ; We do not have another...
+
+ lwz r5,SAVlevel(r4) ; Get the level associated with save
+
+vsnomore: stw r5,VMXlevel(r12) ; Save the level
+ li r7,0 ; Clear
+ stw r7,VMXowner(r6) ; Show no live context here
vsbackout: mr r4,r0 ; restore the saved MSR
+ eieio
+ stw r7,VMXsync(r12) ; Unlock the context
+
b EXT(save_ret_wMSR) ; Toss the savearea and return from there...
.align 5
-vsneedone: mr. r10,r10 ; Is VRsave set to 0?
- beq- vsret ; Yeah, they do not care about any of them...
+vsneedone: beq-- cr1,vsclrlive ; VRSave is zero, go blow away the context...
bl EXT(save_get) ; Get a savearea for the context
lwz r12,VMXowner(r6) ; Get back our context ID
stb r4,SAVflags+2(r3) ; Mark this savearea as a vector
mr. r12,r12 ; See if we were disowned while away. Very, very small chance of it...
- beq- vsbackout ; If disowned, just toss savearea...
+ li r7,0 ; Clear
+ beq-- vsbackout ; If disowned, just toss savearea...
lwz r4,facAct(r12) ; Get the activation associated with live context
lwz r8,VMXsave(r12) ; Get the current top vector savearea
stw r4,SAVact(r3) ; Indicate the right activation for this context
bl vr_store ; store live VRs into savearea as required (uses r4-r11)
+ mfsprg r6,1 ; Get the current activation
mtcrf 255,r12 ; Restore the non-volatile CRs
- mtlr r2 ; restore return address
+ lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block
+ mtlr r2 ; Restore return address
+ lwz r12,VMXowner(r6) ; Get back our context ID
+
+vsretlk: li r7,0 ; Get the unlock value
+ eieio ; Make sure that these updates make it out
+ stw r7,VMXsync(r12) ; Unlock it
vsret: mtmsr r0 ; Put interrupts on if they were and vector off
isync
blr
+vsclrlive: li r7,0 ; Clear
+ stw r7,VMXowner(r6) ; Show no live context here
+ b vsretlk ; Go unlock and leave...
+
/*
* vec_switch()
*
li r2,0x5F01 ; (TEST/DEBUG)
mr r3,r22 ; (TEST/DEBUG)
mr r5,r29 ; (TEST/DEBUG)
+ lwz r6,liveVRS(r26) ; (TEST/DEBUG)
oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
sc ; (TEST/DEBUG)
#endif
lhz r16,PP_CPU_NUMBER(r26) ; Get the current CPU number
-vsvretry: mr. r22,r22 ; See if there is any live vector status
-
- beq- vsnosave ; No live context, so nothing to save...
+ mr. r22,r22 ; See if there is any live vector status
+ la r15,VMXsync(r22) ; Point to the sync word
- isync ; Make sure we see this in the right order
+ beq-- vswnosave ; No live context, so nothing to save...
- lwz r30,VMXsave(r22) ; Get the top savearea
- cmplw cr2,r22,r29 ; Are both old and new the same context?
lwz r18,VMXcpu(r22) ; Get the last CPU we ran on
+ cmplw cr2,r22,r29 ; Are both old and new the same context?
+ lwz r30,VMXsave(r22) ; Get the top savearea
cmplwi cr1,r30,0 ; Anything saved yet?
- cmplw r18,r16 ; Make sure we are on the right processor
lwz r31,VMXlevel(r22) ; Get the context level
+ cmplw r18,r16 ; Make sure we are on the right processor
lwz r10,liveVRS(r26) ; Get the right VRSave register
- bne- vsnosave ; No, not on the same processor...
+ bne-- vswnosave ; No, not on the same processor...
;
; Check to see if the live context has already been saved.
;
cmplw r31,r27 ; See if the current and active levels are the same
- crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same
- li r8,0 ; Clear this
+ crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same
+
+ beq-- vswthesame ; New and old are the same, just go enable...
+
+;
+; Make sure that the live context block is not mucked with while
+; we are trying to save it on out
+;
+
+ lis r11,ha16(EXT(LockTimeOut)) ; Get the high part
+ mftb r3 ; Get the time now
+ lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value
+ b vswsync0a ; Jump to the lock...
+
+ .align 5
- beq- vsthesame ; New and old are the same, just go enable...
+vswsync0: li r19,lgKillResv ; Get killing field
+ stwcx. r19,0,r19 ; Kill reservation
+
+vswsync0a: lwz r19,0(r15) ; Sniff the lock
+ mftb r18 ; Is it time yet?
+ cmplwi cr1,r19,0 ; Is it locked?
+ sub r18,r18,r3 ; How long have we been spinning?
+ cmplw r18,r11 ; Has it been too long?
+ bgt-- vswtimeout0 ; Way too long, panic...
+ bne-- cr1,vswsync0a ; Yea, still locked so sniff harder...
+
+vswsync1: lwarx r19,0,r15 ; Get the sync word
+ li r0,1 ; Get the lock
+ mr. r19,r19 ; Is it unlocked?
+ bne-- vswsync0 ; Unfortunately, it is locked...
+ stwcx. r0,0,r15 ; Store lock and test reservation
+ bne-- vswsync1 ; Try again if lost reservation...
+
+ isync ; Toss speculation
+
+;
+; Note that now that we have the lock, we need to check if anything changed.
+; Also note that the possible changes are limited. The context owner can
+; never change to a different thread or level although it can be invalidated.
+; A new context can not be pushed on top of us, but it can be popped. The
+; cpu indicator will always change if another processor mucked with any
+; contexts.
+;
+; It should be very rare that any of the context stuff changes across the lock.
+;
+ lwz r0,VMXowner(r26) ; Get the thread that owns the vectors again
+ lwz r11,VMXsave(r22) ; Get the top savearea again
+ lwz r18,VMXcpu(r22) ; Get the last CPU we ran on again
+ sub r0,r0,r22 ; Non-zero if we lost ownership, 0 if not
+ xor r11,r11,r30 ; Non-zero if saved context changed, 0 if not
+ xor r18,r18,r16 ; Non-zero if cpu changed, 0 if not
+ cmplwi cr1,r30,0 ; Is anything saved?
+ or r0,r0,r11 ; Zero only if both owner and context are unchanged
+ or. r0,r0,r18 ; Zero only if nothing has changed
cmplwi cr2,r10,0 ; Check VRSave to see if we really need to save anything...
- beq- cr1,vsmstsave ; Not saved yet, go do it...
+ li r8,0 ; Clear
+
+ bne-- vswnosavelk ; Something has changed, so this is not ours to save...
+ beq-- cr1,vswmstsave ; There is no context saved yet...
lwz r11,SAVlevel(r30) ; Get the level of top saved context
sc ; (TEST/DEBUG)
#endif
- bne- vsmstsave ; Live context has not been saved yet...
-
- bne- cr2,vsnosave ; Live context saved and VRSave not 0, no save and keep context...
+ beq++ vswnosavelk ; Same level, already saved...
+ bne-- cr2,vswnosavelk ; Live context saved and VRSave not 0, no save and keep context...
lwz r4,SAVprev+4(r30) ; Pick up the previous area
li r5,0 ; Assume this is the only one (which should be the usual case)
mr. r4,r4 ; Was this the only one?
stw r4,VMXsave(r22) ; Dequeue this savearea
- beq+ vsonlyone ; This was the only one...
+ beq++ vswonlyone ; This was the only one...
lwz r5,SAVlevel(r4) ; Get the level associated with previous save
-vsonlyone: stw r5,VMXlevel(r22) ; Save the level
+vswonlyone: stw r5,VMXlevel(r22) ; Save the level
stw r8,VMXowner(r26) ; Clear owner
- eieio
+
mr r3,r30 ; Copy the savearea we are tossing
bl EXT(save_ret) ; Toss the savearea
- b vsnosave ; Go load up the context...
+ b vswnosavelk ; Go load up the context...
.align 5
-
-vsmstsave: stw r8,VMXowner(r26) ; Clear owner
- eieio
- beq- cr2,vsnosave ; The VRSave was 0, so there is nothing to save...
+vswmstsave: stw r8,VMXowner(r26) ; Clear owner
+ beq-- cr2,vswnosavelk ; The VRSave was 0, so there is nothing to save...
bl EXT(save_get) ; Go get a savearea
- mr. r31,r31 ; Are we saving the user state?
- la r15,VMXsync(r22) ; Point to the sync word
- beq++ vswusave ; Yeah, no need for lock...
-;
-; Here we make sure that the live context is not tossed while we are
-; trying to push it. This can happen only for kernel context and
-; then only by a race with act_machine_sv_free.
-;
-; We only need to hold this for a very short time, so no sniffing needed.
-; If we find any change to the level, we just abandon.
-;
-vswsync: lwarx r19,0,r15 ; Get the sync word
- li r0,1 ; Get the lock
- cmplwi cr1,r19,0 ; Is it unlocked?
- stwcx. r0,0,r15 ; Store lock and test reservation
- crand cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked
- bne-- vswsync ; Try again if lost reservation or locked...
-
- isync ; Toss speculation
-
- lwz r0,VMXlevel(r22) ; Pick up the level again
- li r7,0 ; Get unlock value
- cmplw r0,r31 ; Same level?
- beq++ vswusave ; Yeah, we expect it to be...
-
- stw r7,VMXsync(r22) ; Unlock lock. No need to sync here
-
- bl EXT(save_ret) ; Toss save area because we are abandoning save
- b vsnosave ; Skip the save...
-
- .align 5
-
-vswusave: lwz r12,facAct(r22) ; Get the activation associated with the context
+ lwz r12,facAct(r22) ; Get the activation associated with the context
stw r3,VMXsave(r22) ; Set this as the latest context savearea for the thread
- mr. r31,r31 ; Check again if we were user level
stw r30,SAVprev+4(r3) ; Point us to the old context
stw r31,SAVlevel(r3) ; Tag our level
li r7,SAVvector ; Get the vector ID
stw r12,SAVact(r3) ; Make sure we point to the right guy
stb r7,SAVflags+2(r3) ; Set that we have a vector save area
- li r7,0 ; Get the unlock value
-
- beq-- vswnulock ; Skip unlock if user (we did not lock it)...
- eieio ; Make sure that these updates make it out
- stw r7,VMXsync(r22) ; Unlock it.
-
-vswnulock:
-
#if FPVECDBG
lis r0,hi16(CutTrace) ; (TEST/DEBUG)
li r2,0x5F03 ; (TEST/DEBUG)
lwz r10,liveVRS(r26) ; Get the right VRSave register
bl vr_store ; store VRs into savearea according to vrsave (uses r4-r11)
-
;
; The context is all saved now and the facility is free.
;
-; If we do not we need to fill the registers with junk, because this level has
+; Check if we need to fill the registers with junk, because this level has
; never used them before and some thieving bastard could hack the old values
; of some thread! Just imagine what would happen if they could! Why, nothing
; would be safe! My God! It is terrifying!
; Also, along the way, thanks to Ian Ollmann, we generate the 0x7FFFDEAD (QNaNbarbarian)
; constant that we may need to fill unused vector registers.
;
+; Make sure that the live context block is not mucked with while
+; we are trying to load it up
+;
+vswnosavelk:
+ li r7,0 ; Get the unlock value
+ eieio ; Make sure that these updates make it out
+ stw r7,VMXsync(r22) ; Unlock the old context
+
+vswnosave: la r15,VMXsync(r29) ; Point to the sync word
+ lis r11,ha16(EXT(LockTimeOut)) ; Get the high part
+ mftb r3 ; Get the time now
+ lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value
+ b vswnsync0a ; Jump to the lock...
+
+ .align 5
+
+vswnsync0: li r19,lgKillResv ; Get killing field
+ stwcx. r19,0,r19 ; Kill reservation
+vswnsync0a: lwz r19,0(r15) ; Sniff the lock
+ mftb r18 ; Is it time yet?
+ cmplwi cr1,r19,0 ; Is it locked?
+ sub r18,r18,r3 ; How long have we been spinning?
+ cmplw r18,r11 ; Has it been too long?
+ bgt-- vswtimeout1 ; Way too long, panic...
+ bne-- cr1,vswnsync0a ; Yea, still locked so sniff harder...
+vswnsync1: lwarx r19,0,r15 ; Get the sync word
+ li r0,1 ; Get the lock
+ mr. r19,r19 ; Is it unlocked?
+ bne-- vswnsync0 ; Unfortunately, it is locked...
+ stwcx. r0,0,r15 ; Store lock and test reservation
+ bne-- vswnsync1 ; Try again if lost reservation...
+
+ isync ; Toss speculation
-vsnosave: vspltisb v31,-10 ; Get 0xF6F6F6F6
+ vspltisb v31,-10 ; Get 0xF6F6F6F6
lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one
vspltisb v30,5 ; Get 0x05050505
lwz r19,VMXcpu(r29) ; Get the last CPU we ran on
lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc
vrlb v31,v31,v29 ; Get 0xDEADDEAD
-vsinvothr: lwarx r18,r16,r19 ; Get the owner
+vswinvothr: lwarx r18,r16,r19 ; Get the owner
sub r0,r18,r29 ; Subtract one from the other
sub r11,r29,r18 ; Subtract the other from the one
srawi r11,r11,31 ; Get a 0 if equal or -1 of not
and r18,r18,r11 ; Make 0 if same, unchanged if not
stwcx. r18,r16,r19 ; Try to invalidate it
- bne-- vsinvothr ; Try again if there was a collision...
+ bne-- vswinvothr ; Try again if there was a collision...
cmplwi cr1,r14,0 ; Do we possibly have some context to load?
vmrghh v31,v30,v31 ; Get 0x7FFFDEAD. V31 keeps this value until the bitter end
bl vr_load ; load VRs from save area based on vrsave in r10
bl EXT(save_ret) ; Toss the save area after loading VRs
+
+vrenablelk: li r7,0 ; Get the unlock value
+ eieio ; Make sure that these updates make it out
+ stw r7,VMXsync(r29) ; Unlock the new context
vrenable: lwz r8,savesrr1+4(r25) ; Get the msr of the interrupted guy
oris r8,r8,hi16(MASK(MSR_VEC)) ; Enable the vector facility
vor v28,v31,v31 ; Copy into the next register
vor v29,v31,v31 ; Copy into the next register
vor v30,v31,v31 ; Copy into the next register
- b vrenable ; Finish setting it all up...
+ b vrenablelk ; Finish setting it all up...
.align 5
-vsthesame:
+vswthesame:
#if FPVECDBG
lis r0,hi16(CutTrace) ; (TEST/DEBUG)
#define failSkipLists 7
#define failUnalignedStk 8
#define failPmap 9
+#define failTimeout 10
/* Always must be last - update failNames table in model_dep.c as well */
-#define failUnknown 10
+#define failUnknown 11
#ifndef ASSEMBLER
#define VERIFYSAVE 0
#define FPVECDBG 0
+#define FPFLOOD 0
#define INSTRUMENT 0
/*
.L_call_trap:
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bl EXT(trap)
lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
mr r4,r13 ; current activation
addi r7,r7,1 ; Bump it
stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bl EXT(unix_syscall) ; Check out unix...
.L_call_server_syscall_exception:
lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count
neg r31,r0 ; Make this positive
- mr r3,r31 ; save it
- slwi r27,r3,4 ; multiply by 16
- slwi r3,r3,2 ; and the original by 4
+ mr r3,r31 ; save it
+ slwi r27,r3,4 ; multiply by 16
+ slwi r3,r3,2 ; and the original by 4
ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
- add r27,r27,r3 ; for a total of 20x (5 words/entry)
+ add r27,r27,r3 ; for a total of 20x (5 words/entry)
addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count
cmplwi r8,0 ; Is kdebug_enable non-zero
stw r7,TASK_SYSCALLS_MACH(r10) ; Save count
.L_kernel_syscall_munge:
cmplwi r0,0 ; test for null munger
- mtctr r0 ; Set the function call address
+ mtctr r0 ; Set the function call address
addi r3,r30,saver3 ; Pointer to args from save area
addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
beq-- .L_kernel_syscall_trapcall ; null munger - skip to trap call
lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
mtctr r0 ; Set the function call address
addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bctrl
mr r4,r30
lwz r5,savedsisr(r30) ; Get the DSISR
lwz r6,savedar+4(r30) ; Get the DAR
-
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bl EXT(interrupt)
lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea
mr. r23,r23 ; (TEST/DEBUG) Should be level 0
beq++ fpulvl0 ; (TEST/DEBUG) Yes...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context?
beq fpunusrstt ; (TEST/DEBUG) No...
lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer
mr. r23,r23 ; (TEST/DEBUG) Is this our user context?
beq++ fpulvl0b ; (TEST/DEBUG) Yes...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain?
beq++ fpunusrstt ; (TEST/DEBUG) Nope...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpunusrstt: ; (TEST/DEBUG)
#endif
beq++ fpuena ; Nope...
lwz r25,SAVlevel(r24) ; Get the level of savearea
lwz r0,SAVprev+4(r24) ; Get the previous
+
cmplw r30,r25 ; Is savearea for the level we are launching?
bne++ fpuena ; No, just go enable...
.align 5
hsg64Miss: bl mapPhysUnlock ; Unlock physent chain
- mtmsr r11 ; Restore 'rupts, translation
+ mtmsrd r11 ; Restore 'rupts, translation
li r3,mapRtEmpty ; No mappings found matching specified criteria
b hrmRetnCmn ; Exit through common epilog
; System Calls (sc instruction)
;
-; The syscall number is in r0. All we do here is munge the number into a
-; 7-bit index into the "scTable", and dispatch on it to handle the Ultra
+; The syscall number is in r0. All we do here is munge the number into an
+; 8-bit index into the "scTable", and dispatch on it to handle the Ultra
; Fast Traps (UFTs.) The index is:
;
+; 0x80 - set if syscall number is 0x80000000 (CutTrace)
; 0x40 - set if syscall number is 0x00006004
; 0x20 - set if upper 29 bits of syscall number are 0xFFFFFFF8
; 0x10 - set if upper 29 bits of syscall number are 0x00007FF0
.L_handlerC00:
mtsprg 3,r11 ; Save R11
mtsprg 2,r13 ; Save R13
- rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number
- xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps
- addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap
- cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap
- cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap
- rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position
- xori r13,r0,0x6004 ; start to check for 0x6004
- rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number
- cntlzw r13,r13 ; set bit 0x20 iff 0x6004
- rlwinm r11,r11,0,0,30 ; clear out bit 31
- rlwimi r11,r13,1,0x40 ; move 0x6004 bit into position
- lhz r11,lo16(scTable)(r11) ; get branch address from sc table
- mfctr r13 ; save caller's ctr in r13
- mtctr r11 ; set up branch to syscall handler
- mfsprg r11,0 ; get per_proc, which most UFTs use
- bctr ; dispatch (r11 in sprg3, r13 in sprg2, ctr in r13, per_proc in r11)
+ rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number
+ xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps
+ addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap
+ cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap
+ cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap
+ xoris r0,r0,0x8000 ; Flip bit to make 0 iff 0x80000000
+ rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position
+ cntlzw r13,r0 ; Set bit 0x20 iff 0x80000000
+ xoris r0,r0,0x8000 ; Flip bit to restore R0
+ rlwimi r11,r13,2,0x80 ; Set bit 0x80 iff CutTrace
+ xori r13,r0,0x6004 ; start to check for 0x6004
+ rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number
+ cntlzw r13,r13 ; set bit 0x20 iff 0x6004
+ rlwinm r11,r11,0,0,30 ; clear out bit 31
+ rlwimi r11,r13,1,0x40 ; move 0x6004 bit into position
+ lhz r11,lo16(scTable)(r11) ; get branch address from sc table
+ mfctr r13 ; save caller's ctr in r13
+ mtctr r11 ; set up branch to syscall handler
+ mfsprg r11,0 ; get per_proc, which most UFTs use
+ bctr ; dispatch (r11 in sprg3, r13 in sprg2, ctr in r13, per_proc in r11)
/*
* Trace - generated by single stepping
* 3. If (syscall & 0xFFFFFFF0) == 0xFFFFFFF0, then it is also a UFT and is dispatched here.
*
* 4. If (syscall & 0xFFFFF000) == 0x80000000, then it is a "firmware" call and is dispatched in
- * Firmware.s, though the special "Cut Trace" trap (0x80000000) is handled here in xcpSyscall.
+ * Firmware.s, though the special "Cut Trace" trap (0x80000000) is handled here as an ultra
+ * fast trap.
*
* 5. If (syscall & 0xFFFFF000) == 0xFFFFF000, and it is not one of the above, then it is a Mach
* syscall, which are dispatched in hw_exceptions.s via "mach_trap_table".
* "scTable" is an array of 2-byte addresses, accessed using a 7-bit index derived from the syscall
* number as follows:
*
- * 0x40 (A) - set if syscall number is 0x00006004
- * 0x20 (B) - set if upper 29 bits of syscall number are 0xFFFFFFF8
- * 0x10 (C) - set if upper 29 bits of syscall number are 0x00007FF0
- * 0x0E (D) - low three bits of syscall number
+ * 0x80 (A) - set if syscall number is 0x80000000
+ * 0x40 (B) - set if syscall number is 0x00006004
+ * 0x20 (C) - set if upper 29 bits of syscall number are 0xFFFFFFF8
+ * 0x10 (D) - set if upper 29 bits of syscall number are 0x00007FF0
+ * 0x0E (E) - low three bits of syscall number
*
* If you define another UFT, try to use a number in one of the currently decoded ranges, ie one marked
* "unassigned" below. The dispatch table and the UFT handlers must reside in the first 32KB of
* physical memory.
*/
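A hedged C sketch of the lookup itself: each entry is a 16-bit offset from baseR, so a single lhz plus mtctr/bctr reaches any handler, which is why the table and handlers are constrained to the first 32KB (sc_table, uft_base, and uft_dispatch are stand-in names).

typedef void (*uft_handler_t)(void);
extern unsigned short sc_table[128];  /* 128 two-byte entries: 8-bit index with bit 0 clear */
extern char uft_base[];               /* stand-in for baseR, the low-memory code base */

/* Dispatch using the byte offset produced by sc_table_index() above. */
static void uft_dispatch(unsigned int byte_index)
{
    uft_handler_t h = (uft_handler_t)(uft_base + sc_table[byte_index >> 1]);
    h();                              /* the real code branches via mtctr/bctr */
}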
- .align 7 ; start this table on a cache line
-scTable: ; ABC D
- .short uftNormalSyscall-baseR ; 000 0 these syscalls are not in a reserved range
- .short uftNormalSyscall-baseR ; 000 1 these syscalls are not in a reserved range
- .short uftNormalSyscall-baseR ; 000 2 these syscalls are not in a reserved range
- .short uftNormalSyscall-baseR ; 000 3 these syscalls are not in a reserved range
- .short uftNormalSyscall-baseR ; 000 4 these syscalls are not in a reserved range
- .short uftNormalSyscall-baseR ; 000 5 these syscalls are not in a reserved range
- .short uftNormalSyscall-baseR ; 000 6 these syscalls are not in a reserved range
- .short uftNormalSyscall-baseR ; 000 7 these syscalls are not in a reserved range
-
- .short uftNormalSyscall-baseR ; 001 0 0x7FF0 is unassigned
- .short uftNormalSyscall-baseR ; 001 1 0x7FF1 is Set Thread Info Fast Trap (pass up)
- .short uftThreadInfo-baseR ; 001 2 0x7FF2 is Thread Info
- .short uftFacilityStatus-baseR ; 001 3 0x7FF3 is Facility Status
- .short uftLoadMSR-baseR ; 001 4 0x7FF4 is Load MSR
- .short uftNormalSyscall-baseR ; 001 5 0x7FF5 is the Null FastPath Trap (pass up)
- .short uftNormalSyscall-baseR ; 001 6 0x7FF6 is unassigned
- .short uftNormalSyscall-baseR ; 001 7 0x7FF7 is unassigned
-
- .short uftNormalSyscall-baseR ; 010 0 0xFFFFFFF0 is unassigned
- .short uftNormalSyscall-baseR ; 010 1 0xFFFFFFF1 is unassigned
- .short uftNormalSyscall-baseR ; 010 2 0xFFFFFFF2 is unassigned
- .short uftNormalSyscall-baseR ; 010 3 0xFFFFFFF3 is unassigned
- .short uftNormalSyscall-baseR ; 010 4 0xFFFFFFF4 is unassigned
- .short uftNormalSyscall-baseR ; 010 5 0xFFFFFFF5 is unassigned
- .short uftIsPreemptiveTaskEnv-baseR ; 010 6 0xFFFFFFFE is Blue Box uftIsPreemptiveTaskEnv
- .short uftIsPreemptiveTask-baseR ; 010 7 0xFFFFFFFF is Blue Box IsPreemptiveTask
-
- .short WhoaBaby-baseR ; 011 0 impossible combination
- .short WhoaBaby-baseR ; 011 1 impossible combination
- .short WhoaBaby-baseR ; 011 2 impossible combination
- .short WhoaBaby-baseR ; 011 3 impossible combination
- .short WhoaBaby-baseR ; 011 4 impossible combination
- .short WhoaBaby-baseR ; 011 5 impossible combination
- .short WhoaBaby-baseR ; 011 6 impossible combination
- .short WhoaBaby-baseR ; 011 7 impossible combination
-
- .short WhoaBaby-baseR ; 100 0 0x6000 is an impossible index (diagCall)
- .short WhoaBaby-baseR ; 100 1 0x6001 is an impossible index (vmm_get_version)
- .short WhoaBaby-baseR ; 100 2 0x6002 is an impossible index (vmm_get_features)
- .short WhoaBaby-baseR ; 100 3 0x6003 is an impossible index (vmm_init_context)
- .short uftVMM-baseR ; 100 4 0x6004 is vmm_dispatch (only some of which are UFTs)
- .short WhoaBaby-baseR ; 100 5 0x6005 is an impossible index (bb_enable_bluebox)
- .short WhoaBaby-baseR ; 100 6 0x6006 is an impossible index (bb_disable_bluebox)
- .short WhoaBaby-baseR ; 100 7 0x6007 is an impossible index (bb_settaskenv)
+ .align 8 ; start this table on a 256-byte boundary
+scTable: ; ABCD E
+ .short uftNormalSyscall-baseR ; 0000 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0000 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 0001 0 0x7FF0 is unassigned
+ .short uftNormalSyscall-baseR ; 0001 1 0x7FF1 is Set Thread Info Fast Trap (pass up)
+ .short uftThreadInfo-baseR ; 0001 2 0x7FF2 is Thread Info
+ .short uftFacilityStatus-baseR ; 0001 3 0x7FF3 is Facility Status
+ .short uftLoadMSR-baseR ; 0001 4 0x7FF4 is Load MSR
+ .short uftNormalSyscall-baseR ; 0001 5 0x7FF5 is the Null FastPath Trap (pass up)
+ .short uftNormalSyscall-baseR ; 0001 6 0x7FF6 is unassigned
+ .short uftNormalSyscall-baseR ; 0001 7 0x7FF7 is unassigned
+
+ .short uftNormalSyscall-baseR ; 0010 0 0xFFFFFFF0 is unassigned
+ .short uftNormalSyscall-baseR ; 0010 1 0xFFFFFFF1 is unassigned
+ .short uftNormalSyscall-baseR ; 0010 2 0xFFFFFFF2 is unassigned
+ .short uftNormalSyscall-baseR ; 0010 3 0xFFFFFFF3 is unassigned
+ .short uftNormalSyscall-baseR ; 0010 4 0xFFFFFFF4 is unassigned
+ .short uftNormalSyscall-baseR ; 0010 5 0xFFFFFFF5 is unassigned
+ .short uftIsPreemptiveTaskEnv-baseR ; 0010 6 0xFFFFFFFE is Blue Box uftIsPreemptiveTaskEnv
+ .short uftIsPreemptiveTask-baseR ; 0010 7 0xFFFFFFFF is Blue Box IsPreemptiveTask
+
+ .short WhoaBaby-baseR ; 0011 0 impossible combination
+ .short WhoaBaby-baseR ; 0011 1 impossible combination
+ .short WhoaBaby-baseR ; 0011 2 impossible combination
+ .short WhoaBaby-baseR ; 0011 3 impossible combination
+ .short WhoaBaby-baseR ; 0011 4 impossible combination
+ .short WhoaBaby-baseR ; 0011 5 impossible combination
+ .short WhoaBaby-baseR ; 0011 6 impossible combination
+ .short WhoaBaby-baseR ; 0011 7 impossible combination
+
+ .short WhoaBaby-baseR ; 0100 0 0x6000 is an impossible index (diagCall)
+ .short WhoaBaby-baseR ; 0100 1 0x6001 is an impossible index (vmm_get_version)
+ .short WhoaBaby-baseR ; 0100 2 0x6002 is an impossible index (vmm_get_features)
+ .short WhoaBaby-baseR ; 0100 3 0x6003 is an impossible index (vmm_init_context)
+ .short uftVMM-baseR ; 0100 4 0x6004 is vmm_dispatch (only some of which are UFTs)
+ .short WhoaBaby-baseR ; 0100 5 0x6005 is an impossible index (bb_enable_bluebox)
+ .short WhoaBaby-baseR ; 0100 6 0x6006 is an impossible index (bb_disable_bluebox)
+ .short WhoaBaby-baseR ; 0100 7 0x6007 is an impossible index (bb_settaskenv)
+
+ .short uftNormalSyscall-baseR ; 0101 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0101 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 0110 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0110 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 0111 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 0111 7 these syscalls are not in a reserved range
+
+ .short uftCutTrace-baseR ; 1000 0 CutTrace
+ .short uftNormalSyscall-baseR ; 1000 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1000 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1001 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1001 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1010 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1010 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1011 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1011 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1100 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1100 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1101 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1101 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1110 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1110 7 these syscalls are not in a reserved range
+
+ .short uftNormalSyscall-baseR ; 1111 0 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 1 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 2 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 3 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 4 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 5 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 6 these syscalls are not in a reserved range
+ .short uftNormalSyscall-baseR ; 1111 7 these syscalls are not in a reserved range
.align 2 ; prepare for code
* sprg3 = holds caller's r11
*/
-; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs.
+; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs.
uftVMM:
- mtctr r13 ; restore caller's ctr
- lwz r11,spcFlags(r11) ; get the special flags word from per_proc
- mfcr r13 ; save caller's entire cr (we use all fields below)
+ mtctr r13 ; restore caller's ctr
+ lwz r11,spcFlags(r11) ; get the special flags word from per_proc
+ mfcr r13 ; save caller's entire cr (we use all fields below)
rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits
andi. r11,r11,hi16(runningVM|FamVMena|FamVMmode)
cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM
- bne-- uftNormal80 ; not eligible for FAM UFTs
+ bne-- uftNormal80 ; not eligible for FAM UFTs
cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest
cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister
cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range
- bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs)
+ bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs)
b EXT(vmm_ufp) ; handle UFT range of vmm_dispatch syscall
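In C terms, the cr5/cr2/cror sequence above is a simple inclusive range test; a sketch under the assumption that the selectors are plain integer constants from the kernel's vmm headers:

/* Sketch of the selector check: only vmm_dispatch selectors between
   kvmmResumeGuest and kvmmSetGuestRegister inclusive are FAM UFTs;
   everything else exits to the normal syscall path. */
static int vmm_selector_is_uft(int sel)
{
    return sel >= kvmmResumeGuest && sel <= kvmmSetGuestRegister;
}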
-
-; Handle blue box UFTs (syscalls -1 and -2).
+
+; Handle blue box UFTs (syscalls -1 and -2).
uftIsPreemptiveTask:
uftIsPreemptiveTaskEnv:
- mtctr r13 ; restore caller's ctr
- lwz r11,spcFlags(r11) ; get the special flags word from per_proc
- mfcr r13,0x80 ; save caller's cr0 so we can use it
- andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
+ mtctr r13 ; restore caller's ctr
+ lwz r11,spcFlags(r11) ; get the special flags word from per_proc
+ mfcr r13,0x80 ; save caller's cr0 so we can use it
+ andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need
cmplwi r11,bbNoMachSC ; See if we are trapping syscalls
- blt-- uftNormal80 ; No...
- cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv?
+ blt-- uftNormal80 ; No...
+ cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv?
rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
mfsprg r11,0 ; Get the per proc once more
- bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask
+ bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask
lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv (only difference)
- b uftRestoreThenRFI ; restore modified cr0 and return
+ b uftRestoreThenRFI ; restore modified cr0 and return
-; Handle "Thread Info" UFT (0x7FF2)
+; Handle "Thread Info" UFT (0x7FF2)
- .globl EXT(uft_uaw_nop_if_32bit)
+ .globl EXT(uft_uaw_nop_if_32bit)
uftThreadInfo:
- lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor
+ lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor
LEXT(uft_uaw_nop_if_32bit)
- ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit)
- mtctr r13 ; restore caller's ctr
- b uftRFI ; done
+ ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit)
+ mtctr r13 ; restore caller's ctr
+ b uftRFI ; done
-; Handle "Facility Status" UFT (0x7FF3)
+; Handle "Facility Status" UFT (0x7FF3)
uftFacilityStatus:
- lwz r3,spcFlags(r11) ; get "special flags" word from per_proc
- mtctr r13 ; restore caller's ctr
- b uftRFI ; done
+ lwz r3,spcFlags(r11) ; get "special flags" word from per_proc
+ mtctr r13 ; restore caller's ctr
+ b uftRFI ; done
-; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work.
+; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work.
uftLoadMSR:
- mfsrr1 r11 ; get caller's MSR
- mtctr r13 ; restore caller's ctr
- mfcr r13,0x80 ; save caller's cr0 so we can test PR
- rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel?
- bne- uftNormal80 ; do not permit from user mode
- mfsprg r11,0 ; restore per_proc
+ mfsrr1 r11 ; get caller's MSR
+ mtctr r13 ; restore caller's ctr
+ mfcr r13,0x80 ; save caller's cr0 so we can test PR
+ rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel?
+ bne- uftNormal80 ; do not permit from user mode
+ mfsprg r11,0 ; restore per_proc
mtsrr1 r3 ; Set new MSR
-; Return to caller after UFT. When called:
-; r11 = per_proc ptr
-; r13 = callers cr0 in upper nibble (if uftRestoreThenRFI called)
-; sprg2 = callers r13
-; sprg3 = callers r11
+; Return to caller after UFT. When called:
+; r11 = per_proc ptr
+; r13 = callers cr0 in upper nibble (if uftRestoreThenRFI called)
+; sprg2 = callers r13
+; sprg3 = callers r11
-uftRestoreThenRFI: ; WARNING: can drop down to here
- mtcrf 0x80,r13 ; restore caller's cr0
+uftRestoreThenRFI: ; WARNING: can drop down to here
+ mtcrf 0x80,r13 ; restore caller's cr0
uftRFI:
- .globl EXT(uft_nop_if_32bit)
+ .globl EXT(uft_nop_if_32bit)
LEXT(uft_nop_if_32bit)
- b uftX64 ; patched to NOP if 32-bit processor
+ b uftX64 ; patched to NOP if 32-bit processor
- lwz r11,pfAvailable(r11) ; Get the feature flags
+uftX32: lwz r11,pfAvailable(r11) ; Get the feature flags
mfsprg r13,2 ; Restore R13
mtsprg 2,r11 ; Set the feature flags
mfsprg r11,3 ; Restore R11
mfspr r14,hsprg0 ; Restore R14
rfid ; Back to our guy...
+;
+; Quickly cut a trace table entry for the CutTrace firmware call.
+;
+; All registers except R11 and R13 are unchanged.
+;
+; Note that this code cuts a trace table entry for the CutTrace call only.
+; An identical entry is made during normal interrupt processing. Any entry
+; format changes made must be done in both places.
+;
+
+ .align 5
+
+ .globl EXT(uft_cuttrace)
+LEXT(uft_cuttrace)
+uftCutTrace:
+ b uftct64 ; patched to NOP if 32-bit processor
+
+ stw r20,tempr0(r11) ; Save some work registers
+ lwz r20,dgFlags(0) ; Get the flags
+ stw r21,tempr1(r11) ; Save some work registers
+ mfsrr1 r21 ; Get the SRR1
+ rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot
+ stw r25,tempr2(r11) ; Save some work registers
+ orc r20,r20,r21 ; Get ~PR | FC
+ mfcr r25 ; Save the CR
+ stw r22,tempr3(r11) ; Save some work registers
+ lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number
+ andi. r20,r20,MASK(MSR_PR) ; Set cr0_eq if we are in problem state and the validity bit is not set
+ stw r23,tempr4(r11) ; Save some work registers
+ lwz r23,traceMask(0) ; Get the trace mask
+ stw r24,tempr5(r11) ; Save some work registers
+ beq- ctbail32 ; Can not issue from user...
+
+
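Spelled out in C, the rlwinm/orc/andi. sequence above reduces to a two-input permission test (cuttrace_permitted is an illustrative name):

/* Sketch of the validity test: we bail to the normal syscall path
   exactly when the caller is in problem state (MSR[PR] set in SRR1)
   and the user-CutTrace enable bit in dgFlags is clear. The orc
   computes (enable | ~PR) at the PR bit position to decide this. */
static int cuttrace_permitted(int caller_is_user, int user_cuttrace_enabled)
{
    return user_cuttrace_enabled || !caller_is_user;
}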
+ addi r24,r22,16 ; Get shift to move cpu mask to syscall mask
+ rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask
+ and. r24,r24,r23 ; See if both are on
+
+;
+; We select a trace entry using a compare and swap on the next entry field.
+; Since we do not lock the actual trace buffer, there is a potential that
+; another processor could wrap and trash our entry. Who cares?
+;
+
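A minimal C11-atomics sketch of the lwarx/stwcx. slot claim below, under the same no-locking assumption (claim_trace_slot and its parameters are illustrative; entry_size corresponds to LTR_size):

#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

/* Claim the current trace entry by advancing the shared next-entry
   pointer, wrapping when it reaches the end of the table and retrying
   on collision. The buffer itself stays unlocked, so a wrapping writer
   can trash a claimed entry; as the comment above says, who cares? */
static uintptr_t claim_trace_slot(_Atomic uintptr_t *next_slot,
                                  uintptr_t table_start,
                                  uintptr_t table_end,
                                  size_t entry_size)
{
    uintptr_t mine, next;
    do {
        mine = atomic_load(next_slot);       /* lwarx: load with reservation */
        next = mine + entry_size;            /* point past this entry */
        if (next == table_end)               /* fell off the end? */
            next = table_start;              /* wrap back to the start */
    } while (!atomic_compare_exchange_weak(next_slot, &mine, next)); /* stwcx. */
    return mine;                             /* this entry is now ours */
}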
+ li r23,trcWork ; Get the trace work area address
+ lwz r21,traceStart(0) ; Get the start of trace table
+ lwz r22,traceEnd(0) ; Get end of trace table
+
+ beq-- ctdisa32 ; Leave because tracing is disabled...
+
+ctgte32: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
+ addi r24,r20,LTR_size ; Point to the next trace entry
+ cmplw r24,r22 ; Do we need to wrap the trace table?
+ bne+ ctgte32s ; No wrap, we got us a trace entry...
+
+ mr r24,r21 ; Wrap back to start
+
+ctgte32s: stwcx. r24,0,r23 ; Try to update the current pointer
+ bne- ctgte32 ; Collision, try again...
+
+#if ESPDEBUG
+ dcbf 0,r23 ; Force to memory
+ sync
+#endif
+
+ dcbz 0,r20 ; Clear and allocate first trace line
+ li r24,32 ; Offset to next line
+
+ctgte32tb: mftbu r21 ; Get the upper time now
+ mftb r22 ; Get the lower time now
+ mftbu r23 ; Get upper again
+ cmplw r21,r23 ; Has it ticked?
+ bne- ctgte32tb ; Yes, start again...
+
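The mftbu/mftb/mftbu loop above is the standard 32-bit PowerPC way to read the 64-bit timebase consistently; a C sketch, where read_tbu/read_tbl stand in for the instructions:

#include <stdint.h>

extern uint32_t read_tbu(void);   /* stand-in for mftbu */
extern uint32_t read_tbl(void);   /* stand-in for mftb */

/* Re-read until the upper half is stable: if it ticked between the two
   reads, the lower half wrapped and the pair is inconsistent. */
static uint64_t read_timebase(void)
{
    uint32_t hi, lo;
    do {
        hi = read_tbu();
        lo = read_tbl();
    } while (read_tbu() != hi);
    return ((uint64_t)hi << 32) | lo;
}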
+ dcbz r24,r20 ; Clean second line
+
+;
+; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for the CutTrace call only.
+; An identical entry is made during normal interrupt processing. Any entry
+; format changes made must be done in both places.
+;
+
+ lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number
+ li r23,T_SYSTEM_CALL ; Get the system call id
+ mtctr r13 ; Restore the caller's CTR
+ sth r24,LTR_cpu(r20) ; Save processor number
+ li r24,64 ; Offset to third line
+ sth r23,LTR_excpt(r20) ; Set the exception code
+ dcbz r24,r20 ; Clean 3rd line
+ mfspr r23,dsisr ; Get the DSISR
+ stw r21,LTR_timeHi(r20) ; Save top of time stamp
+ li r24,96 ; Offset to fourth line
+ mflr r21 ; Get the LR
+ dcbz r24,r20 ; Clean 4th line
+ stw r22,LTR_timeLo(r20) ; Save bottom of time stamp
+ mfsrr0 r22 ; Get SRR0
+ stw r25,LTR_cr(r20) ; Save CR
+ mfsrr1 r24 ; Get the SRR1
+ stw r23,LTR_dsisr(r20) ; Save DSISR
+ stw r22,LTR_srr0+4(r20) ; Save SRR0
+ mfdar r23 ; Get DAR
+ stw r24,LTR_srr1+4(r20) ; Save SRR1
+ stw r23,LTR_dar+4(r20) ; Save DAR
+ stw r21,LTR_lr+4(r20) ; Save LR
+
+ stw r13,LTR_ctr+4(r20) ; Save CTR
+ stw r0,LTR_r0+4(r20) ; Save register
+ stw r1,LTR_r1+4(r20) ; Save register
+ stw r2,LTR_r2+4(r20) ; Save register
+ stw r3,LTR_r3+4(r20) ; Save register
+ stw r4,LTR_r4+4(r20) ; Save register
+ stw r5,LTR_r5+4(r20) ; Save register
+ stw r6,LTR_r6+4(r20) ; Save register
+
+#if 0
+ lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner
+ stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
+#endif
+
+#if ESPDEBUG
+ addi r21,r20,32 ; Second line
+ addi r22,r20,64 ; Third line
+ dcbst 0,r20 ; Force to memory
+ dcbst 0,r21 ; Force to memory
+ addi r21,r22,32 ; Fourth line
+ dcbst 0,r22 ; Force to memory
+ dcbst 0,r21 ; Force to memory
+ sync ; Make sure it all goes
+#endif
+
+ctdisa32: mtcrf 0x80,r25 ; Restore the used condition register field
+ lwz r20,tempr0(r11) ; Restore work register
+ lwz r21,tempr1(r11) ; Restore work register
+ lwz r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ lwz r22,tempr3(r11) ; Restore work register
+ lwz r23,tempr4(r11) ; Restore work register
+ lwz r24,tempr5(r11) ; Restore work register
+ b uftX32 ; Go restore the rest and go...
+
+ctbail32: mtcrf 0x80,r25 ; Restore the used condition register field
+ lwz r20,tempr0(r11) ; Restore work register
+ lwz r21,tempr1(r11) ; Restore work register
+ lwz r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ lwz r22,tempr3(r11) ; Restore work register
+ lwz r23,tempr4(r11) ; Restore work register
+ b uftNormalSyscall ; Go pass it on along...
+
+;
+; This is the 64-bit version.
+;
+
+uftct64: std r20,tempr0(r11) ; Save some work registers
+ lwz r20,dgFlags(0) ; Get the flags
+ std r21,tempr1(r11) ; Save some work registers
+ mfsrr1 r21 ; Get the SRR1
+ rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot
+ std r25,tempr2(r11) ; Save some work registers
+ orc r20,r20,r21 ; Get ~PR | FC
+ mfcr r25 ; Save the CR
+ std r22,tempr3(r11) ; Save some work registers
+ lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number
+ andi. r20,r20,MASK(MSR_PR) ; Set cr0_eq when we are in problem state and the validity bit is not set
+ std r23,tempr4(r11) ; Save some work registers
+ lwz r23,traceMask(0) ; Get the trace mask
+ std r24,tempr5(r11) ; Save some work registers
+ beq-- ctbail64 ; Can not issue from user...
+
+ addi r24,r22,16 ; Get shift to move cpu mask to syscall mask
+ rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask
+ and. r24,r24,r23 ; See if both are on
+
+;
+; We select a trace entry using a compare and swap on the next entry field.
+; Since we do not lock the actual trace buffer, there is a potential that
+; another processor could wrap and trash our entry. Who cares?
+;
+
+ li r23,trcWork ; Get the trace work area address
+ lwz r21,traceStart(0) ; Get the start of trace table
+ lwz r22,traceEnd(0) ; Get end of trace table
+
+ beq-- ctdisa64 ; Leave because tracing is disabled...
-; Handle a system call that is not a UFT and which thus goes upstairs.
+ctgte64: lwarx r20,0,r23 ; Get and reserve the next slot to allocate
+ addi r24,r20,LTR_size ; Point to the next trace entry
+ cmplw r24,r22 ; Do we need to wrap the trace table?
+ bne++ ctgte64s ; No wrap, we got us a trace entry...
+
+ mr r24,r21 ; Wrap back to start
-uftNormalFF: ; here with entire cr in r13
- mtcr r13 ; restore all 8 fields
+ctgte64s: stwcx. r24,0,r23 ; Try to update the current pointer
+ bne-- ctgte64 ; Collision, try again...
+
+#if ESPDEBUG
+ dcbf 0,r23 ; Force to memory
+ sync
+#endif
+
+ dcbz128 0,r20 ; Zap the trace entry
+
+ mftb r21 ; Get the time
+
+;
+; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for the CutTrace call only.
+; An identical entry is made during normal interrupt processing. Any entry
+; format changes made must be done in both places.
+;
+
+ lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number
+ li r23,T_SYSTEM_CALL ; Get the system call id
+ sth r24,LTR_cpu(r20) ; Save processor number
+ sth r23,LTR_excpt(r20) ; Set the exception code
+ mfspr r23,dsisr ; Get the DSISR
+ std r21,LTR_timeHi(r20) ; Save top of time stamp
+ mflr r21 ; Get the LR
+ mfsrr0 r22 ; Get SRR0
+ stw r25,LTR_cr(r20) ; Save CR
+ mfsrr1 r24 ; Get the SRR1
+ stw r23,LTR_dsisr(r20) ; Save DSISR
+ std r22,LTR_srr0(r20) ; Save SRR0
+ mfdar r23 ; Get DAR
+ std r24,LTR_srr1(r20) ; Save SRR1
+ std r23,LTR_dar(r20) ; Save DAR
+ std r21,LTR_lr(r20) ; Save LR
+
+ std r13,LTR_ctr(r20) ; Save CTR
+ std r0,LTR_r0(r20) ; Save register
+ std r1,LTR_r1(r20) ; Save register
+ std r2,LTR_r2(r20) ; Save register
+ std r3,LTR_r3(r20) ; Save register
+ std r4,LTR_r4(r20) ; Save register
+ std r5,LTR_r5(r20) ; Save register
+ std r6,LTR_r6(r20) ; Save register
+
+#if 0
+ lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner
+ stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
+#endif
+
+#if ESPDEBUG
+ dcbf 0,r20 ; Force to memory
+ sync ; Make sure it all goes
+#endif
+
+ctdisa64: mtcrf 0x80,r25 ; Restore the used condition register field
+ ld r20,tempr0(r11) ; Restore work register
+ ld r21,tempr1(r11) ; Restore work register
+ ld r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ ld r22,tempr3(r11) ; Restore work register
+ ld r23,tempr4(r11) ; Restore work register
+ ld r24,tempr5(r11) ; Restore work register
+ b uftX64 ; Go restore the rest and go...
+
+ctbail64: mtcrf 0x80,r25 ; Restore the used condition register field
+ ld r20,tempr0(r11) ; Restore work register
+ ld r21,tempr1(r11) ; Restore work register
+ ld r25,tempr2(r11) ; Restore work register
+ mtctr r13 ; Restore the caller's CTR
+ ld r22,tempr3(r11) ; Restore work register
+ ld r23,tempr4(r11) ; Restore work register
+ li r11,T_SYSTEM_CALL|T_FAM ; Set system call code
+ b extEntry64 ; Go straight to the 64-bit code...
+
+
+
+; Handle a system call that is not a UFT and which thus goes upstairs.
+
+uftNormalFF: ; here with entire cr in r13
+ mtcr r13 ; restore all 8 fields
b uftNormalSyscall1 ; Join common...
-
-uftNormal80: ; here with callers cr0 in r13
- mtcrf 0x80,r13 ; restore cr0
+
+uftNormal80: ; here with callers cr0 in r13
+ mtcrf 0x80,r13 ; restore cr0
b uftNormalSyscall1 ; Join common...
-
-uftNormalSyscall: ; r13 = callers ctr
- mtctr r13 ; restore ctr
+
+uftNormalSyscall: ; r13 = callers ctr
+ mtctr r13 ; restore ctr
uftNormalSyscall1:
- li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through)
+ li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through)
/*<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>*/
 * misses, so these stores won't take all that long. The exception is the first line, because
 * we can't do a DCBZ if the L1 D-cache is off. We will skip the rest of the lines as well if
 * the cache is off.
- *
+ *
* Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
* are ignored.
*/
lwz r25,traceMask(0) ; Get the trace mask
li r0,SAVgeneral ; Get the savearea type value
lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
- rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
+ rlwinm r22,r11,30,0,31 ; Divide interrupt code by 4
stb r0,SAVflags+2(r13) ; Mark valid context
addi r22,r22,10 ; Adjust code so we shift into CR5
li r23,trcWork ; Get the trace work area address
;
; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for everything but the CutTrace call.
+; An identical entry is made during normal CutTrace processing. Any entry
+; format changes made must be done in both places.
;
lwz r16,ruptStamp(r2) ; Get top of time base
addi r22,r20,LTR_size ; Point to the next trace entry
cmplw r22,r26 ; Do we need to wrap the trace table?
- bne+ gotTrcEntSF ; No wrap, we got us a trace entry...
+ bne++ gotTrcEntSF ; No wrap, we got us a trace entry...
mr r22,r25 ; Wrap back to start
;
; Let us cut that trace entry now.
+;
+; Note that this code cuts a trace table entry for everything but the CutTrace call.
+; An identical entry is made during normal CutTrace processing. Any entry
+; format changes made must be done in both places.
;
dcbz128 0,r20 ; Zap the trace entry
std r13,LTR_save(r20) ; Save the savearea
stw r17,LTR_dsisr(r20) ; Save the DSISR
sth r11,LTR_excpt(r20) ; Save the exception type
+#if 0
+ lwz r17,FPUowner(r2) ; (TEST/DEBUG) Get the current floating point owner
+ stw r17,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner
+#endif
#if ESPDEBUG
dcbf 0,r20 ; Force to memory
"Corrupt skip lists", /* failSkipLists */
"Unaligned stack", /* failUnalignedStk */
"Invalid pmap", /* failPmap */
+ "Lock timeout", /* failTimeout */
"Unknown failure code" /* Unknown failure code - must always be last */
};
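The must-always-be-last rule for the final entry lets out-of-range codes be clamped when the string is looked up; a hedged sketch, assuming for illustration that the array is named failNames:

/* Hypothetical lookup: clamp unrecognized failure codes to the final
   "Unknown failure code" entry, which is why that entry must stay last. */
static const char *failure_name(unsigned int code)
{
    unsigned int last = (sizeof failNames / sizeof failNames[0]) - 1;
    return failNames[(code > last) ? last : code];
}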
* at the base of the kernel stack (see stack_attach()).
*/
- thread->machine.upcb = sv; /* Set user pcb */
+ thread->machine.upcb = sv; /* Set user pcb */
sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET; /* Set the default user MSR */
if(task_has_64BitAddr(task)) sv->save_srr1 |= (uint64_t)MASK32(MSR_SF) << 32; /* If 64-bit task, force 64-bit mode */
sv->save_fpscr = 0; /* Clear all floating point exceptions */
register savearea_fpu *fsv, *fpsv;
register savearea *svp;
register int i;
+ boolean_t intr;
/*
* This function will release all context.
* Walk through and release all floating point and vector contexts. Also kill live context.
*
*/
+
+ intr = ml_set_interrupts_enabled(FALSE); /* Disable for interruptions */
- toss_live_vec(thread->machine.curctx); /* Dump live vectors */
+ toss_live_vec(thread->machine.curctx); /* Dump live vectors */
- vsv = thread->machine.curctx->VMXsave; /* Get the top vector savearea */
+ vsv = thread->machine.curctx->VMXsave; /* Get the top vector savearea */
while(vsv) { /* Any VMX saved state? */
vpsv = vsv; /* Remember so we can toss this */
save_release((savearea *)vpsv); /* Release it */
}
- thread->machine.curctx->VMXsave = 0; /* Kill chain */
+ thread->machine.curctx->VMXsave = 0; /* Kill chain */
- toss_live_fpu(thread->machine.curctx); /* Dump live float */
+ toss_live_fpu(thread->machine.curctx); /* Dump live float */
- fsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */
+ fsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */
while(fsv) { /* Any float saved state? */
fpsv = fsv; /* Remember so we can toss this */
save_release((savearea *)fpsv); /* Release it */
}
- thread->machine.curctx->FPUsave = 0; /* Kill chain */
+ thread->machine.curctx->FPUsave = 0; /* Kill chain */
/*
* free all regular saveareas.
*/
- pcb = thread->machine.pcb; /* Get the general savearea */
+ pcb = thread->machine.pcb; /* Get the general savearea */
- while(pcb) { /* Any float saved state? */
+ while(pcb) { /* Any general saved state? */
ppsv = pcb; /* Remember so we can toss this */
}
hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4); /* Unaccount for the number of saveareas we think we "need" */
+
+ (void) ml_set_interrupts_enabled(intr); /* Restore interrupts if enabled */
+
}
/*
* release saveareas associated with an act. if flag is true, release
* user level savearea(s) too, else don't
*
- * this code cannot block so we call the proper save area free routine
+ * This code must run with interruptions disabled because an interrupt handler could use
+ * floating point and/or vectors. If this happens and the thread we are blowing off owns
+ * the facility, we can deadlock.
*/
void
act_machine_sv_free(thread_t act)
register savearea_fpu *fsv, *fpst, *fsvt;
register savearea *svp;
register int i;
+ boolean_t intr;
/*
* This function will release all non-user state context.
* Then we unlock. Next, all of the old kernel contexts are released.
*
*/
-
+
+ intr = ml_set_interrupts_enabled(FALSE); /* Disable for interruptions */
+
if(act->machine.curctx->VMXlevel) { /* Is the current level user state? */
toss_live_vec(act->machine.curctx); /* Dump live vectors if is not user */
-
- vsv = act->machine.curctx->VMXsave; /* Get the top vector savearea */
- while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Find user context if any */
-
if(!hw_lock_to((hw_lock_t)&act->machine.curctx->VMXsync, LockTimeOut)) { /* Get the sync lock */
panic("act_machine_sv_free - timeout getting VMX sync lock\n"); /* Tell all and die */
}
+
+ vsv = act->machine.curctx->VMXsave; /* Get the top vector savearea */
+ while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Find user context if any */
- vsvt = act->machine.curctx->VMXsave; /* Get the top of the chain */
+ vsvt = act->machine.curctx->VMXsave; /* Get the top of the chain */
act->machine.curctx->VMXsave = vsv; /* Point to the user context */
- act->machine.curctx->VMXlevel = 0; /* Set the level to user */
+ act->machine.curctx->VMXlevel = 0; /* Set the level to user */
hw_lock_unlock((hw_lock_t)&act->machine.curctx->VMXsync); /* Unlock */
while(vsvt) { /* Clear any VMX saved state */
toss_live_fpu(act->machine.curctx); /* Dump live floats if is not user */
- fsv = act->machine.curctx->FPUsave; /* Get the top floats savearea */
-
- while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Find user context if any */
-
if(!hw_lock_to((hw_lock_t)&act->machine.curctx->FPUsync, LockTimeOut)) { /* Get the sync lock */
panic("act_machine_sv_free - timeout getting FPU sync lock\n"); /* Tell all and die */
}
- fsvt = act->machine.curctx->FPUsave; /* Get the top of the chain */
+ fsv = act->machine.curctx->FPUsave; /* Get the top floats savearea */
+ while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Find user context if any */
+
+ fsvt = act->machine.curctx->FPUsave; /* Get the top of the chain */
act->machine.curctx->FPUsave = fsv; /* Point to the user context */
- act->machine.curctx->FPUlevel = 0; /* Set the level to user */
+ act->machine.curctx->FPUlevel = 0; /* Set the level to user */
hw_lock_unlock((hw_lock_t)&act->machine.curctx->FPUsync); /* Unlock */
- while(fsvt) { /* Clear any VMX saved state */
+ while(fsvt) { /* Clear any FPU saved state */
}
act->machine.pcb = userpcb; /* Chain in the user if there is one, or 0 if not */
-
+ (void) ml_set_interrupts_enabled(intr); /* Restore interrupts if enabled */
+
}
void
machine_act_terminate(
thread_t act)
{
- if(act->machine.bbDescAddr) { /* Check if the Blue box assist is active */
+ if(act->machine.bbDescAddr) { /* Check if the Blue box assist is active */
disable_bluebox_internal(act); /* Kill off bluebox */
}
- if(act->machine.vmmControl) { /* Check if VMM is active */
+ if(act->machine.vmmControl) { /* Check if VMM is active */
vmm_tear_down_all(act); /* Kill off all VMM contexts */
}
}
if (branch_tracing_enabled())
ppinfo->cpu_flags |= traceBE;
- if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old, (unsigned int)new, 0); /* Cut trace entry if tracing */
+ if(trcWork.traceMask) dbgTrace(0x9903, (unsigned int)old, (unsigned int)new, 0, 0); /* Cut trace entry if tracing */
return;
}
extern unsigned int xsum_nop_if_32bit;
extern unsigned int uft_nop_if_32bit;
extern unsigned int uft_uaw_nop_if_32bit;
+extern unsigned int uft_cuttrace;
int forcenap = 0;
int wcte = 0; /* Non-cache gather timer disabled */
{&xsum_nop_if_32bit, 0x60000000, PATCH_FEATURE, PatchExt32},
{&uft_nop_if_32bit, 0x60000000, PATCH_FEATURE, PatchExt32},
{&uft_uaw_nop_if_32bit, 0x60000000, PATCH_FEATURE, PatchExt32},
+ {&uft_cuttrace, 0x60000000, PATCH_FEATURE, PatchExt32},
{NULL, 0x00000000, PATCH_END_OF_TABLE, 0}
};
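For context, 0x60000000 is the PowerPC no-op encoding (ori 0,0,0); the PATCH_FEATURE/PatchExt32 entries turn the 64-bit-only branches (the various *_nop_if_32bit sites and now uft_cuttrace) into nops on 32-bit processors. A hedged sketch of how one entry might be applied at boot (the struct and function names are illustrative; the real patcher lives elsewhere in the kernel):

struct patch_entry_sketch {
    unsigned int *addr;       /* instruction to patch, e.g. &uft_cuttrace */
    unsigned int  new_inst;   /* replacement word: 0x60000000 is nop */
    int           type;       /* PATCH_FEATURE or PATCH_END_OF_TABLE */
    unsigned int  flags;      /* e.g. PatchExt32 */
};

static void apply_feature_patch(struct patch_entry_sketch *pe, int cpu_is_64bit)
{
    if (pe->type == PATCH_FEATURE && !cpu_is_64bit) {  /* PatchExt32 case */
        *pe->addr = pe->new_inst;  /* overwrite the instruction word */
        /* the real kernel must also flush the patched line to memory and
           invalidate the I-cache before the instruction can execute */
    }
}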
thread = current_thread();
- toss_live_fpu(thread->machine.curctx); /* Toss my floating point if live anywhere */
- toss_live_vec(thread->machine.curctx); /* Toss my vector if live anywhere */
+ act_machine_sv_free(thread); /* Blow away any current kernel FP or vector.
+ We do not support those across a vfork */
+ toss_live_fpu(thread->machine.curctx); /* Toss my floating point if live anywhere */
+ toss_live_vec(thread->machine.curctx); /* Toss my vector if live anywhere */
sv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */
sv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */
sv->save_hdr.save_act = thread;
- spc = (unsigned int)thread->map->pmap->space; /* Get the space we're in */
+ spc = (unsigned int)thread->map->pmap->space; /* Get the space we're in */
- osv = thread->machine.pcb; /* Get the top general savearea */
+ osv = thread->machine.pcb; /* Get the top general savearea */
psv = 0;
while(osv) { /* Any saved state? */
if(osv->save_srr1 & MASK(MSR_PR)) break; /* Leave if this is user state */
if(ovsv) { /* Did we find one? */
if(pvsv) pvsv->save_hdr.save_prev = 0; /* Yes, clear pointer to it (it should always be last) or */
- else thread->machine.curctx->VMXsave = 0; /* to the start if the only one */
+ else thread->machine.curctx->VMXsave = 0; /* to the start if the only one */
save_release((savearea *)ovsv); /* Nope, release it */
}
if(vsv) { /* Are we sticking any vector on this one? */
if(pvsv) pvsv->save_hdr.save_prev = (addr64_t)((uintptr_t)vsv); /* Yes, chain us to the end or */
- else thread->machine.curctx->VMXsave = vsv; /* to the start if the only one */
+ else {
+ thread->machine.curctx->VMXsave = vsv; /* to the start if the only one */
+ thread->machine.curctx->VMXlevel = 0; /* Ensure that we don't have a leftover level */
+ }
vsv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */
vsv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */
vsv->save_hdr.save_act = thread;
}
- ofsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */
+ ofsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */
pfsv = 0;
while(ofsv) { /* Any float saved state? */
if(ofsv) { /* Did we find one? */
if(pfsv) pfsv->save_hdr.save_prev = 0; /* Yes, clear pointer to it (it should always be last) or */
- else thread->machine.curctx->FPUsave = 0; /* to the start if the only one */
+ else thread->machine.curctx->FPUsave = 0; /* to the start if the only one */
save_release((savearea *)ofsv); /* Nope, release it */
}
- if(fsv) { /* Are we sticking any vector on this one? */
+ if(fsv) { /* Are we sticking any float on this one? */
if(pfsv) pfsv->save_hdr.save_prev = (addr64_t)((uintptr_t)fsv); /* Yes, chain us to the end or */
- else thread->machine.curctx->FPUsave = fsv; /* to the start if the only one */
+ else {
+ thread->machine.curctx->FPUsave = fsv; /* to the start if the only one */
+ thread->machine.curctx->FPUlevel = 0; /* Ensure that we don't have a leftover level */
+ }
fsv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */
fsv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */