From b36670cedae0009469e8ee117453de831de64a6b Mon Sep 17 00:00:00 2001 From: Apple Date: Mon, 16 May 2005 22:30:03 +0000 Subject: [PATCH] xnu-792.1.5.tar.gz --- bsd/hfs/hfs_catalog.c | 3 + bsd/hfs/hfs_catalog.h | 9 +- bsd/hfs/hfs_chash.c | 1 + bsd/hfs/hfs_cnode.c | 2 +- bsd/hfs/hfs_cnode.h | 2 +- bsd/hfs/hfs_readwrite.c | 6 +- bsd/hfs/hfs_search.c | 7 +- bsd/hfs/hfs_vfsutils.c | 120 ++-- bsd/hfs/hfs_vnops.c | 5 +- bsd/kern/kern_descrip.c | 7 +- bsd/kern/kern_exit.c | 49 +- bsd/kern/kern_fork.c | 1 + bsd/kern/kpi_socketfilter.c | 2 +- bsd/net/dlil.c | 6 +- bsd/net/kpi_protocol.c | 2 +- bsd/nfs/nfs_vfsops.c | 2 +- bsd/sys/proc_internal.h | 8 +- config/MasterVersion | 2 +- .../TestSerialization/test1/test1_main.cpp | 66 -- osfmk/ipc/ipc_kmsg.c | 2 +- osfmk/ppc/Firmware.h | 5 +- osfmk/ppc/Firmware.s | 6 + osfmk/ppc/cswtch.s | 634 +++++++++++------ osfmk/ppc/exception.h | 3 +- osfmk/ppc/hw_exception.s | 47 +- osfmk/ppc/hw_vm.s | 2 +- osfmk/ppc/lowmem_vectors.s | 639 ++++++++++++++---- osfmk/ppc/model_dep.c | 1 + osfmk/ppc/pcb.c | 62 +- osfmk/ppc/ppc_init.c | 2 + osfmk/ppc/status.c | 26 +- 31 files changed, 1187 insertions(+), 542 deletions(-) diff --git a/bsd/hfs/hfs_catalog.c b/bsd/hfs/hfs_catalog.c index 64dd3c8fb..1b05373d5 100644 --- a/bsd/hfs/hfs_catalog.c +++ b/bsd/hfs/hfs_catalog.c @@ -2751,6 +2751,9 @@ getbsdattr(struct hfsmount *hfsmp, const struct HFSPlusCatalogFile *crp, struct /* get total blocks (both forks) */ attrp->ca_blocks = crp->dataFork.totalBlocks + crp->resourceFork.totalBlocks; attrp->ca_attrblks = crp->attrBlocks; + /* On HFS+ the ThreadExists flag must always be set. 
*/ + if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) + attrp->ca_recflags |= kHFSThreadExistsMask; } attrp->ca_fileid = crp->fileID; diff --git a/bsd/hfs/hfs_catalog.h b/bsd/hfs/hfs_catalog.h index 991478558..63c2fe994 100644 --- a/bsd/hfs/hfs_catalog.h +++ b/bsd/hfs/hfs_catalog.h @@ -118,13 +118,20 @@ struct cat_fork { * */ struct directoryhint { - SLIST_ENTRY(directoryhint) dh_link; /* chain */ + TAILQ_ENTRY(directoryhint) dh_link; /* chain */ int dh_index; /* index into directory (zero relative) */ u_int32_t dh_time; struct cat_desc dh_desc; /* entry's descriptor */ }; typedef struct directoryhint directoryhint_t; +/* + * HFS_MAXDIRHINTS cannot be larger than 63 without reducing + * HFS_INDEX_BITS, because given the 6-bit tag, at most 63 different + * tags can exist. When HFS_MAXDIRHINTS is larger than 63, the same + * list may contain dirhints of the same tag, and a staled dirhint may + * be returned. + */ #define HFS_MAXDIRHINTS 32 #define HFS_DIRHINT_TTL 45 diff --git a/bsd/hfs/hfs_chash.c b/bsd/hfs/hfs_chash.c index 1cbaf8186..a317afe81 100644 --- a/bsd/hfs/hfs_chash.c +++ b/bsd/hfs/hfs_chash.c @@ -347,6 +347,7 @@ loop_with_lock: SET(ncp->c_hflag, H_ALLOC); ncp->c_fileid = inum; ncp->c_dev = dev; + TAILQ_INIT(&ncp->c_hintlist); /* make the list empty */ lck_rw_init(&ncp->c_rwlock, hfs_rwlock_group, hfs_lock_attr); if (!skiplock) diff --git a/bsd/hfs/hfs_cnode.c b/bsd/hfs/hfs_cnode.c index 1fb30d020..8351989ed 100644 --- a/bsd/hfs/hfs_cnode.c +++ b/bsd/hfs/hfs_cnode.c @@ -496,7 +496,7 @@ hfs_getnewvnode( lck_rw_init(&cp->c_truncatelock, hfs_rwlock_group, hfs_lock_attr); /* Make sure its still valid (ie exists on disk). */ - if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) { + if (!hfs_valid_cnode(hfsmp, dvp, (wantrsrc ? 
NULL : cnp), cp->c_fileid)) { hfs_chash_abort(cp); hfs_reclaim_cnode(cp); *vpp = NULL; diff --git a/bsd/hfs/hfs_cnode.h b/bsd/hfs/hfs_cnode.h index 64d2fd70d..7f6fa30e1 100644 --- a/bsd/hfs/hfs_cnode.h +++ b/bsd/hfs/hfs_cnode.h @@ -94,7 +94,7 @@ struct cnode { u_long c_childhint; /* catalog hint for children */ struct cat_desc c_desc; /* cnode's descriptor */ struct cat_attr c_attr; /* cnode's attributes */ - SLIST_HEAD(hfs_hinthead, directoryhint) c_hintlist; /* directory hint list */ + TAILQ_HEAD(hfs_hinthead, directoryhint) c_hintlist; /* directory hint list */ int16_t c_dirhinttag; /* directory hint tag */ union { int16_t cu_dirhintcnt; /* directory hint count */ diff --git a/bsd/hfs/hfs_readwrite.c b/bsd/hfs/hfs_readwrite.c index 3a54712da..46f8e54e5 100644 --- a/bsd/hfs/hfs_readwrite.c +++ b/bsd/hfs/hfs_readwrite.c @@ -1228,11 +1228,7 @@ hfs_vnop_ioctl( struct vnop_ioctl_args /* { hfs_unlock(VTOC(vp)); if (vnode_vtype(vp) == VDIR) { - myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_SEARCH, &my_context); - if (myErr) { - // try again with just read-access - myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, &my_context); - } + myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context); } else { myErr = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, &my_context); } diff --git a/bsd/hfs/hfs_search.c b/bsd/hfs/hfs_search.c index 930f9776c..0d013ced4 100644 --- a/bsd/hfs/hfs_search.c +++ b/bsd/hfs/hfs_search.c @@ -601,8 +601,11 @@ CheckAccess(ExtendedVCB *theVCBPtr, u_long searchBits, CatalogKey *theKeyPtr, st myNodeID = cp->c_parentcnid; /* move up the hierarchy */ hfs_unlock(VTOC(vp)); - myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH), &my_context); - //myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context); + if (vp->v_type == VDIR) { + myErr = vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH | KAUTH_VNODE_LIST_DIRECTORY), &my_context); + } else { + myErr = 
vnode_authorize(vp, NULL, (KAUTH_VNODE_SEARCH), &my_context); + } vnode_put(vp); vp = NULL; if ( myErr ) { diff --git a/bsd/hfs/hfs_vfsutils.c b/bsd/hfs/hfs_vfsutils.c index c35236e69..3dfe383b6 100644 --- a/bsd/hfs/hfs_vfsutils.c +++ b/bsd/hfs/hfs_vfsutils.c @@ -1540,45 +1540,57 @@ directoryhint_t * hfs_getdirhint(struct cnode *dcp, int index) { struct timeval tv; - directoryhint_t *hint, *next, *oldest; + directoryhint_t *hint; + boolean_t need_remove, need_init; char * name; - oldest = NULL; microuptime(&tv); - /* Look for an existing hint first */ - for(hint = dcp->c_hintlist.slh_first; hint != NULL; hint = next) { - next = hint->dh_link.sle_next; - if (hint->dh_index == index) { - goto out; - } else if (oldest == NULL || (hint->dh_time < oldest->dh_time)) { - oldest = hint; - } + /* + * Look for an existing hint first. If not found, create a new one (when + * the list is not full) or recycle the oldest hint. Since new hints are + * always added to the head of the list, the last hint is always the + * oldest. + */ + TAILQ_FOREACH(hint, &dcp->c_hintlist, dh_link) { + if (hint->dh_index == index) + break; } - /* Recycle one if we have too many already. 
*/ - if ((dcp->c_dirhintcnt >= HFS_MAXDIRHINTS) && (oldest != NULL)) { - hint = oldest; - if ((name = hint->dh_desc.cd_nameptr)) { - hint->dh_desc.cd_nameptr = NULL; - vfs_removename(name); + if (hint != NULL) { /* found an existing hint */ + need_init = false; + need_remove = true; + } else { /* cannot find an existing hint */ + need_init = true; + if (dcp->c_dirhintcnt < HFS_MAXDIRHINTS) { /* we don't need recycling */ + /* Create a default directory hint */ + MALLOC_ZONE(hint, directoryhint_t *, sizeof(directoryhint_t), M_HFSDIRHINT, M_WAITOK); + ++dcp->c_dirhintcnt; + need_remove = false; + } else { /* recycle the last (i.e., the oldest) hint */ + hint = TAILQ_LAST(&dcp->c_hintlist, hfs_hinthead); + if ((name = hint->dh_desc.cd_nameptr)) { + hint->dh_desc.cd_nameptr = NULL; + vfs_removename(name); + } + need_remove = true; } - goto init; - } - - /* Create a default directory hint */ - MALLOC_ZONE(hint, directoryhint_t *, sizeof(directoryhint_t), M_HFSDIRHINT, M_WAITOK); - SLIST_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link); - ++dcp->c_dirhintcnt; -init: - hint->dh_index = index; - hint->dh_desc.cd_flags = 0; - hint->dh_desc.cd_encoding = 0; - hint->dh_desc.cd_namelen = 0; - hint->dh_desc.cd_nameptr = NULL; - hint->dh_desc.cd_parentcnid = dcp->c_cnid; - hint->dh_desc.cd_hint = dcp->c_childhint; - hint->dh_desc.cd_cnid = 0; -out: + } + + if (need_remove) + TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link); + + TAILQ_INSERT_HEAD(&dcp->c_hintlist, hint, dh_link); + + if (need_init) { + hint->dh_index = index; + hint->dh_desc.cd_flags = 0; + hint->dh_desc.cd_encoding = 0; + hint->dh_desc.cd_namelen = 0; + hint->dh_desc.cd_nameptr = NULL; + hint->dh_desc.cd_parentcnid = dcp->c_cnid; + hint->dh_desc.cd_hint = dcp->c_childhint; + hint->dh_desc.cd_cnid = 0; + } hint->dh_time = tv.tv_sec; return (hint); } @@ -1592,22 +1604,16 @@ __private_extern__ void hfs_reldirhint(struct cnode *dcp, directoryhint_t * relhint) { - directoryhint_t *hint; char * name; - SLIST_FOREACH(hint, 
&dcp->c_hintlist, dh_link) { - if (hint == relhint) { - SLIST_REMOVE(&dcp->c_hintlist, hint, directoryhint, dh_link); - name = hint->dh_desc.cd_nameptr; - if (name != NULL) { - hint->dh_desc.cd_nameptr = NULL; - vfs_removename(name); - } - FREE_ZONE(hint, sizeof(directoryhint_t), M_HFSDIRHINT); - --dcp->c_dirhintcnt; - break; - } + TAILQ_REMOVE(&dcp->c_hintlist, relhint, dh_link); + name = relhint->dh_desc.cd_nameptr; + if (name != NULL) { + relhint->dh_desc.cd_nameptr = NULL; + vfs_removename(name); } + FREE_ZONE(relhint, sizeof(directoryhint_t), M_HFSDIRHINT); + --dcp->c_dirhintcnt; } /* @@ -1620,32 +1626,26 @@ void hfs_reldirhints(struct cnode *dcp, int stale_hints_only) { struct timeval tv; - directoryhint_t *hint, *next; + directoryhint_t *hint, *prev; char * name; if (stale_hints_only) microuptime(&tv); - else - tv.tv_sec = 0; - - for (hint = dcp->c_hintlist.slh_first; hint != NULL; hint = next) { - next = hint->dh_link.sle_next; - if (stale_hints_only) { - /* Skip over newer entries. 
*/ - if ((tv.tv_sec - hint->dh_time) < HFS_DIRHINT_TTL) - continue; - SLIST_REMOVE(&dcp->c_hintlist, hint, directoryhint, dh_link); - } + + /* searching from the oldest to the newest, so we can stop early when releasing stale hints only */ + for (hint = TAILQ_LAST(&dcp->c_hintlist, hfs_hinthead); hint != NULL; hint = prev) { + if (stale_hints_only && (tv.tv_sec - hint->dh_time) < HFS_DIRHINT_TTL) + break; /* stop here if this entry is too new */ name = hint->dh_desc.cd_nameptr; if (name != NULL) { hint->dh_desc.cd_nameptr = NULL; vfs_removename(name); } + prev = TAILQ_PREV(hint, hfs_hinthead, dh_link); /* must save this pointer before calling FREE_ZONE on this node */ + TAILQ_REMOVE(&dcp->c_hintlist, hint, dh_link); FREE_ZONE(hint, sizeof(directoryhint_t), M_HFSDIRHINT); --dcp->c_dirhintcnt; } - if (!stale_hints_only) - dcp->c_hintlist.slh_first = NULL; } diff --git a/bsd/hfs/hfs_vnops.c b/bsd/hfs/hfs_vnops.c index 873ff095c..eccf2c1e6 100644 --- a/bsd/hfs/hfs_vnops.c +++ b/bsd/hfs/hfs_vnops.c @@ -2565,7 +2565,7 @@ hfs_vnop_readdir(ap) if ( localhint.dh_desc.cd_parentcnid == cp->c_cnid) { localhint.dh_index = index - 1; localhint.dh_time = 0; - localhint.dh_link.sle_next = 0; + bzero(&localhint.dh_link, sizeof(localhint.dh_link)); dirhint = &localhint; /* don't forget to release the descriptor */ } else { cat_releasedesc(&localhint.dh_desc); @@ -2948,6 +2948,9 @@ hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, } attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime; attr.ca_atimeondisk = attr.ca_atime; + /* On HFS+ the ThreadExists flag must always be set for files. 
*/ + if (vnodetype != VDIR && (hfsmp->hfs_flags & HFS_STANDARD) == 0) + attr.ca_recflags = kHFSThreadExistsMask; attr.ca_uid = vap->va_uid; attr.ca_gid = vap->va_gid; diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c index b0c759539..331e24761 100644 --- a/bsd/kern/kern_descrip.c +++ b/bsd/kern/kern_descrip.c @@ -90,6 +90,7 @@ #include #include #include +#include struct psemnode; struct pshmnode; @@ -489,7 +490,7 @@ fcntl(p, uap, retval) error = EBADF; goto outdrop; } - p->p_flag |= P_ADVLOCK; + OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag); error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context); (void)vnode_put(vp); goto outdrop; @@ -500,7 +501,7 @@ fcntl(p, uap, retval) error = EBADF; goto outdrop; } - p->p_flag |= P_ADVLOCK; + OSBitOrAtomic(P_LADVLOCK, &p->p_ladvflag); error = VNOP_ADVLOCK(vp, (caddr_t)p, F_SETLK, &fl, flg, &context); (void)vnode_put(vp); goto outdrop; @@ -2265,7 +2266,7 @@ closef_locked(fp, fg, p) * If the descriptor was in a message, POSIX-style locks * aren't passed with the descriptor. */ - if (p && (p->p_flag & P_ADVLOCK) && fg->fg_type == DTYPE_VNODE) { + if (p && (p->p_ladvflag & P_LADVLOCK) && fg->fg_type == DTYPE_VNODE) { proc_fdunlock(p); lf.l_whence = SEEK_SET; diff --git a/bsd/kern/kern_exit.c b/bsd/kern/kern_exit.c index cb515a0dd..3cc43dcce 100644 --- a/bsd/kern/kern_exit.c +++ b/bsd/kern/kern_exit.c @@ -585,7 +585,8 @@ reap_child_process(struct proc *parent, struct proc *child) leavepgrp(child); LIST_REMOVE(child, p_list); /* off zombproc */ LIST_REMOVE(child, p_sibling); - child->p_flag &= ~P_WAITING; + child->p_lflag &= ~P_LWAITING; + wakeup(&child->p_stat); lck_mtx_destroy(&child->p_mlock, proc_lck_grp); lck_mtx_destroy(&child->p_fdmlock, proc_lck_grp); @@ -634,11 +635,11 @@ loop: /* XXX This is racy because we don't get the lock!!!! 
*/ - if (p->p_flag & P_WAITING) { + if (p->p_lflag & P_LWAITING) { (void)tsleep(&p->p_stat, PWAIT, "waitcoll", 0); goto loop; } - p->p_flag |= P_WAITING; /* only allow single thread to wait() */ + p->p_lflag |= P_LWAITING; /* only allow single thread to wait() */ if (p->p_stat == SZOMB) { retval[0] = p->p_pid; @@ -648,7 +649,7 @@ loop: uap->status, sizeof(status)); if (error) { - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (error); } @@ -672,18 +673,17 @@ loop: } /* information unavailable? */ if (error) { - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (error); } } /* Clean up */ - if (!reap_child_process(q, p)) - p->p_flag &= ~P_WAITING; - - /* Wake other wait'ers, if any */ - wakeup(&p->p_stat); + if (!reap_child_process(q, p)) { + p->p_lflag &= ~P_LWAITING; + wakeup(&p->p_stat); + } return (0); } @@ -698,11 +698,11 @@ loop: sizeof(status)); } else error = 0; - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (error); } - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); } if (nfound == 0) @@ -782,11 +782,11 @@ loop: * Wait collision; go to sleep and restart; used to maintain * the single return for waited process guarantee. */ - if (p->p_flag & P_WAITING) { + if (p->p_lflag & P_LWAITING) { (void)tsleep(&p->p_stat, PWAIT, "waitidcoll", 0); goto loop; } - p->p_flag |= P_WAITING; /* mark busy */ + p->p_lflag |= P_LWAITING; /* mark busy */ nfound++; @@ -823,7 +823,7 @@ loop: } /* information unavailable? */ if (error) { - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (error); } @@ -831,11 +831,10 @@ loop: /* Prevent other process for waiting for this event? 
*/ if (!(uap->options & WNOWAIT)) { /* Clean up */ - if (!reap_child_process(q, p)) - p->p_flag &= ~P_WAITING; - - /* Wake other wait'ers, if any */ - wakeup(&p->p_stat); + if (!reap_child_process(q, p)) { + p->p_lflag &= ~P_LWAITING; + wakeup(&p->p_stat); + } } return (0); @@ -886,7 +885,7 @@ loop: } /* information unavailable? */ if (error) { - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (error); } @@ -896,7 +895,7 @@ loop: p->p_flag |= P_WAITED; } - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (0); @@ -936,7 +935,7 @@ loop: } /* information unavailable? */ if (error) { - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (error); } @@ -946,7 +945,7 @@ loop: p->p_flag &= ~P_CONTINUED; } - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); return (0); @@ -955,7 +954,7 @@ loop: /* Not a process we are interested in; go on to next child */ - p->p_flag &= ~P_WAITING; + p->p_lflag &= ~P_LWAITING; wakeup(&p->p_stat); } diff --git a/bsd/kern/kern_fork.c b/bsd/kern/kern_fork.c index a993e3356..1754e8091 100644 --- a/bsd/kern/kern_fork.c +++ b/bsd/kern/kern_fork.c @@ -567,6 +567,7 @@ again: p2->p_vforkcnt = 0; p2->p_vforkact = 0; p2->p_lflag = 0; + p2->p_ladvflag = 0; TAILQ_INIT(&p2->p_uthlist); TAILQ_INIT(&p2->aio_activeq); TAILQ_INIT(&p2->aio_doneq); diff --git a/bsd/kern/kpi_socketfilter.c b/bsd/kern/kpi_socketfilter.c index 729f5fac1..5a5189517 100644 --- a/bsd/kern/kpi_socketfilter.c +++ b/bsd/kern/kpi_socketfilter.c @@ -150,7 +150,7 @@ sflt_data_in( int filtered = 0; int error = 0; - for (filter = so->so_filt; filter; + for (filter = so->so_filt; filter && (error == 0); filter = filter->sfe_next_onsocket) { if (filter->sfe_filter->sf_filter.sf_data_in) { if (filtered == 0) { diff --git a/bsd/net/dlil.c b/bsd/net/dlil.c index f69a1c9e0..5f766f6b7 100644 --- a/bsd/net/dlil.c +++ b/bsd/net/dlil.c @@ -230,6 +230,8 @@ __private_extern__ void 
link_rtrequest(int, struct rtentry *, struct sockaddr *) int dlil_expand_mcl; +extern u_int32_t inject_buckets; + static const u_int32_t dlil_writer_waiting = 0x80000000; static __inline__ void* @@ -713,9 +715,9 @@ dlil_input_thread_continue( } proto_input_run(); - + if (dlil_input_mbuf_head == NULL && - dlil_input_loop_head == NULL) { + dlil_input_loop_head == NULL && inject_buckets == 0) { assert_wait(&dlil_input_thread_wakeup, THREAD_UNINT); (void) thread_block(dlil_input_thread_continue); /* NOTREACHED */ diff --git a/bsd/net/kpi_protocol.c b/bsd/net/kpi_protocol.c index ad16db5c1..f1611a11e 100644 --- a/bsd/net/kpi_protocol.c +++ b/bsd/net/kpi_protocol.c @@ -58,7 +58,7 @@ struct proto_input_entry { static struct proto_input_entry *proto_hash[PROTO_HASH_SLOTS]; static struct proto_input_entry *proto_input_add_list; static lck_mtx_t *proto_input_lock = 0; -static u_int32_t inject_buckets = 0; +__private_extern__ u_int32_t inject_buckets = 0; extern thread_t dlil_input_thread_ptr; extern int dlil_input_thread_wakeup; diff --git a/bsd/nfs/nfs_vfsops.c b/bsd/nfs/nfs_vfsops.c index 6d72e61e0..944ce2be1 100644 --- a/bsd/nfs/nfs_vfsops.c +++ b/bsd/nfs/nfs_vfsops.c @@ -988,7 +988,7 @@ nfs_mount(mount_t mp, vnode_t vp, user_addr_t data, vfs_context_t context) } } - if (args.fhsize > NFSX_V3FHMAX) + if (args.fhsize < 0 || args.fhsize > NFSX_V3FHMAX) return (EINVAL); error = copyin(args.fh, (caddr_t)nfh, args.fhsize); if (error) diff --git a/bsd/sys/proc_internal.h b/bsd/sys/proc_internal.h index 6d7c06111..a286ac9f7 100644 --- a/bsd/sys/proc_internal.h +++ b/bsd/sys/proc_internal.h @@ -222,7 +222,8 @@ struct proc { unsigned int p_fdlock_pc[4]; unsigned int p_fdunlock_pc[4]; int p_fpdrainwait; - int p_lflag; /* local flags */ + unsigned int p_lflag; /* local flags */ + unsigned int p_ladvflag; /* local adv flags*/ #if DIAGNOSTIC #if SIGNAL_DEBUG unsigned int lockpc[8]; @@ -232,11 +233,16 @@ struct proc { }; +/* local flags */ #define P_LDELAYTERM 0x1 /* */ #define 
P_LNOZOMB 0x2 /* */ #define P_LLOW_PRI_IO 0x4 #define P_LPEXIT 0x8 #define P_LBACKGROUND_IO 0x10 +#define P_LWAITING 0x20 + +/* advisory flags in the proc */ +#define P_LADVLOCK 0x01 // LP64todo - should this move? /* LP64 version of extern_proc. all pointers diff --git a/config/MasterVersion b/config/MasterVersion index b87eb66eb..4925e1e8a 100644 --- a/config/MasterVersion +++ b/config/MasterVersion @@ -1,4 +1,4 @@ -8.0.0 +8.1.0 # The first line of this file contains the master version number for the kernel. # All other instances of the kernel version in xnu are derived from this file. diff --git a/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp b/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp index d9f86d6b2..44cc8e3b0 100644 --- a/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp +++ b/libkern/c++/Tests/TestSerialization/test1/test1_main.cpp @@ -33,69 +33,3 @@ __END_DECLS #include #include -char *testBuffer = " -{ string = \"this is a 'string' with spaces\"; - string2 = 'this is also a \"string\" with spaces'; - offset = 16384:32; - true = .true.; - false = .false.; - data = <0123 4567 89abcdef>; - array = (1:8, 2:16, 3:32, 4:64 ); - set = [ one, two, three, four ]; - emptydict = { }@1; - emptyarray = ( )@2; - emptyset = [ ]@3; - emptydata = < >@4; - emptydict2 = @1; - emptyarray2 = @2; - emptyset2 = @3; - emptydata2 = @4; - dict2 = { string = asdfasdf; }; - dict3 = { string = asdfasdf; }; -}@0"; - -kern_return_t -test1_start(struct kmod_info *ki, void *data) -{ - IOLog("test buffer start:\n%s\n:test buffer end.\n", testBuffer); - - // test unserialize - OSString *errmsg; - OSObject *d = OSUnserialize(testBuffer, &errmsg); - if (!d) { - IOLog("%s\n", errmsg->getCStringNoCopy()); - return KMOD_RETURN_SUCCESS; - } - - // test serialize - OSSerialize *s = OSSerialize::withCapacity(5); - if (!d->serialize(s)) { - IOLog("serialization failed\n"); - return KMOD_RETURN_SUCCESS; - } - - IOLog("serialized object's length = %d, capacity = %d\n", 
s->getLength(), s->getCapacity()); - IOLog("object unformatted = %s\n", s->text()); - - // try second time - OSObject *d2 = OSUnserializeXML(s->text(), &errmsg); - if (!d2) { - IOLog("%s\n", errmsg->getCStringNoCopy()); - return KMOD_RETURN_SUCCESS; - } - - IOLog("\nserialized objects compared %ssuccessfully objectwise\n\n", - d->isEqualTo(d2) ? "":"un"); - - if (d2) d2->release(); - s->release(); - if (d) d->release(); - - return KMOD_RETURN_SUCCESS; -} - -kern_return_t -test1_stop(struct kmod_info *ki, void *data) -{ - return KMOD_RETURN_SUCCESS; -} diff --git a/osfmk/ipc/ipc_kmsg.c b/osfmk/ipc/ipc_kmsg.c index 7e097c14d..5fde45534 100644 --- a/osfmk/ipc/ipc_kmsg.c +++ b/osfmk/ipc/ipc_kmsg.c @@ -739,7 +739,7 @@ ipc_kmsg_get( trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE; #ifdef ppc - if(trcWork.traceMask) dbgTrace((unsigned int)kmsg->ikm_header->msgh_id, + if(trcWork.traceMask) dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id, (unsigned int)kmsg->ikm_header->msgh_remote_port, (unsigned int)kmsg->ikm_header->msgh_local_port, 0); #endif diff --git a/osfmk/ppc/Firmware.h b/osfmk/ppc/Firmware.h index b0e2b922d..c724b2e1d 100644 --- a/osfmk/ppc/Firmware.h +++ b/osfmk/ppc/Firmware.h @@ -55,10 +55,11 @@ void fwEmMck(unsigned int, unsigned int, unsigned int, unsigned int, unsigned in void fwSCOM(scomcomm *); /* Read/Write SCOM */ void setPmon(unsigned int, unsigned int); /* Set perf mon stuff */ -extern void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4); +extern void dbgTrace(unsigned int id, unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4); #if 0 /* (TEST/DEBUG) - eliminate inline */ -extern __inline__ void dbgTrace(unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4) { +extern __inline__ void dbgTrace(unsigned int id, unsigned int item1, unsigned int item2, unsigned int item3, unsigned int item4) { + __asm__ volatile("mr r2,%0" : : "r" (id) : "r2"); 
__asm__ volatile("mr r3,%0" : : "r" (item1) : "r3"); __asm__ volatile("mr r4,%0" : : "r" (item2) : "r4"); __asm__ volatile("mr r5,%0" : : "r" (item3) : "r5"); diff --git a/osfmk/ppc/Firmware.s b/osfmk/ppc/Firmware.s index 960529bf8..736859122 100644 --- a/osfmk/ppc/Firmware.s +++ b/osfmk/ppc/Firmware.s @@ -332,6 +332,7 @@ LEXT(xLoadIBATsLL) /* * This is the glue to call the CutTrace firmware call + * dbgTrace(id, p1, p2, p3, p4) */ .align 5 @@ -339,8 +340,13 @@ LEXT(xLoadIBATsLL) LEXT(dbgTrace) + mr r2,r3 + mr r3,r4 lis r0,HIGH_ADDR(CutTrace) /* Top half of CreateFakeIO firmware call number */ + mr r4,r5 + mr r5,r6 ori r0,r0,LOW_ADDR(CutTrace) /* Bottom half */ + mr r6,r7 sc /* Do it to it */ blr /* Bye bye, Birdie... */ diff --git a/osfmk/ppc/cswtch.s b/osfmk/ppc/cswtch.s index cd131bf4f..96f051e16 100644 --- a/osfmk/ppc/cswtch.s +++ b/osfmk/ppc/cswtch.s @@ -32,7 +32,6 @@ #include #define FPVECDBG 0 -#define GDDBG 0 .text @@ -100,7 +99,10 @@ LEXT(Call_continuation) * Note that interrupts must be disabled before we get here (i.e., splsched) */ -/* Context switches are double jumps. We pass the following to the +/* + * Switch_context(old, continuation, new) + * + * Context switches are double jumps. We pass the following to the * context switch firmware call: * * R3 = switchee's savearea, virtual if continuation, low order physical for full switch @@ -154,11 +156,10 @@ notonintstack: lwz r11,SAVprev+4(r8) ; Get the previous of the switchee savearea ori r0,r0,lo16(CutTrace) ; Trace FW call beq++ cswNoTrc ; No trace today, dude... 
- mr r10,r3 ; Save across trace - mr r2,r3 ; Trace old activation - mr r3,r11 ; Trace prev savearea + + li r2,0x4400 ; Trace ID + mr r6,r11 ; Trace prev savearea sc ; Cut trace entry of context switch - mr r3,r10 ; Restore cswNoTrc: lwz r2,curctx(r5) ; Grab our current context pointer lwz r10,FPUowner(r12) ; Grab the owner of the FPU @@ -182,7 +183,7 @@ cswNoTrc: lwz r2,curctx(r5) ; Grab our current context pointer bne++ cswnofloat ; Level not the same, this is not live... cmplw r5,r0 ; Still owned by this cpu? - lwz r10,FPUsave(r2) ; Get the level + lwz r10,FPUsave(r2) ; Get the pointer to next saved context bne++ cswnofloat ; CPU claimed by someone else... mr. r10,r10 ; Is there a savearea here? @@ -268,6 +269,10 @@ cswnovect: li r0,0 ; Get set to release quickfret holdoff rlwinm r11,r8,0,0,19 ; Switch to savearea base lis r9,hi16(EXT(switch_in)) ; Get top of switch in routine lwz r5,savesrr0+4(r8) ; Set up the new SRR0 +; +; Note that the low-level code requires the R7 contain the high order half of the savearea's +; physical address. This is hack city, but it is the way it is. +; lwz r7,SACvrswap(r11) ; Get the high order V to R translation lwz r11,SACvrswap+4(r11) ; Get the low order V to R translation ori r9,r9,lo16(EXT(switch_in)) ; Bottom half of switch in @@ -407,33 +412,65 @@ noowneryet: oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) #endif mflr r2 ; Save the return address -fsretry: mr. r12,r12 ; Anyone own the FPU? + cmplw r3,r12 ; Is the specified context live? lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number - beq-- fsret ; Nobody owns the FPU, no save required... - - cmplw cr1,r3,r12 ; Is the specified context live? - - isync ; Force owner check first - - lwz r9,FPUcpu(r12) ; Get the cpu that context was last on - bne-- cr1,fsret ; No, it is not... + lwz r9,FPUcpu(r3) ; Get the cpu that context was last on + bne-- fsret ; Nobody owns the FPU, no save required... - cmplw cr1,r9,r11 ; Was the context for this processor? 
- beq-- cr1,fsgoodcpu ; Facility last used on this processor... + cmplw r9,r11 ; Was the context for this processor? + la r5,FPUsync(r3) ; Point to the sync word + bne-- fsret ; Facility not last used on this processor... - b fsret ; Someone else claimed it... +; +; It looks like we need to save this one. +; +; First, make sure that the live context block is not mucked with while +; we are trying to save it on out. Then we will give it the final check. +; + + lis r9,ha16(EXT(LockTimeOut)) ; Get the high part + mftb r8 ; Get the time now + lwz r9,lo16(EXT(LockTimeOut))(r9) ; Get the timeout value + b fssync0a ; Jump to the lock... .align 5 -fsgoodcpu: lwz r3,FPUsave(r12) ; Get the current FPU savearea for the thread +fssync0: li r7,lgKillResv ; Get killing field + stwcx. r7,0,r7 ; Kill reservation + +fssync0a: lwz r7,0(r5) ; Sniff the lock + mftb r10 ; Is it time yet? + cmplwi cr1,r7,0 ; Is it locked? + sub r10,r10,r8 ; How long have we been spinning? + cmplw r10,r9 ; Has it been too long? + bgt-- fstimeout ; Way too long, panic... + bne-- cr1,fssync0a ; Yea, still locked so sniff harder... + +fssync1: lwarx r7,0,r5 ; Get the sync word + li r12,1 ; Get the lock + mr. r7,r7 ; Is it unlocked? + bne-- fssync0 + stwcx. r12,0,r5 ; Store lock and test reservation + bne-- fssync1 ; Try again if lost reservation... + + isync ; Toss speculation + + lwz r12,FPUowner(r6) ; Get the context ID for owner + cmplw r3,r12 ; Check again if we own the FPU? + bne-- fsretlk ; Go unlock and return since we no longer own context + + lwz r5,FPUcpu(r12) ; Get the cpu that context was last on + lwz r7,FPUsave(r12) ; Get the current FPU savearea for the thread + cmplw r5,r11 ; Is this for the same processor? lwz r9,FPUlevel(r12) ; Get our current level indicator + bne-- fsretlk ; Not the same processor, skip any save... - cmplwi cr1,r3,0 ; Have we ever saved this facility context? - beq- cr1,fsneedone ; Never saved it, so go do it... + cmplwi r7,0 ; Have we ever saved this facility context? 
+ beq-- fsneedone ; Never saved it, so go do it... - lwz r8,SAVlevel(r3) ; Get the level this savearea is for - cmplw cr1,r9,r8 ; Correct level? - beq-- cr1,fsret ; The current level is already saved, bail out... + lwz r8,SAVlevel(r7) ; Get the level of this savearea + cmplw r9,r8 ; Correct level? + beq-- fsretlk ; The current level is already saved, bail out... fsneedone: bl EXT(save_get) ; Get a savearea for the context @@ -442,8 +479,6 @@ fsneedone: bl EXT(save_get) ; Get a savearea for the context li r4,SAVfloat ; Get floating point tag lwz r12,FPUowner(r6) ; Get back our thread stb r4,SAVflags+2(r3) ; Mark this savearea as a float - mr. r12,r12 ; See if we were disowned while away. Very, very small chance of it... - beq-- fsbackout ; If disowned, just toss savearea... lwz r4,facAct(r12) ; Get the activation associated with live context lwz r8,FPUsave(r12) ; Get the current top floating point savearea stw r4,SAVact(r3) ; Indicate the right activation for this context @@ -455,14 +490,28 @@ fsneedone: bl EXT(save_get) ; Get a savearea for the context bl fp_store ; save all 32 FPRs in the save area at r3 mtlr r2 ; Restore return - + +fsretlk: li r7,0 ; Get the unlock value + eieio ; Make sure that these updates make it out + stw r7,FPUsync(r12) ; Unlock it + fsret: mtmsr r0 ; Put interrupts on if they were and floating point off isync blr -fsbackout: mr r4,r0 ; restore the original MSR - b EXT(save_ret_wMSR) ; Toss savearea and return from there... +fstimeout: mr r4,r5 ; Set the lock address + mr r5,r7 ; Set the lock word data + lis r3,hi16(fstimeout_str) ; Get the failed lck message + ori r3,r3,lo16(fstimeout_str) ; Get the failed lck message + bl EXT(panic) + BREAKPOINT_TRAP ; We die here anyway + + .data +fstimeout_str: + STRINGD "fpu_save: timeout on sync lock (0x%08X), value = 0x%08X\n\000" + .text + /* * fpu_switch() @@ -523,20 +572,19 @@ LEXT(fpu_switch) lhz r16,PP_CPU_NUMBER(r26) ; Get the current CPU number -fswretry: mr. 
r22,r22 ; See if there is any live FP status - - beq- fsnosave ; No live context, so nothing to save... + mr. r22,r22 ; See if there is any live FP status + la r15,FPUsync(r22) ; Point to the sync word - isync ; Make sure we see this in the right order + beq-- fsnosave ; No live context, so nothing to save... - lwz r30,FPUsave(r22) ; Get the top savearea - cmplw cr2,r22,r29 ; Are both old and new the same context? lwz r18,FPUcpu(r22) ; Get the last CPU we ran on - cmplwi cr1,r30,0 ; Anything saved yet? + cmplw cr2,r22,r29 ; Are both old and new the same context? + lwz r30,FPUsave(r22) ; Get the top savearea cmplw r18,r16 ; Make sure we are on the right processor lwz r31,FPUlevel(r22) ; Get the context level + cmplwi cr1,r30,0 ; Anything saved yet? - bne- fsnosave ; No, not on the same processor... + bne-- fsnosave ; No, not on the same processor... ; ; Check to see if the live context has already been saved. @@ -546,12 +594,77 @@ fswretry: mr. r22,r22 ; See if there is any live FP status cmplw r31,r27 ; See if the current and active levels are the same crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same - li r3,0 ; Clear this - beq- fsthesame ; New and old are the same, just go enable... + beq-- fsthesame ; New and old are the same, just go enable... + + +; +; Note it turns out that on a G5, the following load has about a 50-50 chance of +; taking a segment exception in a system that is doing heavy file I/O. We +; make a dummy access right now in order to get that resolved before we take the lock. +; We do not use the data returned because it may change over the lock +; + + beq-- cr1,fswsync ; Nothing saved, skip the probe attempt... 
+ lwz r11,SAVlevel(r30) ; Touch the context in order to fault in the segment + +; +; Make sure that the live context block is not mucked with while +; we are trying to save it on out +; + +fswsync: lis r11,ha16(EXT(LockTimeOut)) ; Get the high part + mftb r3 ; Get the time now + lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value + b fswsync0a ; Jump to the lock... + + .align 5 + +fswsync0: li r19,lgKillResv ; Get killing field + stwcx. r19,0,r19 ; Kill reservation + +fswsync0a: lwz r19,0(r15) ; Sniff the lock + mftb r18 ; Is it time yet? + cmplwi cr1,r19,0 ; Is it locked? + sub r18,r18,r3 ; How long have we been spinning? + cmplw r18,r11 ; Has it been too long? + bgt-- fswtimeout ; Way too long, panic... + bne-- cr1,fswsync0a ; Yea, still locked so sniff harder... + +fswsync1: lwarx r19,0,r15 ; Get the sync word + li r0,1 ; Get the lock + mr. r19,r19 ; Is it unlocked? + bne-- fswsync0 + stwcx. r0,0,r15 ; Store lock and test reservation + bne-- fswsync1 ; Try again if lost reservation... + + isync ; Toss speculation - beq- cr1,fsmstsave ; Not saved yet, go do it... +; +; Note that now that we have the lock, we need to check if anything changed. +; Also note that the possible changes are limited. The context owner can +; never change to a different thread or level although it can be invalidated. +; A new context can not be pushed on top of us, but it can be popped. The +; cpu indicator will always change if another processor mucked with any +; contexts. +; +; It should be very rare that any of the context stuff changes across the lock. +; + + lwz r0,FPUowner(r26) ; Get the thread that owns the FPU again + lwz r11,FPUsave(r22) ; Get the top savearea again + lwz r18,FPUcpu(r22) ; Get the last CPU we ran on again + sub r0,r0,r22 ; Non-zero if we lost ownership, 0 if not + xor r11,r11,r30 ; Non-zero if saved context changed, 0 if not + xor r18,r18,r16 ; Non-zero if cpu changed, 0 if not + cmplwi cr1,r30,0 ; Is anything saved? 
+ or r0,r0,r11 ; Zero only if both owner and context are unchanged + or. r0,r0,r18 ; Zero only if nothing has changed + li r3,0 ; Clear this + bne-- fsnosavelk ; Something has changed, so this is not ours to save... + beq-- cr1,fsmstsave ; There is no context saved yet... + lwz r11,SAVlevel(r30) ; Get the level of top saved context cmplw r31,r11 ; Are live and saved the same? @@ -559,69 +672,27 @@ fswretry: mr. r22,r22 ; See if there is any live FP status #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) li r2,0x7F02 ; (TEST/DEBUG) - mr r3,r30 ; (TEST/DEBUG) + mr r3,r11 ; (TEST/DEBUG) mr r5,r31 ; (TEST/DEBUG) oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) li r3,0 ; (TEST/DEBUG) #endif - beq+ fsnosave ; Same level, so already saved... - + beq++ fsnosavelk ; Same level, so already saved... fsmstsave: stw r3,FPUowner(r26) ; Kill the context now eieio ; Make sure everyone sees it bl EXT(save_get) ; Go get a savearea - - mr. r31,r31 ; Are we saving the user state? - la r15,FPUsync(r22) ; Point to the sync word - beq++ fswusave ; Yeah, no need for lock... -; -; Here we make sure that the live context is not tossed while we are -; trying to push it. This can happen only for kernel context and -; then only by a race with act_machine_sv_free. -; -; We only need to hold this for a very short time, so no sniffing needed. -; If we find any change to the level, we just abandon. -; -fswsync: lwarx r19,0,r15 ; Get the sync word - li r0,1 ; Get the lock - cmplwi cr1,r19,0 ; Is it unlocked? - stwcx. r0,0,r15 ; Store lock and test reservation - crand cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked - bne-- fswsync ; Try again if lost reservation or locked... - - isync ; Toss speculation - lwz r0,FPUlevel(r22) ; Pick up the level again - li r7,0 ; Get unlock value - cmplw r0,r31 ; Same level? - beq++ fswusave ; Yeah, we expect it to be... - - stw r7,FPUsync(r22) ; Unlock lock. 
No need to sync here - - bl EXT(save_ret) ; Toss save area because we are abandoning save - b fsnosave ; Skip the save... - - .align 5 - -fswusave: lwz r12,facAct(r22) ; Get the activation associated with the context - stw r3,FPUsave(r22) ; Set this as the latest context savearea for the thread - mr. r31,r31 ; Check again if we were user level + lwz r12,facAct(r22) ; Get the activation associated with the context stw r30,SAVprev+4(r3) ; Point us to the old context stw r31,SAVlevel(r3) ; Tag our level li r7,SAVfloat ; Get the floating point ID stw r12,SAVact(r3) ; Make sure we point to the right guy stb r7,SAVflags+2(r3) ; Set that we have a floating point save area - - li r7,0 ; Get the unlock value - - beq-- fswnulock ; Skip unlock if user (we did not lock it)... - eieio ; Make sure that these updates make it out - stw r7,FPUsync(r22) ; Unlock it. + stw r3,FPUsave(r22) ; Set this as the latest context savearea for the thread -fswnulock: - #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) li r2,0x7F03 ; (TEST/DEBUG) @@ -631,17 +702,51 @@ fswnulock: bl fp_store ; store all 32 FPRs +fsnosavelk: li r7,0 ; Get the unlock value + eieio ; Make sure that these updates make it out + stw r7,FPUsync(r22) ; Unlock it. + ; ; The context is all saved now and the facility is free. ; -; If we do not we need to fill the registers with junk, because this level has +; Check if we need to fill the registers with junk, because this level has ; never used them before and some thieving bastard could hack the old values ; of some thread! Just imagine what would happen if they could! Why, nothing ; would be safe! My God! It is terrifying! ; +; Make sure that the live context block is not mucked with while +; we are trying to load it up +; + +fsnosave: la r15,FPUsync(r29) ; Point to the sync word + lis r11,ha16(EXT(LockTimeOut)) ; Get the high part + mftb r3 ; Get the time now + lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value + b fsnsync0a ; Jump to the lock... 
+ + .align 5 + +fsnsync0: li r19,lgKillResv ; Get killing field + stwcx. r19,0,r19 ; Kill reservation + +fsnsync0a: lwz r19,0(r15) ; Sniff the lock + mftb r18 ; Is it time yet? + cmplwi cr1,r19,0 ; Is it locked? + sub r18,r18,r3 ; How long have we been spinning? + cmplw r18,r11 ; Has it been too long? + bgt-- fsntimeout ; Way too long, panic... + bne-- cr1,fsnsync0a ; Yea, still locked so sniff harder... +fsnsync1: lwarx r19,0,r15 ; Get the sync word + li r0,1 ; Get the lock + mr. r19,r19 ; Is it unlocked? + bne-- fsnsync0 ; Unfortunately, it is locked... + stwcx. r0,0,r15 ; Store lock and test reservation + bne-- fsnsync1 ; Try again if lost reservation... + + isync ; Toss speculation -fsnosave: lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one + lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one lwz r19,FPUcpu(r29) ; Get the last CPU we ran on lwz r14,FPUsave(r29) ; Point to the top of the "new" context stack @@ -685,9 +790,10 @@ fsinvothr: lwarx r18,r16,r19 ; Get the owner dcbt 0,r11 ; Touch line in - lwz r3,SAVprev+4(r14) ; Get the previous context lwz r0,SAVlevel(r14) ; Get the level of first facility savearea + lwz r3,SAVprev+4(r14) ; Get the previous context cmplw r0,r15 ; Top level correct to load? + li r7,0 ; Get the unlock value bne-- MakeSureThatNoTerroristsCanHurtUsByGod ; No, go initialize... stw r3,FPUsave(r29) ; Pop the context (we will toss the savearea later) @@ -699,6 +805,9 @@ fsinvothr: lwarx r18,r16,r19 ; Get the owner sc ; (TEST/DEBUG) #endif + eieio ; Make sure that these updates make it out + stw r7,FPUsync(r29) ; Unlock context now that the context save has been removed + // Note this code is used both by 32- and 128-byte processors. This means six extra DCBTs // are executed on a 128-byte machine, but that is better than a mispredicted branch. 
@@ -788,7 +897,11 @@ MakeSureThatNoTerroristsCanHurtUsByGod: sc ; (TEST/DEBUG) #endif lis r5,hi16(EXT(FloatInit)) ; Get top secret floating point init value address + li r7,0 ; Get the unlock value ori r5,r5,lo16(EXT(FloatInit)) ; Slam bottom + eieio ; Make sure that these updates make it out + stw r7,FPUsync(r29) ; Unlock it now that the context has been removed + lfd f0,0(r5) ; Initialize FP0 fmr f1,f0 ; Do them all fmr f2,f0 @@ -826,13 +939,18 @@ MakeSureThatNoTerroristsCanHurtUsByGod: ; ; We get here when we are switching to the same context at the same level and the context -; is still live. Essentially, all we are doing is turning on the faility. It may have +; is still live. Essentially, all we are doing is turning on the facility. It may have ; gotten turned off due to doing a context save for the current level or a context switch ; back to the live guy. ; .align 5 + +fsthesamel: li r7,0 ; Get the unlock value + eieio ; Make sure that these updates make it out + stw r7,FPUsync(r22) ; Unlock it. + fsthesame: #if FPVECDBG @@ -848,13 +966,39 @@ fsthesame: cmplw r11,r31 ; Are live and saved the same? - bne+ fsenable ; Level not the same, nothing to pop, go enable and exit... + bne++ fsenable ; Level not the same, nothing to pop, go enable and exit... mr r3,r30 ; Get the old savearea (we popped it before) stw r14,FPUsave(r22) ; Pop the savearea from the stack bl EXT(save_ret) ; Toss it b fsenable ; Go enable and exit... +; +; Note that we need to choke in this code rather than panic because there is no +; stack. 
+; + +fswtimeout: lis r0,hi16(Choke) ; Choke code + ori r0,r0,lo16(Choke) ; and the rest + li r3,failTimeout ; Timeout code + sc ; System ABEND + +fsntimeout: lis r0,hi16(Choke) ; Choke code + ori r0,r0,lo16(Choke) ; and the rest + li r3,failTimeout ; Timeout code + sc ; System ABEND + +vswtimeout0: + lis r0,hi16(Choke) ; Choke code + ori r0,r0,lo16(Choke) ; and the rest + li r3,failTimeout ; Timeout code + sc ; System ABEND + +vswtimeout1: + lis r0,hi16(Choke) ; Choke code + ori r0,r0,lo16(Choke) ; and the rest + li r3,failTimeout ; Timeout code + sc ; System ABEND ; ; This function invalidates any live floating point context for the passed in facility_context. @@ -960,6 +1104,7 @@ LEXT(vec_save) lwz r12,VMXowner(r6) ; Get the context ID for owner #if FPVECDBG + mr r11,r6 ; (TEST/DEBUG) mr r7,r0 ; (TEST/DEBUG) li r4,0 ; (TEST/DEBUG) mr r10,r3 ; (TEST/DEBUG) @@ -967,7 +1112,8 @@ LEXT(vec_save) mr. r3,r12 ; (TEST/DEBUG) li r2,0x5F00 ; (TEST/DEBUG) li r5,0 ; (TEST/DEBUG) - beq- noowneryeu ; (TEST/DEBUG) + lwz r6,liveVRS(r6) ; (TEST/DEBUG) + beq-- noowneryeu ; (TEST/DEBUG) lwz r4,VMXlevel(r12) ; (TEST/DEBUG) lwz r5,VMXsave(r12) ; (TEST/DEBUG) @@ -975,58 +1121,96 @@ noowneryeu: oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) mr r0,r7 ; (TEST/DEBUG) mr r3,r10 ; (TEST/DEBUG) + mr r6,r11 ; (TEST/DEBUG) #endif mflr r2 ; Save the return address -vsretry: mr. r12,r12 ; Anyone own the vector? + cmplw r3,r12 ; Is the specified context live? lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number - beq- vsret ; Nobody owns the vector, no save required... - - cmplw cr1,r3,r12 ; Is the specified context live? - - isync ; Force owner check first - + bne-- vsret ; We do not own the vector, no save required... lwz r9,VMXcpu(r12) ; Get the cpu that context was last on - bne- cr1,vsret ; Specified context is not live - cmplw cr1,r9,r11 ; Was the context for this processor? - beq+ cr1,vsgoodcpu ; Facility last used on this processor... 
+ cmplw r9,r11 ; Was the context for this processor?
+ la r5,VMXsync(r3) ; Point to the sync word
+ bne-- vsret ; Specified context is not live

- b vsret ; Someone else claimed this...
+;
+; It looks like we need to save this one. Or possibly toss a saved one if
+; the VRSAVE is 0.
+;
+; First, make sure that the live context block is not mucked with while
+; we are trying to save it on out. Then we will give it the final check.
+;
+
+ lis r9,ha16(EXT(LockTimeOut)) ; Get the high part
+ mftb r8 ; Get the time now
+ lwz r9,lo16(EXT(LockTimeOut))(r9) ; Get the timeout value
+ b vssync0a ; Jump to the lock...

 .align 5

-vsgoodcpu: lwz r3,VMXsave(r12) ; Get the current vector savearea for the thread
+vssync0: li r7,lgKillResv ; Get killing field
+ stwcx. r7,0,r7 ; Kill reservation
+
+vssync0a: lwz r7,0(r5) ; Sniff the lock
+ mftb r10 ; Is it time yet?
+ cmplwi cr1,r7,0 ; Is it locked?
+ sub r10,r10,r8 ; How long have we been spinning?
+ cmplw r10,r9 ; Has it been too long?
+ bgt-- vswtimeout0 ; Way too long, panic...
+ bne-- cr1,vssync0a ; Yea, still locked so sniff harder...
+
+vssync1: lwarx r7,0,r5 ; Get the sync word
+ li r12,1 ; Get the lock
+ mr. r7,r7 ; Is it unlocked?
+ bne-- vssync0 ; No, it is locked...
+ stwcx. r12,0,r5 ; Store lock and test reservation
+ bne-- vssync1 ; Try again if lost reservation...
+
+ isync ; Toss speculation
+
+ lwz r12,VMXowner(r6) ; Get the context ID for owner
+ cmplw r3,r12 ; Check again if we own VMX?
 lwz r10,liveVRS(r6) ; Get the right VRSave register
- lwz r9,VMXlevel(r12) ; Get our current level indicator
+ bne-- vsretlk ; Go unlock and return since we no longer own context
+ lwz r5,VMXcpu(r12) ; Get the cpu that context was last on
+ lwz r7,VMXsave(r12) ; Get the current vector savearea for the thread
+ cmplwi cr1,r10,0 ; Is VRsave set to 0?
+ cmplw r5,r11 ; Is this for the same processor?
+ lwz r9,VMXlevel(r12) ; Get our current level indicator
+ bne-- vsretlk ; Not the same processor, skip any save... 
- cmplwi cr1,r3,0 ; Have we ever saved this facility context? - beq- cr1,vsneedone ; Never saved it, so we need an area... + cmplwi r7,0 ; Have we ever saved this facility context? + beq-- vsneedone ; Never saved it, so we need an area... - lwz r8,SAVlevel(r3) ; Get the level this savearea is for - mr. r10,r10 ; Is VRsave set to 0? - cmplw cr1,r9,r8 ; Correct level? - bne- cr1,vsneedone ; Different level, so we need to save... + lwz r8,SAVlevel(r7) ; Get the level this savearea is for + cmplw r9,r8 ; Correct level? + bne-- vsneedone ; Different level, so we need to save... - bne+ vsret ; VRsave is non-zero so we need to keep what is saved... + bne++ cr1,vsretlk ; VRsave is non-zero so we need to keep what is saved... - lwz r4,SAVprev+4(r3) ; Pick up the previous area - lwz r5,SAVlevel(r4) ; Get the level associated with save + lwz r4,SAVprev+4(r7) ; Pick up the previous area + li r5,0 ; Assume we just dumped the last + mr. r4,r4 ; Is there one? stw r4,VMXsave(r12) ; Dequeue this savearea - li r4,0 ; Clear - stw r5,VMXlevel(r12) ; Save the level - - stw r4,VMXowner(r12) ; Show no live context here - eieio + beq-- vsnomore ; We do not have another... + + lwz r5,SAVlevel(r4) ; Get the level associated with save + +vsnomore: stw r5,VMXlevel(r12) ; Save the level + li r7,0 ; Clear + stw r7,VMXowner(r6) ; Show no live context here vsbackout: mr r4,r0 ; restore the saved MSR + eieio + stw r7,VMXsync(r12) ; Unlock the context + b EXT(save_ret_wMSR) ; Toss the savearea and return from there... .align 5 -vsneedone: mr. r10,r10 ; Is VRsave set to 0? - beq- vsret ; Yeah, they do not care about any of them... +vsneedone: beq-- cr1,vsclrlive ; VRSave is zero, go blow away the context... bl EXT(save_get) ; Get a savearea for the context @@ -1036,7 +1220,8 @@ vsneedone: mr. r10,r10 ; Is VRsave set to 0? lwz r12,VMXowner(r6) ; Get back our context ID stb r4,SAVflags+2(r3) ; Mark this savearea as a vector mr. r12,r12 ; See if we were disowned while away. 
Very, very small chance of it... - beq- vsbackout ; If disowned, just toss savearea... + li r7,0 ; Clear + beq-- vsbackout ; If disowned, just toss savearea... lwz r4,facAct(r12) ; Get the activation associated with live context lwz r8,VMXsave(r12) ; Get the current top vector savearea stw r4,SAVact(r3) ; Indicate the right activation for this context @@ -1050,14 +1235,25 @@ vsneedone: mr. r10,r10 ; Is VRsave set to 0? bl vr_store ; store live VRs into savearea as required (uses r4-r11) + mfsprg r6,1 ; Get the current activation mtcrf 255,r12 ; Restore the non-volatile CRs - mtlr r2 ; restore return address + lwz r6,ACT_PER_PROC(r6) ; Get the per_proc block + mtlr r2 ; Restore return address + lwz r12,VMXowner(r6) ; Get back our context ID + +vsretlk: li r7,0 ; Get the unlock value + eieio ; Make sure that these updates make it out + stw r7,VMXsync(r12) ; Unlock it vsret: mtmsr r0 ; Put interrupts on if they were and vector off isync blr +vsclrlive: li r7,0 ; Clear + stw r7,VMXowner(r6) ; Show no live context here + b vsretlk ; Go unlock and leave... + /* * vec_switch() * @@ -1111,28 +1307,28 @@ LEXT(vec_switch) li r2,0x5F01 ; (TEST/DEBUG) mr r3,r22 ; (TEST/DEBUG) mr r5,r29 ; (TEST/DEBUG) + lwz r6,liveVRS(r26) ; (TEST/DEBUG) oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) sc ; (TEST/DEBUG) #endif lhz r16,PP_CPU_NUMBER(r26) ; Get the current CPU number -vsvretry: mr. r22,r22 ; See if there is any live vector status - - beq- vsnosave ; No live context, so nothing to save... + mr. r22,r22 ; See if there is any live vector status + la r15,VMXsync(r22) ; Point to the sync word - isync ; Make sure we see this in the right order + beq-- vswnosave ; No live context, so nothing to save... - lwz r30,VMXsave(r22) ; Get the top savearea - cmplw cr2,r22,r29 ; Are both old and new the same context? lwz r18,VMXcpu(r22) ; Get the last CPU we ran on + cmplw cr2,r22,r29 ; Are both old and new the same context? 
+ lwz r30,VMXsave(r22) ; Get the top savearea cmplwi cr1,r30,0 ; Anything saved yet? - cmplw r18,r16 ; Make sure we are on the right processor lwz r31,VMXlevel(r22) ; Get the context level + cmplw r18,r16 ; Make sure we are on the right processor lwz r10,liveVRS(r26) ; Get the right VRSave register - bne- vsnosave ; No, not on the same processor... + bne-- vswnosave ; No, not on the same processor... ; ; Check to see if the live context has already been saved. @@ -1141,13 +1337,67 @@ vsvretry: mr. r22,r22 ; See if there is any live vector status ; cmplw r31,r27 ; See if the current and active levels are the same - crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same - li r8,0 ; Clear this + crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same + + beq-- vswthesame ; New and old are the same, just go enable... + +; +; Make sure that the live context block is not mucked with while +; we are trying to save it on out +; + + lis r11,ha16(EXT(LockTimeOut)) ; Get the high part + mftb r3 ; Get the time now + lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value + b vswsync0a ; Jump to the lock... + + .align 5 - beq- vsthesame ; New and old are the same, just go enable... +vswsync0: li r19,lgKillResv ; Get killing field + stwcx. r19,0,r19 ; Kill reservation + +vswsync0a: lwz r19,0(r15) ; Sniff the lock + mftb r18 ; Is it time yet? + cmplwi cr1,r19,0 ; Is it locked? + sub r18,r18,r3 ; How long have we been spinning? + cmplw r18,r11 ; Has it been too long? + bgt-- vswtimeout0 ; Way too long, panic... + bne-- cr1,vswsync0a ; Yea, still locked so sniff harder... + +vswsync1: lwarx r19,0,r15 ; Get the sync word + li r0,1 ; Get the lock + mr. r19,r19 ; Is it unlocked? + bne-- vswsync0 + stwcx. r0,0,r15 ; Store lock and test reservation + bne-- vswsync1 ; Try again if lost reservation... + + isync ; Toss speculation + +; +; Note that now that we have the lock, we need to check if anything changed. 
+; Also note that the possible changes are limited. The context owner can +; never change to a different thread or level although it can be invalidated. +; A new context can not be pushed on top of us, but it can be popped. The +; cpu indicator will always change if another processor mucked with any +; contexts. +; +; It should be very rare that any of the context stuff changes across the lock. +; + lwz r0,VMXowner(r26) ; Get the thread that owns the vectors again + lwz r11,VMXsave(r22) ; Get the top savearea again + lwz r18,VMXcpu(r22) ; Get the last CPU we ran on again + sub r0,r0,r22 ; Non-zero if we lost ownership, 0 if not + xor r11,r11,r30 ; Non-zero if saved context changed, 0 if not + xor r18,r18,r16 ; Non-zero if cpu changed, 0 if not + cmplwi cr1,r30,0 ; Is anything saved? + or r0,r0,r11 ; Zero only if both owner and context are unchanged + or. r0,r0,r18 ; Zero only if nothing has changed cmplwi cr2,r10,0 ; Check VRSave to see if we really need to save anything... - beq- cr1,vsmstsave ; Not saved yet, go do it... + li r8,0 ; Clear + + bne-- vswnosavelk ; Something has changed, so this is not ours to save... + beq-- cr1,vswmstsave ; There is no context saved yet... lwz r11,SAVlevel(r30) ; Get the level of top saved context @@ -1162,82 +1412,38 @@ vsvretry: mr. r22,r22 ; See if there is any live vector status sc ; (TEST/DEBUG) #endif - bne- vsmstsave ; Live context has not been saved yet... - - bne- cr2,vsnosave ; Live context saved and VRSave not 0, no save and keep context... + beq++ vswnosavelk ; Same level, already saved... + bne-- cr2,vswnosavelk ; Live context saved and VRSave not 0, no save and keep context... lwz r4,SAVprev+4(r30) ; Pick up the previous area li r5,0 ; Assume this is the only one (which should be the ususal case) mr. r4,r4 ; Was this the only one? stw r4,VMXsave(r22) ; Dequeue this savearea - beq+ vsonlyone ; This was the only one... + beq++ vswonlyone ; This was the only one... 
lwz r5,SAVlevel(r4) ; Get the level associated with previous save -vsonlyone: stw r5,VMXlevel(r22) ; Save the level +vswonlyone: stw r5,VMXlevel(r22) ; Save the level stw r8,VMXowner(r26) ; Clear owner - eieio + mr r3,r30 ; Copy the savearea we are tossing bl EXT(save_ret) ; Toss the savearea - b vsnosave ; Go load up the context... + b vswnosavelk ; Go load up the context... .align 5 - -vsmstsave: stw r8,VMXowner(r26) ; Clear owner - eieio - beq- cr2,vsnosave ; The VRSave was 0, so there is nothing to save... +vswmstsave: stw r8,VMXowner(r26) ; Clear owner + beq-- cr2,vswnosavelk ; The VRSave was 0, so there is nothing to save... bl EXT(save_get) ; Go get a savearea - mr. r31,r31 ; Are we saving the user state? - la r15,VMXsync(r22) ; Point to the sync word - beq++ vswusave ; Yeah, no need for lock... -; -; Here we make sure that the live context is not tossed while we are -; trying to push it. This can happen only for kernel context and -; then only by a race with act_machine_sv_free. -; -; We only need to hold this for a very short time, so no sniffing needed. -; If we find any change to the level, we just abandon. -; -vswsync: lwarx r19,0,r15 ; Get the sync word - li r0,1 ; Get the lock - cmplwi cr1,r19,0 ; Is it unlocked? - stwcx. r0,0,r15 ; Store lock and test reservation - crand cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked - bne-- vswsync ; Try again if lost reservation or locked... - - isync ; Toss speculation - - lwz r0,VMXlevel(r22) ; Pick up the level again - li r7,0 ; Get unlock value - cmplw r0,r31 ; Same level? - beq++ vswusave ; Yeah, we expect it to be... - - stw r7,VMXsync(r22) ; Unlock lock. No need to sync here - - bl EXT(save_ret) ; Toss save area because we are abandoning save - b vsnosave ; Skip the save... 
- - .align 5 - -vswusave: lwz r12,facAct(r22) ; Get the activation associated with the context + lwz r12,facAct(r22) ; Get the activation associated with the context stw r3,VMXsave(r22) ; Set this as the latest context savearea for the thread - mr. r31,r31 ; Check again if we were user level stw r30,SAVprev+4(r3) ; Point us to the old context stw r31,SAVlevel(r3) ; Tag our level li r7,SAVvector ; Get the vector ID stw r12,SAVact(r3) ; Make sure we point to the right guy stb r7,SAVflags+2(r3) ; Set that we have a vector save area - li r7,0 ; Get the unlock value - - beq-- vswnulock ; Skip unlock if user (we did not lock it)... - eieio ; Make sure that these updates make it out - stw r7,VMXsync(r22) ; Unlock it. - -vswnulock: - #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) li r2,0x5F03 ; (TEST/DEBUG) @@ -1248,11 +1454,10 @@ vswnulock: lwz r10,liveVRS(r26) ; Get the right VRSave register bl vr_store ; store VRs into savearea according to vrsave (uses r4-r11) - ; ; The context is all saved now and the facility is free. ; -; If we do not we need to fill the registers with junk, because this level has +; Check if we need to fill the registers with junk, because this level has ; never used them before and some thieving bastard could hack the old values ; of some thread! Just imagine what would happen if they could! Why, nothing ; would be safe! My God! It is terrifying! @@ -1260,11 +1465,44 @@ vswnulock: ; Also, along the way, thanks to Ian Ollmann, we generate the 0x7FFFDEAD (QNaNbarbarian) ; constant that we may need to fill unused vector registers. 
; +; Make sure that the live context block is not mucked with while +; we are trying to load it up +; +vswnosavelk: + li r7,0 ; Get the unlock value + eieio ; Make sure that these updates make it out + stw r7,VMXsync(r22) ; Unlock the old context + +vswnosave: la r15,VMXsync(r29) ; Point to the sync word + lis r11,ha16(EXT(LockTimeOut)) ; Get the high part + mftb r3 ; Get the time now + lwz r11,lo16(EXT(LockTimeOut))(r11) ; Get the timeout value + b vswnsync0a ; Jump to the lock... + + .align 5 + +vswnsync0: li r19,lgKillResv ; Get killing field + stwcx. r19,0,r19 ; Kill reservation +vswnsync0a: lwz r19,0(r15) ; Sniff the lock + mftb r18 ; Is it time yet? + cmplwi cr1,r19,0 ; Is it locked? + sub r18,r18,r3 ; How long have we been spinning? + cmplw r18,r11 ; Has it been too long? + bgt-- vswtimeout1 ; Way too long, panic... + bne-- cr1,vswnsync0a ; Yea, still locked so sniff harder... +vswnsync1: lwarx r19,0,r15 ; Get the sync word + li r0,1 ; Get the lock + mr. r19,r19 ; Is it unlocked? + bne-- vswnsync0 ; Unfortunately, it is locked... + stwcx. r0,0,r15 ; Store lock and test reservation + bne-- vswnsync1 ; Try again if lost reservation... + + isync ; Toss speculation -vsnosave: vspltisb v31,-10 ; Get 0xF6F6F6F6 + vspltisb v31,-10 ; Get 0xF6F6F6F6 lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one vspltisb v30,5 ; Get 0x05050505 lwz r19,VMXcpu(r29) ; Get the last CPU we ran on @@ -1297,7 +1535,7 @@ vsnosave: vspltisb v31,-10 ; Get 0xF6F6F6F6 lwz r19,ppe_vaddr(r19) ; Point to the owner per_proc vrlb v31,v31,v29 ; Get 0xDEADDEAD -vsinvothr: lwarx r18,r16,r19 ; Get the owner +vswinvothr: lwarx r18,r16,r19 ; Get the owner sub r0,r18,r29 ; Subtract one from the other sub r11,r29,r18 ; Subtract the other from the one @@ -1305,7 +1543,7 @@ vsinvothr: lwarx r18,r16,r19 ; Get the owner srawi r11,r11,31 ; Get a 0 if equal or -1 of not and r18,r18,r11 ; Make 0 if same, unchanged if not stwcx. 
r18,r16,r19 ; Try to invalidate it - bne-- vsinvothr ; Try again if there was a collision... + bne-- vswinvothr ; Try again if there was a collision... cmplwi cr1,r14,0 ; Do we possibly have some context to load? vmrghh v31,v30,v31 ; Get 0x7FFFDEAD. V31 keeps this value until the bitter end @@ -1336,6 +1574,10 @@ vsinvothr: lwarx r18,r16,r19 ; Get the owner bl vr_load ; load VRs from save area based on vrsave in r10 bl EXT(save_ret) ; Toss the save area after loading VRs + +vrenablelk: li r7,0 ; Get the unlock value + eieio ; Make sure that these updates make it out + stw r7,VMXsync(r29) ; Unlock the new context vrenable: lwz r8,savesrr1+4(r25) ; Get the msr of the interrupted guy oris r8,r8,hi16(MASK(MSR_VEC)) ; Enable the vector facility @@ -1403,7 +1645,7 @@ ProtectTheAmericanWay: vor v28,v31,v31 ; Copy into the next register vor v29,v31,v31 ; Copy into the next register vor v30,v31,v31 ; Copy into the next register - b vrenable ; Finish setting it all up... + b vrenablelk ; Finish setting it all up... 
@@ -1416,7 +1658,7 @@ ProtectTheAmericanWay: .align 5 -vsthesame: +vswthesame: #if FPVECDBG lis r0,hi16(CutTrace) ; (TEST/DEBUG) diff --git a/osfmk/ppc/exception.h b/osfmk/ppc/exception.h index dedad228d..1994d53a8 100644 --- a/osfmk/ppc/exception.h +++ b/osfmk/ppc/exception.h @@ -644,9 +644,10 @@ extern char *trap_type[]; #define failSkipLists 7 #define failUnalignedStk 8 #define failPmap 9 +#define failTimeout 10 /* Always must be last - update failNames table in model_dep.c as well */ -#define failUnknown 10 +#define failUnknown 11 #ifndef ASSEMBLER diff --git a/osfmk/ppc/hw_exception.s b/osfmk/ppc/hw_exception.s index 51b344457..f2099e2a7 100644 --- a/osfmk/ppc/hw_exception.s +++ b/osfmk/ppc/hw_exception.s @@ -50,6 +50,7 @@ #define VERIFYSAVE 0 #define FPVECDBG 0 +#define FPFLOOD 0 #define INSTRUMENT 0 /* @@ -198,6 +199,10 @@ tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame .L_call_trap: +#if FPFLOOD + stfd f31,emfp31(r25) ; (TEST/DEBUG) +#endif + bl EXT(trap) lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable @@ -396,6 +401,11 @@ noassist: cmplwi r15,0x7000 ; Do we have a fast path trap? mr r4,r13 ; current activation addi r7,r7,1 ; Bump it stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it + +#if FPFLOOD + stfd f31,emfp31(r25) ; (TEST/DEBUG) +#endif + bl EXT(unix_syscall) ; Check out unix... 
.L_call_server_syscall_exception: @@ -552,11 +562,11 @@ ksystrace: lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count neg r31,r0 ; Make this positive - mr r3,r31 ; save it - slwi r27,r3,4 ; multiply by 16 - slwi r3,r3,2 ; and the original by 4 + mr r3,r31 ; save it + slwi r27,r3,4 ; multiply by 16 + slwi r3,r3,2 ; and the original by 4 ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table - add r27,r27,r3 ; for a total of 20x (5 words/entry) + add r27,r27,r3 ; for a total of 20x (5 words/entry) addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count cmplwi r8,0 ; Is kdebug_enable non-zero stw r7,TASK_SYSCALLS_MACH(r10) ; Save count @@ -575,7 +585,7 @@ ksystrace: .L_kernel_syscall_munge: cmplwi r0,0 ; test for null munger - mtctr r0 ; Set the function call address + mtctr r0 ; Set the function call address addi r3,r30,saver3 ; Pointer to args from save area addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args beq-- .L_kernel_syscall_trapcall ; null munger - skip to trap call @@ -585,6 +595,11 @@ ksystrace: lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address mtctr r0 ; Set the function call address addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args + +#if FPFLOOD + stfd f31,emfp31(r25) ; (TEST/DEBUG) +#endif + bctrl @@ -942,7 +957,11 @@ ihbootnover: ; (TEST/DEBUG) mr r4,r30 lwz r5,savedsisr(r30) ; Get the DSISR lwz r6,savedar+4(r30) ; Get the DAR - + +#if FPFLOOD + stfd f31,emfp31(r25) ; (TEST/DEBUG) +#endif + bl EXT(interrupt) @@ -1159,7 +1178,10 @@ fpuhasdfrd: lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea mr. r23,r23 ; (TEST/DEBUG) Should be level 0 beq++ fpulvl0 ; (TEST/DEBUG) Yes... - BREAKPOINT_TRAP ; (TEST/DEBUG) + + lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code + ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest + sc ; (TEST/DEBUG) System ABEND fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context? beq fpunusrstt ; (TEST/DEBUG) No... @@ -1167,11 +1189,17 @@ fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context? 
lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer mr. r23,r23 ; (TEST/DEBUG) Is this our user context? beq++ fpulvl0b ; (TEST/DEBUG) Yes... - BREAKPOINT_TRAP ; (TEST/DEBUG) + + lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code + ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest + sc ; (TEST/DEBUG) System ABEND fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain? beq++ fpunusrstt ; (TEST/DEBUG) Nope... - BREAKPOINT_TRAP ; (TEST/DEBUG) + + lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code + ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest + sc ; (TEST/DEBUG) System ABEND fpunusrstt: ; (TEST/DEBUG) #endif @@ -1191,6 +1219,7 @@ fpunusrstt: ; (TEST/DEBUG) beq++ fpuena ; Nope... lwz r25,SAVlevel(r24) ; Get the level of savearea lwz r0,SAVprev+4(r24) ; Get the previous + cmplw r30,r25 ; Is savearea for the level we are launching? bne++ fpuena ; No, just go enable... diff --git a/osfmk/ppc/hw_vm.s b/osfmk/ppc/hw_vm.s index bcfeda6ee..988b3e373 100644 --- a/osfmk/ppc/hw_vm.s +++ b/osfmk/ppc/hw_vm.s @@ -2263,7 +2263,7 @@ hsg64Hit: bl mapPhysUnlock ; Unlock physent chain .align 5 hsg64Miss: bl mapPhysUnlock ; Unlock physent chain - mtmsr r11 ; Restore 'rupts, translation + mtmsrd r11 ; Restore 'rupts, translation li r3,mapRtEmpty ; No mappings found matching specified criteria b hrmRetnCmn ; Exit through common epilog diff --git a/osfmk/ppc/lowmem_vectors.s b/osfmk/ppc/lowmem_vectors.s index 9bdf98f95..a3b0e9e5a 100644 --- a/osfmk/ppc/lowmem_vectors.s +++ b/osfmk/ppc/lowmem_vectors.s @@ -354,10 +354,11 @@ notDCache: mtcrf 255,r13 ; Restore CRs ; System Calls (sc instruction) ; -; The syscall number is in r0. All we do here is munge the number into a -; 7-bit index into the "scTable", and dispatch on it to handle the Ultra +; The syscall number is in r0. All we do here is munge the number into an +; 8-bit index into the "scTable", and dispatch on it to handle the Ultra ; Fast Traps (UFTs.) 
The index is: ; +; 0x80 - set if syscall number is 0x80000000 (CutTrace) ; 0x40 - set if syscall number is 0x00006004 ; 0x20 - set if upper 29 bits of syscall number are 0xFFFFFFF8 ; 0x10 - set if upper 29 bits of syscall number are 0x00007FF0 @@ -368,22 +369,26 @@ notDCache: mtcrf 255,r13 ; Restore CRs .L_handlerC00: mtsprg 3,r11 ; Save R11 mtsprg 2,r13 ; Save R13 - rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number - xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps - addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap - cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap - cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap - rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position - xori r13,r0,0x6004 ; start to check for 0x6004 - rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number - cntlzw r13,r13 ; set bit 0x20 iff 0x6004 - rlwinm r11,r11,0,0,30 ; clear out bit 31 - rlwimi r11,r13,1,0x40 ; move 0x6004 bit into position - lhz r11,lo16(scTable)(r11) ; get branch address from sc table - mfctr r13 ; save caller's ctr in r13 - mtctr r11 ; set up branch to syscall handler - mfsprg r11,0 ; get per_proc, which most UFTs use - bctr ; dispatch (r11 in sprg3, r13 in sprg2, ctr in r13, per_proc in r11) + rlwinm r11,r0,0,0xFFFFFFF8 ; mask off low 3 bits of syscall number + xori r13,r11,0x7FF0 ; start to check for the 0x7FFx traps + addi r11,r11,8 ; make a 0 iff this is a 0xFFFFFFF8 trap + cntlzw r13,r13 ; set bit 0x20 iff a 0x7FFx trap + cntlzw r11,r11 ; set bit 0x20 iff a 0xFFFFFFF8 trap + xoris r0,r0,0x8000 ; Flip bit to make 0 iff 0x80000000 + rlwimi r11,r13,31,0x10 ; move 0x7FFx bit into position + cntlzw r13,r0 ; Set bit 0x20 iff 0x80000000 + xoris r0,r0,0x8000 ; Flip bit to restore R0 + rlwimi r11,r13,2,0x80 ; Set bit 0x80 iff CutTrace + xori r13,r0,0x6004 ; start to check for 0x6004 + rlwimi r11,r0,1,0xE ; move in low 3 bits of syscall number + cntlzw r13,r13 ; set bit 0x20 iff 0x6004 + rlwinm r11,r11,0,0,30 ; clear out bit 31 + rlwimi 
r11,r13,1,0x40 ; move 0x6004 bit into position + lhz r11,lo16(scTable)(r11) ; get branch address from sc table + mfctr r13 ; save caller's ctr in r13 + mtctr r11 ; set up branch to syscall handler + mfsprg r11,0 ; get per_proc, which most UFTs use + bctr ; dispatch (r11 in sprg3, r13 in sprg2, ctr in r13, per_proc in r11) /* * Trace - generated by single stepping @@ -650,7 +655,8 @@ EXT(exception_entry): * 3. If (syscall & 0xFFFFFFF0) == 0xFFFFFFF0, then it is also a UFT and is dispatched here. * * 4. If (syscall & 0xFFFFF000) == 0x80000000, then it is a "firmware" call and is dispatched in - * Firmware.s, though the special "Cut Trace" trap (0x80000000) is handled here in xcpSyscall. + * Firmware.s, though the special "Cut Trace" trap (0x80000000) is handled here as an ultra + * fast trap. * * 5. If (syscall & 0xFFFFF000) == 0xFFFFF000, and it is not one of the above, then it is a Mach * syscall, which are dispatched in hw_exceptions.s via "mach_trap_table". @@ -672,62 +678,162 @@ EXT(exception_entry): * "scTable" is an array of 2-byte addresses, accessed using a 7-bit index derived from the syscall * number as follows: * - * 0x40 (A) - set if syscall number is 0x00006004 - * 0x20 (B) - set if upper 29 bits of syscall number are 0xFFFFFFF8 - * 0x10 (C) - set if upper 29 bits of syscall number are 0x00007FF0 - * 0x0E (D) - low three bits of syscall number + * 0x80 (A) - set if syscall number is 0x80000000 + * 0x40 (B) - set if syscall number is 0x00006004 + * 0x20 (C) - set if upper 29 bits of syscall number are 0xFFFFFFF8 + * 0x10 (D) - set if upper 29 bits of syscall number are 0x00007FF0 + * 0x0E (E) - low three bits of syscall number * * If you define another UFT, try to use a number in one of the currently decoded ranges, ie one marked * "unassigned" below. The dispatch table and the UFT handlers must reside in the first 32KB of * physical memory. 
*/ - .align 7 ; start this table on a cache line -scTable: ; ABC D - .short uftNormalSyscall-baseR ; 000 0 these syscalls are not in a reserved range - .short uftNormalSyscall-baseR ; 000 1 these syscalls are not in a reserved range - .short uftNormalSyscall-baseR ; 000 2 these syscalls are not in a reserved range - .short uftNormalSyscall-baseR ; 000 3 these syscalls are not in a reserved range - .short uftNormalSyscall-baseR ; 000 4 these syscalls are not in a reserved range - .short uftNormalSyscall-baseR ; 000 5 these syscalls are not in a reserved range - .short uftNormalSyscall-baseR ; 000 6 these syscalls are not in a reserved range - .short uftNormalSyscall-baseR ; 000 7 these syscalls are not in a reserved range - - .short uftNormalSyscall-baseR ; 001 0 0x7FF0 is unassigned - .short uftNormalSyscall-baseR ; 001 1 0x7FF1 is Set Thread Info Fast Trap (pass up) - .short uftThreadInfo-baseR ; 001 2 0x7FF2 is Thread Info - .short uftFacilityStatus-baseR ; 001 3 0x7FF3 is Facility Status - .short uftLoadMSR-baseR ; 001 4 0x7FF4 is Load MSR - .short uftNormalSyscall-baseR ; 001 5 0x7FF5 is the Null FastPath Trap (pass up) - .short uftNormalSyscall-baseR ; 001 6 0x7FF6 is unassigned - .short uftNormalSyscall-baseR ; 001 7 0x7FF7 is unassigned - - .short uftNormalSyscall-baseR ; 010 0 0xFFFFFFF0 is unassigned - .short uftNormalSyscall-baseR ; 010 1 0xFFFFFFF1 is unassigned - .short uftNormalSyscall-baseR ; 010 2 0xFFFFFFF2 is unassigned - .short uftNormalSyscall-baseR ; 010 3 0xFFFFFFF3 is unassigned - .short uftNormalSyscall-baseR ; 010 4 0xFFFFFFF4 is unassigned - .short uftNormalSyscall-baseR ; 010 5 0xFFFFFFF5 is unassigned - .short uftIsPreemptiveTaskEnv-baseR ; 010 6 0xFFFFFFFE is Blue Box uftIsPreemptiveTaskEnv - .short uftIsPreemptiveTask-baseR ; 010 7 0xFFFFFFFF is Blue Box IsPreemptiveTask - - .short WhoaBaby-baseR ; 011 0 impossible combination - .short WhoaBaby-baseR ; 011 1 impossible combination - .short WhoaBaby-baseR ; 011 2 impossible combination - 
.short WhoaBaby-baseR ; 011 3 impossible combination - .short WhoaBaby-baseR ; 011 4 impossible combination - .short WhoaBaby-baseR ; 011 5 impossible combination - .short WhoaBaby-baseR ; 011 6 impossible combination - .short WhoaBaby-baseR ; 011 7 impossible combination - - .short WhoaBaby-baseR ; 100 0 0x6000 is an impossible index (diagCall) - .short WhoaBaby-baseR ; 100 1 0x6001 is an impossible index (vmm_get_version) - .short WhoaBaby-baseR ; 100 2 0x6002 is an impossible index (vmm_get_features) - .short WhoaBaby-baseR ; 100 3 0x6003 is an impossible index (vmm_init_context) - .short uftVMM-baseR ; 100 4 0x6004 is vmm_dispatch (only some of which are UFTs) - .short WhoaBaby-baseR ; 100 5 0x6005 is an impossible index (bb_enable_bluebox) - .short WhoaBaby-baseR ; 100 6 0x6006 is an impossible index (bb_disable_bluebox) - .short WhoaBaby-baseR ; 100 7 0x6007 is an impossible index (bb_settaskenv) + .align 8 ; start this table on a 256-byte boundary +scTable: ; ABCD E + .short uftNormalSyscall-baseR ; 0000 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0000 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0000 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0000 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0000 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0000 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0000 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0000 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 0001 0 0x7FF0 is unassigned + .short uftNormalSyscall-baseR ; 0001 1 0x7FF1 is Set Thread Info Fast Trap (pass up) + .short uftThreadInfo-baseR ; 0001 2 0x7FF2 is Thread Info + .short uftFacilityStatus-baseR ; 0001 3 0x7FF3 is Facility Status + .short uftLoadMSR-baseR ; 0001 4 0x7FF4 is Load MSR + 
.short uftNormalSyscall-baseR ; 0001 5 0x7FF5 is the Null FastPath Trap (pass up) + .short uftNormalSyscall-baseR ; 0001 6 0x7FF6 is unassigned + .short uftNormalSyscall-baseR ; 0001 7 0x7FF7 is unassigned + + .short uftNormalSyscall-baseR ; 0010 0 0xFFFFFFF0 is unassigned + .short uftNormalSyscall-baseR ; 0010 1 0xFFFFFFF1 is unassigned + .short uftNormalSyscall-baseR ; 0010 2 0xFFFFFFF2 is unassigned + .short uftNormalSyscall-baseR ; 0010 3 0xFFFFFFF3 is unassigned + .short uftNormalSyscall-baseR ; 0010 4 0xFFFFFFF4 is unassigned + .short uftNormalSyscall-baseR ; 0010 5 0xFFFFFFF5 is unassigned + .short uftIsPreemptiveTaskEnv-baseR ; 0010 6 0xFFFFFFFE is Blue Box uftIsPreemptiveTaskEnv + .short uftIsPreemptiveTask-baseR ; 0010 7 0xFFFFFFFF is Blue Box IsPreemptiveTask + + .short WhoaBaby-baseR ; 0011 0 impossible combination + .short WhoaBaby-baseR ; 0011 1 impossible combination + .short WhoaBaby-baseR ; 0011 2 impossible combination + .short WhoaBaby-baseR ; 0011 3 impossible combination + .short WhoaBaby-baseR ; 0011 4 impossible combination + .short WhoaBaby-baseR ; 0011 5 impossible combination + .short WhoaBaby-baseR ; 0011 6 impossible combination + .short WhoaBaby-baseR ; 0011 7 impossible combination + + .short WhoaBaby-baseR ; 0100 0 0x6000 is an impossible index (diagCall) + .short WhoaBaby-baseR ; 0100 1 0x6001 is an impossible index (vmm_get_version) + .short WhoaBaby-baseR ; 0100 2 0x6002 is an impossible index (vmm_get_features) + .short WhoaBaby-baseR ; 0100 3 0x6003 is an impossible index (vmm_init_context) + .short uftVMM-baseR ; 0100 4 0x6004 is vmm_dispatch (only some of which are UFTs) + .short WhoaBaby-baseR ; 0100 5 0x6005 is an impossible index (bb_enable_bluebox) + .short WhoaBaby-baseR ; 0100 6 0x6006 is an impossible index (bb_disable_bluebox) + .short WhoaBaby-baseR ; 0100 7 0x6007 is an impossible index (bb_settaskenv) + + .short uftNormalSyscall-baseR ; 0101 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR 
; 0101 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0101 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0101 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0101 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0101 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0101 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0101 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 0110 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0110 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0110 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0110 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0110 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0110 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0110 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0110 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 0111 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0111 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0111 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0111 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0111 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0111 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0111 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 0111 7 these syscalls are not in a reserved range + + .short uftCutTrace-baseR ; 1000 0 CutTrace + .short uftNormalSyscall-baseR ; 1000 1 these 
syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1000 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1000 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1000 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1000 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1000 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1000 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 1001 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1001 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1001 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1001 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1001 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1001 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1001 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1001 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 1010 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1010 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1010 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1010 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1010 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1010 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1010 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1010 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 1011 0 these syscalls are not in a reserved range + .short 
uftNormalSyscall-baseR ; 1011 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1011 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1011 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1011 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1011 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1011 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1011 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 1100 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1100 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1100 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1100 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1100 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1100 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1100 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1100 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 1101 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1101 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1101 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1101 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1101 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1101 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1101 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1101 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 1110 0 these syscalls are not in a 
reserved range + .short uftNormalSyscall-baseR ; 1110 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1110 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1110 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1110 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1110 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1110 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1110 7 these syscalls are not in a reserved range + + .short uftNormalSyscall-baseR ; 1111 0 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1111 1 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1111 2 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1111 3 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1111 4 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1111 5 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1111 6 these syscalls are not in a reserved range + .short uftNormalSyscall-baseR ; 1111 7 these syscalls are not in a reserved range .align 2 ; prepare for code @@ -744,86 +850,86 @@ scTable: ; ABC D * sprg3 = holds caller's r11 */ -; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs. +; Handle "vmm_dispatch" (0x6004), of which only some selectors are UFTs. uftVMM: - mtctr r13 ; restore caller's ctr - lwz r11,spcFlags(r11) ; get the special flags word from per_proc - mfcr r13 ; save caller's entire cr (we use all fields below) + mtctr r13 ; restore caller's ctr + lwz r11,spcFlags(r11) ; get the special flags word from per_proc + mfcr r13 ; save caller's entire cr (we use all fields below) rlwinm r11,r11,16,16,31 ; Extract spcFlags upper bits andi. 
r11,r11,hi16(runningVM|FamVMena|FamVMmode) cmpwi cr0,r11,hi16(runningVM|FamVMena|FamVMmode) ; Test in VM FAM - bne-- uftNormal80 ; not eligible for FAM UFTs + bne-- uftNormal80 ; not eligible for FAM UFTs cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest cmpwi cr2,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister cror cr1_eq,cr5_lt,cr2_gt ; Set true if out of VMM Fast syscall range - bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs) + bt-- cr1_eq,uftNormalFF ; Exit if out of range (the others are not UFTs) b EXT(vmm_ufp) ; handle UFT range of vmm_dispatch syscall - -; Handle blue box UFTs (syscalls -1 and -2). + +; Handle blue box UFTs (syscalls -1 and -2). uftIsPreemptiveTask: uftIsPreemptiveTaskEnv: - mtctr r13 ; restore caller's ctr - lwz r11,spcFlags(r11) ; get the special flags word from per_proc - mfcr r13,0x80 ; save caller's cr0 so we can use it - andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need + mtctr r13 ; restore caller's ctr + lwz r11,spcFlags(r11) ; get the special flags word from per_proc + mfcr r13,0x80 ; save caller's cr0 so we can use it + andi. r11,r11,bbNoMachSC|bbPreemptive ; Clear what we do not need cmplwi r11,bbNoMachSC ; See if we are trapping syscalls - blt-- uftNormal80 ; No... - cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv? + blt-- uftNormal80 ; No... + cmpwi r0,-2 ; is this call IsPreemptiveTaskEnv? 
rlwimi r13,r11,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq mfsprg r11,0 ; Get the per proc once more - bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask + bne++ uftRestoreThenRFI ; do not load r0 if IsPreemptiveTask lwz r0,ppbbTaskEnv(r11) ; Get the shadowed taskEnv (only difference) - b uftRestoreThenRFI ; restore modified cr0 and return + b uftRestoreThenRFI ; restore modified cr0 and return -; Handle "Thread Info" UFT (0x7FF2) +; Handle "Thread Info" UFT (0x7FF2) - .globl EXT(uft_uaw_nop_if_32bit) + .globl EXT(uft_uaw_nop_if_32bit) uftThreadInfo: - lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor + lwz r3,UAW+4(r11) ; get user assist word, assuming a 32-bit processor LEXT(uft_uaw_nop_if_32bit) - ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit) - mtctr r13 ; restore caller's ctr - b uftRFI ; done + ld r3,UAW(r11) ; get the whole doubleword if 64-bit (patched to nop if 32-bit) + mtctr r13 ; restore caller's ctr + b uftRFI ; done -; Handle "Facility Status" UFT (0x7FF3) +; Handle "Facility Status" UFT (0x7FF3) uftFacilityStatus: - lwz r3,spcFlags(r11) ; get "special flags" word from per_proc - mtctr r13 ; restore caller's ctr - b uftRFI ; done + lwz r3,spcFlags(r11) ; get "special flags" word from per_proc + mtctr r13 ; restore caller's ctr + b uftRFI ; done -; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work. +; Handle "Load MSR" UFT (0x7FF4). This is not used on 64-bit processors, though it would work. uftLoadMSR: - mfsrr1 r11 ; get caller's MSR - mtctr r13 ; restore caller's ctr - mfcr r13,0x80 ; save caller's cr0 so we can test PR - rlwinm. r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel? - bne- uftNormal80 ; do not permit from user mode - mfsprg r11,0 ; restore per_proc + mfsrr1 r11 ; get caller's MSR + mtctr r13 ; restore caller's ctr + mfcr r13,0x80 ; save caller's cr0 so we can test PR + rlwinm. 
r11,r11,0,MSR_PR_BIT,MSR_PR_BIT ; really in the kernel? + bne- uftNormal80 ; do not permit from user mode + mfsprg r11,0 ; restore per_proc mtsrr1 r3 ; Set new MSR -; Return to caller after UFT. When called: -; r11 = per_proc ptr -; r13 = callers cr0 in upper nibble (if uftRestoreThenRFI called) -; sprg2 = callers r13 -; sprg3 = callers r11 +; Return to caller after UFT. When called: +; r11 = per_proc ptr +; r13 = callers cr0 in upper nibble (if uftRestoreThenRFI called) +; sprg2 = callers r13 +; sprg3 = callers r11 -uftRestoreThenRFI: ; WARNING: can drop down to here - mtcrf 0x80,r13 ; restore caller's cr0 +uftRestoreThenRFI: ; WARNING: can drop down to here + mtcrf 0x80,r13 ; restore caller's cr0 uftRFI: - .globl EXT(uft_nop_if_32bit) + .globl EXT(uft_nop_if_32bit) LEXT(uft_nop_if_32bit) - b uftX64 ; patched to NOP if 32-bit processor + b uftX64 ; patched to NOP if 32-bit processor - lwz r11,pfAvailable(r11) ; Get the feature flags +uftX32: lwz r11,pfAvailable(r11) ; Get the feature flags mfsprg r13,2 ; Restore R13 mtsprg 2,r11 ; Set the feature flags mfsprg r11,3 ; Restore R11 @@ -839,21 +945,292 @@ uftX64: mtspr hsprg0,r14 ; Save a register in a Hypervisor SPRG mfspr r14,hsprg0 ; Restore R14 rfid ; Back to our guy... +; +; Quickly cut a trace table entry for the CutTrace firmware call. +; +; All registers except R11 and R13 are unchanged. +; +; Note that this code cuts a trace table entry for the CutTrace call only. +; An identical entry is made during normal interrupt processing. Any entry +; format entry changes made must be done in both places. 
+; + + .align 5 + + .globl EXT(uft_cuttrace) +LEXT(uft_cuttrace) +uftCutTrace: + b uftct64 ; patched to NOP if 32-bit processor + + stw r20,tempr0(r11) ; Save some work registers + lwz r20,dgFlags(0) ; Get the flags + stw r21,tempr1(r11) ; Save some work registers + mfsrr1 r21 ; Get the SRR1 + rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot + stw r25,tempr2(r11) ; Save some work registers + orc r20,r20,r21 ; Get ~PR | FC + mfcr r25 ; Save the CR + stw r22,tempr3(r11) ; Save some work registers + lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number + andi. r20,r20,MASK(MSR_PR) ; Set cr0_eq if we are in problem state and the validity bit is not set + stw r23,tempr4(r11) ; Save some work registers + lwz r23,traceMask(0) ; Get the trace mask + stw r24,tempr5(r11) ; Save some work registers + beq- ctbail32 ; Can not issue from user... + + + addi r24,r22,16 ; Get shift to move cpu mask to syscall mask + rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask + and. r24,r24,r23 ; See if both are on + +; +; We select a trace entry using a compare and swap on the next entry field. +; Since we do not lock the actual trace buffer, there is a potential that +; another processor could wrap and trash our entry. Who cares? +; + + li r23,trcWork ; Get the trace work area address + lwz r21,traceStart(0) ; Get the start of trace table + lwz r22,traceEnd(0) ; Get end of trace table + + beq-- ctdisa32 ; Leave because tracing is disabled... + +ctgte32: lwarx r20,0,r23 ; Get and reserve the next slot to allocate + addi r24,r20,LTR_size ; Point to the next trace entry + cmplw r24,r22 ; Do we need to wrap the trace table? + bne+ ctgte32s ; No wrap, we got us a trace entry... + + mr r24,r21 ; Wrap back to start + +ctgte32s: stwcx. r24,0,r23 ; Try to update the current pointer + bne- ctgte32 ; Collision, try again... 
+ +#if ESPDEBUG + dcbf 0,r23 ; Force to memory + sync +#endif + + dcbz 0,r20 ; Clear and allocate first trace line + li r24,32 ; Offset to next line + +ctgte32tb: mftbu r21 ; Get the upper time now + mftb r22 ; Get the lower time now + mftbu r23 ; Get upper again + cmplw r21,r23 ; Has it ticked? + bne- ctgte32tb ; Yes, start again... + + dcbz r24,r20 ; Clean second line + +; +; Let us cut that trace entry now. +; +; Note that this code cuts a trace table entry for the CutTrace call only. +; An identical entry is made during normal interrupt processing. Any entry +; format entry changes made must be done in both places. +; + + lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number + li r23,T_SYSTEM_CALL ; Get the system call id + mtctr r13 ; Restore the caller's CTR + sth r24,LTR_cpu(r20) ; Save processor number + li r24,64 ; Offset to third line + sth r23,LTR_excpt(r20) ; Set the exception code + dcbz r24,r20 ; Clean 3rd line + mfspr r23,dsisr ; Get the DSISR + stw r21,LTR_timeHi(r20) ; Save top of time stamp + li r24,96 ; Offset to fourth line + mflr r21 ; Get the LR + dcbz r24,r20 ; Clean 4th line + stw r22,LTR_timeLo(r20) ; Save bottom of time stamp + mfsrr0 r22 ; Get SRR0 + stw r25,LTR_cr(r20) ; Save CR + mfsrr1 r24 ; Get the SRR1 + stw r23,LTR_dsisr(r20) ; Save DSISR + stw r22,LTR_srr0+4(r20) ; Save SRR0 + mfdar r23 ; Get DAR + stw r24,LTR_srr1+4(r20) ; Save SRR1 + stw r23,LTR_dar+4(r20) ; Save DAR + stw r21,LTR_lr+4(r20) ; Save LR + + stw r13,LTR_ctr+4(r20) ; Save CTR + stw r0,LTR_r0+4(r20) ; Save register + stw r1,LTR_r1+4(r20) ; Save register + stw r2,LTR_r2+4(r20) ; Save register + stw r3,LTR_r3+4(r20) ; Save register + stw r4,LTR_r4+4(r20) ; Save register + stw r5,LTR_r5+4(r20) ; Save register + stw r6,LTR_r6+4(r20) ; Save register + +#if 0 + lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner + stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner +#endif + +#if ESPDEBUG + addi r21,r20,32 ; Second line + addi r22,r20,64 ; 
Third line + dcbst 0,r20 ; Force to memory + dcbst 0,r21 ; Force to memory + addi r21,r22,32 ; Fourth line + dcbst 0,r22 ; Force to memory + dcbst 0,r21 ; Force to memory + sync ; Make sure it all goes +#endif + +ctdisa32: mtcrf 0x80,r25 ; Restore the used condition register field + lwz r20,tempr0(r11) ; Restore work register + lwz r21,tempr1(r11) ; Restore work register + lwz r25,tempr2(r11) ; Restore work register + mtctr r13 ; Restore the caller's CTR + lwz r22,tempr3(r11) ; Restore work register + lwz r23,tempr4(r11) ; Restore work register + lwz r24,tempr5(r11) ; Restore work register + b uftX32 ; Go restore the rest and go... + +ctbail32: mtcrf 0x80,r25 ; Restore the used condition register field + lwz r20,tempr0(r11) ; Restore work register + lwz r21,tempr1(r11) ; Restore work register + lwz r25,tempr2(r11) ; Restore work register + mtctr r13 ; Restore the caller's CTR + lwz r22,tempr3(r11) ; Restore work register + lwz r23,tempr4(r11) ; Restore work register + b uftNormalSyscall ; Go pass it on along... + +; +; This is the 64-bit version. +; + +uftct64: std r20,tempr0(r11) ; Save some work registers + lwz r20,dgFlags(0) ; Get the flags + std r21,tempr1(r11) ; Save some work registers + mfsrr1 r21 ; Get the SRR1 + rlwinm r20,r20,MSR_PR_BIT-enaUsrFCallb,MASK(MSR_PR) ; Shift the validity bit over to pr bit spot + std r25,tempr2(r11) ; Save some work registers + orc r20,r20,r21 ; Get ~PR | FC + mfcr r25 ; Save the CR + std r22,tempr3(r11) ; Save some work registers + lhz r22,PP_CPU_NUMBER(r11) ; Get the logical processor number + andi. r20,r20,MASK(MSR_PR) ; Set cr0_eq when we are in problem state and the validity bit is not set + std r23,tempr4(r11) ; Save some work registers + lwz r23,traceMask(0) ; Get the trace mask + std r24,tempr5(r11) ; Save some work registers + beq-- ctbail64 ; Can not issue from user... + + addi r24,r22,16 ; Get shift to move cpu mask to syscall mask + rlwnm r24,r23,r24,12,12 ; Shift cpu mask bit to rupt type mask + and. 
r24,r24,r23 ; See if both are on + +; +; We select a trace entry using a compare and swap on the next entry field. +; Since we do not lock the actual trace buffer, there is a potential that +; another processor could wrap and trash our entry. Who cares? +; + + li r23,trcWork ; Get the trace work area address + lwz r21,traceStart(0) ; Get the start of trace table + lwz r22,traceEnd(0) ; Get end of trace table + + beq-- ctdisa64 ; Leave because tracing is disabled... -; Handle a system call that is not a UFT and which thus goes upstairs. +ctgte64: lwarx r20,0,r23 ; Get and reserve the next slot to allocate + addi r24,r20,LTR_size ; Point to the next trace entry + cmplw r24,r22 ; Do we need to wrap the trace table? + bne++ ctgte64s ; No wrap, we got us a trace entry... + + mr r24,r21 ; Wrap back to start -uftNormalFF: ; here with entire cr in r13 - mtcr r13 ; restore all 8 fields +ctgte64s: stwcx. r24,0,r23 ; Try to update the current pointer + bne-- ctgte64 ; Collision, try again... + +#if ESPDEBUG + dcbf 0,r23 ; Force to memory + sync +#endif + + dcbz128 0,r20 ; Zap the trace entry + + mftb r21 ; Get the time + +; +; Let us cut that trace entry now. +; +; Note that this code cuts a trace table entry for the CutTrace call only. +; An identical entry is made during normal interrupt processing. Any entry +; format changes made must be done in both places. 
+; + + lhz r24,PP_CPU_NUMBER(r11) ; Get the logical processor number + li r23,T_SYSTEM_CALL ; Get the system call id + sth r24,LTR_cpu(r20) ; Save processor number + sth r23,LTR_excpt(r20) ; Set the exception code + mfspr r23,dsisr ; Get the DSISR + std r21,LTR_timeHi(r20) ; Save top of time stamp + mflr r21 ; Get the LR + mfsrr0 r22 ; Get SRR0 + stw r25,LTR_cr(r20) ; Save CR + mfsrr1 r24 ; Get the SRR1 + stw r23,LTR_dsisr(r20) ; Save DSISR + std r22,LTR_srr0(r20) ; Save SRR0 + mfdar r23 ; Get DAR + std r24,LTR_srr1(r20) ; Save SRR1 + std r23,LTR_dar(r20) ; Save DAR + std r21,LTR_lr(r20) ; Save LR + + std r13,LTR_ctr(r20) ; Save CTR + std r0,LTR_r0(r20) ; Save register + std r1,LTR_r1(r20) ; Save register + std r2,LTR_r2(r20) ; Save register + std r3,LTR_r3(r20) ; Save register + std r4,LTR_r4(r20) ; Save register + std r5,LTR_r5(r20) ; Save register + std r6,LTR_r6(r20) ; Save register + +#if 0 + lwz r21,FPUowner(r11) ; (TEST/DEBUG) Get the current floating point owner + stw r21,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner +#endif + +#if ESPDEBUG + dcbf 0,r20 ; Force to memory + sync ; Make sure it all goes +#endif + +ctdisa64: mtcrf 0x80,r25 ; Restore the used condition register field + ld r20,tempr0(r11) ; Restore work register + ld r21,tempr1(r11) ; Restore work register + ld r25,tempr2(r11) ; Restore work register + mtctr r13 ; Restore the caller's CTR + ld r22,tempr3(r11) ; Restore work register + ld r23,tempr4(r11) ; Restore work register + ld r24,tempr5(r11) ; Restore work register + b uftX64 ; Go restore the rest and go... + +ctbail64: mtcrf 0x80,r25 ; Restore the used condition register field + ld r20,tempr0(r11) ; Restore work register + ld r21,tempr1(r11) ; Restore work register + ld r25,tempr2(r11) ; Restore work register + mtctr r13 ; Restore the caller's CTR + ld r22,tempr3(r11) ; Restore work register + ld r23,tempr4(r11) ; Restore work register + li r11,T_SYSTEM_CALL|T_FAM ; Set system code call + b extEntry64 ; Go straight to the 64-bit code... 
+ + + +; Handle a system call that is not a UFT and which thus goes upstairs. + +uftNormalFF: ; here with entire cr in r13 + mtcr r13 ; restore all 8 fields b uftNormalSyscall1 ; Join common... - -uftNormal80: ; here with callers cr0 in r13 - mtcrf 0x80,r13 ; restore cr0 + +uftNormal80: ; here with callers cr0 in r13 + mtcrf 0x80,r13 ; restore cr0 b uftNormalSyscall1 ; Join common... - -uftNormalSyscall: ; r13 = callers ctr - mtctr r13 ; restore ctr + +uftNormalSyscall: ; r13 = callers ctr + mtctr r13 ; restore ctr uftNormalSyscall1: - li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through) + li r11,T_SYSTEM_CALL|T_FAM ; this is a system call (and fall through) /*<><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>*/ @@ -880,7 +1257,7 @@ uftNormalSyscall1: * misses, so these stores won't take all that long. Except the first line that is because * we can't do a DCBZ if the L1 D-cache is off. The rest we will skip if they are * off also. - * + * * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions * are ignored. */ @@ -1171,7 +1548,7 @@ noPerfMonSave32: lwz r25,traceMask(0) ; Get the trace mask li r0,SAVgeneral ; Get the savearea type value lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number - rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2 + rlwinm r22,r11,30,0,31 ; Divide interrupt code by 4 stb r0,SAVflags+2(r13) ; Mark valid context addi r22,r22,10 ; Adjust code so we shift into CR5 li r23,trcWork ; Get the trace work area address @@ -1227,6 +1604,10 @@ gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer ; ; Let us cut that trace entry now. +; +; Note that this code cuts a trace table entry for everything but the CutTrace call. +; An identical entry is made during normal CutTrace processing. Any entry +; format changes made must be done in both places. 
; lwz r16,ruptStamp(r2) ; Get top of time base @@ -1551,7 +1932,7 @@ trcselSF: lwarx r20,0,r23 ; Get and reserve the next slot to allocate addi r22,r20,LTR_size ; Point to the next trace entry cmplw r22,r26 ; Do we need to wrap the trace table? - bne+ gotTrcEntSF ; No wrap, we got us a trace entry... + bne++ gotTrcEntSF ; No wrap, we got us a trace entry... mr r22,r25 ; Wrap back to start @@ -1566,6 +1947,10 @@ gotTrcEntSF: ; ; Let us cut that trace entry now. +; +; Note that this code cuts a trace table entry for everything but the CutTrace call. +; An identical entry is made during normal CutTrace processing. Any entry +; format changes made must be done in both places. ; dcbz128 0,r20 ; Zap the trace entry @@ -1608,6 +1993,10 @@ gotTrcEntSF: std r13,LTR_save(r20) ; Save the savearea stw r17,LTR_dsisr(r20) ; Save the DSISR sth r11,LTR_excpt(r20) ; Save the exception type +#if 0 + lwz r17,FPUowner(r2) ; (TEST/DEBUG) Get the current floating point owner + stw r17,LTR_rsvd0(r20) ; (TEST/DEBUG) Record the owner +#endif #if ESPDEBUG dcbf 0,r20 ; Force to memory diff --git a/osfmk/ppc/model_dep.c b/osfmk/ppc/model_dep.c index ca0f8a3ef..db35a19ae 100644 --- a/osfmk/ppc/model_dep.c +++ b/osfmk/ppc/model_dep.c @@ -183,6 +183,7 @@ char *failNames[] = { "Corrupt skip lists", /* failSkipLists */ "Unaligned stack", /* failUnalignedStk */ "Invalid pmap", /* failPmap */ + "Lock timeout", /* failTimeout */ "Unknown failure code" /* Unknown failure code - must always be last */ }; diff --git a/osfmk/ppc/pcb.c b/osfmk/ppc/pcb.c index d3c70db49..653b9b3f3 100644 --- a/osfmk/ppc/pcb.c +++ b/osfmk/ppc/pcb.c @@ -244,7 +244,7 @@ machine_thread_create( * at the base of the kernel stack (see stack_attach()). 
*/ - thread->machine.upcb = sv; /* Set user pcb */ + thread->machine.upcb = sv; /* Set user pcb */ sv->save_srr1 = (uint64_t)MSR_EXPORT_MASK_SET; /* Set the default user MSR */ if(task_has_64BitAddr(task)) sv->save_srr1 |= (uint64_t)MASK32(MSR_SF) << 32; /* If 64-bit task, force 64-bit mode */ sv->save_fpscr = 0; /* Clear all floating point exceptions */ @@ -269,6 +269,7 @@ machine_thread_destroy( register savearea_fpu *fsv, *fpsv; register savearea *svp; register int i; + boolean_t intr; /* * This function will release all context. @@ -281,10 +282,12 @@ machine_thread_destroy( * Walk through and release all floating point and vector contexts. Also kill live context. * */ + + intr = ml_set_interrupts_enabled(FALSE); /* Disable for interruptions */ - toss_live_vec(thread->machine.curctx); /* Dump live vectors */ + toss_live_vec(thread->machine.curctx); /* Dump live vectors */ - vsv = thread->machine.curctx->VMXsave; /* Get the top vector savearea */ + vsv = thread->machine.curctx->VMXsave; /* Get the top vector savearea */ while(vsv) { /* Any VMX saved state? */ vpsv = vsv; /* Remember so we can toss this */ @@ -292,11 +295,11 @@ machine_thread_destroy( save_release((savearea *)vpsv); /* Release it */ } - thread->machine.curctx->VMXsave = 0; /* Kill chain */ + thread->machine.curctx->VMXsave = 0; /* Kill chain */ - toss_live_fpu(thread->machine.curctx); /* Dump live float */ + toss_live_fpu(thread->machine.curctx); /* Dump live float */ - fsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */ + fsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */ while(fsv) { /* Any float saved state? */ fpsv = fsv; /* Remember so we can toss this */ @@ -304,13 +307,13 @@ machine_thread_destroy( save_release((savearea *)fpsv); /* Release it */ } - thread->machine.curctx->FPUsave = 0; /* Kill chain */ + thread->machine.curctx->FPUsave = 0; /* Kill chain */ /* * free all regular saveareas. 
*/ - pcb = thread->machine.pcb; /* Get the general savearea */ + pcb = thread->machine.pcb; /* Get the general savearea */ while(pcb) { /* Any float saved state? */ ppsv = pcb; /* Remember so we can toss this */ @@ -319,6 +322,9 @@ machine_thread_destroy( } hw_atomic_sub((uint32_t *)&saveanchor.savetarget, 4); /* Unaccount for the number of saveareas we think we "need" */ + + (void) ml_set_interrupts_enabled(intr); /* Restore interrupts if enabled */ + } /* @@ -326,7 +332,9 @@ machine_thread_destroy( * release saveareas associated with an act. if flag is true, release * user level savearea(s) too, else don't * - * this code cannot block so we call the proper save area free routine + * This code must run with interruptions disabled because an interrupt handler could use + * floating point and/or vectors. If this happens and the thread we are blowing off owns + * the facility, we can deadlock. */ void act_machine_sv_free(thread_t act) @@ -336,6 +344,7 @@ act_machine_sv_free(thread_t act) register savearea_fpu *fsv, *fpst, *fsvt; register savearea *svp; register int i; + boolean_t intr; /* * This function will release all non-user state context. @@ -355,22 +364,23 @@ act_machine_sv_free(thread_t act) * Then we unlock. Next, all of the old kernel contexts are released. * */ - + + intr = ml_set_interrupts_enabled(FALSE); /* Disable for interruptions */ + if(act->machine.curctx->VMXlevel) { /* Is the current level user state? 
*/ toss_live_vec(act->machine.curctx); /* Dump live vectors if is not user */ - - vsv = act->machine.curctx->VMXsave; /* Get the top vector savearea */ - while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Find user context if any */ - if(!hw_lock_to((hw_lock_t)&act->machine.curctx->VMXsync, LockTimeOut)) { /* Get the sync lock */ panic("act_machine_sv_free - timeout getting VMX sync lock\n"); /* Tell all and die */ } + + vsv = act->machine.curctx->VMXsave; /* Get the top vector savearea */ + while(vsv && vsv->save_hdr.save_level) vsv = (savearea_vec *)vsv->save_hdr.save_prev; /* Find user context if any */ - vsvt = act->machine.curctx->VMXsave; /* Get the top of the chain */ + vsvt = act->machine.curctx->VMXsave; /* Get the top of the chain */ act->machine.curctx->VMXsave = vsv; /* Point to the user context */ - act->machine.curctx->VMXlevel = 0; /* Set the level to user */ + act->machine.curctx->VMXlevel = 0; /* Set the level to user */ hw_lock_unlock((hw_lock_t)&act->machine.curctx->VMXsync); /* Unlock */ while(vsvt) { /* Clear any VMX saved state */ @@ -386,17 +396,16 @@ act_machine_sv_free(thread_t act) toss_live_fpu(act->machine.curctx); /* Dump live floats if is not user */ - fsv = act->machine.curctx->FPUsave; /* Get the top floats savearea */ - - while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Find user context if any */ - if(!hw_lock_to((hw_lock_t)&act->machine.curctx->FPUsync, LockTimeOut)) { /* Get the sync lock */ panic("act_machine_sv_free - timeout getting FPU sync lock\n"); /* Tell all and die */ } - fsvt = act->machine.curctx->FPUsave; /* Get the top of the chain */ + fsv = act->machine.curctx->FPUsave; /* Get the top floats savearea */ + while(fsv && fsv->save_hdr.save_level) fsv = (savearea_fpu *)fsv->save_hdr.save_prev; /* Find user context if any */ + + fsvt = act->machine.curctx->FPUsave; /* Get the top of the chain */ act->machine.curctx->FPUsave = fsv; /* Point to 
the user context */ - act->machine.curctx->FPUlevel = 0; /* Set the level to user */ + act->machine.curctx->FPUlevel = 0; /* Set the level to user */ hw_lock_unlock((hw_lock_t)&act->machine.curctx->FPUsync); /* Unlock */ while(fsvt) { /* Clear any VMX saved state */ @@ -426,18 +435,19 @@ act_machine_sv_free(thread_t act) } act->machine.pcb = userpcb; /* Chain in the user if there is one, or 0 if not */ - + (void) ml_set_interrupts_enabled(intr); /* Restore interrupts if enabled */ + } void machine_act_terminate( thread_t act) { - if(act->machine.bbDescAddr) { /* Check if the Blue box assist is active */ + if(act->machine.bbDescAddr) { /* Check if the Blue box assist is active */ disable_bluebox_internal(act); /* Kill off bluebox */ } - if(act->machine.vmmControl) { /* Check if VMM is active */ + if(act->machine.vmmControl) { /* Check if VMM is active */ vmm_tear_down_all(act); /* Kill off all VMM contexts */ } } @@ -649,7 +659,7 @@ machine_stack_handoff( if (branch_tracing_enabled()) ppinfo->cpu_flags |= traceBE; - if(trcWork.traceMask) dbgTrace(0x12345678, (unsigned int)old, (unsigned int)new, 0); /* Cut trace entry if tracing */ + if(trcWork.traceMask) dbgTrace(0x9903, (unsigned int)old, (unsigned int)new, 0, 0); /* Cut trace entry if tracing */ return; } diff --git a/osfmk/ppc/ppc_init.c b/osfmk/ppc/ppc_init.c index 0ed30e1b0..71168cd3a 100644 --- a/osfmk/ppc/ppc_init.c +++ b/osfmk/ppc/ppc_init.c @@ -82,6 +82,7 @@ extern unsigned int memcpy_nop_if_32bit; extern unsigned int xsum_nop_if_32bit; extern unsigned int uft_nop_if_32bit; extern unsigned int uft_uaw_nop_if_32bit; +extern unsigned int uft_cuttrace; int forcenap = 0; int wcte = 0; /* Non-cache gather timer disabled */ @@ -113,6 +114,7 @@ patch_entry_t patch_table[] = { {&xsum_nop_if_32bit, 0x60000000, PATCH_FEATURE, PatchExt32}, {&uft_nop_if_32bit, 0x60000000, PATCH_FEATURE, PatchExt32}, {&uft_uaw_nop_if_32bit, 0x60000000, PATCH_FEATURE, PatchExt32}, + {&uft_cuttrace, 0x60000000, PATCH_FEATURE, 
PatchExt32}, {NULL, 0x00000000, PATCH_END_OF_TABLE, 0} }; diff --git a/osfmk/ppc/status.c b/osfmk/ppc/status.c index a0545f6fe..d481829fe 100644 --- a/osfmk/ppc/status.c +++ b/osfmk/ppc/status.c @@ -1595,16 +1595,18 @@ void act_thread_catt(void *ctx) { thread = current_thread(); - toss_live_fpu(thread->machine.curctx); /* Toss my floating point if live anywhere */ - toss_live_vec(thread->machine.curctx); /* Toss my vector if live anywhere */ + act_machine_sv_free(thread); /* Blow away any current kernel FP or vector. + We do not support those across a vfork */ + toss_live_fpu(thread->machine.curctx); /* Toss my floating point if live anywhere */ + toss_live_vec(thread->machine.curctx); /* Toss my vector if live anywhere */ sv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */ sv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */ sv->save_hdr.save_act = thread; - spc = (unsigned int)thread->map->pmap->space; /* Get the space we're in */ + spc = (unsigned int)thread->map->pmap->space; /* Get the space we're in */ - osv = thread->machine.pcb; /* Get the top general savearea */ + osv = thread->machine.pcb; /* Get the top general savearea */ psv = 0; while(osv) { /* Any saved state? */ if(osv->save_srr1 & MASK(MSR_PR)) break; /* Leave if this is user state */ @@ -1635,21 +1637,24 @@ void act_thread_catt(void *ctx) { if(ovsv) { /* Did we find one? */ if(pvsv) pvsv->save_hdr.save_prev = 0; /* Yes, clear pointer to it (it should always be last) or */ - else thread->machine.curctx->VMXsave = 0; /* to the start if the only one */ + else thread->machine.curctx->VMXsave = 0; /* to the start if the only one */ save_release((savearea *)ovsv); /* Nope, release it */ } if(vsv) { /* Are we sticking any vector on this one? 
*/ if(pvsv) pvsv->save_hdr.save_prev = (addr64_t)((uintptr_t)vsv); /* Yes, chain us to the end or */ - else thread->machine.curctx->VMXsave = vsv; /* to the start if the only one */ + else { + thread->machine.curctx->VMXsave = vsv; /* to the start if the only one */ + thread->machine.curctx->VMXlevel = 0; /* Insure that we don't have a leftover level */ + } vsv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */ vsv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */ vsv->save_hdr.save_act = thread; } - ofsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */ + ofsv = thread->machine.curctx->FPUsave; /* Get the top float savearea */ pfsv = 0; while(ofsv) { /* Any float saved state? */ @@ -1660,14 +1665,17 @@ void act_thread_catt(void *ctx) { if(ofsv) { /* Did we find one? */ if(pfsv) pfsv->save_hdr.save_prev = 0; /* Yes, clear pointer to it (it should always be last) or */ - else thread->machine.curctx->FPUsave = 0; /* to the start if the only one */ + else thread->machine.curctx->FPUsave = 0; /* to the start if the only one */ save_release((savearea *)ofsv); /* Nope, release it */ } if(fsv) { /* Are we sticking any vector on this one? */ if(pfsv) pfsv->save_hdr.save_prev = (addr64_t)((uintptr_t)fsv); /* Yes, chain us to the end or */ - else thread->machine.curctx->FPUsave = fsv; /* to the start if the only one */ + else { + thread->machine.curctx->FPUsave = fsv; /* to the start if the only one */ + thread->machine.curctx->FPUlevel = 0; /* Insure that we don't have a leftover level */ + } fsv->save_hdr.save_misc2 = 0; /* Eye catcher for debug */ fsv->save_hdr.save_misc3 = 0; /* Eye catcher for debug */ -- 2.45.2