* have the ASCII name of the userid.
*/
if (VFS_VGET(HFSTOVFS(hfsmp), &parid, &ddvp) == 0) {
- if (VTOC(ddvp)->c_desc.cd_nameptr &&
- (cp->c_uid == strtoul(VTOC(ddvp)->c_desc.cd_nameptr, 0, 0))) {
+ if (VTOC(ddvp)->c_desc.cd_nameptr) {
+ uid_t uid;
+
+ uid = strtoul(VTOC(ddvp)->c_desc.cd_nameptr, 0, 0);
+ if (uid == cp->c_uid || uid == cnp->cn_cred->cr_uid) {
cp->c_flags |= UF_NODUMP;
cp->c_flag |= C_CHANGE;
+ }
}
vput(ddvp);
}
register struct pcred *pc = p->p_cred;
int error;
- error = suser(pc->pc_ucred, &p->p_acflag);
- if (error)
- return (error);
-
error = copyout((void *)&p->p_au->ai_auid, (void *)uap->auid,
sizeof(*uap->auid));
if (error)
/*
* System calls to get and set process audit information.
+ * If the caller is privileged, they get the whole set of
+ * audit information. Otherwise, the real audit mask is
+ * filtered out - but the rest of the information is
+ * returned.
*/
struct getaudit_args {
struct auditinfo *auditinfo;
getaudit(struct proc *p, struct getaudit_args *uap, register_t *retval)
{
register struct pcred *pc = p->p_cred;
+ struct auditinfo ai = *p->p_au;
int error;
+ /* only superuser gets to see the real mask */
error = suser(pc->pc_ucred, &p->p_acflag);
- if (error)
- return (error);
- error = copyout((void *)p->p_au, (void *)uap->auditinfo,
- sizeof(*uap->auditinfo));
+ if (error) {
+ ai.ai_mask.am_success = ~0;
+ ai.ai_mask.am_failure = ~0;
+ }
+
+ error = copyout((void *)&ai, (void *)uap->auditinfo, sizeof(ai));
if (error)
return (error);
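
Seen from user space, the change above means an unprivileged process can still call getaudit(2) and get sensible results back, just with the preselection mask blinded. A minimal sketch, assuming the standard <bsm/audit.h> wrapper (the error handling and output format are illustrative):

    #include <bsm/audit.h>
    #include <stdio.h>

    int
    show_audit_info(void)
    {
            struct auditinfo ai;

            if (getaudit(&ai) < 0)
                    return (-1);
            /* Without privilege, am_success/am_failure read back as all-ones. */
            printf("auid=%lu success=%08x failure=%08x\n",
                (unsigned long)ai.ai_auid, ai.ai_mask.am_success,
                ai.ai_mask.am_failure);
            return (0);
    }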
int
getaudit_addr(struct proc *p, struct getaudit_addr_args *uap, register_t *retval)
{
- register struct pcred *pc = p->p_cred;
- int error;
-
- error = suser(pc->pc_ucred, &p->p_acflag);
- if (error)
- return (error);
return (ENOSYS);
}
kau_write(rec, tok);
/* fall thru */
+ case AUE_OPEN:
case AUE_OPEN_R:
case AUE_OPEN_RT:
case AUE_OPEN_RW:
break;
case AUE_SYSCTL:
+ case AUE_SYSCTL_NONADMIN:
if (ar->ar_valid_arg & (ARG_CTLNAME | ARG_LEN)) {
for (ctr = 0; ctr < ar->ar_arg_len; ctr++) {
tok = au_to_arg32(1, "name", ar->ar_arg_ctlname[ctr]);
int execve(struct proc *p, struct execve_args *uap, register_t *retval);
static int execargs_alloc(vm_offset_t *addrp);
static int execargs_free(vm_offset_t addr);
+static int sugid_scripts = 0;
+SYSCTL_INT (_kern, OID_AUTO, sugid_scripts, CTLFLAG_RW, &sugid_scripts, 0, "");
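
The new knob defaults to off, so set-id bits on interpreter scripts are ignored unless an administrator opts back in. A hedged userland sketch of flipping it, assuming the usual sysctlbyname(3) interface (writing requires root; the wrapper name is hypothetical):

    #include <sys/types.h>
    #include <sys/sysctl.h>

    int
    set_sugid_scripts(int on)
    {
            /* kern.sugid_scripts: 0 = strip VSUID/VSGID from "#!" scripts */
            return (sysctlbyname("kern.sugid_scripts", NULL, NULL,
                &on, sizeof(on)));
    }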
int
execv(p, args, retval)
error = EBADARCH;
goto bad;
}
+
+ /* Check to see if SUGID scripts are permitted. If they aren't then
+ * clear the SUGID bits.
+ */
+ if (sugid_scripts == 0) {
+ origvattr.va_mode &= ~(VSUID | VSGID);
+ }
+
cp = &exdata.ex_shell[2]; /* skip "#!" */
while (cp < &exdata.ex_shell[SHSIZE]) {
if (*cp == '\t') /* convert all tabs to spaces */
thread_dup(newth);
/* p2 = newth->task->proc; */
p2 = (struct proc *)(get_bsdtask_info(get_threadtask(newth)));
+ set_security_token(p2); /* propagate change of PID */
AUDIT_ARG(pid, p2->p_pid);
)
{
struct machine_slot *ms;
- int ncmds;
+ uint32_t ncmds;
struct load_command *lcp, *next;
struct dylinker_command *dlp = 0;
void * pager;
load_return_t ret = LOAD_SUCCESS;
vm_offset_t addr, kl_addr;
vm_size_t size,kl_size;
- int offset;
+ size_t offset;
+ size_t oldoffset; /* for overflow check */
int pass;
struct proc *p = current_proc(); /* XXXX */
int error;
* Scan through the commands, processing each one as necessary.
*/
for (pass = 1; pass <= 2; pass++) {
+ /*
+ * Loop through each of the load_commands indicated by the
+ * Mach-O header; if an absurd value is provided, we just
+ * run off the end of the reserved section by incrementing
+ * the offset too far, so we are implicitly fail-safe.
+ */
offset = sizeof(struct mach_header);
ncmds = header->ncmds;
while (ncmds--) {
* Get a pointer to the command.
*/
lcp = (struct load_command *)(addr + offset);
+ oldoffset = offset;
offset += lcp->cmdsize;
/*
- * Check for valid lcp pointer by checking
- * next offset.
+ * Perform prevalidation of the struct load_command
+ * before we attempt to use its contents. Invalid
+ * values are ones which result in an overflow, or
+ * which can not possibly be valid commands, or which
+ * straddle or exist past the reserved section at the
+ * start of the image.
*/
- if (offset > header->sizeofcmds
- + sizeof(struct mach_header)) {
- if (kl_addr )
- kfree(kl_addr, kl_size);
- return(LOAD_BADMACHO);
+ if (oldoffset > offset ||
+ lcp->cmdsize < sizeof(struct load_command) ||
+ offset > header->sizeofcmds + sizeof(struct mach_header)) {
+ ret = LOAD_BADMACHO;
+ break;
}
/*
- * Check for valid command.
+ * Act on struct load_command's for which kernel
+ * intervention is required.
*/
switch(lcp->cmd) {
case LC_SEGMENT:
ret = LOAD_FAILURE;
break;
default:
- ret = LOAD_SUCCESS;/* ignore other stuff */
+ /* Other commands are ignored by the kernel */
+ ret = LOAD_SUCCESS;
}
if (ret != LOAD_SUCCESS)
break;
extern struct sysctl_oid sysctl__kern_ipc_sosendminchain;
extern struct sysctl_oid sysctl__kern_ipc_sorecvmincopy;
extern struct sysctl_oid sysctl__kern_ipc_maxsockets;
+extern struct sysctl_oid sysctl__kern_sugid_scripts;
extern struct sysctl_oid sysctl__net_inet_icmp_icmplim;
extern struct sysctl_oid sysctl__net_inet_icmp_maskrepl;
extern struct sysctl_oid sysctl__net_inet_icmp_timestamp;
,&sysctl__kern_ipc_sosendminchain
,&sysctl__kern_ipc_sorecvmincopy
,&sysctl__kern_ipc_maxsockets
+ ,&sysctl__kern_sugid_scripts
,&sysctl__hw_machine
,&sysctl__hw_model
UNLOCK_AND_RETURN(eval);
}
- if (nsops > MAX_SOPS) {
+ if (nsops < 0 || nsops > MAX_SOPS) {
#ifdef SEM_DEBUG
printf("too many sops (max=%d, nsops=%d)\n", MAX_SOPS, nsops);
#endif
#define DBG_FNC_SORECEIVE NETDBG_CODE(DBG_NETSOCK, (8 << 8))
#define DBG_FNC_SOSHUTDOWN NETDBG_CODE(DBG_NETSOCK, (9 << 8))
+#define MAX_SOOPTGETM_SIZE (128 * MCLBYTES)
SYSCTL_DECL(_kern_ipc);
struct mbuf *m, *m_prev;
int sopt_size = sopt->sopt_valsize;
+ if (sopt_size > MAX_SOOPTGETM_SIZE)
+ return EMSGSIZE;
+
MGET(m, sopt->sopt_p ? M_WAIT : M_DONTWAIT, MT_DATA);
if (m == 0)
return ENOBUFS;
struct mbuf *m;
int optname;
+ if (sopt->sopt_valsize > MCLBYTES) {
+ error = EMSGSIZE;
+ break;
+ }
+
if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */
break;
if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */
if (so != ip6_mrouter && sopt->sopt_name != MRT6_INIT)
return (EACCES);
+ if (sopt->sopt_valsize > MCLBYTES)
+ return (EMSGSIZE);
+
+
if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */
return (error);
if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */
{
struct mbuf *m;
+ if (sopt->sopt_valsize > MCLBYTES) {
+ error = EMSGSIZE;
+ break;
+ }
error = soopt_getm(sopt, &m); /* XXX */
if (error != NULL)
break;
size_t len = 0;
struct mbuf *m;
+ if (sopt->sopt_valsize > MCLBYTES) {
+ error = EMSGSIZE;
+ break;
+ }
if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */
break;
if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */
struct mbuf *m = NULL;
struct mbuf **mp = &m;
+ if (sopt->sopt_valsize > MCLBYTES) {
+ error = EMSGSIZE;
+ break;
+ }
error = soopt_getm(sopt, &m); /* XXX */
if (error != NULL)
break;
return (EPROGMISMATCH);
#endif /* !NO_COMPAT_PRELITE2 */
}
- if (args.fhsize > NFSX_V3FHMAX)
+ if (args.fhsize < 0 || args.fhsize > NFSX_V3FHMAX)
return (EINVAL);
error = copyin((caddr_t)args.fh, (caddr_t)nfh, args.fhsize);
if (error)
#define FSOPT_NOFOLLOW 0x00000001
#define FSOPT_NOINMEMUPDATE 0x00000002
+/* we currently aren't anywhere near this amount for a valid
+ * fssearchblock.sizeofsearchparams1 or fssearchblock.sizeofsearchparams2
+ * but we put a sanity check in to avoid abuse of the value passed in from
+ * user land.
+ */
+#define SEARCHFS_MAX_SEARCHPARMS 4096
+
typedef u_int32_t text_encoding_t;
typedef u_int32_t fsobj_type_t;
if (error = copyin((caddr_t) uap->searchblock, (caddr_t) &searchblock,sizeof(struct fssearchblock)))
return(error);
+ /* Do a sanity check on sizeofsearchparams1 and sizeofsearchparams2.
+ */
+ if (searchblock.sizeofsearchparams1 > SEARCHFS_MAX_SEARCHPARMS ||
+ searchblock.sizeofsearchparams2 > SEARCHFS_MAX_SEARCHPARMS)
+ return(EINVAL);
+
/* Now malloc a big bunch of space to hold the search parameters, the attrlists and the search state. */
/* It all has to go into local memory and it's not that big, so we might as well put it all together. */
/* Searchparams1 shall be first so we might as well use that to hold the base address of the allocated*/
_ml_thrm_init
_ml_thrm_set
_ml_throttle
+_ml_mem_backoff
_mtdar
_mtdec
_mtmmcr0
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>System Resource Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>System Resource Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kernel</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.driver.AppleNMI</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleRequired</key>
<string>Root</string>
<key>OSKernelResource</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>Apple Platform Family Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>Apple Platform Family Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.ApplePlatformFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0</string>
<key>OSBundleRequired</key>
<key>CFBundleExecutable</key>
<string>BSDKernel</string>
<key>CFBundleGetInfoString</key>
- <string>BSD Kernel Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>BSD Kernel Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.bsd</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
<string>7.0</string>
<key>OSBundleRequired</key>
<key>CFBundleExecutable</key>
<string>IOKit</string>
<key>CFBundleGetInfoString</key>
- <string>I/O Kit Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>I/O Kit Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.iokit</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
<string>7.0</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>AppleNMI Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.IONVRAMFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
<string>1.1</string>
<key>OSBundleRequired</key>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleGetInfoString</key>
- <string>System Management Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>System Management Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.iokit.IOSystemManagementFamily</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
<string>1.0.0b1</string>
<key>OSBundleRequired</key>
<key>CFBundleExecutable</key>
<string>Libkern</string>
<key>CFBundleGetInfoString</key>
- <string>Libkern Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>Libkern Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.libkern</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
<string>7.0</string>
<key>OSBundleRequired</key>
<key>CFBundleExecutable</key>
<string>Mach</string>
<key>CFBundleGetInfoString</key>
- <string>Mach Kernel Pseudoextension, Apple Computer Inc, 7.8.0</string>
+ <string>Mach Kernel Pseudoextension, Apple Computer Inc, 7.9.0</string>
<key>CFBundleIdentifier</key>
<string>com.apple.kpi.mach</string>
<key>CFBundleInfoDictionaryVersion</key>
<key>CFBundlePackageType</key>
<string>KEXT</string>
<key>CFBundleShortVersionString</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
- <string>7.8.0</string>
+ <string>7.9.0</string>
<key>OSBundleCompatibleVersion</key>
<string>7.0</string>
<key>OSBundleRequired</key>
_ml_thrm_init
_ml_thrm_set
_ml_throttle
+_ml_mem_backoff
_mtdar
_mtdec
_mtmmcr0
*/
const char * gIOKernelKmods =
"{"
-" 'com.apple.kernel' = '7.8.0';"
-" 'com.apple.kpi.bsd' = '7.8.0';"
-" 'com.apple.kpi.iokit' = '7.8.0';"
-" 'com.apple.kpi.libkern' = '7.8.0';"
-" 'com.apple.kpi.mach' = '7.8.0';"
-" 'com.apple.iokit.IONVRAMFamily' = '7.8.0';"
-" 'com.apple.driver.AppleNMI' = '7.8.0';"
-" 'com.apple.iokit.IOSystemManagementFamily' = '7.8.0';"
-" 'com.apple.iokit.ApplePlatformFamily' = '7.8.0';"
+" 'com.apple.kernel' = '7.9.0';"
+" 'com.apple.kpi.bsd' = '7.9.0';"
+" 'com.apple.kpi.iokit' = '7.9.0';"
+" 'com.apple.kpi.libkern' = '7.9.0';"
+" 'com.apple.kpi.mach' = '7.9.0';"
+" 'com.apple.iokit.IONVRAMFamily' = '7.9.0';"
+" 'com.apple.driver.AppleNMI' = '7.9.0';"
+" 'com.apple.iokit.IOSystemManagementFamily' = '7.9.0';"
+" 'com.apple.iokit.ApplePlatformFamily' = '7.9.0';"
" 'com.apple.kernel.6.0' = '6.9.9';"
" 'com.apple.kernel.bsd' = '6.9.9';"
" 'com.apple.kernel.iokit' = '6.9.9';"
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
processor_offline, processor);
if (processor != current_processor())
timer_call_shutdown(processor);
+
+ _mk_sp_thread_begin(self, self->last_processor);
+
thread_dispatch(old_thread);
}
emulation_vector_t emulation_vector,
mach_msg_type_number_t emulation_vector_count)
{
- eml_dispatch_t cur_eml, new_eml, old_eml;
- vm_size_t new_size;
- int cur_start, cur_end;
- int new_start, new_end;
- int vector_end;
-
- if (task == TASK_NULL)
- return EML_BAD_TASK;
-
- vector_end = vector_start + (int) emulation_vector_count;
-
- /*
- * We try to re-use the existing emulation vetor
- * if possible. We can reuse the vector if it
- * is not shared with another task and if it is
- * large enough to contain the entries we are
- * supplying.
- *
- * We must grab the lock on the task to check whether
- * there is an emulation vector.
- * If the vector is shared or not large enough, we
- * need to drop the lock and allocate a new emulation
- * vector.
- *
- * While the lock is dropped, the emulation vector
- * may be released by all other tasks (giving us
- * exclusive use), or may be enlarged by another
- * task_set_emulation_vector call. Therefore,
- * after allocating the new emulation vector, we
- * must grab the lock again to check whether we
- * really need the new vector we just allocated.
- *
- * Since an emulation vector cannot be altered
- * if it is in use by more than one task, the
- * task lock is sufficient to protect the vector`s
- * start, count, and contents. The lock in the
- * vector protects only the reference count.
- */
-
- old_eml = EML_DISPATCH_NULL; /* vector to discard */
- new_eml = EML_DISPATCH_NULL; /* new vector */
-
- for (;;) {
- /*
- * Find the current emulation vector.
- * See whether we can overwrite it.
- */
- task_lock(task);
- cur_eml = task->eml_dispatch;
- if (cur_eml != EML_DISPATCH_NULL) {
- cur_start = cur_eml->disp_min;
- cur_end = cur_eml->disp_count + cur_start;
-
- mutex_lock(&cur_eml->lock);
- if (cur_eml->ref_count == 1 &&
- cur_start <= vector_start &&
- cur_end >= vector_end)
- {
- /*
- * Can use the existing emulation vector.
- * Discard any new one we allocated.
- */
- mutex_unlock(&cur_eml->lock);
- old_eml = new_eml;
- break;
- }
-
- if (new_eml != EML_DISPATCH_NULL &&
- new_start <= cur_start &&
- new_end >= cur_end)
- {
- /*
- * A new vector was allocated, and it is large enough
- * to hold all the entries from the current vector.
- * Copy the entries to the new emulation vector,
- * deallocate the current one, and use the new one.
- */
-
- bcopy((char *)&cur_eml->disp_vector[0],
- (char *)&new_eml->disp_vector[cur_start-new_start],
- cur_eml->disp_count * sizeof(vm_offset_t));
-
-
- if (--cur_eml->ref_count == 0)
- old_eml = cur_eml; /* discard old vector */
- mutex_unlock(&cur_eml->lock);
-
- task->eml_dispatch = new_eml;
- syscall_emulation_sync(task);
- cur_eml = new_eml;
- break;
- }
- mutex_unlock(&cur_eml->lock);
-
- /*
- * Need a new emulation vector.
- * Ensure it will hold all the entries from
- * both the old and new emulation vectors.
- */
- new_start = vector_start;
- if (new_start > cur_start)
- new_start = cur_start;
- new_end = vector_end;
- if (new_end < cur_end)
- new_end = cur_end;
- }
- else {
- /*
- * There is no curren emulation vector.
- * If a new one was allocated, use it.
- */
- if (new_eml != EML_DISPATCH_NULL) {
- task->eml_dispatch = new_eml;
- cur_eml = new_eml;
- break;
- }
-
- /*
- * Compute the size needed for the new vector.
- */
- new_start = vector_start;
- new_end = vector_end;
- }
-
- /*
- * Have no vector (or one that is no longer large enough).
- * Drop all the locks and allocate a new vector.
- * Repeat the loop to check whether the old vector was
- * changed while we didn`t hold the locks.
- */
-
- task_unlock(task);
-
- if (new_eml != EML_DISPATCH_NULL)
- kfree((vm_offset_t)new_eml, count_to_size(new_eml->disp_count));
-
- new_size = count_to_size(new_end - new_start);
- new_eml = (eml_dispatch_t) kalloc(new_size);
-
- bzero((char *)new_eml, new_size);
- mutex_init(&new_eml->lock, ETAP_MISC_EMULATE);
- new_eml->ref_count = 1;
- new_eml->disp_min = new_start;
- new_eml->disp_count = new_end - new_start;
-
- continue;
- }
-
- /*
- * We have the emulation vector.
- * Install the new emulation entries.
- */
- bcopy((char *)&emulation_vector[0],
- (char *)&cur_eml->disp_vector[vector_start - cur_eml->disp_min],
- emulation_vector_count * sizeof(vm_offset_t));
-
- task_unlock(task);
-
- /*
- * Discard any old emulation vector we don`t need.
- */
- if (old_eml)
- kfree((vm_offset_t) old_eml, count_to_size(old_eml->disp_count));
-
- return KERN_SUCCESS;
+ return KERN_NOT_SUPPORTED;
}
/*
emulation_vector_t emulation_vector,
mach_msg_type_number_t emulation_vector_count)
{
- kern_return_t kr;
- vm_offset_t emul_vector_addr;
-
- if (task == TASK_NULL)
- return EML_BAD_TASK; /* XXX sb KERN_INVALID_ARGUMENT */
-
- /*
- * XXX - barbou@gr.osf.org.
- * If emulation_vector_count is NULL, discard the emulation
- * vectors.
- * We need a way to do that for emulator-less servers started
- * from a classic server. There seems to be no way to get rid
- * of or to avoid inheriting the emulation vector !?
- */
- if (emulation_vector_count == 0) {
- eml_task_deallocate(task);
- return KERN_SUCCESS;
- }
-
-
- /*
- * The emulation vector is really a vm_map_copy_t.
- */
- kr = vm_map_copyout(ipc_kernel_map, &emul_vector_addr,
- (vm_map_copy_t) emulation_vector);
- if (kr != KERN_SUCCESS)
- return kr;
-
- /*
- * Can't fault while we hold locks.
- */
- kr = vm_map_wire(ipc_kernel_map,
- trunc_page_32(emul_vector_addr),
- round_page_32(emul_vector_addr +
- emulation_vector_count *
- sizeof(eml_dispatch_t)),
- VM_PROT_READ|VM_PROT_WRITE, FALSE);
- assert(kr == KERN_SUCCESS);
-
- /*
- * Do the work.
- */
- kr = task_set_emulation_vector_internal(
- task,
- vector_start,
- (emulation_vector_t) emul_vector_addr,
- emulation_vector_count);
- assert(kr == KERN_SUCCESS);
-
- /*
- * Discard the memory
- */
- (void) kmem_free(ipc_kernel_map,
- emul_vector_addr,
- emulation_vector_count * sizeof(eml_dispatch_t));
-
- return KERN_SUCCESS;
+ return KERN_NOT_SUPPORTED;
}
/*
emulation_vector_t *emulation_vector, /* out */
mach_msg_type_number_t *emulation_vector_count) /* out */
{
- eml_dispatch_t eml;
- vm_size_t vector_size, size;
- vm_offset_t addr;
-
- if (task == TASK_NULL)
- return EML_BAD_TASK;
-
- addr = 0;
- size = 0;
-
- for(;;) {
- vm_size_t size_needed;
-
- task_lock(task);
- eml = task->eml_dispatch;
- if (eml == EML_DISPATCH_NULL) {
- task_unlock(task);
- if (addr)
- (void) kmem_free(ipc_kernel_map, addr, size);
- *vector_start = 0;
- *emulation_vector = 0;
- *emulation_vector_count = 0;
- return KERN_SUCCESS;
- }
-
- /*
- * Do we have the memory we need?
- */
- vector_size = eml->disp_count * sizeof(vm_offset_t);
-
- size_needed = round_page_32(vector_size);
- if (size_needed <= size)
- break;
-
- /*
- * If not, unlock the task and allocate more memory.
- */
- task_unlock(task);
-
- if (size != 0)
- kmem_free(ipc_kernel_map, addr, size);
-
- size = size_needed;
- if (kmem_alloc(ipc_kernel_map, &addr, size) != KERN_SUCCESS)
- return KERN_RESOURCE_SHORTAGE;
- }
-
- /*
- * Copy out the dispatch addresses
- */
- *vector_start = eml->disp_min;
- *emulation_vector_count = eml->disp_count;
- bcopy((char *)eml->disp_vector,
- (char *)addr,
- vector_size);
-
- /*
- * Unlock the task and free any memory we did not need
- */
- task_unlock(task);
- {
- vm_size_t size_used, size_left;
- vm_map_copy_t memory;
-
- /*
- * Free any unused memory beyond the end of the last page used
- */
- size_used = round_page_32(vector_size);
- if (size_used != size)
- (void) kmem_free(ipc_kernel_map,
- addr + size_used,
- size - size_used);
-
- /*
- * Zero the remainder of the page being returned.
- */
- size_left = size_used - vector_size;
- if (size_left > 0)
- bzero((char *)addr + vector_size, size_left);
-
- /*
- * Unwire and make memory into copyin form.
- */
- (void) vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
- (void) vm_map_copyin(ipc_kernel_map, addr, vector_size,
- TRUE, &memory);
-
- *emulation_vector = (emulation_vector_t) memory;
- }
-
- return KERN_SUCCESS;
+ return KERN_NOT_SUPPORTED;
}
/*
vm_offset_t routine_entry_pt,
int routine_number)
{
- return task_set_emulation_vector_internal(task, routine_number,
- &routine_entry_pt, 1);
+ return KERN_NOT_SUPPORTED;
}
z->zone_name = name;
z->count = 0;
z->doing_alloc = FALSE;
+ z->doing_gc = FALSE;
z->exhaustible = FALSE;
z->collectable = TRUE;
z->allows_foreign = FALSE;
REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ while ((addr == 0) && canblock && (zone->doing_gc)) {
+ zone->waiting = TRUE;
+ zone_sleep(zone);
+ REMOVE_FROM_ZONE(zone, addr, vm_offset_t);
+ }
+
while ((addr == 0) && canblock) {
/*
* If nothing was there, try to get more
zone_free_pages = NULL;
for (i = 0; i < max_zones; i++, z = z->next_zone) {
- unsigned int n;
+ unsigned int n, m;
vm_size_t elt_size, size_freed;
- struct zone_free_element *elt, *prev, *scan, *keep, *tail;
+ struct zone_free_element *elt, *base_elt, *base_prev, *prev, *scan, *keep, *tail;
assert(z != ZONE_NULL);
continue;
}
+ z->doing_gc = TRUE;
+
/*
* Snatch all of the free elements away from the zone.
*/
* Dribble back the elements we are keeping.
*/
- if (++n >= 50 && keep != NULL) {
- lock_zone(z);
+ if (++n >= 50) {
+ if (z->waiting == TRUE) {
+ lock_zone(z);
- tail->next = (void *)z->free_elements;
- (void *)z->free_elements = keep;
+ if (keep != NULL) {
+ tail->next = (void *)z->free_elements;
+ (void *)z->free_elements = keep;
+ tail = keep = NULL;
+ } else {
+					m = 0;
+ base_elt = elt;
+ base_prev = prev;
+ while ((elt != NULL) && (++m < 50)) {
+ prev = elt;
+ elt = elt->next;
+ }
+					if (m != 0) {
+ prev->next = (void *)z->free_elements;
+ (void *)z->free_elements = (void *)base_elt;
+ base_prev->next = elt;
+ prev = base_prev;
+ }
+ }
- unlock_zone(z);
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
- n = 0; tail = keep = NULL;
+ unlock_zone(z);
+ }
+				n = 0;
}
}
* and update the zone size info.
*/
- if (++n >= 50 && keep != NULL) {
+ if (++n >= 50) {
lock_zone(z);
z->cur_size -= size_freed;
size_freed = 0;
- tail->next = (void *)z->free_elements;
- (void *)z->free_elements = keep;
+ if (keep != NULL) {
+ tail->next = (void *)z->free_elements;
+ (void *)z->free_elements = keep;
+ }
+
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
unlock_zone(z);
* the zone size info.
*/
+ lock_zone(z);
+
if (size_freed > 0 || keep != NULL) {
- lock_zone(z);
z->cur_size -= size_freed;
(void *)z->free_elements = keep;
}
- unlock_zone(z);
}
+
+ z->doing_gc = FALSE;
+ if (z->waiting) {
+ z->waiting = FALSE;
+ zone_wakeup(z);
+ }
+ unlock_zone(z);
}
/*
/* boolean_t */ allows_foreign :1,/* (F) allow non-zalloc space */
/* boolean_t */ doing_alloc :1, /* is zone expanding now? */
/* boolean_t */ waiting :1, /* is thread waiting for expansion? */
- /* boolean_t */ async_pending :1; /* asynchronous allocation pending? */
+ /* boolean_t */ async_pending :1, /* asynchronous allocation pending? */
+ /* boolean_t */ doing_gc :1; /* garbage collect in progress? */
struct zone * next_zone; /* Link for all-zones list */
call_entry_data_t call_async_alloc; /* callout for asynchronous alloc */
#if ZONE_DEBUG
#include <ppc/trap.h>
extern struct vc_info vinfo;
+extern uint32_t warFlags;
+#define warDisMBpoff 0x80000000
kern_return_t testPerfTrap(int trapno, struct savearea *ss,
unsigned int dsisr, addr64_t dar);
+
int diagCall(struct savearea *save) {
union {
} ttt, adj;
natural_t tbu, tbu2, tbl;
struct per_proc_info *per_proc; /* Area for my per_proc address */
- int cpu, ret;
- unsigned int tstrt, tend, temp, temp2;
+ int cpu, ret, subc;
+ unsigned int tstrt, tend, temp, temp2, oldwar;
addr64_t src, snk;
uint64_t scom, hid1, hid4, srrwrk, stat;
scomcomm sarea;
return -1; /* Return and check for ASTs... */
+ case dgWar: /* Set or reset workaround flags */
+
+ save->save_r3 = (uint32_t)warFlags; /* Get the old flags */
+ oldwar = warFlags; /* Remember the old war flags */
+
+ subc = (int32_t)save->save_r4; /* Extract the subcommand */
+ switch(subc) { /* Do what we need */
+ case 1: /* Replace all */
+ warFlags = (uint32_t)save->save_r5; /* Do them all */
+ break;
+
+ case 2: /* Turn on selected workarounds */
+ warFlags = warFlags | (uint32_t)save->save_r5;
+ break;
+
+ case 3: /* Turn off selected workarounds */
+ warFlags = warFlags & ~((uint32_t)save->save_r5);
+ break;
+
+ case 4: /* Start up selected workaround */
+ break;
+
+ case 5: /* Stop selected workaround */
+ break;
+
+ case 6: /* Reset specific workaround parameters to default */
+ break;
+
+ case 7: /* Set workaround parameters */
+ break;
+
+ default:
+
+ break;
+
+ }
+
+ save->save_r3 = oldwar; /* Pass back original */
+ return -1;
+
default: /* Handle invalid ones */
return 0; /* Return an exception */
return KERN_SUCCESS;
}
+
#define dgPerfMon 15
#define dgMapPage 16
#define dgScom 17
+#define dgWar 18
typedef struct diagWork { /* Diagnostic work area */
ori r2,r2,lo16(MASK(MSR_FP)) ; Get the FP enable
ori r4,r4,lo16(MASK(MSR_EE)) ; Get the EE bit
+ mfsprg r9,2 ; Get feature flags
+			mtcrf	0x02,r9							; move pf64Bit to cr6
- mfmsr r0 ; Save the MSR
- andc r0,r0,r2 ; Turn of VEC and FP
- andc r4,r0,r4 ; And EE
+ mfmsr r0 ; Save the MSR
+ andc r0,r0,r2 ; Turn off VEC and FP
+ andc r4,r0,r4 ; And EE
mtmsr r4
isync
stw r12,4(r3)
rlwinm r12,r12,16,16,31
+ bt++ pf64Bitb,stsSF1 ; skip if 64-bit (only they take the hint)
+
mfdbatu r4,0
mfdbatl r5,0
mfdbatu r6,1
mfsdr1 r4
stw r4,88(r3)
-
+
la r4,92(r3)
li r5,0
mr. r5,r5
addi r4,r4,4
bne+ stSnsr
-
+
cmplwi r12,PROCESSOR_VERSION_750
mfspr r4,hid0
stw r4,(39*4)(r3)
blr
+stsSF1: mfsprg r4,0
+ mfsprg r5,1
+ mfsprg r6,2
+ mfsprg r7,3
+ std r4,(18*4)(r3)
+ std r5,(20*4)(r3)
+ std r6,(22*4)(r3)
+ std r7,(24*4)(r3)
+
+ mfsdr1 r4
+ std r4,(26*4)(r3)
+
+ mfspr r4,hid0
+ std r4,(28*4)(r3)
+ mfspr r4,hid1
+ std r4,(30*4)(r3)
+ mfspr r4,hid4
+ std r4,(32*4)(r3)
+ mfspr r4,hid5
+ std r4,(34*4)(r3)
+
+
+stsSF2: li r5,0
+ la r4,(80*4)(r3)
+
+stsslbm: slbmfee r6,r5
+ slbmfev r7,r5
+ std r6,0(r4)
+ std r7,8(r4)
+ addi r5,r5,1
+ cmplwi r5,64
+ addi r4,r4,16
+ blt stsslbm
+
+
+ mtmsr r0
+ isync
+
+ blr
;
; fwEmMck - this forces the hardware to emulate machine checks
.globl EXT(dbspecrs)
.align 3
EXT(dbspecrs):
- .set .,.+(80*4)
+ .set .,.+(336*4)
/*
* Interrupt and debug stacks go here
#define dcfim 0x00000400
#define spd 22
#define spdm 0x00000200
+#define hdice 23
+#define hdicem 0x00000100
#define sge 24
#define sgem 0x00000080
#define dcfa 25
#define chud_970_mmcr1 798
#define chud_970_imc 799
-/* PPC SPRs - 7400/7410 Specific */
+/* PPC SPRs - 7400/7410 Specific, Private */
#define chud_7400_msscr1 1015
-
-/* PPC SPRs - 64-bit implementations */
+
+/* PPC SPRs - 64-bit implementations, Private */
#define chud_ppc64_accr 29
#define chud_ppc64_ctrl 152
-/* PPC SPRs - 970 Specific */
+/* PPC SPRs - 970 Specific, Private */
#define chud_970_scomc 276
#define chud_970_scomd 277
#define chud_970_hsprg0 304
proc_info->need_ast = (unsigned int)&need_ast[cpu];
proc_info->FPU_owner = 0;
proc_info->VMX_owner = 0;
+ proc_info->rtcPop = 0xFFFFFFFFFFFFFFFFULL;
mp = (mapping *)(&proc_info->ppCIOmp);
mp->mpFlags = 0x01000000 | mpSpecial | 1;
mp->mpSpace = invalSpace;
int cpu;
struct SIGtimebase *timebaseAddr;
natural_t tbu, tbu2, tbl;
-
+ broadcastFunc xfunc;
cpu = cpu_number(); /* Get the CPU number */
pproc = &per_proc_info[cpu]; /* Point to our block */
-
+
/*
* Since we've been signaled, wait about 31 ms for the signal lock to pass
*/
case CPRQsps:
{
- extern void ml_set_processor_speed_slave(unsigned long speed);
+ extern void ml_set_processor_speed_slave(unsigned long speed);
- ml_set_processor_speed_slave(holdParm2);
- return;
- }
+ ml_set_processor_speed_slave(holdParm2);
+ return;
+ }
+
default:
panic("cpu_signal_handler: unknown CPU request - %08X\n", holdParm1);
return;
pproc->hwCtr.numSIGPwake++; /* Count this one */
return; /* No need to do anything, the interrupt does it all... */
+ case SIGPcall: /* Call function on CPU */
+ pproc->hwCtr.numSIGPcall++; /* Count this one */
+ xfunc = holdParm1; /* Do this since I can't seem to figure C out */
+ xfunc(holdParm2); /* Call the passed function */
+ return; /* Done... */
+
default:
panic("cpu_signal_handler: unknown SIGP message order - %08X\n", holdParm0);
return;
(void)ml_set_interrupts_enabled(intr);
}
+
+/*
+ * Call a function on all running processors
+ *
+ * Note that the synch parameter is used to wait until all functions are complete.
+ * It is not passed to the other processor and must be known by the called function.
+ * The called function must do a thread_wakeup on the synch if it decrements the
+ * synch count to 0.
+ */
+
+
+int32_t cpu_broadcast(uint32_t *synch, broadcastFunc func, uint32_t parm) {
+
+ int sigproc, cpu, ocpu;
+
+ cpu = cpu_number(); /* Who are we? */
+ sigproc = 0; /* Clear called processor count */
+
+ if(real_ncpus > 1) { /* Are we just a uni? */
+
+ assert_wait((event_t)synch, THREAD_UNINT); /* If more than one processor, we may have to wait */
+
+ for(ocpu = 0; ocpu < real_ncpus; ocpu++) { /* Tell everyone to call */
+ if(ocpu == cpu) continue; /* If we talk to ourselves, people will wonder... */
+ hw_atomic_add(synch, 1); /* Tentatively bump synchronizer */
+ sigproc++; /* Tentatively bump signal sent count */
+ if(KERN_SUCCESS != cpu_signal(ocpu, SIGPcall, (uint32_t)func, parm)) { /* Call the function on the other processor */
+ hw_atomic_sub(synch, 1); /* Other guy isn't really there, ignore it */
+ sigproc--; /* and don't count it */
+ }
+ }
+
+ if(!sigproc) clear_wait(current_thread(), THREAD_AWAKENED); /* Clear wait if we never signalled */
+ else thread_block(THREAD_CONTINUE_NULL); /* Wait for everyone to get into step... */
+ }
+
+ return sigproc; /* Return the number of guys actually signalled */
+
+}
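
The wakeup contract described in the comment falls on the callee, since cpu_broadcast() only blocks on synch and never passes it along. A minimal sketch of a conforming target function, assuming the kernel-internal hw_atomic_sub/thread_wakeup primitives (the names here are hypothetical, and the synch word must be shared with the initiator by convention, since only parm is delivered):

    static uint32_t xcall_synch = 0;        /* shared with the initiator by convention */

    static void
    xcall_target(int parm)
    {
            /* ... per-processor work keyed off parm goes here ... */

            /* hw_atomic_sub returns the new value; last one out wakes the initiator */
            if (hw_atomic_sub(&xcall_synch, 1) == 0)
                    thread_wakeup((event_t)&xcall_synch);
    }

The initiator would then call cpu_broadcast(&xcall_synch, xcall_target, 0) and, on return, know how many processors were actually signalled.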
ReadReal((addr64_t)xxltr + 64, &(((unsigned int *)&xltr)[16])); /* Get the second half */
ReadReal((addr64_t)xxltr + 96, &(((unsigned int *)&xltr)[24])); /* Get the second half */
- db_printf("\n%s%08llX %1X %08X %08X - %04X\n", (xxltr != cxltr ? " " : "*"),
+ db_printf("\n%s%08llX %1X %08X %08X - %04X", (xxltr != cxltr ? " " : "*"),
xxltr,
- xltr.LTR_cpu, xltr.LTR_timeHi, xltr.LTR_timeLo,
+ (xltr.LTR_cpu & 0xFF), xltr.LTR_timeHi, xltr.LTR_timeLo,
(xltr.LTR_excpt & 0x8000 ? 0xFFFF : xltr.LTR_excpt * 64)); /* Print the first line */
+
+ if(xltr.LTR_cpu & 0xFF00) db_printf(", sflgs = %02X\n", ((xltr.LTR_cpu >> 8) & 0xFF));
+ else db_printf("\n");
db_printf(" DAR/DSR/CR: %016llX %08X %08X\n", xltr.LTR_dar, xltr.LTR_dsisr, xltr.LTR_cr);
llslot = ((long long)xpteg[i] << 32) | (long long)xpteg[i + 1]; /* Make a long long version of this */
space = (llslot >> 12) & (maxAdrSp - 1); /* Extract the space */
llhash = (unsigned long long)space | ((unsigned long long)space << maxAdrSpb) | ((unsigned long long)space << (2 * maxAdrSpb)); /* Get the hash */
- llhash = llhash & 0x0000001FFFFFFFFF; /* Make sure we stay within supported ranges */
+ llhash = llhash & 0x0000001FFFFFFFFFULL; /* Make sure we stay within supported ranges */
pva = (unsigned long long)ptegindex ^ llhash; /* Get part of the vaddr */
llseg = (llslot >> 12) ^ llhash; /* Get the segment number */
api = (llslot >> 7) & 0x1F; /* Get the API */
int i, j, pents;
stSpecrs(dbspecrs); /* Save special registers */
- db_printf("PIR: %08X\n", dbspecrs[0]);
- db_printf("PVR: %08X\n", dbspecrs[1]);
- db_printf("SDR1: %08X\n", dbspecrs[22]);
- db_printf("HID0: %08X\n", dbspecrs[39]);
- db_printf("HID1: %08X\n", dbspecrs[40]);
- db_printf("L2CR: %08X\n", dbspecrs[41]);
- db_printf("MSSCR0: %08X\n", dbspecrs[42]);
- db_printf("MSSCR1: %08X\n", dbspecrs[43]);
- db_printf("THRM1: %08X\n", dbspecrs[44]);
- db_printf("THRM2: %08X\n", dbspecrs[45]);
- db_printf("THRM3: %08X\n", dbspecrs[46]);
- db_printf("ICTC: %08X\n", dbspecrs[47]);
- db_printf("L2CR2: %08X\n", dbspecrs[48]);
- db_printf("DABR: %08X\n", dbspecrs[49]);
- db_printf("\n");
-
- db_printf("DBAT: %08X %08X %08X %08X\n", dbspecrs[2], dbspecrs[3], dbspecrs[4], dbspecrs[5]);
- db_printf(" %08X %08X %08X %08X\n", dbspecrs[6], dbspecrs[7], dbspecrs[8], dbspecrs[9]);
- db_printf("IBAT: %08X %08X %08X %08X\n", dbspecrs[10], dbspecrs[11], dbspecrs[12], dbspecrs[13]);
- db_printf(" %08X %08X %08X %08X\n", dbspecrs[14], dbspecrs[15], dbspecrs[16], dbspecrs[17]);
- db_printf("SPRG: %08X %08X %08X %08X\n", dbspecrs[18], dbspecrs[19], dbspecrs[20], dbspecrs[21]);
- db_printf("\n");
- for(i = 0; i < 16; i += 8) { /* Print 8 at a time */
- db_printf("SR%02d: %08X %08X %08X %08X %08X %08X %08X %08X\n", i,
- dbspecrs[23+i], dbspecrs[24+i], dbspecrs[25+i], dbspecrs[26+i],
- dbspecrs[27+i], dbspecrs[28+i], dbspecrs[29+i], dbspecrs[30+i]);
+ if(per_proc_info[0].pf.Available & pf64Bit) {
+ db_printf("PIR: %08X\n", dbspecrs[0]);
+ db_printf("PVR: %08X\n", dbspecrs[1]);
+ db_printf("SDR1: %08X.%08X\n", dbspecrs[26], dbspecrs[27]);
+ db_printf("HID0: %08X.%08X\n", dbspecrs[28], dbspecrs[29]);
+ db_printf("HID1: %08X.%08X\n", dbspecrs[30], dbspecrs[31]);
+ db_printf("HID4: %08X.%08X\n", dbspecrs[32], dbspecrs[33]);
+ db_printf("HID5: %08X.%08X\n", dbspecrs[34], dbspecrs[35]);
+ db_printf("SPRG0: %08X.%08X %08X.%08X\n", dbspecrs[18], dbspecrs[19], dbspecrs[20], dbspecrs[21]);
+ db_printf("SPRG2: %08X.%08X %08X.%08X\n", dbspecrs[22], dbspecrs[23], dbspecrs[24], dbspecrs[25]);
+ db_printf("\n");
+ for(i = 0; i < (64 * 4); i += 4) {
+ db_printf("SLB %02d: %08X.%08X %08X.%08X\n", i / 4, dbspecrs[80 + i], dbspecrs[81 + i], dbspecrs[82 + i], dbspecrs[83 + i]);
+ }
+ }
+ else {
+ db_printf("PIR: %08X\n", dbspecrs[0]);
+ db_printf("PVR: %08X\n", dbspecrs[1]);
+ db_printf("SDR1: %08X\n", dbspecrs[22]);
+ db_printf("HID0: %08X\n", dbspecrs[39]);
+ db_printf("HID1: %08X\n", dbspecrs[40]);
+ db_printf("L2CR: %08X\n", dbspecrs[41]);
+ db_printf("MSSCR0: %08X\n", dbspecrs[42]);
+ db_printf("MSSCR1: %08X\n", dbspecrs[43]);
+ db_printf("THRM1: %08X\n", dbspecrs[44]);
+ db_printf("THRM2: %08X\n", dbspecrs[45]);
+ db_printf("THRM3: %08X\n", dbspecrs[46]);
+ db_printf("ICTC: %08X\n", dbspecrs[47]);
+ db_printf("L2CR2: %08X\n", dbspecrs[48]);
+ db_printf("DABR: %08X\n", dbspecrs[49]);
+
+ db_printf("DBAT: %08X %08X %08X %08X\n", dbspecrs[2], dbspecrs[3], dbspecrs[4], dbspecrs[5]);
+ db_printf(" %08X %08X %08X %08X\n", dbspecrs[6], dbspecrs[7], dbspecrs[8], dbspecrs[9]);
+ db_printf("IBAT: %08X %08X %08X %08X\n", dbspecrs[10], dbspecrs[11], dbspecrs[12], dbspecrs[13]);
+ db_printf(" %08X %08X %08X %08X\n", dbspecrs[14], dbspecrs[15], dbspecrs[16], dbspecrs[17]);
+ db_printf("SPRG: %08X %08X %08X %08X\n", dbspecrs[18], dbspecrs[19], dbspecrs[20], dbspecrs[21]);
+ db_printf("\n");
+ for(i = 0; i < 16; i += 8) { /* Print 8 at a time */
+ db_printf("SR%02d: %08X %08X %08X %08X %08X %08X %08X %08X\n", i,
+ dbspecrs[23+i], dbspecrs[24+i], dbspecrs[25+i], dbspecrs[26+i],
+ dbspecrs[27+i], dbspecrs[28+i], dbspecrs[29+i], dbspecrs[30+i]);
+ }
}
db_printf("\n");
llslot = ((long long)xpteg[slot] << 32) | (long long)xpteg[slot + 1]; /* Make a long long version of this */
space = (llslot >> 12) & (maxAdrSp - 1); /* Extract the space */
llhash = (unsigned long long)space | ((unsigned long long)space << maxAdrSpb) | ((unsigned long long)space << (2 * maxAdrSpb)); /* Get the hash */
- llhash = llhash & 0x0000001FFFFFFFFF; /* Make sure we stay within supported ranges */
+ llhash = llhash & 0x0000001FFFFFFFFFULL; /* Make sure we stay within supported ranges */
pva = i ^ llhash; /* Get part of the vaddr */
llseg = ((llslot >> 12) ^ llhash); /* Get the segment number */
api = (llslot >> 7) & 0x1F; /* Get the API */
unsigned int hwSoftPatches; /* Soft Patch interruptions */
unsigned int hwMaintenances; /* Maintenance interruptions */
unsigned int hwInstrumentations; /* Instrumentation interruptions */
- unsigned int hwrsvd14; /* Reswerved */
-/* 0x0B4 */
-
- unsigned int hwspare0[17]; /* Reserved */
- unsigned int hwRedrives; /* Number of redriven interrupts */
- unsigned int hwSteals; /* PTE Steals */
-/* 0x100 */
+ unsigned int hwrsvd14; /* Reserved */
+ unsigned int hwhdec; /* 0B4 Hypervisor decrementer */
+
+ unsigned int hwspare0[11]; /* 0B8 Reserved */
+ unsigned int hwspare0a; /* 0E4 Reserved */
+ unsigned int hwspare0b; /* 0E8 Reserved */
+ unsigned int hwspare0c; /* 0EC Reserved */
+ unsigned int hwspare0d; /* 0F0 Reserved */
+ unsigned int hwIgnored; /* 0F4 Interruptions ignored */
+ unsigned int hwRedrives; /* 0F8 Number of redriven interrupts */
+ unsigned int hwSteals; /* 0FC Steals */
+/* 100 */
unsigned int hwMckHang; /* ? */
unsigned int hwMckSLBPE; /* ? */
unsigned int numSIGPtimo; /* Number of SIGP send timeouts */
unsigned int numSIGPmast; /* Number of SIGPast messages merged */
unsigned int numSIGPmwake; /* Number of SIGPwake messages merged */
+ unsigned int numSIGPcall; /* Number of SIGPcall messages received */
- unsigned int hwspare3[21]; /* Pad to 512 */
+ unsigned int hwspare3[20]; /* Pad to 512 */
};
#pragma pack()
/* PPC cache line boundary here - 020 */
- unsigned int rsrvd020[2];
+ uint64_t rtcPop; /* Real Time Clock pop */
unsigned int need_ast; /* pointer to need_ast[CPU_NO] */
/*
* Note: the following two pairs of words need to stay in order and each pair must
#define MPsigpFunc 0x0000FF00 /* Current function */
#define MPsigpIdle 0x00 /* No function pending */
#define MPsigpSigp 0x04 /* Signal a processor */
+
#define SIGPast 0 /* Requests an ast on target processor */
#define SIGPcpureq 1 /* Requests CPU specific function */
#define SIGPdebug 2 /* Requests a debugger entry */
#define SIGPwake 3 /* Wake up a sleeping processor */
+#define SIGPcall 4 /* Call a function on a processor */
+
#define CPRQtemp 	0							/* Get temperature of processor */
-#define CPRQtimebase 1 /* Get timebase of processor */
+#define CPRQtimebase 1 /* Get timebase of processor */
#define CPRQsegload 2 /* Segment registers reload */
#define CPRQscom 3 /* SCOM */
#define CPRQchud 4 /* CHUD perfmon */
#define CPRQsps 5 /* Set Processor Speed */
+
unsigned int MPsigpParm0; /* SIGP parm 0 */
unsigned int MPsigpParm1; /* SIGP parm 1 */
unsigned int MPsigpParm2; /* SIGP parm 2 */
#define T_MAINTENANCE (0x2A * T_VECTOR_SIZE)
#define T_INSTRUMENTATION (0x2B * T_VECTOR_SIZE)
#define T_ARCHDEP0 (0x2C * T_VECTOR_SIZE)
+#define T_HDEC (0x2D * T_VECTOR_SIZE)
#define T_AST (0x100 * T_VECTOR_SIZE)
#define T_MAX T_CHOKE /* Maximum exception no */
DECLARE("FPUowner", offsetof(struct per_proc_info *, FPU_owner));
DECLARE("VMXowner", offsetof(struct per_proc_info *, VMX_owner));
DECLARE("holdQFret", offsetof(struct per_proc_info *, holdQFret));
+ DECLARE("rtcPop", offsetof(struct per_proc_info *, rtcPop));
DECLARE("PP_SAVE_EXCEPTION_TYPE", offsetof(struct per_proc_info *, save_exception_type));
DECLARE("PP_NEED_AST", offsetof(struct per_proc_info *, need_ast));
DECLARE("hwMaintenances", offsetof(struct per_proc_info *, hwCtr.hwMaintenances));
DECLARE("hwInstrumentations", offsetof(struct per_proc_info *, hwCtr.hwInstrumentations));
DECLARE("hwRedrives", offsetof(struct per_proc_info *, hwCtr.hwRedrives));
+ DECLARE("hwIgnored", offsetof(struct per_proc_info *, hwCtr.hwIgnored));
+ DECLARE("hwhdec", offsetof(struct per_proc_info *, hwCtr.hwhdec));
DECLARE("hwSteals", offsetof(struct per_proc_info *, hwCtr.hwSteals));
DECLARE("hwMckHang", offsetof(struct per_proc_info *, hwCtr.hwMckHang));
DECLARE("dgMisc0", offsetof(struct lowglo *, lgdgWork.dgMisc0));
DECLARE("lgKillResv", offsetof(struct lowglo *, lgKillResv));
+
DECLARE("scomcpu", offsetof(struct scomcomm *, scomcpu));
DECLARE("scomfunc", offsetof(struct scomcomm *, scomfunc));
unsigned int hw_Maintenances; /* Maintenance interruptions */
unsigned int hw_Instrumentations; /* Instrumentation interruptions */
	unsigned int	hw_rsvd14;					/* Reserved */
+ unsigned int hw_hdec; /* Hypervisor decrementer */
- unsigned int hw_spare[19]; /* Pad to 256 bytes */
+ unsigned int hw_spare[18]; /* Pad to 256 bytes */
} hw_counters;
#pragma pack()
bl mapSelSlot ; Go select a slot (note that the PCA image is already set up)
- cmplwi cr5,r3,1 ; Did we steal a slot?
- rlwinm r5,r4,3,26,28 ; Convert index to slot offset
- add r19,r19,r5 ; Point directly to the PTE
+ cmplwi cr5,r3,1 ; Did we steal a slot?
+ rlwimi r19,r4,3,26,28 ; Insert PTE index into PTEG address yielding PTE address
mr r16,r6 ; Remember the PCA image after selection
blt+ cr5,hpfInser32 ; Nope, no steal...
bl mapSelSlot ; Go select a slot
cmplwi cr5,r3,1 ; Did we steal a slot?
- rlwinm r5,r4,4,25,27 ; Convert index to slot offset
mr r18,r6 ; Remember the PCA image after selection
- add r19,r19,r5 ; Point directly to the PTE
+ insrdi r19,r4,3,57 ; Insert slot index into PTEG address bits 57:59, forming the PTE address
lwz r10,hwSteals(r2) ; Get the steal count
blt++ cr5,hpfInser64 ; Nope, no steal...
unsigned int lgRsv184[31]; /* 5184 reserved - push to next line */
struct diagWork lgdgWork; /* 5200 Start of diagnostic work area */
unsigned int lgRsv220[24]; /* 5220 reserved */
- unsigned int lgRst280[32]; /* 5280 reserved */
+ unsigned int lgRsv280[32]; /* 5280 reserved */
unsigned int lgKillResv; /* 5300 line used to kill reservations */
unsigned int lgKillResvpad[31]; /* 5304 pad reservation kill line */
- unsigned int lgRsv380[768]; /* 5380 reserved - push to 1 page */
+
+ unsigned int lgRsv380[32]; /* 5380 - 5400 reserved */
+
+ unsigned int lgRsv400[32]; /* 5400 - 5480 reserved */
+
+ uint32_t lgRsv480[704]; /* 5480 reserved - push to 1 page */
} lowglo;
/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
mtlr r4
blr
-resetexc: cmplwi r13,RESET_HANDLER_BUPOR ; Special bring up POR sequence?
+resetexc: cmplwi r13,RESET_HANDLER_BUPOR ; Special bring up POR sequence?
bne resetexc2 ; No...
lis r4,hi16(EXT(resetPOR)) ; Get POR code
ori r4,r4,lo16(EXT(resetPOR)) ; The rest
; Fall through here for 970 MCKs.
;
- li r11,1 ; ?
- sldi r11,r11,32+3 ; ?
- mfspr r13,hid4 ; ?
- or r11,r11,r13 ; ?
+ li r11,1 ;
+ sldi r11,r11,32+3 ;
+ mfspr r13,hid4 ;
+ or r11,r11,r13 ;
sync
- mtspr hid4,r11 ; ?
+ mtspr hid4,r11 ;
isync
- li r11,1 ; ?
- sldi r11,r11,32+8 ; ?
- andc r13,r13,r11 ; ?
+ li r11,1 ;
+ sldi r11,r11,32+8 ;
+ andc r13,r13,r11 ;
lis r11,0xE000 ; Get the unlikeliest ESID possible
sync
- mtspr hid4,r13 ; ?
- isync ; ?
+ mtspr hid4,r13 ;
+ isync ;
- srdi r11,r11,1 ; ?
- slbie r11 ; ?
+ srdi r11,r11,1 ;
+ slbie r11 ;
sync
isync
h200aaa: mfsrr1 r11 ; Get the SRR1
mfcr r13 ; Save the CR
- rlwinm. r11,r11,0,dcmck,dcmck ; ?
- beq+ notDCache ; ?
+ rlwinm. r11,r11,0,dcmck,dcmck ;
+ beq+ notDCache ;
sync
- mfspr r11,msscr0 ; ?
- dssall ; ?
+ mfspr r11,msscr0 ;
+ dssall ;
sync
isync
- oris r11,r11,hi16(dl1hwfm) ; ?
- mtspr msscr0,r11 ; ?
+ oris r11,r11,hi16(dl1hwfm) ;
+ mtspr msscr0,r11 ;
-rstbsy: mfspr r11,msscr0 ; ?
+rstbsy: mfspr r11,msscr0 ;
- rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ?
- bne rstbsy ; ?
+ rlwinm. r11,r11,0,dl1hwf,dl1hwf ;
+ bne rstbsy ;
- sync ; ?
+ sync ;
mfsprg r11,0 ; Get the per_proc
mtcrf 255,r13 ; Restore CRs
li r0,SAVgeneral ; Get the savearea type value
lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
stb r0,SAVflags+2(r13) ; Mark valid context
- ori r23,r23,lo16(EXT(trcWork)) ; Get the rest
rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
li r23,trcWork ; Get the trace work area address
addi r22,r22,10 ; Adjust code so we shift into CR5
dcbz128 0,r20 ; Zap the trace entry
+ lwz r9,SAVflags(r13) ; Get savearea flags
+
ld r16,ruptStamp(r2) ; Get top of time base
ld r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not)
std r16,LTR_timeHi(r20) ; Set the upper part of TB
ld r1,saver1(r13) ; Get back interrupt time R1
+ rlwinm r9,r9,20,16,23 ; Isolate the special flags
ld r18,saver2(r13) ; Get back interrupt time R2
- std r0,LTR_r0(r20) ; Save off register 0
+ std r0,LTR_r0(r20) ; Save off register 0
+ rlwimi r9,r19,0,24,31 ; Slide in the cpu number
ld r3,saver3(r13) ; Restore this one
- sth r19,LTR_cpu(r20) ; Stash the cpu number
+ sth r9,LTR_cpu(r20) ; Stash the cpu number and special flags
std r1,LTR_r1(r20) ; Save off register 1
ld r4,saver4(r13) ; Restore this one
std r18,LTR_r2(r20) ; Save off register 2
li r20,lo16(xcpTable) ; Point to the vector table (note: this must be in 1st 64k of physical memory)
la r12,hwCounts(r2) ; Point to the exception count area
+ andis. r24,r22,hi16(SAVeat) ; Should we eat this one?
rlwinm r22,r22,SAVredriveb+1,31,31 ; Get a 1 if we are redriving
add r12,r12,r11 ; Point to the count
lwzx r20,r20,r11 ; Get the interrupt handler
lwz r25,0(r12) ; Get the old value
lwz r23,hwRedrives(r2) ; Get the redrive count
+ crmove cr3_eq,cr0_eq ; Remember if we are ignoring
xori r24,r22,1 ; Get the NOT of the redrive
mtctr r20 ; Point to the interrupt handler
mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code
crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x
stw r25,0(r12) ; Store it back
stw r23,hwRedrives(r2) ; Save the redrive count
+ bne-- cr3,IgnoreRupt ; Interruption is being ignored...
bctr ; Go process the exception...
.long WhoaBaby ; T_SOFT_PATCH
.long WhoaBaby ; T_MAINTENANCE
.long WhoaBaby ; T_INSTRUMENTATION
-
+ .long WhoaBaby ; T_ARCHDEP0
+ .long EatRupt ; T_HDEC
;
; Just what the heck happened here????
;
WhoaBaby: b . ; Open the hood and wait for help
+ .align 5
+
+IgnoreRupt:
+ lwz r20,hwIgnored(r2) ; Grab the ignored interruption count
+ addi r20,r20,1 ; Count this one
+ stw r20,hwIgnored(r2) ; Save the ignored count
+ b EatRupt ; Ignore it...
+
+
;
; System call
;
; Handle machine check here.
;
-; ?
+;
;
.align 5
MachineCheck:
- bt++ pf64Bitb,mck64 ; ?
+ bt++ pf64Bitb,mck64 ;
lwz r27,savesrr1+4(r13) ; Pick up srr1
.long 0 ; 5008 Zero
.long 0 ; 500C Zero cont...
.long EXT(per_proc_info) ; 5010 pointer to per_procs
- .long 0 ;
+ .long 0
.long 0 ; 5018 reserved
.long 0 ; 501C reserved
.long 0 ; 5020 reserved
.long 0 ; 53F4 reserved
.long 0 ; 53F8 reserved
.long 0 ; 53FC reserved
-
-
+ .long 0 ; 5400 reserved
+ .long 0 ; 5404 reserved
+ .long 0 ; 5408 reserved
+ .long 0 ; 540C reserved
+ .long 0 ; 5410 reserved
+ .long 0 ; 5414 reserved
+ .long 0 ; 5418 reserved
+ .long 0 ; 541C reserved
+ .long 0 ; 5420 reserved
+ .long 0 ; 5424 reserved
+ .long 0 ; 5428 reserved
+ .long 0 ; 542C reserved
+ .long 0 ; 5430 reserved
+ .long 0 ; 5434 reserved
+ .long 0 ; 5438 reserved
+ .long 0 ; 543C reserved
+ .long 0 ; 5440 reserved
+ .long 0 ; 5444 reserved
+ .long 0 ; 5448 reserved
+ .long 0 ; 544C reserved
+ .long 0 ; 5450 reserved
+ .long 0 ; 5454 reserved
+ .long 0 ; 5458 reserved
+ .long 0 ; 545C reserved
+ .long 0 ; 5460 reserved
+ .long 0 ; 5464 reserved
+ .long 0 ; 5468 reserved
+ .long 0 ; 546C reserved
+ .long 0 ; 5470 reserved
+ .long 0 ; 5474 reserved
+ .long 0 ; 5478 reserved
+ .long 0 ; 547C reserved
;
; The "shared page" is used for low-level debugging
;
void cpu_exit_wait(
int);
+typedef void (*broadcastFunc) (int);
+
+int32_t cpu_broadcast(uint32_t *, broadcastFunc, uint32_t);
+
#endif /* _PPC_MACHINE_CPU_H_ */
decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;
-
+uint32_t warFlags = 0;
+#define warDisMBpoff 0x80000000
#define MAX_CPUS_SET 0x1
#define MAX_CPUS_WAIT 0x2
void machine_idle(void)
{
- if (per_proc_info[cpu_number()].interrupts_enabled == TRUE) {
- int cur_decr;
+ struct per_proc_info *ppinfo;
+
+ ppinfo = getPerProc();
- machine_idle_ppc();
+ if ((ppinfo->interrupts_enabled == TRUE) &&
+ (ppinfo->cpu_flags & SignalReady)) { /* Check to see if we are allowed to nap */
+ int cur_decr;
+ machine_idle_ppc();
/*
- * protect against a lost decrementer trap
- * if the current decrementer value is negative
- * by more than 10 ticks, re-arm it since it's
- * unlikely to fire at this point... a hardware
- * interrupt got us out of machine_idle and may
- * also be contributing to this state
- */
+ * protect against a lost decrementer trap
+ * if the current decrementer value is negative
+ * by more than 10 ticks, re-arm it since it's
+ * unlikely to fire at this point... a hardware
+ * interrupt got us out of machine_idle and may
+ * also be contributing to this state
+ */
cur_decr = isync_mfdec();
if (cur_decr < -10) {
mtdec(1);
}
}
+ else {
+ (void) ml_set_interrupts_enabled(TRUE); /* Enable for interruptions even if nap is not allowed */
+ }
}
void
return(per_proc_info[mycpu].cpu_flags & traceBE);
}
+
+void ml_mem_backoff(void) {
+
+ if(warFlags & warDisMBpoff) return; /* If backoff disabled, exit */
+
+ __asm__ volatile("sync");
+ __asm__ volatile("isync");
+
+ return;
+}
+
void ml_set_processor_voltage(unsigned long voltage);
unsigned int ml_scom_write(uint32_t reg, uint64_t data);
unsigned int ml_scom_read(uint32_t reg, uint64_t *data);
+uint32_t ml_hdec_ratio(void);
#endif /* __APPLE_API_PRIVATE */
*
* There is one bit of hackery in here: we need to enable for
* interruptions when we go to sleep and there may be a pending
- * decrimenter rupt. So we make the decrimenter 0x7FFFFFFF and enable for
- * interruptions. The decrimenter rupt vector recognizes this and returns
+ * decrementer rupt. So we make the decrementer 0x7FFFFFFF and enable for
+ * interruptions. The decrementer rupt vector recognizes this and returns
* directly back here.
*
*/
mpsClearDEC:
mfmsr r5 ; Get the current MSR
rlwinm r10,r10,0,1,31 ; Make 0x7FFFFFFF
- mtdec r10 ; Load decrimenter with 0x7FFFFFFF
+ mtdec r10 ; Load decrementer with 0x7FFFFFFF
isync ; and make sure,
mfdec r9 ; really sure, it gets there
mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
mfspr r4,hid0 ; Yes, this is a duplicate, keep it here
- mtmsr r3 ; Enable for interrupts to drain decrimenter
+ mtmsr r3 ; Enable for interrupts to drain decrementer
add r6,r4,r5 ; Just waste time
add r6,r6,r4 ; A bit more
;
; We are here with translation off, interrupts off, all possible
-; interruptions drained off, and a decrimenter that will not pop.
+; interruptions drained off, and a decrementer that will not pop.
;
bl EXT(cacheInit) ; Clear out the caches. This will leave them on
eqv r4,r4,r4 ; Get all foxes
rlwinm r4,r4,0,1,31 ; Make 0x7FFFFFFF
beq slSleepNow ; skip if 32-bit...
- li r3, 0x4000 ; Cause decrimenter to roll over soon
- mtdec r3 ; Load decrimenter with 0x00004000
+ li r3,0x4000 ; Cause decrementer to roll over soon
+ mtdec r3 ; Load decrementer with 0x00004000
isync ; and make sure,
mfdec r3 ; really sure, it gets there
sync ; Sync it all up
mtmsr r5 ; Do sleep with interruptions enabled
isync ; Take a pill
- mtdec r4 ; Load decrimenter with 0x7FFFFFFF
+ mtdec r4 ; Load decrementer with 0x7FFFFFFF
isync ; and make sure,
mfdec r3 ; really sure, it gets there
b slSleepNow ; Go back to sleep if we wake up...
bf 31,cisnlck ; Skip if pfLClck not set...
- mfspr r4,msscr0 ; ?
- rlwinm r6,r4,0,0,l2pfes-1 ; ?
+ mfspr r4,msscr0 ;
+ rlwinm r6,r4,0,0,l2pfes-1 ;
mtspr msscr0,r6 ; Set it
sync
isync
sync
isync
- mtspr msscr0,r4 ; ?
+ mtspr msscr0,r4 ;
sync
isync
bne+ ciinvdl3b ; Assume so...
sync
- lwz r10, pfBootConfig(r12) ; ?
- rlwinm. r10, r10, 24, 28, 31 ; ?
- beq ciinvdl3nopdet ; ?
-
- mfspr r8,l3pdet ; ?
- srw r2, r8, r10 ; ?
- rlwimi r2, r8, 0, 24, 31 ; ?
- subfic r10, r10, 32 ; ?
- li r8, -1 ; ?
- ori r2, r2, 0x0080 ; ?
- slw r8, r8, r10 ; ?
- or r8, r2, r8 ; ?
- mtspr l3pdet, r8 ; ?
+ lwz r10, pfBootConfig(r12) ;
+ rlwinm. r10, r10, 24, 28, 31 ;
+ beq ciinvdl3nopdet ;
+
+ mfspr r8,l3pdet ;
+ srw r2, r8, r10 ;
+ rlwimi r2, r8, 0, 24, 31 ;
+ subfic r10, r10, 32 ;
+ li r8, -1 ;
+ ori r2, r2, 0x0080 ;
+ slw r8, r8, r10 ;
+ or r8, r2, r8 ;
+ mtspr l3pdet, r8 ;
isync
ciinvdl3nopdet:
rlwinm r8,r8,0,l3clken+1,l3clken-1 ; Clear the clock enable bit
mtspr l3cr,r8 ; Disable the clock
- li r2,128 ; ?
-ciinvdl3c: addi r2,r2,-1 ; ?
- cmplwi r2,0 ; ?
+ li r2,128 ;
+ciinvdl3c: addi r2,r2,-1 ;
+ cmplwi r2,0 ;
bne+ ciinvdl3c
- mfspr r10,msssr0 ; ?
- rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ; ?
- mtspr msssr0,r10 ; ?
+ mfspr r10,msssr0 ;
+ rlwinm r10,r10,0,vgL3TAG+1,vgL3TAG-1 ;
+ mtspr msssr0,r10 ;
sync
mtspr l3cr,r3 ; Enable it as desired
beqlr ; the received data in r5
std r5,0(r4) ; Pass back the received data
blr ; Leave...
+
+;
+; Calculates the hdec to dec ratio
+;
+
+ .align 5
+ .globl EXT(ml_hdec_ratio)
+
+LEXT(ml_hdec_ratio)
+
+ li r0,0 ; Clear the EE bit (and everything else for that matter)
+ mfmsr r11 ; Get the MSR
+ mtmsrd r0,1 ; Set the EE bit only (do not care about RI)
+ rlwinm r11,r11,0,MSR_EE_BIT,MSR_EE_BIT ; Isolate just the EE bit
+ mfmsr r10 ; Refresh our view of the MSR (VMX/FP may have changed)
+ or r12,r10,r11 ; Turn on EE if on before we turned it off
+
+ mftb r9 ; Get time now
+ mfspr r2,hdec ; Save hdec
+
+mhrcalc: mftb r8 ; Get time now
+ sub r8,r8,r9 ; How many ticks?
+ cmplwi r8,10000 ; 10000 yet?
+ blt mhrcalc ; Nope...
+
+ mfspr r9,hdec ; Get hdec now
+ sub r3,r2,r9 ; How many ticks?
+ mtmsrd r12,1 ; Flip EE on if needed
+ blr ; Leave...
unsigned int lastTrace; /* Value of low-level exception trace controls */
+
volatile unsigned int cpus_holding_bkpts; /* counter for number of cpus holding
breakpoints (ie: cpus that did not
insert back breakpoints) */
addr64_t instr_ptr;
ppnum_t instr_pp;
unsigned int instr;
- int my_cpu, tcpu;
+ int my_cpu, tcpu, wasdebugger;
+ struct per_proc_info *pp;
+ uint64_t nowtime, poptime;
my_cpu = cpu_number(); /* Get our CPU */
if (debugger_debug) kprintf("Call_DebuggerC(%d): lasttrace = %08X\n", my_cpu, lastTrace); /* (TEST/DEBUG) */
#endif
debugger_cpu = my_cpu; /* Show that we are debugger */
+
+
lastTrace = LLTraceSet(0); /* Disable low-level tracing */
for(tcpu = 0; tcpu < NCPUS; tcpu++) { /* Stop all the other guys */
debugger_exit:
#if 0
if (debugger_debug) kprintf("Call_DebuggerC(%d): exit - inst = %08X, cpu=%d(%d), run=%d\n", my_cpu,
- instr, my_cpu, debugger_cpu, db_run_mode); /* (TEST/DEBUG) */
+ instr, my_cpu, debugger_cpu, db_run_mode); /* (TEST/DEBUG) */
#endif
if ((instr == TRAP_DEBUGGER_INST) || /* Did we trap to enter debugger? */
(instr == TRAP_DIRECT_INST)) saved_state->save_srr0 += TRAP_INST_SIZE; /* Yes, point past trap */
- if(debugger_cpu == my_cpu) LLTraceSet(lastTrace); /* Enable tracing on the way out if we are debugger */
+ wasdebugger = 0; /* Assume not debugger */
+	if(debugger_cpu == my_cpu) {				/* Are we the debugger processor? */
+ wasdebugger = 1; /* Remember that we were the debugger */
+ LLTraceSet(lastTrace); /* Enable tracing on the way out if we are debugger */
+ }
wait = FALSE; /* Assume we are not going to wait */
if (db_run_mode == STEP_CONTINUE) { /* Are we going to run? */
if (wait) while(cpus_holding_bkpts); /* Wait for breakpoints to clear */
+
hw_atomic_sub(&debug_mode, 1); /* Set out of debug now */
return(1); /* Exit debugger normally */
extern vm_offset_t static_memory_end;
thread_t thread;
mapping *mp;
-
+
+
/*
* Setup per_proc info for first cpu.
*/
per_proc_info[0].need_ast = (unsigned int)&need_ast[0];
per_proc_info[0].FPU_owner = 0;
per_proc_info[0].VMX_owner = 0;
+ per_proc_info[0].rtcPop = 0xFFFFFFFFFFFFFFFFULL;
mp = (mapping *)per_proc_info[0].ppCIOmp;
mp->mpFlags = 0x01000000 | mpSpecial | 1;
mp->mpSpace = invalSpace;
#include <machine/mach_param.h> /* HZ */
#include <machine/commpage.h>
#include <machine/machine_routines.h>
+#include <ppc/exception.h>
#include <ppc/proc_reg.h>
#include <pexpert/pexpert.h>
int sysclk_init(void);
+void treqs(uint32_t dec);
+
kern_return_t sysclk_gettime(
mach_timespec_t *cur_time);
rtclock_tick_deadline[mycpu] = abstime;
rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
- mtdec(decr);
+ treqs(decr);
return(1);
}
rtclock_tick_deadline[mycpu] = abstime;
rtclock_tick_deadline[mycpu] += rtclock_tick_interval;
decr = deadline_to_decrementer(rtclock_tick_deadline[mycpu], abstime);
- mtdec(decr);
+ treqs(decr);
rtclock_initialized = TRUE;
rtclock_decrementer_min < (natural_t)decr )
decr = rtclock_decrementer_min;
- mtdec(decr);
+ treqs(decr);
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
| DBG_FUNC_NONE, decr, 2, 0, 0, 0);
* We may receive interrupts too early, we must reject them.
*/
if (rtclock_initialized == FALSE) {
- mtdec(DECREMENTER_MAX); /* Max the decrementer if not init */
+ treqs(DECREMENTER_MAX); /* Max the decrementer if not init */
return;
}
rtclock_decrementer_min < (natural_t)decr1 )
decr1 = rtclock_decrementer_min;
- mtdec(decr1);
+ treqs(decr1);
KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_DECI, 1)
| DBG_FUNC_NONE, decr1, 3, 0, 0, 0);
{
delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
}
+
+/*
+ * Request a decrementer pop
+ *
+ */
+
+void treqs(uint32_t dec) {
+
+
+ struct per_proc_info *pp;
+ uint64_t nowtime, newtime;
+
+ nowtime = mach_absolute_time(); /* What time is it? */
+ pp = getPerProc(); /* Get our processor block */
+ newtime = nowtime + (uint64_t)dec; /* Get requested pop time */
+ pp->rtcPop = newtime; /* Copy it */
+
+ mtdec((uint32_t)(newtime - nowtime)); /* Set decrementer */
+ return;
+
+}
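
Recording the requested pop time in rtcPop is what lets the interrupt path recognize decrementer interruptions that arrive before they were asked for, and ignore them (counting them in hwIgnored via the SAVeat path above). A rough C sketch of that test, illustrative only since the real check is done in the exception vector assembly:

    static int
    decrementer_popped_early(struct per_proc_info *pp)
    {
            uint64_t now = mach_absolute_time();

            if (now < pp->rtcPop) {
                    /* Early: re-arm for the remainder and eat the interrupt */
                    mtdec((uint32_t)(pp->rtcPop - now));
                    return (1);
            }
            return (0);     /* genuine pop, run the clock routines */
    }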
#define SAVredriveb 13 /* Indicates that the low-level fault handler associated */
#define SAVinstrument 0x00080000 /* Indicates that we should return instrumentation data */
#define SAVinstrumentb 12 /* Indicates that we should return instrumentation data */
+#define SAVeat 0x00100000 /* Indicates that interruption should be ignored */
+#define SAVeatb 11 /* Indicates that interruption should be ignored */
#define SAVtype 0x0000FF00 /* Shows type of savearea */
#define SAVtypeshft 8 /* Shift to position type */
#define SAVempty 0x86 /* Savearea is on free list */
vm_object_deallocate(prev_entry->object.vm_object);
vm_map_entry_dispose(map, prev_entry);
SAVE_HINT(map, this_entry);
- counter(c_vm_map_entry_simplified++);
+ counter(c_vm_map_simplified++);
}
counter(c_vm_map_simplify_entry_called++);
}