bsd/netinet6/in6.c optional inet6
bsd/netinet6/in6_cksum.c optional inet6
bsd/netinet6/in6_gif.c optional gif inet6
-bsd/netinet6/ip6_fw.c optional inet6
+bsd/netinet6/ip6_fw.c optional inet6 ipfw2
bsd/netinet6/ip6_forward.c optional inet6
bsd/netinet6/in6_ifattach.c optional inet6
bsd/netinet6/ip6_input.c optional inet6
#include <i386/tsc.h>
static int
-hw_cpu_sysctl SYSCTL_HANDLER_ARGS
+_i386_cpu_info SYSCTL_HANDLER_ARGS
{
__unused struct sysctl_oid *unused_oidp = oidp;
- i386_cpu_info_t *cpu_info = cpuid_info();
- void *ptr = (uint8_t *)cpu_info + (uintptr_t)arg1;
+ void *ptr = arg1;
int value;
if (arg2 == -1) {
}
static int
-hw_cpu_sysctl_nonzero SYSCTL_HANDLER_ARGS
+i386_cpu_info SYSCTL_HANDLER_ARGS
{
- i386_cpu_info_t *cpu_info = cpuid_info();
- void *ptr = (uint8_t *)cpu_info + (uintptr_t)arg1;
+ void *ptr = (uint8_t *)cpuid_info() + (uintptr_t)arg1;
+ return _i386_cpu_info(oidp, ptr, arg2, req);
+}
+
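+/*
+ * i386_cpu_info and friends are thin wrappers over _i386_cpu_info:
+ * arg1 carries the byte offset of a field (set up with offsetof() in
+ * the SYSCTL_PROC entries below), which each wrapper resolves against
+ * the appropriate cpuid structure before handing off.
+ */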
+static int
+i386_cpu_info_nonzero SYSCTL_HANDLER_ARGS
+{
+ void *ptr = (uint8_t *)cpuid_info() + (uintptr_t)arg1;
int value = *(uint32_t *)ptr;
if (value == 0)
return ENOENT;
- return hw_cpu_sysctl(oidp, arg1, arg2, req);
+ return _i386_cpu_info(oidp, ptr, arg2, req);
+}
+static int
+cpu_mwait SYSCTL_HANDLER_ARGS
+{
+ i386_cpu_info_t *cpu_info = cpuid_info();
+ void *ptr;
+ if (cpu_info->cpuid_mwait_leafp == NULL)
+ return ENOENT;
+ ptr = (uint8_t *)cpu_info->cpuid_mwait_leafp + (uintptr_t)arg1;
+ return _i386_cpu_info(oidp, ptr, arg2, req);
+}
+
+static int
+cpu_thermal SYSCTL_HANDLER_ARGS
+{
+ i386_cpu_info_t *cpu_info = cpuid_info();
+ void *ptr;
+ if (cpu_info->cpuid_thermal_leafp == NULL)
+ return ENOENT;
+ ptr = (uint8_t *)cpu_info->cpuid_thermal_leafp + (uintptr_t)arg1;
+ return _i386_cpu_info(oidp, ptr, arg2, req);
+}
+
+static int
+cpu_arch_perf SYSCTL_HANDLER_ARGS
+{
+ i386_cpu_info_t *cpu_info = cpuid_info();
+ void *ptr;
+ if (cpu_info->cpuid_arch_perf_leafp == NULL)
+ return ENOENT;
+ ptr = (uint8_t *)cpu_info->cpuid_arch_perf_leafp + (uintptr_t)arg1;
+ return _i386_cpu_info(oidp, ptr, arg2, req);
}
static int
-hw_cpu_features SYSCTL_HANDLER_ARGS
+cpu_features SYSCTL_HANDLER_ARGS
{
__unused struct sysctl_oid *unused_oidp = oidp;
__unused void *unused_arg1 = arg1;
}
static int
-hw_cpu_extfeatures SYSCTL_HANDLER_ARGS
+cpu_extfeatures SYSCTL_HANDLER_ARGS
{
__unused struct sysctl_oid *unused_oidp = oidp;
__unused void *unused_arg1 = arg1;
}
static int
-hw_cpu_logical_per_package SYSCTL_HANDLER_ARGS
+cpu_logical_per_package SYSCTL_HANDLER_ARGS
{
__unused struct sysctl_oid *unused_oidp = oidp;
__unused void *unused_arg1 = arg1;
}
static int
-hw_cpu_flex_ratio_desired SYSCTL_HANDLER_ARGS
+cpu_flex_ratio_desired SYSCTL_HANDLER_ARGS
{
__unused struct sysctl_oid *unused_oidp = oidp;
__unused void *unused_arg1 = arg1;
}
static int
-hw_cpu_flex_ratio_min SYSCTL_HANDLER_ARGS
+cpu_flex_ratio_min SYSCTL_HANDLER_ARGS
{
__unused struct sysctl_oid *unused_oidp = oidp;
__unused void *unused_arg1 = arg1;
}
static int
-hw_cpu_flex_ratio_max SYSCTL_HANDLER_ARGS
+cpu_flex_ratio_max SYSCTL_HANDLER_ARGS
{
__unused struct sysctl_oid *unused_oidp = oidp;
__unused void *unused_arg1 = arg1;
SYSCTL_PROC(_machdep_cpu, OID_AUTO, max_basic, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_max_basic),sizeof(uint32_t),
- hw_cpu_sysctl, "IU", "Max Basic Information value");
+ i386_cpu_info, "IU", "Max Basic Information value");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, max_ext, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_max_ext), sizeof(uint32_t),
- hw_cpu_sysctl, "IU", "Max Extended Function Information value");
+ i386_cpu_info, "IU", "Max Extended Function Information value");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, vendor, CTLTYPE_STRING | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_vendor), 0,
- hw_cpu_sysctl, "A", "CPU vendor");
+ i386_cpu_info, "A", "CPU vendor");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand_string, CTLTYPE_STRING | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_brand_string), 0,
- hw_cpu_sysctl, "A", "CPU brand string");
+ i386_cpu_info, "A", "CPU brand string");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, family, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_family), sizeof(uint8_t),
- hw_cpu_sysctl, "I", "CPU family");
+ i386_cpu_info, "I", "CPU family");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, model, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_model), sizeof(uint8_t),
- hw_cpu_sysctl, "I", "CPU model");
+ i386_cpu_info, "I", "CPU model");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, extmodel, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_extmodel), sizeof(uint8_t),
- hw_cpu_sysctl, "I", "CPU extended model");
+ i386_cpu_info, "I", "CPU extended model");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfamily, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_extfamily), sizeof(uint8_t),
- hw_cpu_sysctl, "I", "CPU extended family");
+ i386_cpu_info, "I", "CPU extended family");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, stepping, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_stepping), sizeof(uint8_t),
- hw_cpu_sysctl, "I", "CPU stepping");
+ i386_cpu_info, "I", "CPU stepping");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, feature_bits, CTLTYPE_QUAD | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_features), sizeof(uint64_t),
- hw_cpu_sysctl, "IU", "CPU features");
+ i386_cpu_info, "IU", "CPU features");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfeature_bits, CTLTYPE_QUAD | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_extfeatures), sizeof(uint64_t),
- hw_cpu_sysctl, "IU", "CPU extended features");
+ i386_cpu_info, "IU", "CPU extended features");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, signature, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_signature), sizeof(uint32_t),
- hw_cpu_sysctl, "I", "CPU signature");
+ i386_cpu_info, "I", "CPU signature");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, brand, CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_brand), sizeof(uint8_t),
- hw_cpu_sysctl, "I", "CPU brand");
+ i386_cpu_info, "I", "CPU brand");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, features, CTLTYPE_STRING | CTLFLAG_RD,
0, 0,
- hw_cpu_features, "A", "CPU feature names");
+ cpu_features, "A", "CPU feature names");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, extfeatures, CTLTYPE_STRING | CTLFLAG_RD,
0, 0,
- hw_cpu_extfeatures, "A", "CPU extended feature names");
+ cpu_extfeatures, "A", "CPU extended feature names");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, logical_per_package,
CTLTYPE_INT | CTLFLAG_RD,
0, 0,
- hw_cpu_logical_per_package, "I", "CPU logical cpus per package");
+ cpu_logical_per_package, "I", "CPU logical cpus per package");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, cores_per_package,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_cores_per_package),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "CPU cores per package");
+ i386_cpu_info, "I", "CPU cores per package");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, microcode_version,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_microcode_version),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Microcode version number");
+ i386_cpu_info, "I", "Microcode version number");
SYSCTL_NODE(_machdep_cpu, OID_AUTO, mwait, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, linesize_min,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_mwait_linesize_min),
+ (void *)offsetof(cpuid_mwait_leaf_t, linesize_min),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Monitor/mwait minimum line size");
+ cpu_mwait, "I", "Monitor/mwait minimum line size");
SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, linesize_max,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_mwait_linesize_max),
+ (void *)offsetof(cpuid_mwait_leaf_t, linesize_max),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Monitor/mwait maximum line size");
+ cpu_mwait, "I", "Monitor/mwait maximum line size");
SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, extensions,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_mwait_extensions),
+ (void *)offsetof(cpuid_mwait_leaf_t, extensions),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Monitor/mwait extensions");
+ cpu_mwait, "I", "Monitor/mwait extensions");
SYSCTL_PROC(_machdep_cpu_mwait, OID_AUTO, sub_Cstates,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_mwait_sub_Cstates),
+ (void *)offsetof(cpuid_mwait_leaf_t, sub_Cstates),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Monitor/mwait sub C-states");
+ cpu_mwait, "I", "Monitor/mwait sub C-states");
SYSCTL_NODE(_machdep_cpu, OID_AUTO, thermal, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, sensor,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_thermal_sensor),
+ (void *)offsetof(cpuid_thermal_leaf_t, sensor),
sizeof(boolean_t),
- hw_cpu_sysctl, "I", "Thermal sensor present");
+ cpu_thermal, "I", "Thermal sensor present");
SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, dynamic_acceleration,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_thermal_dynamic_acceleration),
+ (void *)offsetof(cpuid_thermal_leaf_t, dynamic_acceleration),
sizeof(boolean_t),
- hw_cpu_sysctl, "I", "Dynamic Acceleration Technology");
+ cpu_thermal, "I", "Dynamic Acceleration Technology (Turbo Mode)");
SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, thresholds,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_thermal_thresholds),
+ (void *)offsetof(cpuid_thermal_leaf_t, thresholds),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Number of interrupt thresholds");
+ cpu_thermal, "I", "Number of interrupt thresholds");
SYSCTL_PROC(_machdep_cpu_thermal, OID_AUTO, ACNT_MCNT,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_thermal_ACNT_MCNT),
+ (void *)offsetof(cpuid_thermal_leaf_t, ACNT_MCNT),
sizeof(boolean_t),
- hw_cpu_sysctl, "I", "ACNT_MCNT capability");
+ cpu_thermal, "I", "ACNT_MCNT capability");
SYSCTL_NODE(_machdep_cpu, OID_AUTO, arch_perf, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, version,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_arch_perf_version),
+ (void *)offsetof(cpuid_arch_perf_leaf_t, version),
sizeof(uint8_t),
- hw_cpu_sysctl, "I", "Architectural Performance Version Number");
+ cpu_arch_perf, "I", "Architectural Performance Version Number");
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, number,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_arch_perf_number),
+ (void *)offsetof(cpuid_arch_perf_leaf_t, number),
sizeof(uint8_t),
- hw_cpu_sysctl, "I", "Number of counters per logical cpu");
+ cpu_arch_perf, "I", "Number of counters per logical cpu");
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, width,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_arch_perf_width),
+ (void *)offsetof(cpuid_arch_perf_leaf_t, width),
sizeof(uint8_t),
- hw_cpu_sysctl, "I", "Bit width of counters");
+ cpu_arch_perf, "I", "Bit width of counters");
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, events_number,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_arch_perf_events_number),
+ (void *)offsetof(cpuid_arch_perf_leaf_t, events_number),
sizeof(uint8_t),
- hw_cpu_sysctl, "I", "Number of monitoring events");
+ cpu_arch_perf, "I", "Number of monitoring events");
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, events,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_arch_perf_events),
+ (void *)offsetof(cpuid_arch_perf_leaf_t, events),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Bit vector of events");
+ cpu_arch_perf, "I", "Bit vector of events");
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, fixed_number,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_arch_perf_fixed_number),
+ (void *)offsetof(cpuid_arch_perf_leaf_t, fixed_number),
sizeof(uint8_t),
- hw_cpu_sysctl, "I", "Number of fixed-function counters");
+ cpu_arch_perf, "I", "Number of fixed-function counters");
SYSCTL_PROC(_machdep_cpu_arch_perf, OID_AUTO, fixed_width,
CTLTYPE_INT | CTLFLAG_RD,
- (void *)offsetof(i386_cpu_info_t, cpuid_arch_perf_fixed_width),
+ (void *)offsetof(cpuid_arch_perf_leaf_t, fixed_width),
sizeof(uint8_t),
- hw_cpu_sysctl, "I", "Bit-width of fixed-function counters");
+ cpu_arch_perf, "I", "Bit-width of fixed-function counters");
SYSCTL_NODE(_machdep_cpu, OID_AUTO, cache, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_cache_linesize),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Cacheline size");
+ i386_cpu_info, "I", "Cacheline size");
SYSCTL_PROC(_machdep_cpu_cache, OID_AUTO, L2_associativity,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_cache_L2_associativity),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "L2 cache associativity");
+ i386_cpu_info, "I", "L2 cache associativity");
SYSCTL_PROC(_machdep_cpu_cache, OID_AUTO, size,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_cache_size),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Cache size (in Kbytes)");
+ i386_cpu_info, "I", "Cache size (in Kbytes)");
SYSCTL_NODE(_machdep_cpu, OID_AUTO, tlb, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
(void *)offsetof(i386_cpu_info_t,
cpuid_tlb[TLB_INST][TLB_SMALL][0]),
sizeof(uint32_t),
- hw_cpu_sysctl_nonzero, "I",
+ i386_cpu_info_nonzero, "I",
"Number of small page instruction TLBs");
SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, small,
(void *)offsetof(i386_cpu_info_t,
cpuid_tlb[TLB_DATA][TLB_SMALL][0]),
sizeof(uint32_t),
- hw_cpu_sysctl_nonzero, "I",
+ i386_cpu_info_nonzero, "I",
"Number of small page data TLBs (1st level)");
SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, small_level1,
(void *)offsetof(i386_cpu_info_t,
cpuid_tlb[TLB_DATA][TLB_SMALL][1]),
sizeof(uint32_t),
- hw_cpu_sysctl_nonzero, "I",
+ i386_cpu_info_nonzero, "I",
"Number of small page data TLBs (2nd level)");
SYSCTL_PROC(_machdep_cpu_tlb_inst, OID_AUTO, large,
(void *)offsetof(i386_cpu_info_t,
cpuid_tlb[TLB_INST][TLB_LARGE][0]),
sizeof(uint32_t),
- hw_cpu_sysctl_nonzero, "I",
+ i386_cpu_info_nonzero, "I",
"Number of large page instruction TLBs");
SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, large,
(void *)offsetof(i386_cpu_info_t,
cpuid_tlb[TLB_DATA][TLB_LARGE][0]),
sizeof(uint32_t),
- hw_cpu_sysctl_nonzero, "I",
+ i386_cpu_info_nonzero, "I",
"Number of large page data TLBs (1st level)");
SYSCTL_PROC(_machdep_cpu_tlb_data, OID_AUTO, large_level1,
(void *)offsetof(i386_cpu_info_t,
cpuid_tlb[TLB_DATA][TLB_LARGE][1]),
sizeof(uint32_t),
- hw_cpu_sysctl_nonzero, "I",
+ i386_cpu_info_nonzero, "I",
"Number of large page data TLBs (2nd level)");
SYSCTL_PROC(_machdep_cpu_tlb, OID_AUTO, shared,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_stlb),
sizeof(uint32_t),
- hw_cpu_sysctl_nonzero, "I",
+ i386_cpu_info_nonzero, "I",
"Number of shared TLBs");
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_address_bits_physical),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Number of physical address bits");
+ i386_cpu_info, "I", "Number of physical address bits");
SYSCTL_PROC(_machdep_cpu_address_bits, OID_AUTO, virtual,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, cpuid_address_bits_virtual),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Number of virtual address bits");
+ i386_cpu_info, "I", "Number of virtual address bits");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, core_count,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, core_count),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Number of enabled cores per package");
+ i386_cpu_info, "I", "Number of enabled cores per package");
SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count,
CTLTYPE_INT | CTLFLAG_RD,
(void *)offsetof(i386_cpu_info_t, thread_count),
sizeof(uint32_t),
- hw_cpu_sysctl, "I", "Number of enabled threads per package");
+ i386_cpu_info, "I", "Number of enabled threads per package");
SYSCTL_NODE(_machdep_cpu, OID_AUTO, flex_ratio, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
"Flex ratio");
SYSCTL_PROC(_machdep_cpu_flex_ratio, OID_AUTO, desired,
CTLTYPE_INT | CTLFLAG_RD,
0, 0,
- hw_cpu_flex_ratio_desired, "I", "Flex ratio desired (0 disabled)");
+ cpu_flex_ratio_desired, "I", "Flex ratio desired (0 disabled)");
SYSCTL_PROC(_machdep_cpu_flex_ratio, OID_AUTO, min,
CTLTYPE_INT | CTLFLAG_RD,
0, 0,
- hw_cpu_flex_ratio_min, "I", "Flex ratio min (efficiency)");
+ cpu_flex_ratio_min, "I", "Flex ratio min (efficiency)");
SYSCTL_PROC(_machdep_cpu_flex_ratio, OID_AUTO, max,
CTLTYPE_INT | CTLFLAG_RD,
0, 0,
- hw_cpu_flex_ratio_max, "I", "Flex ratio max (non-turbo)");
+ cpu_flex_ratio_max, "I", "Flex ratio max (non-turbo)");
uint64_t pmap_pv_hashlist_walks;
uint64_t pmap_pv_hashlist_cnts;
/* We're done when parent directory changes */
if (state->cbs_parentID != curID) {
+ /*
+ * If the parent ID differs from curID, we've hit EOF for the
+ * directory. To help future callers, we mark the cbs_eof boolean.
+ * However, we should only do so if we're about to return from
+ * this function.
+ *
+ * This is because this callback function does its own uiomove to
+ * get the data to userspace. If we set the boolean before determining
+ * whether the current entry has enough room to write its data to
+ * userland, we could fool the callers of this catalog function into
+ * thinking they hit EOF earlier than they really did; we'd still have
+ * entries to process and send to userland, just not enough room.
+ *
+ * To be safe, we mark cbs_eof here ONLY in the cases where we know
+ * we're about to return without writing any new data back to
+ * userland. In the stop_after_pack case, this boolean gets set
+ * regardless, so it's slightly safer to let that logic mark it,
+ * especially since it's closer to the return of this function.
+ */
+
if (state->cbs_extended) {
/* The last record has not been returned yet, so we
* want to stop after packing the last item
if (state->cbs_hasprevdirentry) {
stop_after_pack = true;
} else {
+ state->cbs_eof = true;
state->cbs_result = ENOENT;
return (0); /* stop */
}
} else {
+ state->cbs_eof = true;
state->cbs_result = ENOENT;
return (0); /* stop */
}
state.cbs_nlinks = 0;
state.cbs_maxlinks = maxlinks;
state.cbs_linkinfo = (linkinfo_t *)((char *)buffer + MAXPATHLEN);
+ /*
+ * cbs_eof must be initialized to false regardless of whether we're
+ * in the extended case, since we use this field to track whether
+ * we've returned EOF from the iterator function.
+ */
+ state.cbs_eof = false;
iterator = (BTreeIterator *) ((char *)state.cbs_linkinfo + (maxlinks * sizeof(linkinfo_t)));
key = (CatalogKey *)&iterator->key;
if (extended) {
state.cbs_direntry = (struct direntry *)((char *)iterator + sizeof(BTreeIterator));
state.cbs_prevdirentry = state.cbs_direntry + 1;
- state.cbs_eof = false;
}
/*
* Attempt to build a key from cached filename
/* Note that state.cbs_index is still valid on errors */
*items = state.cbs_index - index;
index = state.cbs_index;
-
+
+ /*
+ * Also note that cbs_eof is set whenever the catalog callback hits
+ * EOF during the enumeration. Mark the directory's hint descriptor
+ * as having hit EOF.
+ */
if (state.cbs_eof) {
+ dirhint->dh_desc.cd_flags |= CD_EOF;
*eofflag = 1;
}
const u_int8_t * cd_nameptr; /* pointer to cnode name */
};
-/* cd_flags */
+/* cd_flags
+ *
+ * CD_EOF is used by hfs_vnop_readdir / cat_getdirentries to indicate EOF was
+ * encountered during a directory enumeration. When this flag is observed
+ * on the next call to hfs_vnop_readdir it tells the caller that there's no
+ * need to descend into the catalog as EOF was encountered during the last call.
+ * This flag should only be set on the descriptor embedded in the directoryhint.
+ */
+
#define CD_HASBUF 0x01 /* allocated filename buffer */
#define CD_DECOMPOSED 0x02 /* name is fully decomposed */
+#define CD_EOF 0x04 /* see above */
#define CD_ISMETA 0x40 /* describes a metadata file */
#define CD_ISDIR 0x80 /* describes a directory */
static int
hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
{
+ int vref = 0;
+
if (!data_vp || !cp) /* sanity check incoming parameters */
return EINVAL;
if (c_vp) {
/* we already have a data vnode */
*data_vp = c_vp;
- vnode_ref(*data_vp);
+ vref = vnode_ref(*data_vp);
if (!skiplock) hfs_unlock(cp);
- return 0;
+ if (vref == 0) {
+ return 0;
+ }
+ return EINVAL;
}
/* no data fork vnode in the cnode, so ask hfs for one. */
if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1) &&
0 != data_vp) {
- vnode_ref(*data_vp);
+ vref = vnode_ref(*data_vp);
vnode_put(*data_vp);
if (!skiplock) hfs_unlock(cp);
- return 0;
+ if (vref == 0) {
+ return 0;
+ }
+ return EINVAL;
}
/* there was an error getting the vnode */
*data_vp = NULL;
if (index == 0) {
dirhint->dh_threadhint = cp->c_dirthreadhint;
}
+ else {
+ /*
+ * If we have a non-zero index, there is a possibility that during the last
+ * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
+ * then we don't want to return any new entries for the caller. Just return 0
+ * items, mark the eofflag, and bail out. Because we won't have done any work, the
+ * code at the end of the function will release the dirhint for us.
+ *
+ * Don't forget to unlock the catalog lock on the way out, too.
+ */
+ if (dirhint->dh_desc.cd_flags & CD_EOF) {
+ error = 0;
+ eofflag = 1;
+ uio_setoffset(uio, startoffset);
+ hfs_systemfile_unlock (hfsmp, lockflags);
+
+ goto seekoffcalc;
+ }
+ }
/* Pack the buffer with dirent entries. */
error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items, &eofflag);
if (S_ISWHT(mode)) {
goto exit;
}
-
- /*
- * We need to release the cnode lock on dcp before calling into
- * hfs_getnewvnode to make sure we don't double lock this node
- */
- if (dcp) {
- dcp->c_flag &= ~C_DIR_MODIFICATION;
- wakeup((caddr_t)&dcp->c_flag);
-
- hfs_unlock(dcp);
- /* so we don't double-unlock it later */
- dcp = NULL;
- }
/*
* Create a vnode for the object just created.
*
- * NOTE: Because we have just unlocked the parent directory above (dcp),
- * we are open to a race condition wherein another thread could look up the
- * entry we just added to the catalog and delete it BEFORE we actually get the
- * vnode out of the call below. In that case, we may return ENOENT because the
- * cnode was already marked for C_DELETE. This is because we are grabbing the cnode
- * out of the hash via the CNID/fileid provided in attr, and with the parent
- * directory unlocked, it is now accessible. In this case, the VFS should re-drive the
- * create operation to re-attempt.
+ * NOTE: Maintaining the cnode lock on the parent directory is important,
+ * as it prevents race conditions where other threads could look up entries
+ * in the directory and/or add things while we are in the process of creating
+ * the vnode below. However, this has the potential to cause a double lock
+ * panic when dealing with shadow files on an HFS boot partition.
+ * The panic could occur if we do not clean up after ourselves properly
+ * when done with a shadow file, or in the error cases: it would arise if we
+ * try to create a new vnode and then end up reclaiming another shadow vnode
+ * in order to create the new one. If everything is working properly, this
+ * should be a non-issue, as we would never enter that reclaim codepath.
*
* The cnode is locked on successful return.
*/
cat_releasedesc(&out_desc);
/*
- * In case we get here via error handling, make sure we release cnode lock on dcp if we
- * didn't do it already.
+ * Make sure we release cnode lock on dcp.
*/
if (dcp) {
dcp->c_flag &= ~C_DIR_MODIFICATION;
if (result) {
return (result);
}
- /* VNOP_WRITE will update timestamps accordingly */
+ /*
+ * VNOP_WRITE marks the vnode as needing a modtime update.
+ */
result = VNOP_WRITE(rvp, uio, 0, ap->a_context);
- /* if open unlinked, force it inactive */
+ /* if open unlinked, force it inactive and recycle */
if (openunlinked) {
int vref;
vref = vnode_ref (rvp);
if (vref == 0) {
vnode_rele(rvp);
}
- vnode_recycle (rvp);
+ vnode_recycle (rvp);
}
+ else {
+ /* re-lock the cnode so we can update the modtimes */
+ if ((result = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
+ vnode_recycle(rvp);
+ vnode_put(rvp);
+ return (result);
+ }
+ /* HFS fsync the resource fork to force it out to disk */
+ result = hfs_fsync (rvp, MNT_NOWAIT, 0, vfs_context_proc(ap->a_context));
+
+ hfs_unlock(cp);
+ }
vnode_put(rvp);
return (result);
#include <pexpert/pexpert.h>
-#if CONFIG_EMBEDDED
-#include <libkern/OSKextLib.h>
-#endif
-
void * get_user_regs(thread_t); /* XXX kludge for <machine/thread.h> */
void IOKitInitializeTime(void); /* XXX */
void IOSleep(unsigned int); /* XXX */
consider_zone_gc(FALSE);
#endif
-#if CONFIG_EMBEDDED
- /*
- * XXX workaround for:
- * <rdar://problem/6378731> Kirkwood7A135: PPP KEXT no longer loads
- */
- OSKextLoadKextWithIdentifier("com.apple.nke.ppp");
- OSKextLoadKextWithIdentifier("com.apple.nke.l2tp");
- OSKextLoadKextWithIdentifier("com.apple.nke.pptp");
-#endif
-
bsd_init_kprintf("done\n");
}
*/
error = msleep1(&p->AIO_SUSPEND_SLEEP_CHAN, aio_proc_mutex(p), PCATCH | PWAIT | PDROP, "aio_suspend", abstime); /* XXX better priority? */
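+ /* msleep1 returns errno-style values: 0 on wakeup, EWOULDBLOCK on timeout */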
- if ( error == THREAD_AWAKENED ) {
+ if ( error == 0 ) {
/*
* got our wakeup call from aio_work_thread().
* Since we can get a wakeup on this channel from another thread in the
*/
goto check_for_our_aiocbp;
}
- else if ( error == THREAD_TIMED_OUT ) {
+ else if ( error == EWOULDBLOCK ) {
/* our timeout expired */
error = EAGAIN;
}
case DTYPE_VNODE:
case DTYPE_SOCKET:
case DTYPE_PIPE:
+ case DTYPE_PSXSHM:
return TRUE;
default:
- /* DTYPE_KQUEUE, DTYPE_FSEVENTS, DTYPE_PSXSHM, DTYPE_PSXSEM */
+ /* DTYPE_KQUEUE, DTYPE_FSEVENTS, DTYPE_PSXSEM */
return FALSE;
}
}
error = fops->f_attach(kn);
- /*
- * Anyone trying to drop this knote will yield to
- * us, since KN_ATTACHING is set.
- */
kqlock(kq);
- if (error != 0 || (kn->kn_status & KN_DROPPING)) {
- if (error == 0) {
- kn->kn_fop->f_detach(kn);
- }
+ if (error != 0) {
+ /*
+ * Failed to attach correctly, so drop.
+ * All other possible users/droppers
+ * have deferred to us.
+ */
kn->kn_status |= KN_DROPPING;
kqunlock(kq);
knote_drop(kn, p);
goto done;
+ } else if (kn->kn_status & KN_DROPPING) {
+ /*
+ * Attach succeeded, but someone else
+ * deferred their drop - now we have
+ * to do it for them (after detaching).
+ */
+ kqunlock(kq);
+ kn->kn_fop->f_detach(kn);
+ knote_drop(kn, p);
+ goto done;
}
kn->kn_status &= ~KN_ATTACHING;
kqunlock(kq);
x86_64_flag = ((_get_cpu_capabilities() & k64Bit) == k64Bit)? 1 : 0;
/* hw.cpufamily */
- switch (cpuid_info()->cpuid_family) {
- case 6:
- switch (cpuid_info()->cpuid_model) {
- case 13:
- cpufamily = CPUFAMILY_INTEL_6_13;
- break;
- case 14:
- cpufamily = CPUFAMILY_INTEL_6_14; /* Core Solo/Duo */
- break;
- case 15:
- cpufamily = CPUFAMILY_INTEL_6_15; /* Core 2 */
- break;
- case 23:
- cpufamily = CPUFAMILY_INTEL_6_23;
- break;
- case 26:
- cpufamily = CPUFAMILY_INTEL_6_26;
- break;
- default:
- cpufamily = CPUFAMILY_UNKNOWN;
- }
- break;
- default:
- cpufamily = CPUFAMILY_UNKNOWN;
- }
+ cpufamily = cpuid_cpufamily();
+
/* hw.cacheconfig */
cacheconfig[0] = ml_cpu_cache_sharing(0);
cacheconfig[1] = ml_cpu_cache_sharing(1);
} else
map = new_map;
+#ifndef CONFIG_ENFORCE_SIGNED_CODE
+ /* This turns off faulting for executable pages, which would allow
+ * circumventing Code Signing Enforcement */
if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
vm_map_disable_NX(map);
-
+#endif
+
if (!result)
result = &myresult;
#include <sys/ttycom.h>
#include <sys/filedesc.h>
#include <sys/uio_internal.h>
-#include <sys/fcntl.h>
#include <sys/file_internal.h>
#include <sys/event.h>
bp->bif_dlist = d;
if (first) {
- bpf_tap_mode tap_mode;
-
- switch ((d->bd_oflags & (FREAD | FWRITE))) {
- case FREAD:
- tap_mode = BPF_TAP_INPUT;
- break;
- case FWRITE:
- tap_mode = BPF_TAP_OUTPUT;
- break;
- default:
- tap_mode = BPF_TAP_INPUT_OUTPUT;
- break;
- }
-
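+	/* Tap both input and output for this interface, regardless of the
+	 * descriptor's open flags */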
/* Find the default bpf entry for this ifp */
if (bp->bif_ifp->if_bpf == NULL) {
struct bpf_if *primary;
/* Only call dlil_set_bpf_tap for primary dlt */
if (bp->bif_ifp->if_bpf == bp)
- dlil_set_bpf_tap(bp->bif_ifp, tap_mode, bpf_tap_callback);
+ dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT, bpf_tap_callback);
if (bp->bif_tap)
- error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, tap_mode);
+ error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt, BPF_TAP_INPUT_OUTPUT);
}
return error;
do {
if (raw == 0 && ifp->if_framer) {
+ int rcvif_set = 0;
+
+ /*
+ * If this is a broadcast packet that needs to be
+ * looped back into the system, set the inbound ifp
+ * to that of the outbound ifp. This will allow
+ * us to determine that it is a legitimate packet
+ * for the system. Only set the ifp if it's not
+ * already set, just to be safe.
+ */
+ if ((m->m_flags & (M_BCAST | M_LOOP)) &&
+ m->m_pkthdr.rcvif == NULL) {
+ m->m_pkthdr.rcvif = ifp;
+ rcvif_set = 1;
+ }
+
retval = ifp->if_framer(ifp, &m, dest, dst_linkaddr, frame_type);
if (retval) {
if (retval != EJUSTRETURN) {
}
goto next;
}
+
+ /*
+ * Clear the ifp if it was set above, and to be
+ * safe, only if it is still the same as the
+ * outbound ifp we have in context. If it was
+ * looped back, then a copy of it was sent to the
+ * loopback interface with the rcvif set, and we
+ * are clearing the one that will go down to the
+ * layer below.
+ */
+ if (rcvif_set && m->m_pkthdr.rcvif == ifp)
+ m->m_pkthdr.rcvif = NULL;
}
#if BRIDGE
#define _offsetof(t, m) ((uintptr_t)((caddr_t)&((t *)0)->m))
masklen = _offsetof(struct sockaddr_dl, sdl_data[0]) + namelen;
socksize = masklen + ifp->if_addrlen;
-#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))
+#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(u_int32_t) - 1)))
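+/* e.g. with 4-byte rounding: ROUNDUP(5) == 8, ROUNDUP(8) == 8, ROUNDUP(9) == 12 */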
if ((u_int32_t)socksize < sizeof(struct sockaddr_dl))
socksize = sizeof(struct sockaddr_dl);
socksize = ROUNDUP(socksize);
* it doesn't fire when we call it there because the node
* hasn't been added to the tree yet.
*/
- if (!(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
+ if (req == RTM_ADD &&
+ !(rt->rt_flags & RTF_HOST) && rt_mask(rt) != 0) {
struct rtfc_arg arg;
arg.rnh = rnh;
arg.rt0 = rt;
struct rtentry *rt0 = ap->rt0;
struct radix_node_head *rnh = ap->rnh;
u_char *xk1, *xm1, *xk2, *xmp;
- int i, len, mlen;
+ int i, len;
lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
xm1 = (u_char *)rt_mask(rt0);
xk2 = (u_char *)rt_key(rt);
- /* avoid applying a less specific route */
- xmp = (u_char *)rt_mask(rt->rt_parent);
- mlen = rt_key(rt->rt_parent)->sa_len;
- if (mlen > rt_key(rt0)->sa_len) {
- RT_UNLOCK(rt);
- return (0);
- }
-
- for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
- if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
+ /*
+ * Avoid applying a less specific route. Do this only if the parent
+ * route (rt->rt_parent) is a network route; if it is a cloning host
+ * route instead, its mask will be NULL.
+ */
+ if ((xmp = (u_char *)rt_mask(rt->rt_parent)) != NULL) {
+ int mlen = rt_mask(rt->rt_parent)->sa_len;
+ if (mlen > rt_mask(rt0)->sa_len) {
RT_UNLOCK(rt);
return (0);
}
+
+ for (i = rnh->rnh_treetop->rn_offset; i < mlen; i++) {
+ if ((xmp[i] & ~(xmp[i] ^ xm1[i])) != xmp[i]) {
+ RT_UNLOCK(rt);
+ return (0);
+ }
+ }
}
for (i = rnh->rnh_treetop->rn_offset; i < len; i++) {
ro->ro_rt->rt_use++;
if (ro->ro_rt->rt_flags & RTF_GATEWAY)
dst = (struct sockaddr_in *)ro->ro_rt->rt_gateway;
- if (ro->ro_rt->rt_flags & RTF_HOST)
+ if (ro->ro_rt->rt_flags & RTF_HOST) {
isbroadcast = (ro->ro_rt->rt_flags & RTF_BROADCAST);
- else
+ } else {
+ /* Become a regular mutex */
+ RT_CONVERT_LOCK(ro->ro_rt);
isbroadcast = in_broadcast(dst->sin_addr, ifp);
+ }
RT_UNLOCK(ro->ro_rt);
}
ro_fwd->ro_rt->rt_use++;
if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY)
dst = (struct sockaddr_in *)ro_fwd->ro_rt->rt_gateway;
- if (ro_fwd->ro_rt->rt_flags & RTF_HOST)
+ if (ro_fwd->ro_rt->rt_flags & RTF_HOST) {
isbroadcast =
(ro_fwd->ro_rt->rt_flags & RTF_BROADCAST);
- else
+ } else {
+ /* Become a regular mutex */
+ RT_CONVERT_LOCK(ro_fwd->ro_rt);
isbroadcast = in_broadcast(dst->sin_addr, ifp);
+ }
RT_UNLOCK(ro_fwd->ro_rt);
rtfree(ro->ro_rt);
ro->ro_rt = ro_fwd->ro_rt;
head_ifscope = (inp->inp_flags & INP_BOUND_IF) ?
inp->inp_boundif : IFSCOPE_NONE;
-#if !IPSEC
/*
- * Current IPsec implementation makes incorrect IPsec
- * cache if this check is done here.
- * So delay this until duplicated socket is created.
+ * If the state is LISTEN then ignore segment if it contains an RST.
+ * If the segment contains an ACK then it is bad and send a RST.
+ * If it does not contain a SYN then it is not interesting; drop it.
+ * If it is from this socket, drop it, it must be forged.
*/
if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
- /*
- * Note: dropwithreset makes sure we don't
- * send a RST in response to a RST.
- */
+ if (thflags & TH_RST) {
+ goto drop;
+ }
if (thflags & TH_ACK) {
+ tp = NULL;
tcpstat.tcps_badsyn++;
rstreason = BANDLIM_RST_OPENPORT;
goto dropwithreset;
}
+
+ /* We come here if there is no SYN set */
+ tcpstat.tcps_badsyn++;
goto drop;
}
-#endif
KERNEL_DEBUG(DBG_FNC_TCP_NEWCONN | DBG_FUNC_START,0,0,0,0,0);
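+	/*
+	 * Drop segments whose source and destination (port and address)
+	 * are identical; such a segment claims to originate from this
+	 * very socket and must be forged.
+	 */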
+ if (th->th_dport == th->th_sport) {
+#if INET6
+ if (isipv6) {
+ if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
+ &ip6->ip6_src))
+ goto drop;
+ } else
+#endif /* INET6 */
+ if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
+ goto drop;
+ }
+ /*
+ * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
+ * in_broadcast() should never return true on a received
+ * packet with M_BCAST not set.
+ *
+ * Packets with a multicast source address should also
+ * be discarded.
+ */
+ if (m->m_flags & (M_BCAST|M_MCAST))
+ goto drop;
+#if INET6
+ if (isipv6) {
+ if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
+ IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
+ goto drop;
+ } else
+#endif
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
+ IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
+ ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
+ in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
+ goto drop;
+
#if INET6
/*
so = so2;
tcp_lock(so, 1, 0);
/*
- * This is ugly, but ....
- *
* Mark socket as temporary until we're
* committed to keeping it. The code at
* ``drop'' and ``dropwithreset'' check the
* socket created here should be discarded.
* We mark the socket as discardable until
* we're committed to it below in TCPS_LISTEN.
+ * There are some error conditions in which we
+ * have to drop the temporary socket.
*/
dropsocket++;
-
/*
* Inherit INP_BOUND_IF from listener; testing if
* head_ifscope is non-zero is sufficient, since it
inp->inp_vflag &= ~INP_IPV6;
inp->inp_vflag |= INP_IPV4;
#endif /* INET6 */
- inp->inp_laddr = ip->ip_dst;
+ inp->inp_laddr = ip->ip_dst;
#if INET6
}
#endif /* INET6 */
tcp_unlock(oso, 1, 0);
goto drop;
}
-#if IPSEC
- /*
- * To avoid creating incorrectly cached IPsec
- * association, this is need to be done here.
- *
- * Subject: (KAME-snap 748)
- * From: Wayne Knowles <w.knowles@niwa.cri.nz>
- * ftp://ftp.kame.net/pub/mail-list/snap-users/748
- */
- if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
- /*
- * Note: dropwithreset makes sure we don't
- * send a RST in response to a RST.
- */
- tcp_lock(oso, 0, 0); /* release ref on parent */
- tcp_unlock(oso, 1, 0);
- if (thflags & TH_ACK) {
- tcpstat.tcps_badsyn++;
- rstreason = BANDLIM_RST_OPENPORT;
- goto dropwithreset;
- }
- goto drop;
- }
-#endif
#if INET6
if (isipv6) {
/*
switch (tp->t_state) {
/*
- * If the state is LISTEN then ignore segment if it contains an RST.
- * If the segment contains an ACK then it is bad and send a RST.
- * If it does not contain a SYN then it is not interesting; drop it.
- * If it is from this socket, drop it, it must be forged.
- * Don't bother responding if the destination was a broadcast.
- * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
+ * Initialize tp->rcv_nxt, and tp->irs, select an initial
* tp->iss, and send a segment:
* <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
* Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
#if 1
lck_mtx_assert(((struct inpcb *)so->so_pcb)->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
#endif
- if (thflags & TH_RST)
- goto drop;
- if (thflags & TH_ACK) {
- rstreason = BANDLIM_RST_OPENPORT;
- goto dropwithreset;
- }
- if ((thflags & TH_SYN) == 0)
- goto drop;
- if (th->th_dport == th->th_sport) {
-#if INET6
- if (isipv6) {
- if (IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst,
- &ip6->ip6_src))
- goto drop;
- } else
-#endif /* INET6 */
- if (ip->ip_dst.s_addr == ip->ip_src.s_addr)
- goto drop;
- }
- /*
- * RFC1122 4.2.3.10, p. 104: discard bcast/mcast SYN
- * in_broadcast() should never return true on a received
- * packet with M_BCAST not set.
- *
- * Packets with a multicast source address should also
- * be discarded.
- */
- if (m->m_flags & (M_BCAST|M_MCAST))
- goto drop;
-#if INET6
- if (isipv6) {
- if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
- IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
- goto drop;
- } else
-#endif
- if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
- IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
- ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
- in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
- goto drop;
#if INET6
if (isipv6) {
MALLOC(sin6, struct sockaddr_in6 *, sizeof *sin6,
type = ND_REDIRECT;
}
+#if IPFW2
/*
* Check with the firewall...
*/
/* We still have the extra ref on rt */
RT_LOCK(rt);
}
+#endif
/*
* Fake scoped addresses. Note that even link-local source or
int in6_init2done = 0;
+#if IPFW2
/* firewall hooks */
ip6_fw_chk_t *ip6_fw_chk_ptr;
ip6_fw_ctl_t *ip6_fw_ctl_ptr;
int ip6_fw_enable = 1;
+#endif
struct ip6stat ip6stat;
ip6stat.ip6s_nxthist[ip6->ip6_nxt]++;
+#if IPFW2
/*
* Check with the firewall...
*/
return;
}
}
+#endif
/*
* Check against address spoofing/corruption.
in6_clearscope(&ip6->ip6_dst);
#endif
+#if IPFW2
/*
* Check with the firewall...
*/
goto done;
}
}
+#endif
/*
* If the outgoing packet contains a hop-by-hop options header,
return(error);
}
+#if IPFW2
static void
load_ip6fw(void)
{
ip6_fw_init();
}
+#endif
/*
* Raw IPv6 socket option processing.
switch (sopt->sopt_dir) {
case SOPT_GET:
switch (sopt->sopt_name) {
+#if IPFW2
case IPV6_FW_ADD:
case IPV6_FW_GET:
if (ip6_fw_ctl_ptr == 0)
else
error = ENOPROTOOPT;
break;
+#endif
case MRT6_INIT:
case MRT6_DONE:
case SOPT_SET:
switch (sopt->sopt_name) {
+#if IPFW2
case IPV6_FW_ADD:
case IPV6_FW_DEL:
case IPV6_FW_FLUSH:
else
error = ENOPROTOOPT;
break;
+#endif
case MRT6_INIT:
case MRT6_DONE:
#define FP_INCREATE 0x0001
#define FP_INCLOSE 0x0002
#define FP_INSELECT 0x0004
-/*
- * see <rdar://problem/6647955>
- */
-#if CONFIG_EMBEDDED
-#define FP_INCHRREAD 0x0000
-#else
-#define FP_INCHRREAD 0x0008
-#endif
+#define FP_INCHRREAD 0x0000 /* disable FP_INCHRREAD <rdar://6986929> */
#define FP_WRITTEN 0x0010
#define FP_CLOSING 0x0020
#define FP_WAITCLOSE 0x0040
/* Create the shadow stream file. */
error = VNOP_CREATE(dvp, &svp, &cn, &va, context);
if (error == 0) {
+ vnode_recycle(svp);
*creator = 1;
} else if ((error == EEXIST) && !makestream) {
error = VNOP_LOOKUP(dvp, &svp, &cn, context);
-10.0.0
+10.2.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
virtual bool serialize(OSSerialize * s) const;
bool serializeData(IOOptionBits kind, OSSerialize * s) const;
-
- /*!
- @function removePersonalities
- @abstract Remove exact personalities from the database.
- @param personalitiesArray An array of personalities to remove.
- @result Returns true if all personalities are removed successfully. Failure is due to a memory allocation failure.
- */
- bool removePersonalities(OSArray * personalitiesArray);
/* This stuff is no longer used at all; we keep it around for PPC/i386
* binary compatibility only. Symbols are no longer exported.
{
kIOPreparationIDUnprepared = 0,
kIOPreparationIDUnsupported = 1,
+ kIOPreparationIDAlwaysPrepared = 2,
};
/*! @class IOMemoryDescriptor : public OSObject
#define sub_iokit_ahci err_sub(12)
#define sub_iokit_powermanagement err_sub(13)
//#define sub_iokit_hidsystem err_sub(14)
+#define sub_iokit_scsi err_sub(16)
//#define sub_iokit_pccard err_sub(21)
#define sub_iokit_vendor_specific err_sub(-2)
void all_done ( void );
void start_ack_timer ( void );
void stop_ack_timer ( void );
- unsigned long compute_settle_time ( void );
- IOReturn startSettleTimer ( unsigned long delay );
- IOReturn ask_parent ( unsigned long requestedState );
+ void startSettleTimer( void );
bool checkForDone ( void );
bool responseValid ( unsigned long x, int pid );
void computeDesiredState ( unsigned long tempDesire = 0 );
void removePowerClient( const OSSymbol * client );
uint32_t getPowerStateForClient( const OSSymbol * client );
IOReturn requestPowerState( const OSSymbol * client, uint32_t state );
+ IOReturn requestDomainPower( unsigned long ourPowerState, IOOptionBits options = 0 );
#endif /* XNU_KERNEL_PRIVATE */
};
return ret;
}
-bool IOCatalogue::removePersonalities(OSArray * personalitiesToRemove)
-{
- bool result = true;
- OSArray * arrayCopy = NULL; // do not release
- OSCollectionIterator * iterator = NULL; // must release
- OSDictionary * personality = NULL; // do not release
- OSDictionary * checkPersonality = NULL; // do not release
- unsigned int count, i;
-
- // remove configs from catalog.
-
- arrayCopy = OSArray::withArray(array);
- if (!arrayCopy) {
- result = false;
- goto finish;
- }
-
- iterator = OSCollectionIterator::withCollection(arrayCopy);
- arrayCopy->release();
- if (!iterator) {
- result = false;
- goto finish;
- }
-
- array->flushCollection();
-
- count = personalitiesToRemove->getCount();
-
- /* Go through the old catalog's list of personalities and add back any that
- * are *not* found in 'personalitiesToRemove'.
- */
- while ((personality = (OSDictionary *)iterator->getNextObject())) {
- bool found = false;
-
- for (i = 0; i < count; i++) {
- checkPersonality = OSDynamicCast(OSDictionary,
- personalitiesToRemove->getObject(i));
-
- /* Do isEqualTo() with the single-arg version to make an exact
- * comparison (unlike _removeDrivers() above).
- */
- if (personality->isEqualTo(checkPersonality)) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- array->setObject(personality);
- }
- }
-
-finish:
-
- OSSafeRelease(iterator);
- return result;
-}
-
IOReturn IOCatalogue::terminateDrivers(OSDictionary * matching)
{
IOReturn ret;
IOGeneralMemoryDescriptor::getPreparationID( void )
{
ioGMDData *dataP;
- if (!_wireCount || !(dataP = getDataP(_memoryEntries)))
+
+ if (!_wireCount)
return (kIOPreparationIDUnprepared);
+
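+    // Physical-address descriptors have no wiring state to track;
+    // report them as always prepared.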
+ if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
+ return (kIOPreparationIDAlwaysPrepared);
+
+ if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
+ return (kIOPreparationIDUnprepared);
+
if (kIOPreparationIDUnprepared == dataP->fPreparationID)
{
#if defined(__ppc__ )
#if IOMD_DEBUG_DMAACTIVE
} else if (kIOMDSetDMAActive == op) {
IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
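+        // Use atomic ops: the DMA-active count may be updated concurrently.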
- md->__iomd_reservedA++;
+ OSIncrementAtomic(&md->__iomd_reservedA);
} else if (kIOMDSetDMAInactive == op) {
IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
if (md->__iomd_reservedA)
- md->__iomd_reservedA--;
+ OSDecrementAtomic(&md->__iomd_reservedA);
else
panic("kIOMDSetDMAInactive");
#endif /* IOMD_DEBUG_DMAACTIVE */
length = ((IOMemoryMap *) __address)->fLength;
}
- if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
+ if ((addressMap == kernel_map)
+ && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
addressMap = IOPageableMapForAddress( address );
#if DEBUG
OSDefineMetaClassAndStructors(IOPMprot, OSObject)
#endif
-// log setPowerStates longer than (ns):
-#define LOG_SETPOWER_TIMES (50ULL * 1000ULL * 1000ULL)
-// log app responses longer than (ns):
-#define LOG_APP_RESPONSE_TIMES (100ULL * 1000ULL * 1000ULL)
-// use message tracer to log messages longer than (ns):
-#define LOG_APP_RESPONSE_MSG_TRACER (3 * 1000ULL * 1000ULL * 1000ULL)
-
//*********************************************************************************
// Globals
//*********************************************************************************
// Macros
//*********************************************************************************
-#define PM_ERROR(x...) do { kprintf(x); IOLog(x); } while (false)
-#define PM_DEBUG(x...) do { kprintf(x); } while (false)
-
-#define PM_TRACE(x...) do { \
+#define PM_ERROR(x...) do { kprintf(x); IOLog(x); } while (false)
+#define PM_DEBUG(x...) do { kprintf(x); } while (false)
+#define PM_TRACE(x...) do { \
if (kIOLogDebugPower & gIOKitDebug) kprintf(x); } while (false)
#define PM_CONNECT(x...)
#define kIOPMPowerStateMax 0xFFFFFFFF
#define IS_PM_ROOT() (this == gIOPMRootNode)
+#define IS_POWER_DROP (fHeadNotePowerState < fCurrentPowerState)
+#define IS_POWER_RISE (fHeadNotePowerState > fCurrentPowerState)
+
+// log setPowerStates longer than (ns):
+#define LOG_SETPOWER_TIMES (50ULL * 1000ULL * 1000ULL)
+// log app responses longer than (ns):
+#define LOG_APP_RESPONSE_TIMES (100ULL * 1000ULL * 1000ULL)
+// use message tracer to log messages longer than (ns):
+#define LOG_APP_RESPONSE_MSG_TRACER (3 * 1000ULL * 1000ULL * 1000ULL)
+
+#define RESERVE_DOMAIN_POWER 1
+
+enum {
+ kReserveDomainPower = 1
+};
//*********************************************************************************
// PM machine states
//*********************************************************************************
// [private] PMfree
//
-// Free up the data created in PMinit, if it exists.
+// Free the data created by PMinit. Only called from IOService::free().
//*********************************************************************************
void IOService::PMfree ( void )
// [deprecated] youAreRoot
//
// Power Management is informing us that we are the root power domain.
-// The only difference between us and any other power domain is that
-// we have no parent and therefore never call it.
//*********************************************************************************
IOReturn IOService::youAreRoot ( void )
{
PM_TRACE("%s::powerDomainDidChangeTo parentsKnowState = true\n",
getName());
- ask_parent( fDesiredPowerState );
+ requestDomainPower( fDesiredPowerState );
}
exit_no_ack:
//*********************************************************************************
// [public] requestPowerDomainState
//
-// The child of a power domain calls it parent here to request power of a certain
-// character.
+// Called on a power parent when a child's power requirement changes.
//*********************************************************************************
-IOReturn IOService::requestPowerDomainState (
- IOPMPowerFlags desiredState,
- IOPowerConnection * whichChild,
- unsigned long specification )
+IOReturn IOService::requestPowerDomainState(
+ IOPMPowerFlags childRequestPowerFlags,
+ IOPowerConnection * childConnection,
+ unsigned long specification )
{
- unsigned long i;
- unsigned long computedState;
- unsigned long theDesiredState;
- IOService * child;
- IOPMRequest * childRequest;
+ unsigned long ps;
+ IOPMPowerFlags outputPowerFlags;
+ IOService * child;
+ IOPMRequest * subRequest;
+ bool preventIdle, preventSleep;
+ bool adjustPower = false;
if (!initialized)
return IOPMNotYetInitialized;
return kIOReturnSuccess;
}
- theDesiredState = desiredState & ~(kIOPMPreventIdleSleep | kIOPMPreventSystemSleep);
+ OUR_PMLog(kPMLogRequestDomain, childRequestPowerFlags, specification);
- OUR_PMLog(kPMLogRequestDomain, desiredState, specification);
-
- if (!isChild(whichChild, gIOPowerPlane))
+ if (!isChild(childConnection, gIOPowerPlane))
return kIOReturnNotAttached;
- if (fControllingDriver == NULL || !fPowerStates)
+ if (!fControllingDriver || !fNumberOfPowerStates)
return IOPMNotYetInitialized;
- child = (IOService *) whichChild->getChildEntry(gIOPowerPlane);
+ child = (IOService *) childConnection->getChildEntry(gIOPowerPlane);
assert(child);
- switch (specification) {
- case IOPMLowestState:
- i = 0;
- while ( i < fNumberOfPowerStates )
- {
- if ( ( fPowerStates[i].outputPowerCharacter & theDesiredState) ==
- (theDesiredState & fOutputPowerCharacterFlags) )
- {
- break;
- }
- i++;
- }
- if ( i >= fNumberOfPowerStates )
- {
- return IOPMNoSuchState;
- }
- break;
+ preventIdle = ((childRequestPowerFlags & kIOPMPreventIdleSleep) != 0);
+ preventSleep = ((childRequestPowerFlags & kIOPMPreventSystemSleep) != 0);
+ childRequestPowerFlags &= ~(kIOPMPreventIdleSleep | kIOPMPreventSystemSleep);
- case IOPMNextLowerState:
- i = fCurrentPowerState - 1;
- while ( (int) i >= 0 )
- {
- if ( ( fPowerStates[i].outputPowerCharacter & theDesiredState) ==
- (theDesiredState & fOutputPowerCharacterFlags) )
- {
- break;
- }
- i--;
- }
- if ( (int) i < 0 )
- {
- return IOPMNoSuchState;
- }
- break;
+ // Merge in the power flags contributed by this power parent
+ // at its current or impending power state.
- case IOPMHighestState:
- i = fNumberOfPowerStates;
- while ( (int) i >= 0 )
- {
- i--;
- if ( ( fPowerStates[i].outputPowerCharacter & theDesiredState) ==
- (theDesiredState & fOutputPowerCharacterFlags) )
- {
- break;
- }
- }
- if ( (int) i < 0 )
- {
- return IOPMNoSuchState;
- }
- break;
+ outputPowerFlags = fPowerStates[fCurrentPowerState].outputPowerCharacter;
+ if ((fMachineState != kIOPM_Finished) && (getPMRootDomain() != this))
+ {
+ if (IS_POWER_DROP)
+ {
+ // Use the lower power state when dropping power.
+ // Must be careful since a power drop can be canceled
+ // from the following states:
+ // - kIOPM_OurChangeTellClientsPowerDown
+ // - kIOPM_OurChangeTellPriorityClientsPowerDown
+ //
+ // The child must not wait for this parent to raise power
+ // if the power drop was cancelled. The solution is to cancel
+ // the power drop if possible, then schedule an adjustment to
+ // re-evaluate our correct power state.
+ //
+ // Root domain is excluded to avoid idle sleep issues. And permit
+ // root domain children to pop up when system is going to sleep.
+
+ if ((fMachineState == kIOPM_OurChangeTellClientsPowerDown) ||
+ (fMachineState == kIOPM_OurChangeTellPriorityClientsPowerDown))
+ {
+ fDoNotPowerDown = true; // cancel power drop
+ adjustPower = true; // schedule an adjustment
+ PM_TRACE("%s: power drop cancelled in state %u by %s\n",
+ getName(), fMachineState, child->getName());
+ }
+ else
+ {
+ // Beyond cancellation point, report the impending state.
+ outputPowerFlags =
+ fPowerStates[fHeadNotePowerState].outputPowerCharacter;
+ }
+ }
+ else
+ {
+ // When raising power, we must report the output power flags from
+ // the child's perspective. A child power request may arrive while
+ // the parent is transitioning upwards. If a request arrives after
+ // setParentInfo() has already recorded the output power flags
+ // for the next power state, then using the power supplied by
+ // fCurrentPowerState is incorrect, and might cause the child
+ // to wait when it should not.
+
+ outputPowerFlags = childConnection->parentCurrentPowerFlags();
+ }
+ }
+ child->fHeadNoteDomainTargetFlags |= outputPowerFlags;
- case IOPMNextHigherState:
- i = fCurrentPowerState + 1;
- while ( i < fNumberOfPowerStates )
- {
- if ( ( fPowerStates[i].outputPowerCharacter & theDesiredState) ==
- (theDesiredState & fOutputPowerCharacterFlags) )
- {
- break;
- }
- i++;
- }
- if ( i == fNumberOfPowerStates )
- {
- return IOPMNoSuchState;
- }
- break;
+ // Map the child's requested power flags to one of our power states.
- default:
- return IOPMBadSpecification;
+ for (ps = 0; ps < fNumberOfPowerStates; ps++)
+ {
+ if ((fPowerStates[ps].outputPowerCharacter & childRequestPowerFlags) ==
+ (fOutputPowerCharacterFlags & childRequestPowerFlags))
+ break;
+ }
+ if (ps >= fNumberOfPowerStates)
+ {
+ ps = 0; // should never happen
}
- computedState = i;
+ // Conditions that warrant a power adjustment on this parent.
+ // The adjustment will also propagate any changes to the child's
+ // prevent idle/sleep flags towards the root domain.
+
+ if (!childConnection->childHasRequestedPower() ||
+ (ps != childConnection->getDesiredDomainState()) ||
+ (childConnection->getPreventIdleSleepFlag() != preventIdle) ||
+ (childConnection->getPreventSystemSleepFlag() != preventSleep))
+ adjustPower = true;
+
+#if ENABLE_DEBUG_LOGS
+ if (adjustPower)
+ {
+ PM_DEBUG("requestPowerDomainState[%s]: %s, init %d, %u->%u\n",
+ getName(), child->getName(),
+ !childConnection->childHasRequestedPower(),
+ (uint32_t) childConnection->getDesiredDomainState(),
+ (uint32_t) ps);
+ }
+#endif
// Record the child's desires on the connection.
#if SUPPORT_IDLE_CANCEL
- bool attemptCancel = ((kIOPMPreventIdleSleep & desiredState) && !whichChild->getPreventIdleSleepFlag());
+ bool attemptCancel = (preventIdle && !childConnection->getPreventIdleSleepFlag());
#endif
- whichChild->setDesiredDomainState( computedState );
- whichChild->setPreventIdleSleepFlag( desiredState & kIOPMPreventIdleSleep );
- whichChild->setPreventSystemSleepFlag( desiredState & kIOPMPreventSystemSleep );
- whichChild->setChildHasRequestedPower();
-
- if (whichChild->getReadyFlag() == false)
- return IOPMNoErr;
+ childConnection->setChildHasRequestedPower();
+ childConnection->setDesiredDomainState( ps );
+ childConnection->setPreventIdleSleepFlag( preventIdle );
+ childConnection->setPreventSystemSleepFlag( preventSleep );
// Schedule a request to re-evaluate all children desires and
// adjust power state. Submit a request if one wasn't pending,
// or if the current request is part of a call tree.
- if (!fDeviceOverrides && (!fAdjustPowerScheduled || gIOPMRequest->getRootRequest()))
+ if (adjustPower && !fDeviceOverrides &&
+ (!fAdjustPowerScheduled || gIOPMRequest->getRootRequest()))
{
- childRequest = acquirePMRequest( this, kIOPMRequestTypeAdjustPowerState, gIOPMRequest );
- if (childRequest)
+ subRequest = acquirePMRequest(
+ this, kIOPMRequestTypeAdjustPowerState, gIOPMRequest );
+ if (subRequest)
{
- submitPMRequest( childRequest );
+ submitPMRequest( subRequest );
fAdjustPowerScheduled = true;
}
- }
+ }
+
#if SUPPORT_IDLE_CANCEL
if (attemptCancel)
{
- childRequest = acquirePMRequest( this, kIOPMRequestTypeIdleCancel );
- if (childRequest)
+ subRequest = acquirePMRequest( this, kIOPMRequestTypeIdleCancel );
+ if (subRequest)
{
- submitPMRequest( childRequest );
+ submitPMRequest( subRequest );
}
}
#endif
- return IOPMNoErr;
+ return kIOReturnSuccess;
}
//*********************************************************************************
return (IOPMAckImplied == ret);
}
+// MARK: -
+// MARK: Power Change Initiated by Driver
+
+//*********************************************************************************
+// [private] OurChangeStart
+//
+// Begin the processing of a power change initiated by us.
+//*********************************************************************************
+
+void IOService::OurChangeStart ( void )
+{
+ PM_ASSERT_IN_GATE();
+ OUR_PMLog( kPMLogStartDeviceChange, fHeadNotePowerState, fCurrentPowerState );
+
+ // fMaxCapability is our maximum possible power state based on the current
+ // power state of our parents. If we are trying to raise power beyond the
+ // maximum, send an async request for more power to all parents.
+
+ if (!IS_PM_ROOT() && (fMaxCapability < fHeadNotePowerState))
+ {
+ fHeadNoteFlags |= kIOPMNotDone;
+ requestDomainPower(fHeadNotePowerState);
+ OurChangeFinish();
+ return;
+ }
+
+ // A redundant power change skips to the end of the state machine.
+
+ if (!fInitialChange && (fHeadNotePowerState == fCurrentPowerState))
+ {
+ OurChangeFinish();
+ return;
+ }
+ fInitialChange = false;
+
+#if ROOT_DOMAIN_RUN_STATES
+ // Change started, but may not complete...
+ // Can be canceled (power drop) or deferred (power rise).
+
+ getPMRootDomain()->handlePowerChangeStartForService(
+ /* service */ this,
+ /* RD flags */ &fRootDomainState,
+ /* new pwr state */ fHeadNotePowerState,
+ /* change flags */ fHeadNoteFlags );
+#endif
+
+ // Two separate paths, depending if power is being raised or lowered.
+ // Lowering power is subject to approval by clients of this service.
+
+ if (IS_POWER_DROP)
+ {
+ // Next machine state for a power drop.
+ fMachineState = kIOPM_OurChangeTellClientsPowerDown;
+ fDoNotPowerDown = false;
+
+ // Ask apps and kernel clients permission to lower power.
+ fOutOfBandParameter = kNotifyApps;
+ askChangeDown(fHeadNotePowerState);
+ }
+ else
+ {
+ // This service is raising power and parents are able to support the
+ // new power state. However a parent may have already committed to
+ // drop power, which might force this object to temporarily drop power.
+ // This results in "oscillations" before the state machines converge
+ // to a steady state.
+ //
+ // To prevent this, a child must make a power reservation against all
+ // parents before raising power. If the reservation fails, indicating
+ // that the child will be unable to sustain the higher power state,
+ // then the child will signal the parent to adjust power, and the child
+ // will defer its power change.
+
+#if RESERVE_DOMAIN_POWER
+ IOReturn ret;
+
+ // Reserve parent power necessary to achieve fHeadNotePowerState.
+ ret = requestDomainPower( fHeadNotePowerState, kReserveDomainPower );
+ if (ret != kIOReturnSuccess)
+ {
+ // Reservation failed, defer power rise.
+ fHeadNoteFlags |= kIOPMNotDone;
+ OurChangeFinish();
+ return;
+ }
+#endif
+ // Notify interested drivers and children.
+ notifyAll( kIOPM_OurChangeSetPowerState, kNotifyWillChange );
+ }
+}
+
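A note on the reservation step above: to avoid powering up only to be forced
back down by a parent that has already committed to a lower state, the child
first checks that every parent can sustain the target state. A minimal sketch
of that control flow in plain C, with hypothetical names (parent_t,
reserve_power) standing in for the IOKit machinery:

#include <stdbool.h>

typedef struct { unsigned max_state; } parent_t;   /* illustrative only */

/* Return true only if every parent can sustain the requested state;
 * false means the child should defer its power rise (kIOPMNotDone). */
static bool reserve_power(const parent_t *parents, int nparents, unsigned want)
{
    for (int i = 0; i < nparents; i++)
        if (parents[i].max_state < want)
            return false;   /* reservation failed: defer and retry later */
    return true;            /* safe to notify drivers and raise power    */
}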
+//*********************************************************************************
+
+struct IOPMRequestDomainPowerContext {
+ IOService * child; // the requesting child
+ IOPMPowerFlags requestPowerFlags; // power flags requested by child
+};
+
+static void
+requestDomainPowerApplier(
+ IORegistryEntry * entry,
+ void * inContext )
+{
+ IOPowerConnection * connection;
+ IOService * parent;
+ IOPMRequestDomainPowerContext * context;
+
+ if ((connection = OSDynamicCast(IOPowerConnection, entry)) == 0)
+ return;
+ parent = (IOService *) connection->copyParentEntry(gIOPowerPlane);
+ if (!parent)
+ return;
+
+ assert(inContext);
+ context = (IOPMRequestDomainPowerContext *) inContext;
+
+ if (connection->parentKnowsState() && connection->getReadyFlag())
+ {
+ parent->requestPowerDomainState(
+ context->requestPowerFlags,
+ connection,
+ IOPMLowestState);
+ }
+
+ parent->release();
+}
+
+//*********************************************************************************
+// [private] requestDomainPower
+//*********************************************************************************
+
+IOReturn IOService::requestDomainPower(
+ unsigned long ourPowerState,
+ IOOptionBits options )
+{
+ const IOPMPowerState * powerStateEntry;
+ IOPMPowerFlags requestPowerFlags;
+ unsigned long maxPowerState;
+ IOPMRequestDomainPowerContext context;
+
+ PM_ASSERT_IN_GATE();
+ assert(ourPowerState < fNumberOfPowerStates);
+ if (ourPowerState >= fNumberOfPowerStates)
+ return kIOReturnBadArgument;
+ if (IS_PM_ROOT())
+ return kIOReturnSuccess;
+
+ // Fetch the input power flags for the requested power state.
+ // Parent request is stated in terms of required power flags.
+
+ powerStateEntry = &fPowerStates[ourPowerState];
+ requestPowerFlags = powerStateEntry->inputPowerRequirement;
+
+ if (powerStateEntry->capabilityFlags & (kIOPMChildClamp | kIOPMPreventIdleSleep))
+ requestPowerFlags |= kIOPMPreventIdleSleep;
+ if (powerStateEntry->capabilityFlags & (kIOPMChildClamp2 | kIOPMPreventSystemSleep))
+ requestPowerFlags |= kIOPMPreventSystemSleep;
+
+ // Disregard the "previous request" for power reservation.
+
+ if (((options & kReserveDomainPower) == 0) &&
+ (fPreviousRequest == requestPowerFlags))
+ {
+ // skip if domain already knows our requirements
+ goto done;
+ }
+ fPreviousRequest = requestPowerFlags;
+
+ context.child = this;
+ context.requestPowerFlags = requestPowerFlags;
+ fHeadNoteDomainTargetFlags = 0;
+ applyToParents(requestDomainPowerApplier, &context, gIOPowerPlane);
+
+ if (options & kReserveDomainPower)
+ {
+ maxPowerState = fControllingDriver->maxCapabilityForDomainState(
+ fHeadNoteDomainTargetFlags );
+
+ if (maxPowerState < fHeadNotePowerState)
+ {
+ PM_TRACE("%s: power desired %u:0x%x got %u:0x%x\n",
+ getName(),
+ (uint32_t) ourPowerState, (uint32_t) requestPowerFlags,
+ (uint32_t) maxPowerState, (uint32_t) fHeadNoteDomainTargetFlags);
+ return kIOReturnNoPower;
+ }
+ }
+
+done:
+ return kIOReturnSuccess;
+}
+
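The flag folding in requestDomainPower() above turns a power state's clamp
and prevent bits into the request sent to parents. A small illustrative
sketch (the flag values are made up, not the real kIOPM* constants):

#include <stdint.h>

enum {
    CLAMP        = 1u << 0,   /* stands in for kIOPMChildClamp         */
    CLAMP2       = 1u << 1,   /* stands in for kIOPMChildClamp2        */
    PREVENT_IDLE = 1u << 2,   /* stands in for kIOPMPreventIdleSleep   */
    PREVENT_SYS  = 1u << 3,   /* stands in for kIOPMPreventSystemSleep */
};

/* Fold a state's capability flags into its input power requirement. */
static uint32_t domain_request_flags(uint32_t input_req, uint32_t caps)
{
    uint32_t req = input_req;
    if (caps & (CLAMP  | PREVENT_IDLE)) req |= PREVENT_IDLE;
    if (caps & (CLAMP2 | PREVENT_SYS))  req |= PREVENT_SYS;
    return req;
}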
+//*********************************************************************************
+// [private] OurSyncStart
+//*********************************************************************************
+
+void IOService::OurSyncStart ( void )
+{
+ PM_ASSERT_IN_GATE();
+
+ if (fInitialChange)
+ return;
+
+#if ROOT_DOMAIN_RUN_STATES
+ getPMRootDomain()->handlePowerChangeStartForService(
+ /* service */ this,
+ /* RD flags */ &fRootDomainState,
+ /* new pwr state */ fHeadNotePowerState,
+ /* change flags */ fHeadNoteFlags );
+#endif
+
+ fMachineState = kIOPM_SyncNotifyDidChange;
+ fDriverCallReason = kDriverCallInformPreChange;
+
+ notifyChildren();
+}
+
//*********************************************************************************
// [private] OurChangeTellClientsPowerDown
//
// [private] OurChangeWaitForPowerSettle
//
// Our controlling driver has changed power state on the hardware
-// during a power change we initiated. Here we see if we need to wait
-// for power to settle before continuing. If not, we continue processing
-// (notifying interested parties post-change). If so, we wait and
-// continue later.
+// during a power change we initiated. Wait for the driver-specified
+// settle time to expire before notifying interested parties post-change.
//*********************************************************************************
-void IOService::OurChangeWaitForPowerSettle ( void )
+void IOService::OurChangeWaitForPowerSettle( void )
{
fMachineState = kIOPM_OurChangeNotifyInterestedDriversDidChange;
- fSettleTimeUS = compute_settle_time();
- if ( fSettleTimeUS )
- {
- startSettleTimer(fSettleTimeUS);
- }
+ startSettleTimer();
}
//*********************************************************************************
all_done();
}
-//*********************************************************************************
-// [private] ParentDownTellPriorityClientsPowerDown
-//
-// All applications and kernel clients have been notified of a power lowering
-// initiated by the parent and we had to wait for responses. Here
-// we notify any priority clients. If they all ack, we continue with the power change.
-// If at least one doesn't, we have to wait for it to acknowledge and then continue.
-//*********************************************************************************
-
-void IOService::ParentDownTellPriorityClientsPowerDown ( void )
-{
- fMachineState = kIOPM_ParentDownNotifyInterestedDriversWillChange;
- tellChangeDown2(fHeadNotePowerState);
-}
+// MARK: -
+// MARK: Power Change Initiated by Parent
//*********************************************************************************
-// [private] ParentDownNotifyInterestedDriversWillChange
+// [private] ParentChangeStart
//
-// All applications and kernel clients have been notified of a power lowering
-// initiated by the parent and we had to wait for their responses. Here we notify
-// any interested drivers and power domain children. If they all ack, we continue
-// with the power change.
-// If at least one doesn't, we have to wait for it to acknowledge and then continue.
+// Here we begin the processing of a power change initiated by our parent.
//*********************************************************************************
-void IOService::ParentDownNotifyInterestedDriversWillChange ( void )
+IOReturn IOService::ParentChangeStart ( void )
{
- IOPMrootDomain *rootDomain;
- if ((rootDomain = getPMRootDomain()) == this)
+ PM_ASSERT_IN_GATE();
+ OUR_PMLog( kPMLogStartParentChange, fHeadNotePowerState, fCurrentPowerState );
+
+ // Power domain is lowering power
+ if ( fHeadNotePowerState < fCurrentPowerState )
{
- rootDomain->tracePoint(kIOPMTracePointSystemSleepDriversPhase);
- }
+ // TODO: redundant? See handlePowerDomainWillChangeTo()
+ setParentInfo( fHeadNoteParentFlags, fHeadNoteParentConnection, true );
- notifyAll( kIOPM_ParentDownSetPowerState, kNotifyWillChange );
-}
+#if ROOT_DOMAIN_RUN_STATES
+ getPMRootDomain()->handlePowerChangeStartForService(
+ /* service */ this,
+ /* RD flags */ &fRootDomainState,
+ /* new pwr state */ fHeadNotePowerState,
+ /* change flags */ fHeadNoteFlags );
+#endif
-//*********************************************************************************
-// [private] ParentDownSetPowerState
-//
-// We had to wait for it, but all parties have acknowledged our pre-change
-// notification of a power lowering initiated by the parent.
-// Here we instruct our controlling driver
-// to put the hardware in the state it needs to be in when the domain is
-// lowered. If it does so, we continue processing
-// (waiting for settle and acknowledging the parent.)
+ // tell apps and kernel clients
+ fInitialChange = false;
+ fMachineState = kIOPM_ParentDownTellPriorityClientsPowerDown;
+ tellChangeDown1(fHeadNotePowerState);
+ return IOPMWillAckLater;
+ }
+
+ // Power domain is raising power
+ if ( fHeadNotePowerState > fCurrentPowerState )
+ {
+ if ( fDesiredPowerState > fCurrentPowerState )
+ {
+ if ( fDesiredPowerState < fHeadNotePowerState )
+ {
+ // We power up, but not all the way
+ fHeadNotePowerState = fDesiredPowerState;
+ fHeadNotePowerArrayEntry = &fPowerStates[fDesiredPowerState];
+ OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0);
+ }
+ } else {
+ // We don't need to change
+ fHeadNotePowerState = fCurrentPowerState;
+ fHeadNotePowerArrayEntry = &fPowerStates[fCurrentPowerState];
+ OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0);
+ }
+ }
+
+ if ( fHeadNoteFlags & kIOPMDomainDidChange )
+ {
+ if ( fHeadNotePowerState > fCurrentPowerState )
+ {
+#if ROOT_DOMAIN_RUN_STATES
+ getPMRootDomain()->handlePowerChangeStartForService(
+ /* service */ this,
+ /* RD flags */ &fRootDomainState,
+ /* new pwr state */ fHeadNotePowerState,
+ /* change flags */ fHeadNoteFlags );
+#endif
+
+ // Parent did change up - start our change up
+ fInitialChange = false;
+ notifyAll( kIOPM_ParentUpSetPowerState, kNotifyWillChange );
+ return IOPMWillAckLater;
+ }
+ else if (fHeadNoteFlags & kIOPMSynchronize)
+ {
+ // We do not need to change power state, but notify
+ // children to propagate tree synchronization.
+ fMachineState = kIOPM_SyncNotifyDidChange;
+ fDriverCallReason = kDriverCallInformPreChange;
+ notifyChildren();
+ return IOPMWillAckLater;
+ }
+ }
+
+ all_done();
+ return IOPMAckImplied;
+}
+
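The amendment logic in ParentChangeStart() above clamps a parent-initiated
power rise to the child's own desire. Reduced to a pure function (a sketch;
the names are illustrative):

/* Returns the amended target state for a parent-initiated power rise. */
static unsigned long amend_parent_rise(unsigned long head,      /* proposed   */
                                       unsigned long cur,       /* current    */
                                       unsigned long desired)   /* our desire */
{
    if (head <= cur)    return head;     /* not a rise: leave unchanged   */
    if (desired <= cur) return cur;      /* we don't need to change       */
    if (desired < head) return desired;  /* power up, but not all the way */
    return head;                         /* take the full rise            */
}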
+//*********************************************************************************
+// [private] ParentDownTellPriorityClientsPowerDown
+//
+// All applications and kernel clients have been notified of a power lowering
+// initiated by the parent and we had to wait for responses. Here
+// we notify any priority clients. If they all ack, we continue with the power change.
+// If at least one doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+void IOService::ParentDownTellPriorityClientsPowerDown ( void )
+{
+ fMachineState = kIOPM_ParentDownNotifyInterestedDriversWillChange;
+ tellChangeDown2(fHeadNotePowerState);
+}
+
+//*********************************************************************************
+// [private] ParentDownNotifyInterestedDriversWillChange
+//
+// All applications and kernel clients have been notified of a power lowering
+// initiated by the parent and we had to wait for their responses. Here we notify
+// any interested drivers and power domain children. If they all ack, we continue
+// with the power change.
+// If at least one doesn't, we have to wait for it to acknowledge and then continue.
+//*********************************************************************************
+
+void IOService::ParentDownNotifyInterestedDriversWillChange ( void )
+{
+ IOPMrootDomain *rootDomain;
+ if ((rootDomain = getPMRootDomain()) == this)
+ {
+ rootDomain->tracePoint(kIOPMTracePointSystemSleepDriversPhase);
+ }
+
+ notifyAll( kIOPM_ParentDownSetPowerState, kNotifyWillChange );
+}
+
+//*********************************************************************************
+// [private] ParentDownSetPowerState
+//
+// We had to wait for it, but all parties have acknowledged our pre-change
+// notification of a power lowering initiated by the parent.
+// Here we instruct our controlling driver
+// to put the hardware in the state it needs to be in when the domain is
+// lowered. If it does so, we continue processing
+// (waiting for settle and acknowledging the parent.)
// If it doesn't, we have to wait for it to acknowledge and then continue.
//*********************************************************************************
void IOService::ParentDownWaitForPowerSettle ( void )
{
fMachineState = kIOPM_ParentDownNotifyDidChangeAndAcknowledgeChange;
- fSettleTimeUS = compute_settle_time();
- if ( fSettleTimeUS )
- {
- startSettleTimer(fSettleTimeUS);
- }
+ startSettleTimer();
}
//*********************************************************************************
void IOService::ParentUpWaitForSettleTime ( void )
{
fMachineState = kIOPM_ParentUpNotifyInterestedDriversDidChange;
- fSettleTimeUS = compute_settle_time();
- if ( fSettleTimeUS )
- {
- startSettleTimer(fSettleTimeUS);
- }
+ startSettleTimer();
}
//*********************************************************************************
if ( !( fHeadNoteFlags & kIOPMNotDone) )
{
// we changed, tell our parent
- if ( !IS_PM_ROOT() )
- {
- ask_parent(fHeadNotePowerState);
- }
+ requestDomainPower(fHeadNotePowerState);
// yes, did power raise?
if ( fCurrentPowerState < fHeadNotePowerState )
}
//*********************************************************************************
-// [private] compute_settle_time
+// settle_timer_expired
//
-// Compute the power-settling delay in microseconds for the
-// change from myCurrentState to head_note_state.
+// Holds a retain while the settle timer callout is in flight.
//*********************************************************************************
-unsigned long IOService::compute_settle_time ( void )
+static void
+settle_timer_expired( thread_call_param_t arg0, thread_call_param_t arg1 )
{
- unsigned long totalTime;
- unsigned long i;
+ IOService * me = (IOService *) arg0;
+
+ if (gIOPMWorkLoop && gIOPMReplyQueue)
+ {
+ gIOPMWorkLoop->runAction(
+ OSMemberFunctionCast(IOWorkLoop::Action, me, &IOService::settleTimerExpired),
+ me);
+ gIOPMReplyQueue->signalWorkAvailable();
+ }
+ me->release();
+}
+
+//*********************************************************************************
+// [private] startSettleTimer
+//
+// Calculate a power-settling delay in microseconds and start a timer.
+//*********************************************************************************
+
+void IOService::startSettleTimer( void )
+{
+ AbsoluteTime deadline;
+ unsigned long i;
+ uint32_t settleTime = 0;
+ boolean_t pending;
PM_ASSERT_IN_GATE();
- // compute total time to attain the new state
- totalTime = 0;
i = fCurrentPowerState;
- // we're lowering power
+ // lowering power
if ( fHeadNotePowerState < fCurrentPowerState )
{
while ( i > fHeadNotePowerState )
{
- totalTime += fPowerStates[i].settleDownTime;
+ settleTime += (uint32_t) fPowerStates[i].settleDownTime;
i--;
}
}
- // we're raising power
+ // raising power
if ( fHeadNotePowerState > fCurrentPowerState )
{
while ( i < fHeadNotePowerState )
{
- totalTime += fPowerStates[i+1].settleUpTime;
+ settleTime += (uint32_t) fPowerStates[i+1].settleUpTime;
i++;
}
}
- return totalTime;
-}
-
-//*********************************************************************************
-// [private] startSettleTimer
-//
-// Enter a power-settling delay in microseconds and start a timer for that delay.
-//*********************************************************************************
-
-IOReturn IOService::startSettleTimer ( unsigned long delay )
-{
- AbsoluteTime deadline;
- boolean_t pending;
-
- retain();
- clock_interval_to_deadline(delay, kMicrosecondScale, &deadline);
- pending = thread_call_enter_delayed(fSettleTimer, deadline);
- if (pending) release();
-
- return IOPMNoErr;
+ if (settleTime)
+ {
+ retain();
+ clock_interval_to_deadline(settleTime, kMicrosecondScale, &deadline);
+ pending = thread_call_enter_delayed(fSettleTimer, deadline);
+ if (pending) release();
+ }
}
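startSettleTimer() now folds the old compute_settle_time() into itself: the
delay is the sum of the per-state settle times crossed on the way to the
target state. A standalone sketch of the accumulation (field names are
illustrative):

#include <stdint.h>

struct pstate { uint32_t settle_up_us, settle_down_us; };

static uint32_t settle_time(const struct pstate *ps, unsigned cur, unsigned dst)
{
    uint32_t us = 0;
    while (cur > dst) us += ps[cur--].settle_down_us;  /* lowering power */
    while (cur < dst) us += ps[++cur].settle_up_us;    /* raising power  */
    return us;
}

The retain()/release() pairing around thread_call_enter_delayed() keeps
exactly one reference per outstanding callout: if a call was already pending,
the earlier retain still covers it, so the new reference is dropped.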
//*********************************************************************************
-// [public] ackTimerTick
+// [private] ackTimerTick
//
// The acknowledgement timeout periodic timer has ticked.
// If we are awaiting acks for a power change notification,
me->release();
}
-//*********************************************************************************
-// settle_timer_expired
-//
-// Thread call function. Holds a retain while the callout is in flight.
-//*********************************************************************************
-
-static void
-settle_timer_expired ( thread_call_param_t arg0, thread_call_param_t arg1 )
-{
- IOService * me = (IOService *) arg0;
-
- if (gIOPMWorkLoop && gIOPMReplyQueue)
- {
- gIOPMWorkLoop->runAction(
- OSMemberFunctionCast(IOWorkLoop::Action, me, &IOService::settleTimerExpired),
- me);
- gIOPMReplyQueue->signalWorkAvailable();
- }
- me->release();
-}
-
-//*********************************************************************************
-// [private] ParentChangeStart
-//
-// Here we begin the processing of a power change initiated by our parent.
-//*********************************************************************************
-
-IOReturn IOService::ParentChangeStart ( void )
-{
- PM_ASSERT_IN_GATE();
- OUR_PMLog( kPMLogStartParentChange, fHeadNotePowerState, fCurrentPowerState );
-
- // Power domain is lowering power
- if ( fHeadNotePowerState < fCurrentPowerState )
- {
- setParentInfo( fHeadNoteParentFlags, fHeadNoteParentConnection, true );
-
-#if ROOT_DOMAIN_RUN_STATES
- getPMRootDomain()->handlePowerChangeStartForService(
- /* service */ this,
- /* RD flags */ &fRootDomainState,
- /* new pwr state */ fHeadNotePowerState,
- /* change flags */ fHeadNoteFlags );
-#endif
-
- // tell apps and kernel clients
- fInitialChange = false;
- fMachineState = kIOPM_ParentDownTellPriorityClientsPowerDown;
- tellChangeDown1(fHeadNotePowerState);
- return IOPMWillAckLater;
- }
-
- // Power domain is raising power
- if ( fHeadNotePowerState > fCurrentPowerState )
- {
- if ( fDesiredPowerState > fCurrentPowerState )
- {
- if ( fDesiredPowerState < fHeadNotePowerState )
- {
- // We power up, but not all the way
- fHeadNotePowerState = fDesiredPowerState;
- fHeadNotePowerArrayEntry = &fPowerStates[fDesiredPowerState];
- OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0);
- }
- } else {
- // We don't need to change
- fHeadNotePowerState = fCurrentPowerState;
- fHeadNotePowerArrayEntry = &fPowerStates[fCurrentPowerState];
- OUR_PMLog(kPMLogAmendParentChange, fHeadNotePowerState, 0);
- }
- }
-
- if ( fHeadNoteFlags & kIOPMDomainDidChange )
- {
- if ( fHeadNotePowerState > fCurrentPowerState )
- {
-#if ROOT_DOMAIN_RUN_STATES
- getPMRootDomain()->handlePowerChangeStartForService(
- /* service */ this,
- /* RD flags */ &fRootDomainState,
- /* new pwr state */ fHeadNotePowerState,
- /* change flags */ fHeadNoteFlags );
-#endif
-
- // Parent did change up - start our change up
- fInitialChange = false;
- notifyAll( kIOPM_ParentUpSetPowerState, kNotifyWillChange );
- return IOPMWillAckLater;
- }
- else if (fHeadNoteFlags & kIOPMSynchronize)
- {
- // We do not need to change power state, but notify
- // children to propagate tree synchronization.
- fMachineState = kIOPM_SyncNotifyDidChange;
- fDriverCallReason = kDriverCallInformPreChange;
- notifyChildren();
- return IOPMWillAckLater;
- }
- }
-
- all_done();
- return IOPMAckImplied;
-}
-
-//*********************************************************************************
-// [private] OurChangeStart
-//
-// Here we begin the processing of a power change initiated by us.
-//*********************************************************************************
-
-void IOService::OurChangeStart ( void )
-{
- PM_ASSERT_IN_GATE();
- OUR_PMLog( kPMLogStartDeviceChange, fHeadNotePowerState, fCurrentPowerState );
-
- // fMaxCapability is our maximum possible power state based on the current
- // power state of our parents. If we are trying to raise power beyond the
- // maximum, send an async request for more power to all parents.
-
- if (!IS_PM_ROOT() && (fMaxCapability < fHeadNotePowerState))
- {
- fHeadNoteFlags |= kIOPMNotDone;
- ask_parent(fHeadNotePowerState);
- OurChangeFinish();
- return;
- }
-
- // Redundant power changes skips to the end of the state machine.
-
- if (!fInitialChange && (fHeadNotePowerState == fCurrentPowerState))
- {
- OurChangeFinish();
- return;
- }
- fInitialChange = false;
-
-#if ROOT_DOMAIN_RUN_STATES
- getPMRootDomain()->handlePowerChangeStartForService(
- /* service */ this,
- /* RD flags */ &fRootDomainState,
- /* new pwr state */ fHeadNotePowerState,
- /* change flags */ fHeadNoteFlags );
-#endif
-
- // Two separate paths, depending if power is being raised or lowered.
- // Lowering power is subject to client approval.
-
- if ( fHeadNotePowerState < fCurrentPowerState )
- {
- // Next state when dropping power.
- fMachineState = kIOPM_OurChangeTellClientsPowerDown;
- fDoNotPowerDown = false;
-
- // Ask apps and kernel clients permission to lower power.
- fOutOfBandParameter = kNotifyApps;
- askChangeDown(fHeadNotePowerState);
- }
- else
- {
- // Notify interested drivers and children.
- notifyAll( kIOPM_OurChangeSetPowerState, kNotifyWillChange );
- }
-}
-
-//*********************************************************************************
-// [private] OurSyncStart
-//*********************************************************************************
-
-void IOService::OurSyncStart ( void )
-{
- PM_ASSERT_IN_GATE();
-
- if (fInitialChange)
- return;
-
-#if ROOT_DOMAIN_RUN_STATES
- getPMRootDomain()->handlePowerChangeStartForService(
- /* service */ this,
- /* RD flags */ &fRootDomainState,
- /* new pwr state */ fHeadNotePowerState,
- /* change flags */ fHeadNoteFlags );
-#endif
-
- fMachineState = kIOPM_SyncNotifyDidChange;
- fDriverCallReason = kDriverCallInformPreChange;
-
- notifyChildren();
-}
-
-//*********************************************************************************
-// [private] ask_parent
-//
-// Call the power domain parent to ask for a higher power state in the domain
-// or to suggest a lower power state.
-//*********************************************************************************
-
-IOReturn IOService::ask_parent ( unsigned long requestedState )
-{
- OSIterator * iter;
- OSObject * next;
- IOPowerConnection * connection;
- IOService * parent;
- const IOPMPowerState * powerStatePtr;
- unsigned long ourRequest;
-
- PM_ASSERT_IN_GATE();
- if (requestedState >= fNumberOfPowerStates)
- return IOPMNoErr;
-
- powerStatePtr = &fPowerStates[requestedState];
- ourRequest = powerStatePtr->inputPowerRequirement;
-
- if ( powerStatePtr->capabilityFlags & (kIOPMChildClamp | kIOPMPreventIdleSleep) )
- {
- ourRequest |= kIOPMPreventIdleSleep;
- }
- if ( powerStatePtr->capabilityFlags & (kIOPMChildClamp2 | kIOPMPreventSystemSleep) )
- {
- ourRequest |= kIOPMPreventSystemSleep;
- }
-
- // is this a new desire?
- if ( fPreviousRequest == ourRequest )
- {
- // no, the parent knows already, just return
- return IOPMNoErr;
- }
-
- if ( IS_PM_ROOT() )
- {
- return IOPMNoErr;
- }
- fPreviousRequest = ourRequest;
-
- iter = getParentIterator(gIOPowerPlane);
- if ( iter )
- {
- while ( (next = iter->getNextObject()) )
- {
- if ( (connection = OSDynamicCast(IOPowerConnection, next)) )
- {
- parent = (IOService *)connection->copyParentEntry(gIOPowerPlane);
- if ( parent ) {
- if ( parent->requestPowerDomainState(
- ourRequest, connection, IOPMLowestState) != IOPMNoErr )
- {
- OUR_PMLog(kPMLogRequestDenied, fPreviousRequest, 0);
- }
- parent->release();
- }
- }
- }
- iter->release();
- }
-
- return IOPMNoErr;
-}
-
//*********************************************************************************
// [private] notifyControllingDriver
//*********************************************************************************
PM_UNLOCK();
}
+// MARK: -
+// MARK: IOPMRequest
+
//*********************************************************************************
// IOPMRequest Class
//
}
}
+// MARK: -
+// MARK: IOPMRequestQueue
+
//*********************************************************************************
// IOPMRequestQueue Class
//
IOEventSource::signalWorkAvailable();
}
+// MARK: -
+// MARK: IOPMWorkQueue
+
//*********************************************************************************
// IOPMWorkQueue Class
//
return false;
}
+// MARK: -
+// MARK: IOPMCompletionQueue
+
//*********************************************************************************
// IOPMCompletionQueue Class
//*********************************************************************************
return more;
}
+// MARK: -
+// MARK: IOServicePM
+
OSDefineMetaClassAndStructors(IOServicePM, OSObject)
//*********************************************************************************
// Power flags supplied by all parents (domain).
unsigned long HeadNoteDomainFlags;
+ // Power flags supplied by the domain, with pending parent changes factored in.
+ IOPMPowerFlags HeadNoteDomainTargetFlags;
+
// Connection attached to the changing parent.
IOPowerConnection * HeadNoteParentConnection;
#define fHeadNotePowerState pwrMgt->HeadNotePowerState
#define fHeadNotePowerArrayEntry pwrMgt->HeadNotePowerArrayEntry
#define fHeadNoteDomainFlags pwrMgt->HeadNoteDomainFlags
+#define fHeadNoteDomainTargetFlags pwrMgt->HeadNoteDomainTargetFlags
#define fHeadNoteParentConnection pwrMgt->HeadNoteParentConnection
#define fHeadNoteParentFlags pwrMgt->HeadNoteParentFlags
#define fHeadNotePendingAcks pwrMgt->HeadNotePendingAcks
#define fRemoveInterestSet pwrMgt->RemoveInterestSet
#define fStrictTreeOrder pwrMgt->StrictTreeOrder
#define fNotifyChildArray pwrMgt->NotifyChildArray
-#define fIdleTimerStopped pwrMgt->IdleTimerStopped
+#define fIdleTimerStopped pwrMgt->IdleTimerStopped
#define fAdjustPowerScheduled pwrMgt->AdjustPowerScheduled
#define fActivityTicklePowerState pwrMgt->ActivityTicklePowerState
#define fPMVars pwrMgt->PMVars
protected:
IOService * fTarget; // request target
IOPMRequest * fRequestNext; // the next request in the chain
- IOPMRequest * fRequestRoot; // the root request in the issue tree
+ IOPMRequest * fRequestRoot; // the root request in the issue tree
IOItemCount fWorkWaitCount; // execution blocked if non-zero
- IOItemCount fFreeWaitCount; // completion blocked if non-zero
+ IOItemCount fFreeWaitCount; // completion blocked if non-zero
uint32_t fType; // request type
IOPMCompletionAction fCompletionAction;
return fRequestNext;
}
- inline IOPMRequest * getRootRequest( void ) const
- {
+ inline IOPMRequest * getRootRequest( void ) const
+ {
if (fRequestRoot) return fRequestRoot;
if (fCompletionAction) return (IOPMRequest *) this;
- return 0;
- }
+ return 0;
+ }
inline uint32_t getType( void ) const
{
#
# configurable kernel related resources
#
-options CONFIG_MAX_THREADS=32 # <medium,large,xlarge>
-options CONFIG_MAX_THREADS=32 # <small,xsmall>
-options CONFIG_MAX_THREADS=32 # <bsmall>
+options CONFIG_MAX_THREADS=64 # <medium,large,xlarge>
+options CONFIG_MAX_THREADS=64 # <small,xsmall>
+options CONFIG_MAX_THREADS=64 # <bsmall>
#
# configurable kernel - use these options to strip strings from panic
| resume_off Don't resume when detaching from gdb
|
| sendcore Configure kernel to send a coredump to the specified IP
+| sendsyslog Configure kernel to send a system log to the specified IP
+| sendpaniclog Configure kernel to send a panic log to the specified IP
| disablecore Configure the kernel to disable coredump transmission
+| getdumpinfo Retrieve the current remote dump parameters
+| setdumpinfo Configure the remote dump parameters
+|
| switchtocorethread Corefile version of "switchtoact"
| resetcorectx Corefile version of "resetctx"
|
| showallgdbcorestacks Corefile equivalent of "showallgdbstacks"
| kdp-reenter Schedule reentry into the debugger and continue.
| kdp-reboot Restart remote target
+| kdp-version Get KDP version number
|
| zstack Print zalloc caller stack (zone leak debugging)
| findoldest Find oldest zone leak debugging record
set $kgm_mtype = ((unsigned int *)&_mh_execute_header)[1]
set $kgm_lp64 = $kgm_mtype & 0x01000000
+set $kgm_manual_pkt_ppc = 0x549C
+set $kgm_manual_pkt_i386 = 0x249C
+set $kgm_manual_pkt_x86_64 = 0xFFFFFF8000002930
+set $kgm_manual_pkt_arm = 0xFFFF04A0
+
+set $kgm_kdp_pkt_data_len = 128
+
+# offsets of header fields within the data packet
+set $kgm_kdp_pkt_hdr_req_off = 0
+set $kgm_kdp_pkt_hdr_seq_off = 1
+set $kgm_kdp_pkt_hdr_len_off = 2
+set $kgm_kdp_pkt_hdr_key_off = 4
+
+# offsets of control fields following the data packet
+set $kgm_kdp_pkt_len_off = $kgm_kdp_pkt_data_len
+set $kgm_kdp_pkt_input_off = $kgm_kdp_pkt_data_len + 4
+
+set $kgm_kdp_pkt_hostreboot = 0x13
+set $kgm_kdp_pkt_hdr_size = 8
+
set $kgm_lcpu_self = 0xFFFE
set $kgm_reg_depth = 0
set $kgm_show_kmod_syms = 0
+# Send a manual KDP request using fixed per-architecture addresses, so it
+# works even without symbols for the manual packet structure.
+define manualhdrint
+ set $req = $arg0
+
+ set $hdrp = (uint32_t *) $kgm_manual_pkt_i386
+ if ($kgm_mtype == $kgm_mtype_ppc)
+ set $hdrp = (uint32_t *) $kgm_manual_pkt_ppc
+ set $req = $req << 1 # shift to deal with endianness
+ end
+ if ($kgm_mtype == $kgm_mtype_x86_64)
+ set $hdrp = (uint64_t *) $kgm_manual_pkt_x86_64
+ end
+ if ($kgm_mtype == $kgm_mtype_arm)
+ set $hdrp = (uint32_t *) $kgm_manual_pkt_arm
+ end
+
+ set $pkt_hdr = *$hdrp
+ set *((uint8_t *) ($pkt_hdr + $kgm_kdp_pkt_input_off)) = 0
+ set *((uint32_t *) ($pkt_hdr + $kgm_kdp_pkt_len_off)) = $kgm_kdp_pkt_hdr_size
+
+ set *((uint8_t *) ($pkt_hdr + $kgm_kdp_pkt_hdr_req_off)) = $req
+ set *((uint8_t *) ($pkt_hdr + $kgm_kdp_pkt_hdr_seq_off)) = 0
+ set *((uint16_t *) ($pkt_hdr + $kgm_kdp_pkt_hdr_len_off)) = $kgm_kdp_pkt_hdr_size
+ set *((uint32_t *) ($pkt_hdr + $kgm_kdp_pkt_hdr_key_off)) = 0
+ set *((uint8_t *) ($pkt_hdr + $kgm_kdp_pkt_input_off)) = 1
+
+ # dummy to make sure manual packet is executed
+ set $kgm_dummy = &_mh_execute_header
+end
+
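The offsets above describe the in-memory layout that manualhdrint pokes at:
a 128-byte data buffer holding the request, followed by length and input
control words. A simplified C view of that layout (illustrative; the real
kdp header packs request and is_reply bits into the first byte):

#include <stdint.h>

struct kdp_hdr_layout {          /* begins at data[0]                    */
    uint8_t  request;            /* $kgm_kdp_pkt_hdr_req_off = 0         */
    uint8_t  seq;                /* $kgm_kdp_pkt_hdr_seq_off = 1         */
    uint16_t len;                /* $kgm_kdp_pkt_hdr_len_off = 2         */
    uint32_t key;                /* $kgm_kdp_pkt_hdr_key_off = 4         */
};                               /* 8 bytes = $kgm_kdp_pkt_hdr_size      */

struct manual_pkt_layout {
    uint8_t  data[128];          /* $kgm_kdp_pkt_data_len                */
    uint32_t len;                /* $kgm_kdp_pkt_len_off   = 128         */
    uint32_t input;              /* $kgm_kdp_pkt_input_off = 132; 1 = go */
};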
# Print a pointer
define showptr
if $kgm_lp64
if ($kgm_mtype == $kgm_mtype_i386)
set $kdpstatep = (struct x86_saved_state32 *) kdp.saved_state
if ($kdp_act_counter == 0)
- set $kdpstate = *($kdpstatep)
+ set $kdpstate = *($kdpstatep)
end
set $kdp_act_counter = $kdp_act_counter + 1
set $kgm_statep = (struct x86_kernel_state *) \
($newact->kernel_stack + kernel_stack_size \
- sizeof(struct x86_kernel_state))
- set $kdpstatep->ebx = $kgm_statep->k_ebx
+ set $kdpstatep->ebx = $kgm_statep->k_ebx
set $kdpstatep->ebp = $kgm_statep->k_ebp
set $kdpstatep->edi = $kgm_statep->k_edi
set $kdpstatep->esi = $kgm_statep->k_esi
- set $kdpstatep->eip = $kgm_statep->k_eip
+ set $kdpstatep->eip = $kgm_statep->k_eip
flushregs
flushstack
set $pc = $kgm_statep->k_eip
if ($kgm_mtype == $kgm_mtype_x86_64)
set $kdpstatep = (struct x86_saved_state64 *) kdp.saved_state
if ($kdp_act_counter == 0)
- set $kdpstate = *($kdpstatep)
+ set $kdpstate = *($kdpstatep)
end
set $kdp_act_counter = $kdp_act_counter + 1
set $kgm_statep = (struct x86_kernel_state *) \
($newact->kernel_stack + kernel_stack_size \
- sizeof(struct x86_kernel_state))
- set $kdpstatep->rbx = $kgm_statep->k_rbx
- set $kdpstatep->rbp = $kgm_statep->k_rbp
- set $kdpstatep->r12 = $kgm_statep->k_r12
- set $kdpstatep->r13 = $kgm_statep->k_r13
- set $kdpstatep->r14 = $kgm_statep->k_r14
- set $kdpstatep->r15 = $kgm_statep->k_r15
- set $kdpstatep->isf.rsp = $kgm_statep->k_rsp
+ set $kdpstatep->rbx = $kgm_statep->k_rbx
+ set $kdpstatep->rbp = $kgm_statep->k_rbp
+ set $kdpstatep->r12 = $kgm_statep->k_r12
+ set $kdpstatep->r13 = $kgm_statep->k_r13
+ set $kdpstatep->r14 = $kgm_statep->k_r14
+ set $kdpstatep->r15 = $kgm_statep->k_r15
+ set $kdpstatep->isf.rsp = $kgm_statep->k_rsp
flushregs
flushstack
set $pc = $kgm_statep->k_rip
select 0
if ($kgm_mtype == $kgm_mtype_ppc)
if ($kdp_act_counter == 0)
- set $kdpstate = (struct savearea *) kdp.saved_state
+ set $kdpstate = (struct savearea *) kdp.saved_state
end
set $kdp_act_counter = $kdp_act_counter + 1
set (struct savearea *) kdp.saved_state=(struct savearea *) $arg0
end
define resume_on
- set noresume_on_disconnect = 0
+ set $resume = KDP_DUMPINFO_SETINFO | KDP_DUMPINFO_RESUME
+ dumpinfoint $resume
end
document resume_on
end
define resume_off
- set noresume_on_disconnect = 1
+ set $noresume = KDP_DUMPINFO_SETINFO | KDP_DUMPINFO_NORESUME
+ dumpinfoint $noresume
end
document resume_off
|macro in some cases.
end
-#Stopgap until gdb can generate the HOSTREBOOT packet
define kdp-reboot
-#Alternatively, set *(*(unsigned **) 0x2498) = 1 (or 0x5498 on PPC)
- set flag_kdp_trigger_reboot = 1
+# Alternatively, set *(*(unsigned **) 0x2498) = 1
+# (or 0x5498 on PPC, 0xffffff8000002928 on x86_64, 0xffff049c on arm)
+ manualhdrint $kgm_kdp_pkt_hostreboot
continue
end
document kdp-reboot
Syntax: kdp-reboot
-|Reboot the remote target machine; not guaranteed to succeed. Requires symbols
-|until gdb support for the HOSTREBOOT packet is implemented.
+|Reboot the remote target machine; not guaranteed to succeed.
+end
+
+define kdpversionint
+ # set up the manual KDP packet
+ set manual_pkt.input = 0
+ set manual_pkt.len = sizeof(kdp_version_req_t)
+ set $kgm_pkt = (kdp_version_req_t *)&manual_pkt.data
+ set $kgm_pkt->hdr.request = KDP_VERSION
+ set $kgm_pkt->hdr.len = sizeof(kdp_version_req_t)
+ set $kgm_pkt->hdr.is_reply = 0
+ set $kgm_pkt->hdr.seq = 0
+ set $kgm_pkt->hdr.key = 0
+ set manual_pkt.input = 1
+ # dummy to make sure manual packet is executed
+ set $kgm_dummy = &_mh_execute_header
+ set $kgm_pkt = (kdp_version_reply_t *)&manual_pkt.data
+ set $kgm_kdp_version = $kgm_pkt->version
+ set $kgm_kdp_feature = $kgm_pkt->feature
+end
+
+define kdp-version
+ kdpversionint
+ printf "KDP VERSION = %d, FEATURE = 0x%x\n", $kgm_kdp_version, $kgm_kdp_feature
+end
+
+document kdp-version
+Syntax: kdp-version
+|Get the KDP protocol version being used by the kernel.
+end
+
+define dumpinfoint
+ # set up the manual KDP packet
+ set manual_pkt.input = 0
+
+ set manual_pkt.len = sizeof(kdp_dumpinfo_req_t)
+ set $kgm_pkt = (kdp_dumpinfo_req_t *)manual_pkt.data
+ set $kgm_pkt->hdr.request = KDP_DUMPINFO
+ set $kgm_pkt->hdr.len = sizeof(kdp_dumpinfo_req_t)
+ set $kgm_pkt->hdr.is_reply = 0
+ set $kgm_pkt->hdr.seq = 0
+ set $kgm_pkt->hdr.key = 0
+ set $kgm_pkt->type = $arg0
+ set $kgm_pkt->name = ""
+ set $kgm_pkt->destip = ""
+ set $kgm_pkt->routerip = ""
+ set $kgm_pkt->port = 0
+
+ if $argc > 1
+ set $kgm_pkt->name = "$arg1"
+ end
+ if $argc > 2
+ set $kgm_pkt->destip = "$arg2"
+ end
+ if $argc > 3
+ set $kgm_pkt->routerip = "$arg3"
+ end
+ if $argc > 4
+ set $kgm_pkt->port = $arg4
+ end
+
+ set manual_pkt.input = 1
+ # dummy to make sure manual packet is executed
+ set $kgm_dummy = &_mh_execute_header
end
define sendcore
- set kdp_trigger_core_dump = 1
- set kdp_flag |= 0x40
- set panicd_ip_str = "$arg0"
- set panicd_specified = 1
- set disable_debug_output = 0
- set disableConsoleOutput = 0
- set logPanicDataToScreen = 1
- set reattach_wait = 1
- resume_off
+ if $argc > 1
+ dumpinfoint KDP_DUMPINFO_CORE $arg1 $arg0
+ else
+ dumpinfoint KDP_DUMPINFO_CORE \0 $arg0
+ end
end
document sendcore
-Syntax: sendcore <IP address>
+Syntax: sendcore <IP address> [filename]
|Configure the kernel to transmit a kernel coredump to a server (kdumpd)
|at the specified IP address. This is useful when the remote target has
|not been previously configured to transmit coredumps, and you wish to
|preserve kernel state for later examination. NOTE: You must issue a "continue"
|command after using this macro to trigger the kernel coredump. The kernel
|will resume waiting in the debugger after completion of the coredump. You
-|may disable coredumps by executing the "disablecore" macro.
+|may disable coredumps by executing the "disablecore" macro. You can
+|optionally specify the filename to be used for the generated core file.
+end
+
+define sendsyslog
+ if $argc > 1
+ dumpinfoint KDP_DUMPINFO_SYSTEMLOG $arg1 $arg0
+ else
+ dumpinfoint KDP_DUMPINFO_SYSTEMLOG \0 $arg0
+ end
+end
+
+document sendsyslog
+Syntax: sendsyslog <IP address> [filename]
+|Configure the kernel to transmit a kernel system log to a server (kdumpd)
+|at the specified IP address. NOTE: You must issue a "continue"
+|command after using this macro to trigger transmission of the system log.
+|The kernel will resume waiting in the debugger after completion. You can optionally
+|specify the name to be used for the generated system log.
+end
+
+define sendpaniclog
+ if panicstr
+ if $argc > 1
+ dumpinfoint KDP_DUMPINFO_PANICLOG $arg1 $arg0
+ else
+ dumpinfoint KDP_DUMPINFO_PANICLOG \0 $arg0
+ end
+ else
+ printf "No panic log available.\n"
+ end
+end
+
+document sendpaniclog
+Syntax: sendpaniclog <IP address> [filename]
+|Configure the kernel to transmit a kernel paniclog to a server (kdumpd)
+|at the specified IP address. NOTE: You must issue a "continue"
+|command after using this macro to trigger transmission of the panic log.
+|The kernel will resume waiting in the debugger after completion. You can optionally
+|specify the name to be used for the generated panic log.
+end
+
+define getdumpinfo
+ dumpinfoint KDP_DUMPINFO_GETINFO
+ set $kgm_dumpinfo = (kdp_dumpinfo_reply_t *) manual_pkt.data
+ if $kgm_dumpinfo->type & KDP_DUMPINFO_REBOOT
+ printf "Sysem will reboot after kernel info gets dumped.\n"
+ else
+ printf "Sysem will not reboot after kernel info gets dumped.\n"
+ end
+ if $kgm_dumpinfo->type & KDP_DUMPINFO_NORESUME
+ printf "System will allow a re-attach after a KDP disconnect.\n"
+ else
+ printf "System will resume after a KDP disconnect.\n"
+ end
+ set $kgm_dumpinfo_type = $kgm_dumpinfo->type & KDP_DUMPINFO_MASK
+ if $kgm_dumpinfo_type == KDP_DUMPINFO_DISABLE
+ printf "Kernel not setup for remote dumps.\n"
+ else
+ printf "Remote dump type: "
+ if $kgm_dumpinfo_type == KDP_DUMPINFO_CORE
+ printf "Core file\n"
+ end
+ if $kgm_dumpinfo_type == KDP_DUMPINFO_PANICLOG
+ printf "Panic log\n"
+ end
+ if $kgm_dumpinfo_type == KDP_DUMPINFO_SYSTEMLOG
+ printf "System log\n"
+ end
+
+ printf "Name: "
+ if $kgm_dumpinfo->name[0] == '\0'
+ printf "(autogenerated)\n"
+ else
+ printf "%s\n", $kgm_dumpinfo->name
+ end
+
+ printf "Network Info: %s[%d] ", $kgm_dumpinfo->destip, $kgm_dumpinfo->port
+ if $kgm_dumpinfo->routerip[0] == '\0'
+ printf "\n"
+ else
+ printf "Router: %s\n", $kgm_dumpinfo->routerip
+ end
+ end
+end
+
+document getdumpinfo
+Syntax: getdumpinfo
+|Retrieve the current remote dump settings.
+end
+
+define setdumpinfo
+ dumpinfoint KDP_DUMPINFO_SETINFO $arg0 $arg1 $arg2 $arg3
+end
+
+document setdumpinfo
+Syntax: setdumpinfo <filename> <ip> <router> <port>
+|Configure the current remote dump settings. Specify \0 to keep the
+|default filename or the previously configured ip/router settings.
+|Specify 0 for the port to keep the previously configured or default
+|port.
end
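As an illustration (the addresses are hypothetical): "setdumpinfo \0 10.0.1.5 \0 0"
retargets remote dumps at 10.0.1.5 while keeping the default filename, router,
and port; a following "getdumpinfo" confirms the new settings.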
define disablecore
- set kdp_trigger_core_dump = 0
- set kdp_flag |= 0x40
- set kdp_flag &= ~0x10
- set panicd_specified = 0
+ dumpinfoint KDP_DUMPINFO_DISABLE
end
document disablecore
end
define findregistryentryint
- set $kgm_namekey = (OSSymbol *) $kgm_reg_plane->nameKey
- set $kgm_childkey = (OSSymbol *) $kgm_reg_plane->keys[1]
- if $kgm_findregistry_verbose
- printf "Searching"
+ if !$kgm_reg_plane
+ set $kgm_reg_plane = (IORegistryPlane *) gIOServicePlane
+ end
+
+ if !$kgm_reg_plane
+ printf "Please load kgmacros after KDP attaching to the target.\n"
+ else
+ set $kgm_namekey = (OSSymbol *) $kgm_reg_plane->nameKey
+ set $kgm_childkey = (OSSymbol *) $kgm_reg_plane->keys[1]
+ if $kgm_findregistry_verbose
+ printf "Searching"
+ end
+ findregistryentryrecurse _ $arg0 0 0
end
- findregistryentryrecurse _ $arg0 0 0
end
define _findregistryentry
end
define showregistryentryint
- set $kgm_namekey = (OSSymbol *) $kgm_reg_plane->nameKey
- set $kgm_childkey = (OSSymbol *) $kgm_reg_plane->keys[1]
+ if !$kgm_reg_plane
+ set $kgm_reg_plane = (IORegistryPlane *) gIOServicePlane
+ end
- showregistryentryrecurse _ $arg0 0 0
+ if !$kgm_reg_plane
+ printf "Please load kgmacros after KDP attaching to the target.\n"
+ else
+ set $kgm_namekey = (OSSymbol *) $kgm_reg_plane->nameKey
+ set $kgm_childkey = (OSSymbol *) $kgm_reg_plane->keys[1]
+ showregistryentryrecurse _ $arg0 0 0
+ end
end
define showregistry
end
define showregistryentryintpmstate
- set $kgm_namekey = (OSSymbol *) $kgm_reg_plane->nameKey
- set $kgm_childkey = (OSSymbol *) $kgm_reg_plane->keys[1]
- showregistryentryrecursepmstate _ $arg0 0 0
+ if !$kgm_reg_plane
+ set $kgm_reg_plane = (IORegistryPlane *) gIOServicePlane
+ end
+
+ if !$kgm_reg_plane
+ printf "Please load kgmacros after KDP attaching to the target.\n"
+ else
+ set $kgm_namekey = (OSSymbol *) $kgm_reg_plane->nameKey
+ set $kgm_childkey = (OSSymbol *) $kgm_reg_plane->keys[1]
+ showregistryentryrecursepmstate _ $arg0 0 0
+ end
end
define showregistrypmstate
| best-effort guess to find any workloops that are actually not blocked in a continuation. For a
| complete list, it is best to compare the output of this macro against the output of 'showallstacks'.
end
+
define showthreadfortid
set $kgm_id_found = 0
static OSDictionary * sKextsByID = NULL;
static OSArray * sLoadedKexts = NULL;
-static OSArray * sPrelinkedPersonalities = NULL;
-
// Requests to kextd waiting to be picked up.
static OSArray * sKernelRequests = NULL;
// Identifier of kext load requests in sKernelRequests
{
IORecursiveLockLock(sKextLock);
sKextdActive = active;
- if (sPrelinkedPersonalities) {
- gIOCatalogue->removePersonalities(sPrelinkedPersonalities);
- OSSafeReleaseNULL(sPrelinkedPersonalities);
+ if (sKernelRequests->getCount()) {
+ OSKextPingKextd();
}
IORecursiveLockUnlock(sKextLock);
goto finish;
}
- OSKextPingKextd();
+ OSKextPingKextd();
finish:
IORecursiveLockUnlock(sKextLock);
goto finish;
}
- OSKextPingKextd();
+ OSKextPingKextd();
result = kOSReturnSuccess;
if (requestTagOut) {
return result;
}
-/*********************************************************************
-*********************************************************************/
-/* static */
-void
-OSKext::setPrelinkedPersonalities(OSArray * personalitiesArray)
-{
- sPrelinkedPersonalities = personalitiesArray;
- if (sPrelinkedPersonalities) {
- sPrelinkedPersonalities->retain();
- gIOCatalogue->addDrivers(sPrelinkedPersonalities);
- }
- return;
-}
-
/*********************************************************************
*********************************************************************/
/* static */
}
if (personalitiesArray->getCount()) {
- OSKext::setPrelinkedPersonalities(personalitiesArray);
+ gIOCatalogue->addDrivers(personalitiesArray);
}
/* Store the number of prelinked kexts in the registry so we can tell
#include <mach/machine.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
+#include <i386/cpuid.h>
#include <i386/tsc.h>
#include <i386/rtclock.h>
#include <i386/cpu_data.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
+#include <i386/cpuid.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>
#include <machine/pmap.h>
#define k64Bit 0x00000200 /* processor supports EM64T (not what mode you're running in) */
#define kHasSSE4_1 0x00000400
#define kHasSSE4_2 0x00000800
-
+#define kHasAES 0x00001000
+#define kInOrderPipeline 0x00002000 /* in-order execution */
#define kSlow 0x00004000 /* tsc < nanosecond */
#define kUP 0x00008000 /* set if (kNumCPUs == 1) */
#define kNumCPUs 0x00FF0000 /* number of CPUs (see _NumCPUs() below) */
*/
#include <vm/vm_kern.h>
#include <kern/kalloc.h>
+#include <kern/etimer.h>
#include <mach/machine.h>
#include <i386/cpu_threads.h>
#include <i386/cpuid.h>
*/
topoParms.maxSharingLLC = nCPUsSharing;
- topoParms.nCoresSharingLLC = nCPUsSharing;
+ topoParms.nCoresSharingLLC = nCPUsSharing / (cpuinfo->thread_count /
+ cpuinfo->core_count);
topoParms.nLCPUsSharingLLC = nCPUsSharing;
/*
topoParms.nLThreadsPerPackage = topoParms.nLThreadsPerCore * topoParms.nLCoresPerPackage;
topoParms.nPThreadsPerPackage = topoParms.nPThreadsPerCore * topoParms.nPCoresPerPackage;
+ DBG("\nCache Topology Parameters:\n");
+ DBG("\tLLC Depth: %d\n", topoParms.LLCDepth);
+ DBG("\tCores Sharing LLC: %d\n", topoParms.nCoresSharingLLC);
+ DBG("\tThreads Sharing LLC: %d\n", topoParms.nLCPUsSharingLLC);
+ DBG("\tmax Sharing of LLC: %d\n", topoParms.maxSharingLLC);
+
DBG("\nLogical Topology Parameters:\n");
DBG("\tThreads per Core: %d\n", topoParms.nLThreadsPerCore);
DBG("\tCores per Die: %d\n", topoParms.nLCoresPerDie);
/*
* Make sure that the die has the correct number of cores.
*/
- DBG("Die(%d)->cores: ");
+ DBG("Die(%d)->cores: ", die->pdie_num);
nCores = 0;
core = die->cores;
while (core != NULL) {
*/
nCPUs = 0;
lcpu = core->lcpus;
- DBG("Core(%d)->lcpus: ");
+ DBG("Core(%d)->lcpus: ", core->pcore_num);
while (lcpu != NULL) {
if (lcpu->core == NULL)
panic("CPU(%d)->core is NULL",
#define quad(hi,lo) (((uint64_t)(hi)) << 32 | (lo))
/* Only for 32bit values */
-#define bit(n) (1U << (n))
-#define bitmask(h,l) ((bit(h)|(bit(h)-1)) & ~(bit(l)-1))
-#define bitfield(x,h,l) ((((x) & bitmask(h,l)) >> l))
+#define bit32(n) (1U << (n))
+#define bitmask32(h,l) ((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1))
+#define bitfield32(x,h,l) ((((x) & bitmask32(h,l)) >> l))
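A worked example of the renamed 32-bit helpers (a compile-time check;
illustrative, not part of the patch):

#include <stdint.h>

#define bit32(n)          (1U << (n))
#define bitmask32(h,l)    ((bit32(h)|(bit32(h)-1)) & ~(bit32(l)-1))
#define bitfield32(x,h,l) ((((x) & bitmask32(h,l)) >> l))

/* bitmask32(7,4) == 0xF0, so bits 7..4 of 0xAB extract to 0xA. */
_Static_assert(bitfield32(0xABu, 7, 4) == 0xA, "high nibble of 0xAB");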
/*
* Leaf 2 cache descriptor encodings.
{ 0xD0, CACHE, L3, 4, 512*K, 64 },
{ 0xD1, CACHE, L3, 4, 1*M, 64 },
{ 0xD2, CACHE, L3, 4, 2*M, 64 },
+ { 0xD3, CACHE, L3, 4, 4*M, 64 },
+ { 0xD4, CACHE, L3, 4, 8*M, 64 },
{ 0xD6, CACHE, L3, 8, 1*M, 64 },
{ 0xD7, CACHE, L3, 8, 2*M, 64 },
{ 0xD8, CACHE, L3, 8, 4*M, 64 },
+ { 0xD9, CACHE, L3, 8, 8*M, 64 },
+ { 0xDA, CACHE, L3, 8, 12*M, 64 },
{ 0xDC, CACHE, L3, 12, 1536*K, 64 },
{ 0xDD, CACHE, L3, 12, 3*M, 64 },
{ 0xDE, CACHE, L3, 12, 6*M, 64 },
+ { 0xDF, CACHE, L3, 12, 12*M, 64 },
+ { 0xE0, CACHE, L3, 12, 18*M, 64 },
{ 0xE2, CACHE, L3, 16, 2*M, 64 },
{ 0xE3, CACHE, L3, 16, 4*M, 64 },
{ 0xE4, CACHE, L3, 16, 8*M, 64 },
+ { 0xE5, CACHE, L3, 16, 16*M, 64 },
+ { 0xE6, CACHE, L3, 16, 24*M, 64 },
{ 0xF0, PREFETCH, NA, NA, 64, NA },
{ 0xF1, PREFETCH, NA, NA, 128, NA }
};
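Consumers scan this table linearly for each descriptor byte returned by
leaf 2. A sketch of that lookup (struct fields abbreviated; illustrative):

#include <stddef.h>
#include <stdint.h>

struct desc { uint8_t value; int type, level, ways; uint32_t size, linesize; };

static const struct desc *find_desc(const struct desc *tab, size_t n, uint8_t v)
{
    for (size_t i = 0; i < n; i++)
        if (tab[i].value == v)
            return &tab[i];   /* known descriptor, e.g. 0xD3: L3, 4-way, 4M */
    return NULL;              /* unknown descriptor byte: ignore it         */
}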
static i386_cpu_info_t cpuid_cpu_info;
#if defined(__x86_64__)
-static void _do_cpuid(uint32_t selector, uint32_t *result)
+static void cpuid_fn(uint32_t selector, uint32_t *result)
{
do_cpuid(selector, result);
}
#else
-static void _do_cpuid(uint32_t selector, uint32_t *result)
+static void cpuid_fn(uint32_t selector, uint32_t *result)
{
if (cpu_mode_is64bit()) {
asm("call _cpuid64"
/* Get processor cache descriptor info using leaf 2. We don't use
* this internally, but must publish it for KEXTs.
*/
- _do_cpuid(2, cpuid_result);
+ cpuid_fn(2, cpuid_result);
for (j = 0; j < 4; j++) {
if ((cpuid_result[j] >> 31) == 1) /* bit31 is validity */
continue;
for (i = 1; i < info_p->cache_info[0]; i++) {
if (i*16 > sizeof(info_p->cache_info))
break;
- _do_cpuid(2, cpuid_result);
+ cpuid_fn(2, cpuid_result);
for (j = 0; j < 4; j++) {
if ((cpuid_result[j] >> 31) == 1)
continue;
* Most processors Mac OS X supports implement this flavor of CPUID.
* Loop over each cache on the processor.
*/
- _do_cpuid(0, cpuid_result);
+ cpuid_fn(0, cpuid_result);
if (cpuid_result[eax] >= 4)
cpuid_deterministic_supported = TRUE;
reg[ecx] = index; /* index starting at 0 */
cpuid(reg);
//kprintf("cpuid(4) index=%d eax=%p\n", index, reg[eax]);
- cache_type = bitfield(reg[eax], 4, 0);
+ cache_type = bitfield32(reg[eax], 4, 0);
if (cache_type == 0)
break; /* no more caches */
- cache_level = bitfield(reg[eax], 7, 5);
- cache_sharing = bitfield(reg[eax], 25, 14) + 1;
+ cache_level = bitfield32(reg[eax], 7, 5);
+ cache_sharing = bitfield32(reg[eax], 25, 14) + 1;
info_p->cpuid_cores_per_package
- = bitfield(reg[eax], 31, 26) + 1;
- cache_linesize = bitfield(reg[ebx], 11, 0) + 1;
- cache_partitions = bitfield(reg[ebx], 21, 12) + 1;
- cache_associativity = bitfield(reg[ebx], 31, 22) + 1;
- cache_sets = bitfield(reg[ecx], 31, 0) + 1;
+ = bitfield32(reg[eax], 31, 26) + 1;
+ cache_linesize = bitfield32(reg[ebx], 11, 0) + 1;
+ cache_partitions = bitfield32(reg[ebx], 21, 12) + 1;
+ cache_associativity = bitfield32(reg[ebx], 31, 22) + 1;
+ cache_sets = bitfield32(reg[ecx], 31, 0) + 1;
/* Map type/levels returned by CPUID into cache_type_t */
switch (cache_level) {
static void
cpuid_set_generic_info(i386_cpu_info_t *info_p)
{
- uint32_t cpuid_reg[4];
+ uint32_t reg[4];
char str[128], *p;
/* do cpuid 0 to get vendor */
- _do_cpuid(0, cpuid_reg);
- info_p->cpuid_max_basic = cpuid_reg[eax];
- bcopy((char *)&cpuid_reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
- bcopy((char *)&cpuid_reg[ecx], &info_p->cpuid_vendor[8], 4);
- bcopy((char *)&cpuid_reg[edx], &info_p->cpuid_vendor[4], 4);
+ cpuid_fn(0, reg);
+ info_p->cpuid_max_basic = reg[eax];
+ bcopy((char *)&reg[ebx], &info_p->cpuid_vendor[0], 4); /* ug */
+ bcopy((char *)&reg[ecx], &info_p->cpuid_vendor[8], 4);
+ bcopy((char *)&reg[edx], &info_p->cpuid_vendor[4], 4);
info_p->cpuid_vendor[12] = 0;
/* get extended cpuid results */
- _do_cpuid(0x80000000, cpuid_reg);
- info_p->cpuid_max_ext = cpuid_reg[eax];
+ cpuid_fn(0x80000000, reg);
+ info_p->cpuid_max_ext = reg[eax];
/* check to see if we can get brand string */
if (info_p->cpuid_max_ext >= 0x80000004) {
* The brand string 48 bytes (max), guaranteed to
* be NUL terminated.
*/
- _do_cpuid(0x80000002, cpuid_reg);
- bcopy((char *)cpuid_reg, &str[0], 16);
- _do_cpuid(0x80000003, cpuid_reg);
- bcopy((char *)cpuid_reg, &str[16], 16);
- _do_cpuid(0x80000004, cpuid_reg);
- bcopy((char *)cpuid_reg, &str[32], 16);
+ cpuid_fn(0x80000002, reg);
+ bcopy((char *)reg, &str[0], 16);
+ cpuid_fn(0x80000003, reg);
+ bcopy((char *)reg, &str[16], 16);
+ cpuid_fn(0x80000004, reg);
+ bcopy((char *)reg, &str[32], 16);
for (p = str; *p != '\0'; p++) {
if (*p != ' ') break;
}
/* Get cache and addressing info. */
if (info_p->cpuid_max_ext >= 0x80000006) {
- _do_cpuid(0x80000006, cpuid_reg);
- info_p->cpuid_cache_linesize = bitfield(cpuid_reg[ecx], 7, 0);
+ cpuid_fn(0x80000006, reg);
+ info_p->cpuid_cache_linesize = bitfield32(reg[ecx], 7, 0);
info_p->cpuid_cache_L2_associativity =
- bitfield(cpuid_reg[ecx],15,12);
- info_p->cpuid_cache_size = bitfield(cpuid_reg[ecx],31,16);
- _do_cpuid(0x80000008, cpuid_reg);
+ bitfield32(reg[ecx],15,12);
+ info_p->cpuid_cache_size = bitfield32(reg[ecx],31,16);
+ cpuid_fn(0x80000008, reg);
info_p->cpuid_address_bits_physical =
- bitfield(cpuid_reg[eax], 7, 0);
+ bitfield32(reg[eax], 7, 0);
info_p->cpuid_address_bits_virtual =
- bitfield(cpuid_reg[eax],15, 8);
+ bitfield32(reg[eax],15, 8);
}
/* get processor signature and decode */
- _do_cpuid(1, cpuid_reg);
- info_p->cpuid_signature = cpuid_reg[eax];
- info_p->cpuid_stepping = bitfield(cpuid_reg[eax], 3, 0);
- info_p->cpuid_model = bitfield(cpuid_reg[eax], 7, 4);
- info_p->cpuid_family = bitfield(cpuid_reg[eax], 11, 8);
- info_p->cpuid_type = bitfield(cpuid_reg[eax], 13, 12);
- info_p->cpuid_extmodel = bitfield(cpuid_reg[eax], 19, 16);
- info_p->cpuid_extfamily = bitfield(cpuid_reg[eax], 27, 20);
- info_p->cpuid_brand = bitfield(cpuid_reg[ebx], 7, 0);
- info_p->cpuid_features = quad(cpuid_reg[ecx], cpuid_reg[edx]);
+ cpuid_fn(1, reg);
+ info_p->cpuid_signature = reg[eax];
+ info_p->cpuid_stepping = bitfield32(reg[eax], 3, 0);
+ info_p->cpuid_model = bitfield32(reg[eax], 7, 4);
+ info_p->cpuid_family = bitfield32(reg[eax], 11, 8);
+ info_p->cpuid_type = bitfield32(reg[eax], 13, 12);
+ info_p->cpuid_extmodel = bitfield32(reg[eax], 19, 16);
+ info_p->cpuid_extfamily = bitfield32(reg[eax], 27, 20);
+ info_p->cpuid_brand = bitfield32(reg[ebx], 7, 0);
+ info_p->cpuid_features = quad(reg[ecx], reg[edx]);
/* Fold extensions into family/model */
if (info_p->cpuid_family == 0x0f)
if (info_p->cpuid_features & CPUID_FEATURE_HTT)
info_p->cpuid_logical_per_package =
- bitfield(cpuid_reg[ebx], 23, 16);
+ bitfield32(reg[ebx], 23, 16);
else
info_p->cpuid_logical_per_package = 1;
if (info_p->cpuid_max_ext >= 0x80000001) {
- _do_cpuid(0x80000001, cpuid_reg);
+ cpuid_fn(0x80000001, reg);
info_p->cpuid_extfeatures =
- quad(cpuid_reg[ecx], cpuid_reg[edx]);
+ quad(reg[ecx], reg[edx]);
}
/* Fold in the Invariant TSC feature bit, if present */
if (info_p->cpuid_max_ext >= 0x80000007) {
- _do_cpuid(0x80000007, cpuid_reg);
+ cpuid_fn(0x80000007, reg);
info_p->cpuid_extfeatures |=
- cpuid_reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
+ reg[edx] & (uint32_t)CPUID_EXTFEATURE_TSCI;
}
/* Find the microcode version number a.k.a. signature a.k.a. BIOS ID */
info_p->cpuid_microcode_version =
(uint32_t) (rdmsr64(MSR_IA32_BIOS_SIGN_ID) >> 32);
- if (info_p->cpuid_model == CPUID_MODEL_NEHALEM) {
- /*
- * For Nehalem, find the number of enabled cores and threads
- * (which determines whether SMT/Hyperthreading is active).
- */
- uint64_t msr_core_thread_count = rdmsr64(MSR_CORE_THREAD_COUNT);
- info_p->core_count = bitfield((uint32_t)msr_core_thread_count, 31, 16);
- info_p->thread_count = bitfield((uint32_t)msr_core_thread_count, 15, 0);
- }
-
if (info_p->cpuid_max_basic >= 0x5) {
+ cpuid_mwait_leaf_t *cmp = &info_p->cpuid_mwait_leaf;
+
/*
* Extract the Monitor/Mwait Leaf info:
*/
- _do_cpuid(5, cpuid_reg);
- info_p->cpuid_mwait_linesize_min = cpuid_reg[eax];
- info_p->cpuid_mwait_linesize_max = cpuid_reg[ebx];
- info_p->cpuid_mwait_extensions = cpuid_reg[ecx];
- info_p->cpuid_mwait_sub_Cstates = cpuid_reg[edx];
+ cpuid_fn(5, reg);
+ cmp->linesize_min = reg[eax];
+ cmp->linesize_max = reg[ebx];
+ cmp->extensions = reg[ecx];
+ cmp->sub_Cstates = reg[edx];
+ info_p->cpuid_mwait_leafp = cmp;
}
if (info_p->cpuid_max_basic >= 0x6) {
+ cpuid_thermal_leaf_t *ctp = &info_p->cpuid_thermal_leaf;
+
/*
* The thermal and Power Leaf:
*/
- _do_cpuid(6, cpuid_reg);
- info_p->cpuid_thermal_sensor =
- bitfield(cpuid_reg[eax], 0, 0);
- info_p->cpuid_thermal_dynamic_acceleration =
- bitfield(cpuid_reg[eax], 1, 1);
- info_p->cpuid_thermal_thresholds =
- bitfield(cpuid_reg[ebx], 3, 0);
- info_p->cpuid_thermal_ACNT_MCNT =
- bitfield(cpuid_reg[ecx], 0, 0);
+ cpuid_fn(6, reg);
+ ctp->sensor = bitfield32(reg[eax], 0, 0);
+ ctp->dynamic_acceleration = bitfield32(reg[eax], 1, 1);
+ ctp->thresholds = bitfield32(reg[ebx], 3, 0);
+ ctp->ACNT_MCNT = bitfield32(reg[ecx], 0, 0);
+ info_p->cpuid_thermal_leafp = ctp;
}
if (info_p->cpuid_max_basic >= 0xa) {
+ cpuid_arch_perf_leaf_t *capp = &info_p->cpuid_arch_perf_leaf;
+
/*
* Architectural Performance Monitoring Leaf:
*/
- _do_cpuid(0xa, cpuid_reg);
- info_p->cpuid_arch_perf_version =
- bitfield(cpuid_reg[eax], 7, 0);
- info_p->cpuid_arch_perf_number =
- bitfield(cpuid_reg[eax],15, 8);
- info_p->cpuid_arch_perf_width =
- bitfield(cpuid_reg[eax],23,16);
- info_p->cpuid_arch_perf_events_number =
- bitfield(cpuid_reg[eax],31,24);
- info_p->cpuid_arch_perf_events =
- cpuid_reg[ebx];
- info_p->cpuid_arch_perf_fixed_number =
- bitfield(cpuid_reg[edx], 4, 0);
- info_p->cpuid_arch_perf_fixed_width =
- bitfield(cpuid_reg[edx],12, 5);
+ cpuid_fn(0xa, reg);
+ capp->version = bitfield32(reg[eax], 7, 0);
+ capp->number = bitfield32(reg[eax], 15, 8);
+ capp->width = bitfield32(reg[eax], 23, 16);
+ capp->events_number = bitfield32(reg[eax], 31, 24);
+ capp->events = reg[ebx];
+ capp->fixed_number = bitfield32(reg[edx], 4, 0);
+ capp->fixed_width = bitfield32(reg[edx], 12, 5);
+ info_p->cpuid_arch_perf_leafp = capp;
}
return;
}
+static uint32_t
+cpuid_set_cpufamily(i386_cpu_info_t *info_p)
+{
+ uint32_t cpufamily = CPUFAMILY_UNKNOWN;
+
+ switch (info_p->cpuid_family) {
+ case 6:
+ switch (info_p->cpuid_model) {
+ case 13:
+ cpufamily = CPUFAMILY_INTEL_6_13;
+ break;
+ case 14:
+ cpufamily = CPUFAMILY_INTEL_YONAH;
+ break;
+ case 15:
+ cpufamily = CPUFAMILY_INTEL_MEROM;
+ break;
+ case 23:
+ cpufamily = CPUFAMILY_INTEL_PENRYN;
+ break;
+ case CPUID_MODEL_NEHALEM:
+ case CPUID_MODEL_FIELDS:
+ case CPUID_MODEL_DALES:
+ case CPUID_MODEL_NEHALEM_EX:
+ cpufamily = CPUFAMILY_INTEL_NEHALEM;
+ break;
+ }
+ break;
+ }
+
+ info_p->cpuid_cpufamily = cpufamily;
+ return cpufamily;
+}
+
void
cpuid_set_info(void)
{
- bzero((void *)&cpuid_cpu_info, sizeof(cpuid_cpu_info));
+ i386_cpu_info_t *info_p = &cpuid_cpu_info;
+
+ bzero((void *)info_p, sizeof(cpuid_cpu_info));
- cpuid_set_generic_info(&cpuid_cpu_info);
+ cpuid_set_generic_info(info_p);
/* verify we are running on a supported CPU */
- if ((strncmp(CPUID_VID_INTEL, cpuid_cpu_info.cpuid_vendor,
+ if ((strncmp(CPUID_VID_INTEL, info_p->cpuid_vendor,
min(strlen(CPUID_STRING_UNKNOWN) + 1,
- sizeof(cpuid_cpu_info.cpuid_vendor)))) ||
- (cpuid_cpu_info.cpuid_family != 6) ||
- (cpuid_cpu_info.cpuid_model < 13))
+ sizeof(info_p->cpuid_vendor)))) ||
+ (cpuid_set_cpufamily(info_p) == CPUFAMILY_UNKNOWN))
panic("Unsupported CPU");
- cpuid_cpu_info.cpuid_cpu_type = CPU_TYPE_X86;
- cpuid_cpu_info.cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
+ info_p->cpuid_cpu_type = CPU_TYPE_X86;
+ info_p->cpuid_cpu_subtype = CPU_SUBTYPE_X86_ARCH1;
cpuid_set_cache_info(&cpuid_cpu_info);
- if (cpuid_cpu_info.core_count == 0) {
- cpuid_cpu_info.core_count =
- cpuid_cpu_info.cpuid_cores_per_package;
- cpuid_cpu_info.thread_count =
- cpuid_cpu_info.cpuid_logical_per_package;
+ /*
+ * Find the number of enabled cores and threads
+ * (which determines whether SMT/Hyperthreading is active).
+ */
+ switch (info_p->cpuid_cpufamily) {
+ case CPUFAMILY_INTEL_NEHALEM: {
+ uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
+ info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
+ info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
+ break;
+ }
+ }
+ if (info_p->core_count == 0) {
+ info_p->core_count = info_p->cpuid_cores_per_package;
+ info_p->thread_count = info_p->cpuid_logical_per_package;
}
cpuid_cpu_info.cpuid_model_string = ""; /* deprecated */
return cpuid_info()->cpuid_family;
}
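/*
 * Worked example (sketch): MSR_CORE_THREAD_COUNT packs both counts into
 * its low dword, cores in bits 31:16 and threads in bits 15:0, so a low
 * dword of 0x00040008 decodes as 4 cores and 8 threads, i.e. SMT active.
 */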
+uint32_t
+cpuid_cpufamily(void)
+{
+ return cpuid_info()->cpuid_cpufamily;
+}
+
cpu_type_t
cpuid_cputype(void)
{
#define CPUID_MODEL_MEROM 15
#define CPUID_MODEL_PENRYN 23
#define CPUID_MODEL_NEHALEM 26
+#define CPUID_MODEL_ATOM 28
+#define CPUID_MODEL_FIELDS 30 /* Lynnfield, Clarksfield, Jasper */
+#define CPUID_MODEL_DALES 31 /* Havendale, Auburndale */
+#define CPUID_MODEL_NEHALEM_EX 46
#ifndef ASSEMBLER
#include <stdint.h>
{ value, type, size, linesize }
#endif /* KERNEL */
+/* Monitor/mwait Leaf: */
+typedef struct {
+ uint32_t linesize_min;
+ uint32_t linesize_max;
+ uint32_t extensions;
+ uint32_t sub_Cstates;
+} cpuid_mwait_leaf_t;
+
+/* Thermal and Power Management Leaf: */
+typedef struct {
+ boolean_t sensor;
+ boolean_t dynamic_acceleration;
+ uint32_t thresholds;
+ boolean_t ACNT_MCNT;
+} cpuid_thermal_leaf_t;
+
+/* Architectural Performance Monitoring Leaf: */
+typedef struct {
+ uint8_t version;
+ uint8_t number;
+ uint8_t width;
+ uint8_t events_number;
+ uint32_t events;
+ uint8_t fixed_number;
+ uint8_t fixed_width;
+} cpuid_arch_perf_leaf_t;
+
/* Physical CPU info - this is exported out of the kernel (kexts), so be wary of changes */
typedef struct {
char cpuid_vendor[16];
char cpuid_brand_string[48];
const char *cpuid_model_string;
- cpu_type_t cpuid_type; /* this is *not* a cpu_type_t in our <mach/machine.h> */
+ cpu_type_t cpuid_type; /* this is *not* a cpu_type_t in our <mach/machine.h> */
uint8_t cpuid_family;
uint8_t cpuid_model;
uint8_t cpuid_extmodel;
cpu_type_t cpuid_cpu_type; /* <mach/machine.h> */
cpu_subtype_t cpuid_cpu_subtype; /* <mach/machine.h> */
- /* Monitor/mwait Leaf: */
- uint32_t cpuid_mwait_linesize_min;
- uint32_t cpuid_mwait_linesize_max;
- uint32_t cpuid_mwait_extensions;
- uint32_t cpuid_mwait_sub_Cstates;
-
- /* Thermal and Power Management Leaf: */
- boolean_t cpuid_thermal_sensor;
- boolean_t cpuid_thermal_dynamic_acceleration;
- uint32_t cpuid_thermal_thresholds;
- boolean_t cpuid_thermal_ACNT_MCNT;
-
- /* Architectural Performance Monitoring Leaf: */
- uint8_t cpuid_arch_perf_version;
- uint8_t cpuid_arch_perf_number;
- uint8_t cpuid_arch_perf_width;
- uint8_t cpuid_arch_perf_events_number;
- uint32_t cpuid_arch_perf_events;
- uint8_t cpuid_arch_perf_fixed_number;
- uint8_t cpuid_arch_perf_fixed_width;
-
+ /* Per-vendor info */
+ cpuid_mwait_leaf_t cpuid_mwait_leaf;
+#define cpuid_mwait_linesize_max cpuid_mwait_leaf.linesize_max
+#define cpuid_mwait_linesize_min cpuid_mwait_leaf.linesize_min
+#define cpuid_mwait_extensions cpuid_mwait_leaf.extensions
+#define cpuid_mwait_sub_Cstates cpuid_mwait_leaf.sub_Cstates
+ cpuid_thermal_leaf_t cpuid_thermal_leaf;
+ cpuid_arch_perf_leaf_t cpuid_arch_perf_leaf;
+
/* Cache details: */
uint32_t cpuid_cache_linesize;
uint32_t cpuid_cache_L2_associativity;
/* Max leaf ids available from CPUID */
uint32_t cpuid_max_basic;
uint32_t cpuid_max_ext;
+
+ /* Family-specific info links */
+ uint32_t cpuid_cpufamily;
+ cpuid_mwait_leaf_t *cpuid_mwait_leafp;
+ cpuid_thermal_leaf_t *cpuid_thermal_leafp;
+ cpuid_arch_perf_leaf_t *cpuid_arch_perf_leafp;
+
} i386_cpu_info_t;
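/*
 * Compatibility sketch (assuming a kext built against the old flat field
 * names): the #defines above keep source such as
 *
 *	i386_cpu_info_t *info = cpuid_info();
 *	uint32_t sz = info->cpuid_mwait_linesize_max;
 *
 * compiling unchanged; it now resolves to info->cpuid_mwait_leaf.linesize_max.
 */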
#ifdef __cplusplus
extern uint64_t cpuid_features(void);
extern uint64_t cpuid_extfeatures(void);
extern uint32_t cpuid_family(void);
+extern uint32_t cpuid_cpufamily(void);
extern void cpuid_get_info(i386_cpu_info_t *info_p);
extern i386_cpu_info_t *cpuid_info(void);
uint32_t lgDevSlot2; /* 0x2490 For developer use */
uint32_t lgOSVersion; /* 0x2494 Pointer to OS version string */
uint32_t lgRebootFlag; /* 0x2498 Pointer to debugger reboot trigger */
- uint32_t lgRsv49C[729]; /* 0x549C Reserved - push to 1 page */
+ uint32_t lgManualPktAddr; /* 0x249C Pointer to manual packet structure */
+ uint32_t lgRsv49C[728]; /* 0x24A0 Reserved - push to 1 page */
} lowglo;
#pragma pack()
extern lowglo lowGlo;
.long EXT(osversion) /* 0x2494 Pointer to osversion string */
#if MACH_KDP
.long EXT(flag_kdp_trigger_reboot) /* 0x2498 Pointer to debugger reboot trigger */
+ .long EXT(manual_pkt) /* 0x249C Pointer to debugger manual packet address */
#else
.long 0 /* 0x2498 Reserved */
+ .long 0 /* 0x249C Reserved */
#endif
- .fill 729, 4, 0
+ .fill 728, 4, 0
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/misc_protos.h>
+#include <kern/etimer.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#endif /* MP_DEBUG */
+#define ABS(v) (((v) > 0)?(v):-(v))
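/* Note: ABS() evaluates its argument twice; callers below pass only
 * side-effect-free expressions such as tsc_delta. */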
+
void slave_boot_init(void);
#if MACH_KDB
static int NMIInterruptHandler(x86_saved_state_t *regs);
boolean_t smp_initialized = FALSE;
+uint32_t TSC_sync_margin = 0xFFF;
volatile boolean_t force_immediate_debugger_NMI = FALSE;
volatile boolean_t pmap_tlb_flush_timeout = FALSE;
decl_simple_lock_data(,mp_kdp_lock);
install_real_mode_bootstrap(slave_pstart);
+ if (PE_parse_boot_argn("TSC_sync_margin",
+ &TSC_sync_margin, sizeof(TSC_sync_margin)))
+ kprintf("TSC sync Margin 0x%x\n", TSC_sync_margin);
smp_initialized = TRUE;
return;
}
+typedef struct {
+ int target_cpu;
+ int target_lapic;
+ int starter_cpu;
+} processor_start_info_t;
+static processor_start_info_t start_info __attribute__((aligned(64)));
+
+/*
+ * These are cache-aligned so the spinning CPUs do not suffer cross-cpu
+ * false-sharing interference.
+ */
+static volatile long tsc_entry_barrier __attribute__((aligned(64)));
+static volatile long tsc_exit_barrier __attribute__((aligned(64)));
+static volatile uint64_t tsc_target __attribute__((aligned(64)));
+
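/*
 * Sketch of the two-party barrier handshake used by started_cpu() and
 * start_cpu() below (illustrative only; the kernel path uses atomic_decl):
 */
static volatile long barrier_sketch = 2;	/* one slot per participant */
static void
barrier_arrive_sketch(void)
{
	__sync_fetch_and_sub(&barrier_sketch, 1);	/* announce arrival */
	while (barrier_sketch != 0)
		;	/* spin until the other side has arrived too */
}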
/*
* Poll a CPU to see when it has marked itself as running.
*/
static void
mp_wait_for_cpu_up(int slot_num, unsigned int iters, unsigned int usecdelay)
{
- while (iters-- > 0) {
+ while (iters-- > 0) {
if (cpu_datap(slot_num)->cpu_running)
- break;
+ break;
delay(usecdelay);
}
}
kern_return_t
intel_startCPU_fast(int slot_num)
{
- kern_return_t rc;
+ kern_return_t rc;
/*
* Try to perform a fast restart
*/
if (cpu_datap(slot_num)->cpu_running)
return(KERN_SUCCESS);
- else
+ else
return(KERN_FAILURE);
}
-typedef struct {
- int target_cpu;
- int target_lapic;
- int starter_cpu;
-} processor_start_info_t;
+static void
+started_cpu(void)
+{
+ /* Here on the started cpu with cpu_running set TRUE */
-static processor_start_info_t start_info;
+ if (TSC_sync_margin &&
+ start_info.target_cpu == cpu_number()) {
+ /*
+ * I've just started up; synchronize again with the starter cpu
+ * and then snap my TSC.
+ */
+ tsc_target = 0;
+ atomic_decl(&tsc_entry_barrier, 1);
+ while (tsc_entry_barrier != 0)
+ ; /* spin for starter and target at barrier */
+ tsc_target = rdtsc64();
+ atomic_decl(&tsc_exit_barrier, 1);
+ }
+}
static void
start_cpu(void *arg)
i *= 10000;
#endif
mp_wait_for_cpu_up(psip->target_cpu, i*100, 100);
+ if (TSC_sync_margin &&
+ cpu_datap(psip->target_cpu)->cpu_running) {
+ /*
+ * Compare the TSC from the started processor with ours.
+ * Report and log/panic if it diverges by more than
+ * TSC_sync_margin (TSC_SYNC_MARGIN) ticks. This margin
+ * can be overridden by a boot-arg (0 means no checking).
+ */
+ uint64_t tsc_starter;
+ int64_t tsc_delta;
+ atomic_decl(&tsc_entry_barrier, 1);
+ while (tsc_entry_barrier != 0)
+ ; /* spin for both processors at barrier */
+ tsc_starter = rdtsc64();
+ atomic_decl(&tsc_exit_barrier, 1);
+ while (tsc_exit_barrier != 0)
+ ; /* spin for target to store its TSC */
+ tsc_delta = tsc_target - tsc_starter;
+ kprintf("TSC sync for cpu %d: 0x%016llx delta 0x%llx (%lld)\n",
+ psip->target_cpu, tsc_target, tsc_delta, tsc_delta);
+ if (ABS(tsc_delta) > (int64_t) TSC_sync_margin) {
+#if DEBUG
+ panic(
+#else
+ printf(
+#endif
+ "Unsynchronized TSC for cpu %d: "
+ "0x%016llx, delta 0x%llx\n",
+ psip->target_cpu, tsc_target, tsc_delta);
+ }
+ }
}
extern char prot_mode_gdt[];
start_info.starter_cpu = cpu_number();
start_info.target_cpu = slot_num;
start_info.target_lapic = lapic;
+ tsc_entry_barrier = 2;
+ tsc_exit_barrier = 2;
/*
* Perform the processor startup sequence with all running
*/
mp_rendezvous_no_intrs(start_cpu, (void *) &start_info);
+ start_info.target_cpu = 0;
+
ml_set_interrupts_enabled(istate);
lck_mtx_unlock(&mp_cpu_boot_lock);
simple_lock(&x86_topo_lock);
cdp->cpu_running = TRUE;
+ started_cpu();
simple_unlock(&x86_topo_lock);
}
#include <kern/cpu_data.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
+#include <kern/etimer.h>
#include <mach/vm_map.h>
#include <mach/machine/vm_param.h>
#include <vm/vm_kern.h>
struct fake_descriptor64 kernel_tss_desc64 = {
0,
sizeof(struct x86_64_tss)-1,
-#ifdef __x86_64__
- SZ_G,
-#else
0,
-#endif
ACC_P|ACC_PL_K|ACC_TSS,
0
};
/*
* Master CPU uses the tables built at boot time.
* Just set the index pointers to the low memory space.
- * Note that in 64-bit mode these are addressed in the
- * double-mapped window (uber-space).
*/
cdi->cdi_ktss = (void *)&master_ktss64;
cdi->cdi_sstk = (vm_offset_t) &master_sstk.top;
cpu_desc_table64_t *cdt = (cpu_desc_table64_t *) cdp->cpu_desc_tablep;
/*
* Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel
- * heap (cpu_desc_table) .
- * On K32 they're double-mapped in uber-space (over 4GB).
+ * heap (cpu_desc_table).
* LDT descriptors are mapped into a separate area.
*/
cdi->cdi_gdt.ptr = (struct fake_descriptor *)cdt->gdt;
*
* Refer to commpage/cpu_number.s for the IDT limit trick.
*/
- gdtptr64.length = GDTSZ * sizeof(struct real_descriptor64) - 1;
+ gdtptr64.length = GDTSZ * sizeof(struct real_descriptor) - 1;
gdtptr64.offset[0] = (uint32_t) cdi->cdi_gdt.ptr;
gdtptr64.offset[1] = KERNEL_UBER_BASE_HI32;
idtptr64.length = 0x1000 + cdp->cpu_number;
ml_load_desc64();
#else
/* Load the GDT, LDT, IDT and TSS */
- cdi->cdi_gdt.size = sizeof(struct real_descriptor64)*GDTSZ - 1;
+ cdi->cdi_gdt.size = sizeof(struct real_descriptor)*GDTSZ - 1;
cdi->cdi_idt.size = 0x1000 + cdp->cpu_number;
lgdt((unsigned long *) &cdi->cdi_gdt);
lidt((unsigned long *) &cdi->cdi_idt);
#include <kern/machine.h>
#include <kern/pms.h>
#include <kern/processor.h>
+#include <kern/etimer.h>
#include <i386/cpu_threads.h>
#include <i386/pmCPU.h>
#include <i386/cpuid.h>
lapic_send_ipi(cpu, LAPIC_PM_INTERRUPT);
}
+static rtc_nanotime_t *
+pmGetNanotimeInfo(void)
+{
+ return(&rtc_nanotime_info);
+}
+
/*
* Called by the power management kext to register itself and to get the
* callbacks it might need into other kernel functions. This interface
callbacks->ThreadBind = thread_bind;
callbacks->GetSavedRunCount = pmGetSavedRunCount;
callbacks->pmSendIPI = pmSendIPI;
+ callbacks->GetNanotimeInfo = pmGetNanotimeInfo;
callbacks->topoParms = &topoParms;
} else {
panic("Version mis-match between Kernel and CPU PM");
#define _I386_PMCPU_H_
#include <i386/cpu_topology.h>
+#include <i386/rtclock.h>
#ifndef ASSEMBLER
* This value should be changed each time that pmDsipatch_t or pmCallBacks_t
* changes.
*/
-#define PM_DISPATCH_VERSION 17
+#define PM_DISPATCH_VERSION 18
/*
* Dispatch table for functions that get installed when the power
processor_t (*ThreadBind)(processor_t proc);
uint32_t (*GetSavedRunCount)(void);
void (*pmSendIPI)(int cpu);
+ rtc_nanotime_t *(*GetNanotimeInfo)(void);
x86_topology_parameters_t *topoParms;
} pmCallBacks_t;
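/*
 * Sketch (assuming a power-management kext built against dispatch
 * version 18): the kext captures the callback table at registration time
 * and can then read the rtclock calibration data shared by the kernel:
 *
 *	static pmCallBacks_t pm_callbacks;
 *	...
 *	rtc_nanotime_t *rtc = pm_callbacks.GetNanotimeInfo();
 */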
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>
+#include <kern/etimer.h>
#include <mach/vm_prot.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h> /* for kernel_map */
uint32_t spare1;
} rtc_nanotime_t;
+#if 0
#include <kern/etimer.h>
+#endif
struct cpu_data;
*/
busFreq = EFI_FSB_frequency();
- if (cpuid_info()->cpuid_family != CPU_FAMILY_PENTIUM_M) {
- panic("tsc_init: unknown CPU family: 0x%X\n",
- cpuid_info()->cpuid_family);
- }
-
- switch (cpuid_info()->cpuid_model) {
- case CPUID_MODEL_NEHALEM: {
+ switch (cpuid_cpufamily()) {
+ case CPUFAMILY_INTEL_NEHALEM: {
uint64_t cpu_mhz;
uint64_t msr_flex_ratio;
uint64_t msr_platform_info;
#include <kdp/kdp_internal.h>
#include <kdp/kdp_private.h>
+#include <kdp/kdp_core.h>
#include <libsa/types.h>
/*17 */ kdp_breakpoint64_remove,
/*18 */ kdp_kernelversion,
/*19 */ kdp_readphysmem64,
-/*20 */ kdp_writephysmem64,
-/*21 */ kdp_readioport,
-/*22 */ kdp_writeioport,
-/*23 */ kdp_readmsr64,
-/*24 */ kdp_writemsr64,
+/*1A */ kdp_writephysmem64,
+/*1B */ kdp_readioport,
+/*1C */ kdp_writeioport,
+/*1D */ kdp_readmsr64,
+/*1E */ kdp_writemsr64,
+/*1F */ kdp_dumpinfo,
};
kdp_glob_t kdp;
* Version 11 of the KDP Protocol adds support for 64-bit wide memory
* addresses (read/write and breakpoints) as well as a dedicated
* kernelversion request. Version 12 adds read/writing of physical
- * memory with 64-bit wide memory addresses.
+ * memory with 64-bit wide memory addresses.
*/
#define KDP_VERSION 12
kdp_connect_req_t *rq = &pkt->connect_req;
size_t plen = *len;
kdp_connect_reply_t *rp = &pkt->connect_reply;
+ uint16_t rport, eport;
+ uint32_t key;
+ uint8_t seq;
if (plen < sizeof (*rq))
return (FALSE);
dprintf(("kdp_connect seq %x greeting %s\n", rq->hdr.seq, rq->greeting));
+ rport = rq->req_reply_port;
+ eport = rq->exc_note_port;
+ key = rq->hdr.key;
+ seq = rq->hdr.seq;
if (kdp.is_conn) {
- if (rq->hdr.seq == kdp.conn_seq) /* duplicate request */
+ if ((seq == kdp.conn_seq) && /* duplicate request */
+ (rport == kdp.reply_port) &&
+ (eport == kdp.exception_port) &&
+ (key == kdp.session_key))
rp->error = KDPERR_NO_ERROR;
- else
+ else
rp->error = KDPERR_ALREADY_CONNECTED;
}
else {
- kdp.reply_port = rq->req_reply_port;
- kdp.exception_port = rq->exc_note_port;
- kdp.is_conn = TRUE;
- kdp.conn_seq = rq->hdr.seq;
-
+ kdp.reply_port = rport;
+ kdp.exception_port = eport;
+ kdp.is_conn = TRUE;
+ kdp.conn_seq = seq;
+ kdp.session_key = key;
+
rp->error = KDPERR_NO_ERROR;
}
rp->hdr.is_reply = 1;
rp->hdr.len = sizeof (*rp);
- *reply_port = kdp.reply_port;
+ *reply_port = rport;
*len = rp->hdr.len;
if (current_debugger == KDP_CUR_DB)
kdp.reply_port = kdp.exception_port = 0;
kdp.is_halted = kdp.is_conn = FALSE;
kdp.exception_seq = kdp.conn_seq = 0;
+ kdp.session_key = 0;
if ((panicstr != NULL) && (return_on_panic == 0))
reattach_wait = 1;
return (TRUE);
}
+
+static boolean_t
+kdp_dumpinfo(
+ kdp_pkt_t *pkt,
+ int *len,
+ unsigned short *reply_port
+ )
+{
+ kdp_dumpinfo_req_t *rq = &pkt->dumpinfo_req;
+ kdp_dumpinfo_reply_t *rp = &pkt->dumpinfo_reply;
+ size_t plen = *len;
+
+ if (plen < sizeof (*rq))
+ return (FALSE);
+
+ dprintf(("kdp_dumpinfo file=%s destip=%s routerip=%s\n", rq->name, rq->destip, rq->routerip));
+ rp->hdr.is_reply = 1;
+ rp->hdr.len = sizeof (*rp);
+
+ if ((rq->type & KDP_DUMPINFO_MASK) != KDP_DUMPINFO_GETINFO) {
+ kdp_set_dump_info(rq->type, rq->name, rq->destip, rq->routerip,
+ rq->port);
+ }
+
+ /* gather some stats for reply */
+ kdp_get_dump_info(&rp->type, rp->name, rp->destip, rp->routerip,
+ &rp->port);
+
+ *reply_port = kdp.reply_port;
+ *len = rp->hdr.len;
+
+ return (TRUE);
+}
#define CORE_REMOTE_PORT 1069 /* hardwired, we can't really query the services file */
void kdp_panic_dump (void);
-
void abort_panic_transfer (void);
+void kdp_set_dump_info(const uint32_t flags, const char *file, const char *destip,
+ const char *routerip, const uint32_t port);
+void kdp_get_dump_info(uint32_t *flags, char *file, char *destip, char *routerip,
+ uint32_t *port);
+
struct corehdr *create_panic_header(unsigned int request, const char *corename, unsigned length, unsigned block);
#include <libsa/types.h>
typedef struct {
- unsigned short reply_port;
+ void *saved_state;
+ thread_t kdp_thread;
+ int kdp_cpu;
+ uint32_t session_key;
unsigned int conn_seq;
+ unsigned short reply_port;
+ unsigned short exception_port;
boolean_t is_conn;
- void *saved_state;
boolean_t is_halted;
- unsigned short exception_port;
unsigned char exception_seq;
boolean_t exception_ack_needed;
- int kdp_cpu;
- thread_t kdp_thread;
} kdp_glob_t;
extern kdp_glob_t kdp;
extern volatile int kdp_flag;
+extern int noresume_on_disconnect;
#define KDP_READY 0x1
#define KDP_ARP 0x2
#define DBG_POST_CORE 0x40
#define PANIC_LOG_DUMP 0x80
#define REBOOT_POST_CORE 0x100
+#define SYSTEM_LOG_DUMP 0x200
typedef boolean_t
(*kdp_dispatch_t) (
kdp_pkt_t *,
int
kdp_machine_msr64_write(kdp_writemsr64_req_t *, caddr_t /* data */, uint16_t /* lcpu */);
-
static boolean_t
kdp_writemsr64(kdp_pkt_t *, int *, unsigned short *);
+
+static boolean_t
+kdp_dumpinfo(kdp_pkt_t *, int *, unsigned short *);
* Definition of remote debugger protocol.
*/
+
+#ifdef MACH_KERNEL_PRIVATE
#include <mach/vm_prot.h>
#include <stdint.h>
+#endif
+
+#ifdef KDP_PROXY_PACK_SUPPORT
+#pragma pack(1)
+#define KDP_PACKED
+#else
+#define KDP_PACKED __attribute__((packed))
+#endif
/*
* Retransmit parameters
#endif /* DDEBUG_DEBUG || DEBUG_DEBUG */
#define KDP_REXMIT_TRIES 8 /* xmit 8 times, then give up */
-#define KDP_PACKED __attribute__((packed))
/*
* (NMI) Attention Max Wait Time
/* msr access (64-bit) */
KDP_READMSR64, KDP_WRITEMSR64,
+ /* get/dump panic/corefile info */
+ KDP_DUMPINFO,
+
/* keep this last */
KDP_INVALID_REQUEST
} kdp_req_t;
+typedef enum {
+ KDP_DUMPINFO_GETINFO = 0x00000000,
+ KDP_DUMPINFO_SETINFO = 0x00000001,
+ KDP_DUMPINFO_CORE = 0x00000102,
+ KDP_DUMPINFO_PANICLOG = 0x00000103,
+ KDP_DUMPINFO_SYSTEMLOG = 0x00000104,
+ KDP_DUMPINFO_DISABLE = 0x00000105,
+ KDP_DUMPINFO_MASK = 0x00000FFF,
+ KDP_DUMPINFO_DUMP = 0x00000100,
+
+ KDP_DUMPINFO_REBOOT = 0x10000000,
+ KDP_DUMPINFO_NORESUME = 0x20000000,
+ KDP_DUMPINFO_RESUME = 0x00000000, /* default behaviour */
+ KDP_DUMPINFO_NOINTR = 0x40000000, /* don't interrupt */
+ KDP_DUMPINFO_INTR = 0x00000000, /* default behaviour */
+} kdp_dumpinfo_t;
+
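/*
 * Sketch, not part of the patch: a debugger composes a request type by
 * OR-ing one command (low bits) with behavior flags (high bits), e.g.
 * "dump a core, reboot afterwards, and don't let stray packets abort it":
 *
 *	kdp_dumpinfo_t type = KDP_DUMPINFO_CORE |
 *	                      KDP_DUMPINFO_REBOOT |
 *	                      KDP_DUMPINFO_NOINTR;
 *
 * The kernel recovers the command with (type & KDP_DUMPINFO_MASK).
 */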
/*
* Common KDP packet header
+ * NOTE: kgmacros has a non-symboled version of kdp_hdr_t so that some basic
+ *       information can be gathered from a kernel without any symbols. Changes
+ *       to this structure need to be reflected in kgmacros as well.
*/
typedef struct {
kdp_req_t request:7; /* kdp_req_t, request type */
KDPERR_ALREADY_CONNECTED,
KDPERR_BAD_NBYTES,
KDPERR_BADFLAVOR, /* bad flavor in w/r regs */
+
KDPERR_MAX_BREAKPOINTS = 100,
KDPERR_BREAKPOINT_NOT_FOUND = 101,
KDPERR_BREAKPOINT_ALREADY_SET = 102
-
} kdp_error_t;
/*
kdp_hdr_t hdr;
} KDP_PACKED kdp_termination_ack_t;
+/*
+ * KDP_DUMPINFO
+ */
+typedef struct { /* KDP_DUMPINFO request */
+ kdp_hdr_t hdr;
+ char name[50];
+ char destip[16];
+ char routerip[16];
+ uint32_t port;
+ kdp_dumpinfo_t type;
+} KDP_PACKED kdp_dumpinfo_req_t;
+
+typedef struct { /* KDP_DUMPINFO reply */
+ kdp_hdr_t hdr;
+ char name[50];
+ char destip[16];
+ char routerip[16];
+ uint32_t port;
+ kdp_dumpinfo_t type;
+} KDP_PACKED kdp_dumpinfo_reply_t;
+
+
typedef union {
kdp_hdr_t hdr;
kdp_connect_req_t connect_req;
kdp_readmsr64_reply_t readmsr64_reply;
kdp_writemsr64_req_t writemsr64_req;
kdp_writemsr64_reply_t writemsr64_reply;
+ kdp_dumpinfo_req_t dumpinfo_req;
+ kdp_dumpinfo_reply_t dumpinfo_reply;
} kdp_pkt_t;
#define MAX_KDP_PKT_SIZE 1200 /* max packet size */
#define MAX_KDP_DATA_SIZE 1024 /* max r/w data per packet */
+/*
+ * Support relatively small request/responses here.
+ * If kgmacros needs to make a larger request, increase
+ * this buffer size.
+ */
+#define KDP_MANUAL_PACKET_SIZE 128
+struct kdp_manual_pkt {
+ unsigned char data[KDP_MANUAL_PACKET_SIZE];
+ unsigned int len;
+ boolean_t input;
+} KDP_PACKED;
+
+#ifdef KDP_PROXY_PACK_SUPPORT
+#pragma pack()
+#endif
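/*
 * Sketch of the intended manual-packet flow (an assumption based on the
 * kgmacros notes above, not a documented API): an attached debugger finds
 * the buffer through the lgManualPktAddr low-memory pointer, deposits a
 * request, and waits for the kernel to overwrite it with the reply:
 *
 *	struct kdp_manual_pkt *mp = ...;	// located via lgManualPktAddr
 *	memcpy(mp->data, request, request_len);
 *	mp->len = request_len;
 *	mp->input = TRUE;			// hand the packet to the kernel
 *	while (mp->input)
 *		;				// kernel clears input when the reply is ready
 */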
+
#endif // _KDP_PROTOCOL_H_
#include <mach/memory_object_types.h>
+#include <sys/msgbuf.h>
+
#include <string.h>
#define DO_ALIGN 1 /* align all packet data accesses */
boolean_t input;
} pkt, saved_reply;
-/*
- * Support relatively small request/responses here.
- * If kgmacros needs to make a larger request, increase
- * this buffer size
- */
-static struct {
- unsigned char data[128];
- unsigned int len;
- boolean_t input;
-} manual_pkt;
+struct kdp_manual_pkt manual_pkt;
struct {
struct {
static volatile boolean_t panicd_specified = FALSE;
static boolean_t router_specified = FALSE;
+static boolean_t corename_specified = FALSE;
static unsigned int panicd_port = CORE_REMOTE_PORT;
static struct ether_addr etherbroadcastaddr = {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}};
static boolean_t flag_panic_dump_in_progress = FALSE;
static boolean_t flag_router_mac_initialized = FALSE;
+static boolean_t flag_dont_abort_panic_dump = FALSE;
static boolean_t flag_arp_resolved = FALSE;
static char panicd_ip_str[20];
static char router_ip_str[20];
+static char corename_str[50];
static unsigned int panic_block = 0;
volatile unsigned int kdp_trigger_core_dump = 0;
if (!PE_parse_boot_argn("panicd_port", &panicd_port, sizeof (panicd_port)))
panicd_port = CORE_REMOTE_PORT;
+ if (PE_parse_boot_argn("_panicd_corename", &corename_str, sizeof (corename_str)))
+ corename_specified = TRUE;
+
kdp_flag |= KDP_READY;
if (current_debugger == NO_CUR_DB)
current_debugger = KDP_CUR_DB;
static void
kdp_reply(
- unsigned short reply_port
- )
+ unsigned short reply_port,
+ const boolean_t sideband
+ )
{
struct udpiphdr aligned_ui, *ui = &aligned_ui;
struct ip aligned_ip, *ip = &aligned_ip;
pkt.len += (unsigned int)sizeof (struct ether_header);
// save reply for possible retransmission
- bcopy((char *)&pkt, (char *)&saved_reply, sizeof(pkt));
+ if (!sideband)
+ bcopy((char *)&pkt, (char *)&saved_reply, sizeof(pkt));
(*kdp_en_send_pkt)(&pkt.data[pkt.off], pkt.len);
// increment expected sequence number
- exception_seq++;
+ if (!sideband)
+ exception_seq++;
}
static void
}
/* If we receive a kernel debugging packet whilst a
* core dump is in progress, abort the transfer and
- * enter the debugger.
+ * enter the debugger if not told otherwise.
*/
else
if (flag_panic_dump_in_progress)
{
- abort_panic_transfer();
+ if (!flag_dont_abort_panic_dump) {
+ abort_panic_transfer();
+ }
return;
}
(*kdp_en_send_pkt)(&saved_reply.data[saved_reply.off],
saved_reply.len);
goto again;
- } else if (hdr->seq != exception_seq) {
+ } else if ((hdr->seq != exception_seq) &&
+ (hdr->request != KDP_CONNECT)) {
printf("kdp: bad sequence %d (want %d)\n",
hdr->seq, exception_seq);
goto again;
if (kdp_packet((unsigned char*)&pkt.data[pkt.off],
(int *)&pkt.len,
(unsigned short *)&reply_port)) {
- kdp_reply(reply_port);
+ boolean_t sideband = FALSE;
+
+ /* If it's an "already connected" error, reply out of band so the
+ * saved-reply/sequence state stays untouched. For successful connects,
+ * resynchronize the expected sequence number. */
+ if (hdr->request == KDP_CONNECT) {
+ kdp_connect_reply_t *rp =
+ (kdp_connect_reply_t *) &pkt.data[pkt.off];
+ kdp_error_t err = rp->error;
+
+ if (err == KDPERR_NO_ERROR) {
+ exception_seq = hdr->seq;
+ } else if (err == KDPERR_ALREADY_CONNECTED) {
+ sideband = TRUE;
+ }
+ }
+
+ kdp_reply(reply_port, sideband);
}
again:
if (kdp_packet((unsigned char *)&pkt.data[pkt.off],
(int *)&pkt.len,
(unsigned short *)&reply_port))
- kdp_reply(reply_port);
+ kdp_reply(reply_port, FALSE);
if (hdr->request == KDP_REATTACH) {
reattach_wait = 0;
hdr->request=KDP_DISCONNECT;
*/
if (1 == kdp_trigger_core_dump) {
- kdp_flag &= ~PANIC_LOG_DUMP;
kdp_flag |= KDP_PANIC_DUMP_ENABLED;
kdp_panic_dump();
+ if (kdp_flag & REBOOT_POST_CORE)
+ kdp_machine_reboot();
kdp_trigger_core_dump = 0;
}
kdp.reply_port = kdp.exception_port = 0;
kdp.is_halted = kdp.is_conn = FALSE;
kdp.exception_seq = kdp.conn_seq = 0;
+ kdp.session_key = 0;
}
struct corehdr *
return coreh;
}
+static int kdp_send_crashdump_seek(char *corename, uint64_t seek_off)
+{
+ int panic_error;
+
+#if defined(__LP64__)
+ if (kdp_feature_large_crashdumps) {
+ panic_error = kdp_send_crashdump_pkt(KDP_SEEK, corename,
+ sizeof(seek_off),
+ &seek_off);
+ } else
+#endif
+ {
+ uint32_t off = (uint32_t) seek_off;
+ panic_error = kdp_send_crashdump_pkt(KDP_SEEK, corename,
+ sizeof(off), &off);
+ }
+
+ if (panic_error < 0) {
+ printf ("kdp_send_crashdump_pkt failed with error %d\n",
+ panic_error);
+ return panic_error;
+ }
+
+ return 0;
+}
+
int kdp_send_crashdump_data(unsigned int request, char *corename,
uint64_t length, caddr_t txstart)
{
- caddr_t txend = txstart + length;
int panic_error = 0;
- if (length <= SEGSIZE) {
- if ((panic_error = kdp_send_crashdump_pkt(request, corename, length, (caddr_t) txstart)) < 0) {
+ while (length > 0) {
+ uint64_t chunk = MIN(SEGSIZE, length);
+
+ panic_error = kdp_send_crashdump_pkt(request, corename, chunk,
+ (caddr_t) txstart);
+ if (panic_error < 0) {
printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
- return panic_error ;
- }
- }
- else
- {
- while (txstart <= (txend - SEGSIZE)) {
- if ((panic_error = kdp_send_crashdump_pkt(KDP_DATA, NULL, SEGSIZE, txstart)) < 0) {
- printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
- return panic_error;
- }
- txstart += SEGSIZE;
- if (!(panic_block % 2000))
- kdb_printf_unbuffered(".");
- }
- if (txstart < txend) {
- kdp_send_crashdump_pkt(request, corename, (unsigned int)(txend - txstart), txstart);
+ return panic_error;
}
+
+ if (!(panic_block % 2000))
+ kdb_printf_unbuffered(".");
+
+ txstart += chunk;
+ length -= chunk;
}
return 0;
}
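/*
 * Worked example (sketch): with the loop above, a region of
 * 2*SEGSIZE + 100 bytes goes out as three packets of SEGSIZE, SEGSIZE,
 * and 100 bytes, with a progress dot printed every 2000 blocks.
 */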
th = create_panic_header(request, corename, (unsigned)length, panic_block);
if (request == KDP_DATA) {
+ /* All packets are SEGSIZE in length, so the last packet may carry
+ * trailing bytes; pad them with a known filler so they can't be
+ * mistaken for data. */
+ if (length < SEGSIZE)
+ memset(th->th_data + length, 'X',
+ SEGSIZE - (uint32_t) length);
+
if (!kdp_machine_vm_read((mach_vm_address_t)(intptr_t)panic_data, (caddr_t) th->th_data, length)) {
memset ((caddr_t) th->th_data, 'X', (size_t)length);
}
extern char *inet_aton(const char *cp, struct in_addr *pin);
+void
+kdp_set_dump_info(const uint32_t flags, const char *filename,
+ const char *destipstr, const char *routeripstr,
+ const uint32_t port)
+{
+ uint32_t cmd;
+
+ if (destipstr && (destipstr[0] != '\0')) {
+ strlcpy(panicd_ip_str, destipstr, sizeof(panicd_ip_str));
+ panicd_specified = 1;
+ }
+
+ if (routeripstr && (routeripstr[0] != '\0')) {
+ strlcpy(router_ip_str, routeripstr, sizeof(router_ip_str));
+ router_specified = 1;
+ }
+
+ if (filename && (filename[0] != '\0')) {
+ strlcpy(corename_str, filename, sizeof(corename_str));
+ corename_specified = TRUE;
+ } else {
+ corename_specified = FALSE;
+ }
+
+ if (port)
+ panicd_port = port;
+
+ /* on a disconnect, should we stay in KDP or not? */
+ noresume_on_disconnect = (flags & KDP_DUMPINFO_NORESUME) ? 1 : 0;
+
+ if ((flags & KDP_DUMPINFO_DUMP) == 0)
+ return;
+
+ /* The remaining commands may modify kdp_flag. */
+ cmd = flags & KDP_DUMPINFO_MASK;
+ if (cmd == KDP_DUMPINFO_DISABLE) {
+ kdp_flag &= ~KDP_PANIC_DUMP_ENABLED;
+ panicd_specified = 0;
+ kdp_trigger_core_dump = 0;
+ return;
+ }
+
+ kdp_flag &= ~REBOOT_POST_CORE;
+ if (flags & KDP_DUMPINFO_REBOOT)
+ kdp_flag |= REBOOT_POST_CORE;
+
+ kdp_flag &= ~PANIC_LOG_DUMP;
+ if (cmd == KDP_DUMPINFO_PANICLOG)
+ kdp_flag |= PANIC_LOG_DUMP;
+
+ kdp_flag &= ~SYSTEM_LOG_DUMP;
+ if (cmd == KDP_DUMPINFO_SYSTEMLOG)
+ kdp_flag |= SYSTEM_LOG_DUMP;
+
+ /* trigger a dump */
+ kdp_flag |= DBG_POST_CORE;
+
+ flag_dont_abort_panic_dump = (flags & KDP_DUMPINFO_NOINTR) ?
+ TRUE : FALSE;
+
+ reattach_wait = 1;
+ logPanicDataToScreen = 1;
+ disableConsoleOutput = 0;
+ disable_debug_output = 0;
+ kdp_trigger_core_dump = 1;
+}
+
+void
+kdp_get_dump_info(uint32_t *flags, char *filename, char *destipstr,
+ char *routeripstr, uint32_t *port)
+{
+ if (destipstr) {
+ if (panicd_specified)
+ strlcpy(destipstr, panicd_ip_str,
+ sizeof(panicd_ip_str));
+ else
+ destipstr[0] = '\0';
+ }
+
+ if (routeripstr) {
+ if (router_specified)
+ strlcpy(routeripstr, router_ip_str,
+ sizeof(router_ip_str));
+ else
+ routeripstr[0] = '\0';
+ }
+
+ if (filename) {
+ if (corename_specified)
+ strlcpy(filename, corename_str,
+ sizeof(corename_str));
+ else
+ filename[0] = '\0';
+
+ }
+
+ if (port)
+ *port = panicd_port;
+
+ if (flags) {
+ *flags = 0;
+ if (!panicd_specified)
+ *flags |= KDP_DUMPINFO_DISABLE;
+ else if (kdp_flag & PANIC_LOG_DUMP)
+ *flags |= KDP_DUMPINFO_PANICLOG;
+ else
+ *flags |= KDP_DUMPINFO_CORE;
+
+ if (noresume_on_disconnect)
+ *flags |= KDP_DUMPINFO_NORESUME;
+ }
+}
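/*
 * Usage sketch (hypothetical filename and addresses): arm a panic-log
 * dump to a given server and reboot once the transfer completes:
 *
 *	kdp_set_dump_info(KDP_DUMPINFO_PANICLOG | KDP_DUMPINFO_REBOOT,
 *	    "paniclog-host1", "17.1.2.3", NULL, 0);
 */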
+
+
/* Primary dispatch routine for the system dump */
void
kdp_panic_dump(void)
{
- char corename[50];
char coreprefix[10];
int panic_error;
kdp_get_xnu_version((char *) &pkt.data[0]);
- /* Panic log bit takes precedence over core dump bit */
- if ((panicstr != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP))
- strncpy(coreprefix, "paniclog", sizeof(coreprefix));
- else
- strncpy(coreprefix, "core", sizeof(coreprefix));
+ if (!corename_specified) {
+ /* Panic log bit takes precedence over core dump bit */
+ if ((panicstr != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP))
+ strlcpy(coreprefix, "paniclog", sizeof(coreprefix));
+ else if (kdp_flag & SYSTEM_LOG_DUMP)
+ strlcpy(coreprefix, "systemlog", sizeof(coreprefix));
+ else
+ strlcpy(coreprefix, "core", sizeof(coreprefix));
- abstime = mach_absolute_time();
- pkt.data[20] = '\0';
- snprintf (corename, sizeof(corename), "%s-%s-%d.%d.%d.%d-%x",
- coreprefix, &pkt.data[0],
- (current_ip & 0xff000000) >> 24,
- (current_ip & 0xff0000) >> 16,
- (current_ip & 0xff00) >> 8,
- (current_ip & 0xff),
- (unsigned int) (abstime & 0xffffffff));
+ abstime = mach_absolute_time();
+ pkt.data[20] = '\0';
+ snprintf (corename_str, sizeof(corename_str), "%s-%s-%d.%d.%d.%d-%x",
+ coreprefix, &pkt.data[0],
+ (current_ip & 0xff000000) >> 24,
+ (current_ip & 0xff0000) >> 16,
+ (current_ip & 0xff00) >> 8,
+ (current_ip & 0xff),
+ (unsigned int) (abstime & 0xffffffff));
+ }
if (0 == inet_aton(panicd_ip_str, (struct in_addr *) &panic_server_ip)) {
printf("inet_aton() failed interpreting %s as a panic server IP\n", panicd_ip_str);
destination_mac.ether_addr_octet[5] & 0xff);
printf("Kernel map size is %llu\n", (unsigned long long) get_vmmap_size(kernel_map));
- printf("Sending write request for %s\n", corename);
+ printf("Sending write request for %s\n", corename_str);
- if ((panic_error = kdp_send_crashdump_pkt(KDP_WRQ, corename, 0 , NULL)) < 0) {
+ if ((panic_error = kdp_send_crashdump_pkt(KDP_WRQ, corename_str, 0 , NULL)) < 0) {
printf ("kdp_send_crashdump_pkt failed with error %d\n", panic_error);
goto panic_dump_exit;
}
/* Just the panic log requested */
if ((panicstr != (char *) 0) && (kdp_flag & PANIC_LOG_DUMP)) {
printf("Transmitting panic log, please wait: ");
- kdp_send_crashdump_data(KDP_DATA, corename, (unsigned int)(debug_buf_ptr - debug_buf), debug_buf);
+ kdp_send_crashdump_data(KDP_DATA, corename_str,
+ debug_buf_ptr - debug_buf,
+ debug_buf);
kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
printf("Please file a bug report on this panic, if possible.\n");
goto panic_dump_exit;
}
+ /* maybe we wanted the systemlog */
+ if (kdp_flag & SYSTEM_LOG_DUMP) {
+ long start_off = msgbufp->msg_bufx;
+ long len;
+
+ printf("Transmitting system log, please wait: ");
+ if (start_off >= msgbufp->msg_bufr) {
+ len = msgbufp->msg_size - start_off;
+ kdp_send_crashdump_data(KDP_DATA, corename_str, len,
+ msgbufp->msg_bufc + start_off);
+
+ /* seek to remove trailing bytes */
+ if (len & (SEGSIZE - 1))
+ kdp_send_crashdump_seek(corename_str, len);
+ start_off = 0;
+ }
+
+ if (start_off != msgbufp->msg_bufr) {
+ len = msgbufp->msg_bufr - start_off;
+ kdp_send_crashdump_data(KDP_DATA, corename_str, len,
+ msgbufp->msg_bufc + start_off);
+ }
+
+ kdp_send_crashdump_pkt (KDP_EOF, NULL, 0, ((void *) 0));
+ goto panic_dump_exit;
+ }
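/*
 * Sketch of the circular-buffer walk above: msg_bufx is the write index,
 * msg_bufr the read index. When the buffer has wrapped
 * (msg_bufx >= msg_bufr) the log is transmitted as two spans,
 * [msg_bufx, msg_size) followed by [0, msg_bufr), with a KDP_SEEK in
 * between whenever the first span is not SEGSIZE-aligned, so its 'X'
 * padding is overwritten rather than left in the file.
 */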
+
/* We want a core dump if we're here */
kern_dump();
+
panic_dump_exit:
abort_panic_transfer();
pkt.input = FALSE;
abort_panic_transfer(void)
{
flag_panic_dump_in_progress = FALSE;
+ flag_dont_abort_panic_dump = FALSE;
not_in_kdp = 1;
panic_block = 0;
}
* Prefer the last processor, when appropriate.
*/
if (processor != PROCESSOR_NULL) {
- if (thread->sched_pri < BASEPRI_RTQUEUES && processor->processor_meta != PROCESSOR_META_NULL &&
- processor->processor_meta->primary->state == PROCESSOR_IDLE)
+ if (processor->processor_meta != PROCESSOR_META_NULL)
processor = processor->processor_meta->primary;
if (processor->processor_set != pset || processor->state == PROCESSOR_INACTIVE ||
processor->state == PROCESSOR_SHUTDOWN || processor->state == PROCESSOR_OFF_LINE)
processor = PROCESSOR_NULL;
else
- if (processor->state == PROCESSOR_IDLE ||
- (thread->sched_pri > BASEPRI_DEFAULT && processor->current_pri < thread->sched_pri))
+ if (processor->state == PROCESSOR_IDLE)
return (processor);
}
* Use feature flags (eg, hw.optional.altivec) to test for optional
* functionality.
*/
-#define CPUFAMILY_UNKNOWN 0
-#define CPUFAMILY_POWERPC_G3 0xcee41549
-#define CPUFAMILY_POWERPC_G4 0x77c184ae
-#define CPUFAMILY_POWERPC_G5 0xed76d8aa
-#define CPUFAMILY_INTEL_6_13 0xaa33392b
-#define CPUFAMILY_INTEL_6_14 0x73d67300 /* "Intel Core Solo" and "Intel Core Duo" (32-bit Pentium-M with SSE3) */
-#define CPUFAMILY_INTEL_6_15 0x426f69ef /* "Intel Core 2 Duo" */
-#define CPUFAMILY_INTEL_6_23 0x78ea4fbc /* Penryn */
-#define CPUFAMILY_INTEL_6_26 0x6b5a4cd2 /* Nehalem */
-#define CPUFAMILY_ARM_9 0xe73283ae
-#define CPUFAMILY_ARM_11 0x8ff620d8
-#define CPUFAMILY_ARM_XSCALE 0x53b005f5
-#define CPUFAMILY_ARM_13 0x0cc90e64
-
-#define CPUFAMILY_INTEL_YONAH CPUFAMILY_INTEL_6_14
-#define CPUFAMILY_INTEL_MEROM CPUFAMILY_INTEL_6_15
-#define CPUFAMILY_INTEL_PENRYN CPUFAMILY_INTEL_6_23
-#define CPUFAMILY_INTEL_NEHALEM CPUFAMILY_INTEL_6_26
-
-#define CPUFAMILY_INTEL_CORE CPUFAMILY_INTEL_6_14
-#define CPUFAMILY_INTEL_CORE2 CPUFAMILY_INTEL_6_15
+#define CPUFAMILY_UNKNOWN 0
+#define CPUFAMILY_POWERPC_G3 0xcee41549
+#define CPUFAMILY_POWERPC_G4 0x77c184ae
+#define CPUFAMILY_POWERPC_G5 0xed76d8aa
+#define CPUFAMILY_INTEL_6_13 0xaa33392b
+#define CPUFAMILY_INTEL_YONAH 0x73d67300
+#define CPUFAMILY_INTEL_MEROM 0x426f69ef
+#define CPUFAMILY_INTEL_PENRYN 0x78ea4fbc
+#define CPUFAMILY_INTEL_NEHALEM 0x6b5a4cd2
+#define CPUFAMILY_ARM_9 0xe73283ae
+#define CPUFAMILY_ARM_11 0x8ff620d8
+#define CPUFAMILY_ARM_XSCALE 0x53b005f5
+#define CPUFAMILY_ARM_13 0x0cc90e64
+
+/* The following synonyms are deprecated: */
+#define CPUFAMILY_INTEL_6_14 CPUFAMILY_INTEL_YONAH
+#define CPUFAMILY_INTEL_6_15 CPUFAMILY_INTEL_MEROM
+#define CPUFAMILY_INTEL_6_23 CPUFAMILY_INTEL_PENRYN
+#define CPUFAMILY_INTEL_6_26 CPUFAMILY_INTEL_NEHALEM
+
+#define CPUFAMILY_INTEL_CORE CPUFAMILY_INTEL_YONAH
+#define CPUFAMILY_INTEL_CORE2 CPUFAMILY_INTEL_MEROM
+
#endif /* _MACH_MACHINE_H_ */
uint32_t lgDevSlot2; /* 0x5490 For developer use */
uint32_t lgOSVersion; /* 0x5494 Pointer to OS version string */
uint32_t lgRebootFlag; /* 0x5498 Pointer to debugger reboot trigger */
- uint32_t lgRsv49C[729]; /* 0x549C Reserved - push to 1 page */
+ uint32_t lgManualPktAddr; /* 0x549C Pointer to manual packet structure */
+ uint32_t lgRsv49C[728]; /* 0x54A0 Reserved - push to 1 page */
} lowglo;
extern lowglo lowGlo;
.long 0 ; 5490 Reserved for developer use
.long EXT(osversion) ; 5494 Pointer to osversion string, debugging aid
.long EXT(flag_kdp_trigger_reboot) ; 5498 Pointer to KDP reboot trigger, debugging aid
+ .long EXT(manual_pkt) ; 549C Pointer to KDP manual packet, debugging aid
;
; The "shared page" is used for low-level debugging and is actually 1/2 page long
uint64_t lgDevSlot2; /* 0xffffff8000002918 For developer use */
uint64_t lgOSVersion; /* 0xffffff8000002920 Pointer to OS version string */
uint64_t lgRebootFlag; /* 0xffffff8000002928 Pointer to debugger reboot trigger */
- uint64_t lgRsv49C[218]; /* 0xffffff8000002930 Reserved - push to 1 page */
+ uint64_t lgManualPktAddr; /* 0xffffff8000002930 Pointer to manual packet structure */
+
+ uint64_t lgRsv49C[217]; /* 0xffffff8000002938 Reserved - push to 1 page */
} lowglo;
#pragma pack()
extern lowglo lowGlo;
.quad EXT(osversion) /* +0x920 Pointer to osversion string */
#if MACH_KDP
.quad EXT(flag_kdp_trigger_reboot) /* +0x928 Pointer to debugger reboot trigger */
+ .quad EXT(manual_pkt) /* +0x930 Pointer to manual packet structure */
#else
.quad 0 /* +0x928 Reserved */
-#endif
- .fill 436, 4, 0 /* pad to 0x1000 (page size) - rdar://problem/5783217 */
+ .quad 0 /* +0x930 Reserved */
+#endif
+ .fill 434, 4, 0 /* pad to 0x1000 (page size) - rdar://problem/5783217 */