CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED, 0, 0,
cpu_ucode_update, "S", "Microcode update interface");
+SYSCTL_NODE(_machdep_cpu, OID_AUTO, tsc_ccc, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
+ "TSC/CCC frequency information");
+
+SYSCTL_PROC(_machdep_cpu_tsc_ccc, OID_AUTO, numerator,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.numerator),
+ sizeof(uint32_t),
+ i386_cpu_info, "I", "Numerator of TSC/CCC ratio");
+
+SYSCTL_PROC(_machdep_cpu_tsc_ccc, OID_AUTO, denominator,
+ CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
+ (void *)offsetof(i386_cpu_info_t, cpuid_tsc_leaf.denominator),
+ sizeof(uint32_t),
+ i386_cpu_info, "I", "Denominator of TSC/CCC ratio");
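+/*
+ * These publish as "machdep.cpu.tsc_ccc.numerator" and
+ * "machdep.cpu.tsc_ccc.denominator". A user-space read might look like the
+ * following sketch (assuming the standard <sys/sysctl.h> interface):
+ *
+ *	uint32_t num = 0;
+ *	size_t len = sizeof(num);
+ *	sysctlbyname("machdep.cpu.tsc_ccc.numerator", &num, &len, NULL, 0);
+ */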
+
static const uint32_t apic_timer_vector = (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT);
static const uint32_t apic_IPI_vector = (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT);
{
uint32_t kmem;
- if (PE_parse_boot_argn("kmem", &kmem, sizeof (kmem))) {
+ if (PE_i_can_has_debugger(NULL) &&
+ PE_parse_boot_argn("kmem", &kmem, sizeof (kmem))) {
if (kmem & 0x1) {
dev_kmem_enabled = TRUE;
}
continue; /* entry is too big, just carry on with the next guy */
}
+ //
+ // If a file is not an autocandidate (i.e. it's a user-tagged file desirous of
+ // being hotfile cached) but it is already bigger than 4 megs, don't bother
+ // hotfile caching it. Note that if a user-tagged file starts small, gets
+ // adopted and then grows over time, we will allow it to grow bigger than 4 megs,
+ // which is intentional for things like the Mail or Photos database files which
+ // grow slowly over time and benefit from being on the FastDevice.
+ //
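+ // (For example, with a hypothetical 4 KiB allocation block size the 4 MB
+ // cutoff below works out to 1024 allocation blocks.)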
+ if ((hfsmp->hfs_flags & HFS_CS_HOTFILE_PIN) &&
+ !(VTOC(vp)->c_attr.ca_recflags & kHFSAutoCandidateMask) &&
+ (VTOC(vp)->c_attr.ca_recflags & kHFSFastDevCandidateMask) &&
+ (unsigned int)fileblocks > ((4*1024*1024) / (uint64_t)HFSTOVCB(hfsmp)->blockSize)) {
+
+ vnode_clearfastdevicecandidate(vp); // turn off the fast-dev-candidate flag so we don't keep trying to cache it.
+
+ hfs_unlock(VTOC(vp));
+ vnode_put(vp);
+ listp->hfl_hotfile[i].hf_temperature = 0;
+ listp->hfl_next++;
+ listp->hfl_totalblocks -= listp->hfl_hotfile[i].hf_blocks;
+ continue; /* entry is too big, just carry on with the next guy */
+ }
+
if (fileblocks > hfs_hotfile_cur_freeblks(hfsmp)) {
//
// No room for this file. Although eviction should have made space,
// it was an automatically added file and this function is intended
// to pin new blocks being added to user-generated content.
//
- // If a file is marked FastDevPinned or FastDevCandidate it is an
- // existing pinned file or a new file that should be pinned.
- //
if (fcb->ff_cp->c_attr.ca_recflags & kHFSAutoCandidateMask) {
return 0;
}
- if ((fcb->ff_cp->c_attr.ca_recflags & (kHFSFastDevPinnedMask|kHFSFastDevCandidateMask)) != 0) {
+ //
+ // If a file is marked FastDevPinned it is an existing pinned file
+ // or a new file that should be pinned.
+ //
+ // If a file is marked FastDevCandidate it is a new file that is
+ // being written to for the first time so we don't want to pin it
+ // just yet as it may not meet the criteria (i.e. too large).
+ //
+ if ((fcb->ff_cp->c_attr.ca_recflags & (kHFSFastDevPinnedMask)) != 0) {
pin_blocks = 1;
} else {
pin_blocks = 0;
SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRTM, 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasHLE, 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasADX, 0, sysctl_cpu_capability, "I", "");
+SYSCTL_PROC(_hw_optional, OID_AUTO, mpx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasMPX, 0, sysctl_cpu_capability, "I", "");
+SYSCTL_PROC(_hw_optional, OID_AUTO, sgx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasSGX, 0, sysctl_cpu_capability, "I", "");
#else
#error Unsupported arch
#endif /* !__i386__ && !__x86_64 && !__arm__ && ! __arm64__ */
tmp, FALSE) != KERN_SUCCESS) {
kmem_free(kernel_map, copy_start,
round_page(arg_size));
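+ /* also discard the copy object so it is not leaked on this error path */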
+ vm_map_copy_discard(tmp);
return (EIO);
}
static UInt32 cs_blob_size_max = 0;
static SInt32 cs_blob_count_peak = 0;
-int cs_validation = 1;
-
-#ifndef SECURE_KERNEL
-SYSCTL_INT(_vm, OID_AUTO, cs_validation, CTLFLAG_RW | CTLFLAG_LOCKED, &cs_validation, 0, "Do validate code signatures");
-#endif
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_count, 0, "Current number of code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, (int *)(uintptr_t)&cs_blob_size, 0, "Current size of all code signature blobs");
SYSCTL_INT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
int nxt = 0, ours = 0;
struct ifnet *inifp, *deliverifp = NULL;
ipfilter_t inject_ipfref = NULL;
- int seen;
+ int seen = 1;
struct in6_ifaddr *ia6 = NULL;
struct sockaddr_in6 *dst6;
#if DUMMYNET
int cs_invalid_page(addr64_t);
int csproc_get_platform_path(struct proc *);
-extern int cs_validation;
#if !SECURE_KERNEL
extern int cs_enforcement_panic;
#endif
+++ /dev/null
-# See top level .clang-format for explanation of options
-AlignEscapedNewlinesLeft: true
-AlignTrailingComments: true
-AllowAllParametersOfDeclarationOnNextLine: true
-AllowShortBlocksOnASingleLine: true
-AllowShortCaseLabelsOnASingleLine: true
-AllowShortFunctionsOnASingleLine: None
-AllowShortIfStatementsOnASingleLine: false
-AllowShortLoopsOnASingleLine: false
-AlwaysBreakAfterDefinitionReturnType: false
-AlwaysBreakBeforeMultilineStrings: true
-BinPackArguments: true
-BinPackParameters: false
-BreakBeforeBinaryOperators: None
-BreakBeforeBraces: Allman
-ColumnLimit: 132
-IndentCaseLabels: false
-IndentWidth: 4
-IndentWrappedFunctionNames: false
-KeepEmptyLinesAtTheStartOfBlocks: false
-PointerAlignment: Middle
-SpaceAfterCStyleCast: false
-SpaceBeforeAssignmentOperators: true
-SpaceBeforeParens: ControlStatements
-SpaceInEmptyParentheses: false
-SpacesInCStyleCastParentheses: false
-SpacesInParentheses: false
-SpacesInSquareBrackets: false
-TabWidth: 4
-UseTab: Never
--- /dev/null
+./iokit/.clang-format
\ No newline at end of file
infoDict = OSDynamicCast(OSDictionary,
- mkextInfoDictArray->getObject(i));
+ mkextInfoDictArray->getObject(i));
/* Create the kext for the entry, then release it, because the
* kext system keeps them around until explicitly removed.
* Any creation/registration failures are already logged for us.
*/
- OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData);
- OSSafeRelease(newKext);
+ if (infoDict) {
+ OSKext * newKext = OSKext::withMkext2Info(infoDict, mkextData);
+ OSSafeRelease(newKext);
+ }
}
/* Even if we didn't keep any kexts from the mkext, we may have a load
OSCollectionIterator * iterator = NULL; // must release
OSData * executable = NULL; // must release
- if (!super::init()) {
+ if (anInfoDict == NULL || !super::init()) {
goto finish;
}
/* Get the path. Don't look for an arch-specific path property.
*/
kextPath = OSDynamicCast(OSString,
- anInfoDict->getObject(kMKEXTBundlePathKey));
+ anInfoDict->getObject(kMKEXTBundlePathKey));
if (!setInfoDictionaryAndPath(anInfoDict, kextPath)) {
goto finish;
+++ /dev/null
-*.pbxuser
-*.perspectivev3
-build/
FALSE);
assert(KERN_SUCCESS == kr);
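+ /* copy in only the bytes covering the objects actually returned, rather than the whole allocation */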
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)oaddr,
- (vm_map_size_t)osize, TRUE, &pcopy);
+ (vm_map_size_t)(num_objects * sizeof(*objects)), TRUE, &pcopy);
assert(KERN_SUCCESS == kr);
*objectsp = (default_pager_object_array_t)objects;
FALSE);
assert(KERN_SUCCESS == kr);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
- (vm_map_size_t)size, TRUE, ©);
+ (vm_map_size_t)(actual * sizeof(*pages)), TRUE, ©);
assert(KERN_SUCCESS == kr);
pkes.IA_frequency_clipping_cause = ~0ULL;
uint32_t ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS;
+ /* This should perhaps be a generic register map module for registers
+ * that have identical functionality but were renumbered.
+ */
+ switch (cpuid_cpufamily()) {
+ case CPUFAMILY_INTEL_SKYLAKE:
+ ia_perf_limits = MSR_IA32_IA_PERF_LIMIT_REASONS_SKL;
+ break;
+ default:
+ break;
+ }
rdmsr64_carefully(ia_perf_limits, &pkes.IA_frequency_clipping_cause);
setif(bits, kHasADX, cpuid_features() &
CPUID_LEAF7_FEATURE_ADX);
+ setif(bits, kHasMPX, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_MPX);
+ setif(bits, kHasSGX, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_SGX);
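+ /* these leaf-7 bits are also reported to user space via the
+ * hw.optional.mpx and hw.optional.sgx sysctls added in this change */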
+
uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
(cpuid_leaf7_features() &
#define kHasHLE 0x0000000200000000ULL
#define kHasRDSEED 0x0000000800000000ULL
#define kHasADX 0x0000000400000000ULL
+#define kHasMPX 0x0000001000000000ULL
+#define kHasSGX 0x0000002000000000ULL
#ifndef __ASSEMBLER__
DBG(" EBX : 0x%x\n", reg[ebx]);
DBG(" ECX : 0x%x\n", reg[ecx]);
}
+
+ if (info_p->cpuid_max_basic >= 0x15) {
+ /*
+ * TSC/CCC frequency leaf:
+ */
+ cpuid_fn(0x15, reg);
+ info_p->cpuid_tsc_leaf.denominator = reg[eax];
+ info_p->cpuid_tsc_leaf.numerator = reg[ebx];
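+ /* Per the Intel SDM, a numerator (EBX) of 0 means the TSC/core crystal
+ * clock ratio is not enumerated on this part. */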
+
+ DBG(" TSC/CCC Information Leaf:\n");
+ DBG(" numerator : 0x%x\n", reg[ebx]);
+ DBG(" denominator : 0x%x\n", reg[eax]);
+ }
+
+ return;
}
static uint32_t
case CPUID_MODEL_BRYSTALWELL:
cpufamily = CPUFAMILY_INTEL_BROADWELL;
break;
+ case CPUID_MODEL_SKYLAKE:
+ case CPUID_MODEL_SKYLAKE_DT:
+ cpufamily = CPUFAMILY_INTEL_SKYLAKE;
+ break;
}
break;
}
{CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
{CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
{CPUID_LEAF7_FEATURE_ADX, "ADX"},
+ {CPUID_LEAF7_FEATURE_IPT, "IPT"},
+ {CPUID_LEAF7_FEATURE_SGX, "SGX"},
+ {CPUID_LEAF7_FEATURE_PQM, "PQM"},
+ {CPUID_LEAF7_FEATURE_FPU_CSDS, "FPU_CSDS"},
+ {CPUID_LEAF7_FEATURE_MPX, "MPX"},
+ {CPUID_LEAF7_FEATURE_PQE, "PQE"},
+ {CPUID_LEAF7_FEATURE_CLFSOPT, "CLFSOPT"},
+ {CPUID_LEAF7_FEATURE_SHA, "SHA"},
{0, 0}
};
#define CPUID_LEAF7_FEATURE_RDSEED _Bit(18) /* RDSEED Instruction */
#define CPUID_LEAF7_FEATURE_ADX _Bit(19) /* ADX Instructions */
#define CPUID_LEAF7_FEATURE_SMAP _Bit(20) /* Supervisor Mode Access Protect */
+#define CPUID_LEAF7_FEATURE_SGX _Bit(2) /* Software Guard eXtensions */
+#define CPUID_LEAF7_FEATURE_PQM _Bit(12) /* Platform Qos Monitoring */
+#define CPUID_LEAF7_FEATURE_FPU_CSDS _Bit(13) /* FPU CS/DS deprecation */
+#define CPUID_LEAF7_FEATURE_MPX _Bit(14) /* Memory Protection eXtensions */
+#define CPUID_LEAF7_FEATURE_PQE _Bit(15) /* Platform Qos Enforcement */
+#define CPUID_LEAF7_FEATURE_CLFSOPT _Bit(23) /* CLFSOPT */
+#define CPUID_LEAF7_FEATURE_IPT _Bit(25) /* Intel Processor Trace */
+#define CPUID_LEAF7_FEATURE_SHA _Bit(29) /* SHA instructions */
+
+#define CPUID_LEAF7_FEATURE_PREFETCHWT1 _HBit(0)/* Prefetch Write/T1 hint */
/*
* The CPUID_EXTFEATURE_XXX values define 64-bit values
#define CPUID_MODEL_BROADWELL_ULX 0x3D
#define CPUID_MODEL_BROADWELL_ULT 0x3D
#define CPUID_MODEL_BRYSTALWELL 0x47
+#define CPUID_MODEL_SKYLAKE 0x4E
+#define CPUID_MODEL_SKYLAKE_ULT 0x4E
+#define CPUID_MODEL_SKYLAKE_ULX 0x4E
+#define CPUID_MODEL_SKYLAKE_DT 0x5E
#define CPUID_VMM_FAMILY_UNKNOWN 0x0
#define CPUID_VMM_FAMILY_VMWARE 0x1
uint8_t fixed_width;
} cpuid_arch_perf_leaf_t;
+/* The TSC to Core Crystal (RefCLK) Clock Information leaf */
+typedef struct {
+ uint32_t numerator;
+ uint32_t denominator;
+} cpuid_tsc_leaf_t;
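+/* TSC frequency = ART (core crystal) frequency * numerator / denominator;
+ * see the Skylake ART handling in tsc.c in this change. */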
+
/* Physical CPU info - this is exported out of the kernel (kexts), so be wary of changes */
typedef struct {
char cpuid_vendor[16];
cpuid_arch_perf_leaf_t *cpuid_arch_perf_leafp;
cpuid_xsave_leaf_t *cpuid_xsave_leafp;
uint64_t cpuid_leaf7_features;
+ cpuid_tsc_leaf_t cpuid_tsc_leaf;
cpuid_xsave_leaf_t cpuid_xsave_leaf[2];
} i386_cpu_info_t;
/*
* CR4
*/
+#define CR4_SEE 0x00008000 /* Secure Enclave Enable XXX */
#define CR4_SMAP 0x00200000 /* Supervisor-Mode Access Protect */
#define CR4_SMEP 0x00100000 /* Supervisor-Mode Execute Protect */
#define CR4_OSXSAVE 0x00040000 /* OS supports XSAVE */
#define XCR0_X87 (1ULL << 0) /* x87, FPU/MMX (always set) */
#define XCR0_SSE (1ULL << 1) /* SSE supported by XSAVE/XRESTORE */
#define XCR0_YMM (1ULL << 2) /* YMM state available */
+#define XCR0_BNDREGS (1ULL << 3) /* MPX Bounds register state */
+#define XCR0_BNDCSR (1ULL << 4) /* MPX Bounds configuration/state */
#define XFEM_X87 XCR0_X87
#define XFEM_SSE XCR0_SSE
#define XFEM_YMM XCR0_YMM
+#define XFEM_BNDREGS XCR0_BNDREGS
+#define XFEM_BNDCSR XCR0_BNDCSR
#define XCR0 (0)
#define PMAP_PCID_PRESERVE (1ULL << 63)
#define MSR_IA32_PP0_ENERGY_STATUS 0x639
#define MSR_IA32_PP1_ENERGY_STATUS 0x641
+#if !defined(XNU_HIDE_SKYLAKE)
+#define MSR_IA32_IA_PERF_LIMIT_REASONS_SKL 0x64F
+#endif
#define MSR_IA32_IA_PERF_LIMIT_REASONS 0x690
#define MSR_IA32_GT_PERF_LIMIT_REASONS 0x6B0
}
switch (cpuid_cpufamily()) {
+ case CPUFAMILY_INTEL_SKYLAKE: {
+ /*
+ * Skylake and later have an Always Running Timer (ART) providing
+ * the reference frequency. CPUID leaf 0x15 determines the
+ * relationship between this and the TSC frequency, expressed as
+ * - multiplier (numerator, N), and
+ * - divisor (denominator, M).
+ * So that TSC = ART * N / M.
+ */
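+ /*
+ * Worked example with hypothetical values: refFreq = 24 MHz and
+ * N/M = 250/2 gives tscFreq = 24000000 * 250 / 2 = 3.0 GHz.
+ */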
+ cpuid_tsc_leaf_t *tsc_leafp = &cpuid_info()->cpuid_tsc_leaf;
+ uint64_t N = (uint64_t) tsc_leafp->numerator;
+ uint64_t M = (uint64_t) tsc_leafp->denominator;
+ uint64_t refFreq;
+
+ refFreq = EFI_get_frequency("ARTFrequency");
+ if (refFreq == 0)
+ refFreq = BASE_ART_CLOCK_SOURCE;
+
+ assert(N != 0);
+ assert(M != 1);
+ tscFreq = refFreq * N / M;
+ busFreq = tscFreq; /* bus is APIC frequency */
+
+ kprintf(" ART: Frequency = %6d.%06dMHz, N/M = %lld/%llu\n",
+ (uint32_t)(refFreq / Mega),
+ (uint32_t)(refFreq % Mega),
+ N, M);
+
+ break;
+ }
default: {
uint64_t msr_flex_ratio;
uint64_t msr_platform_info;
#define _I386_TSC_H_
#define BASE_NHM_CLOCK_SOURCE 133333333ULL
+#define BASE_ART_CLOCK_SOURCE 24000000ULL /* 24 MHz */
#define IA32_PERF_STS 0x198
#define SLOW_TSC_THRESHOLD 1000067800 /* if slower, nonzero shift required in nanotime() algorithm */
kern_return_t kr;
rcv_addr = 0;
+ if (vm_map_copy_validate_size(map, copy, (vm_map_size_t)size) == FALSE)
+ panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p",
+ dsc, size, (unsigned long long)copy->size, copy);
kr = vm_map_copyout(map, &rcv_addr, copy);
if (kr != KERN_SUCCESS) {
if (kr == KERN_RESOURCE_SHORTAGE)
/* prepare the table out-of-line data for return */
if (table_size > 0) {
- if (table_size > infop->iis_table_size * sizeof(ipc_info_name_t))
+ vm_size_t used_table_size;
+
+ used_table_size = infop->iis_table_size * sizeof(ipc_info_name_t);
+ if (table_size > used_table_size)
bzero((char *)&table_info[infop->iis_table_size],
- table_size - infop->iis_table_size * sizeof(ipc_info_name_t));
+ table_size - used_table_size);
kr = vm_map_unwire(
ipc_kernel_map,
FALSE);
assert(kr == KERN_SUCCESS);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr,
- (vm_map_size_t)table_size, TRUE, ©);
+ (vm_map_size_t)used_table_size, TRUE, ©);
assert(kr == KERN_SUCCESS);
*tablep = (ipc_info_name_t *)copy;
*tableCntp = infop->iis_table_size;
"Port Name: 0x%x, "
"Expected Guard: 0x%x, "
"Received Guard: 0x%x\n",
- (unsigned)t,
+ (unsigned)VM_KERNEL_UNSLIDE_OR_PERM(t),
(unsigned)name,
(unsigned)portguard,
(unsigned)inguard);
result = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(ipc_kernel_map)), FALSE);
assert(result == KERN_SUCCESS);
- result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)size, TRUE, ©);
+ result = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr, (vm_map_size_t)needed, TRUE, ©);
assert(result == KERN_SUCCESS);
*out_pcount = pcount;
lockgroup_info_t *lockgroup_info;
vm_offset_t lockgroup_info_addr;
vm_size_t lockgroup_info_size;
+ vm_size_t lockgroup_info_vmsize;
lck_grp_t *lck_grp;
unsigned int i;
- vm_size_t used;
vm_map_copy_t copy;
kern_return_t kr;
lck_mtx_lock(&lck_grp_lock);
- lockgroup_info_size = round_page(lck_grp_cnt * sizeof *lockgroup_info);
+ lockgroup_info_size = lck_grp_cnt * sizeof(*lockgroup_info);
+ lockgroup_info_vmsize = round_page(lockgroup_info_size);
kr = kmem_alloc_pageable(ipc_kernel_map,
- &lockgroup_info_addr, lockgroup_info_size, VM_KERN_MEMORY_IPC);
+ &lockgroup_info_addr, lockgroup_info_vmsize, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS) {
lck_mtx_unlock(&lck_grp_lock);
return(kr);
*lockgroup_infoCntp = lck_grp_cnt;
lck_mtx_unlock(&lck_grp_lock);
- used = (*lockgroup_infoCntp) * sizeof *lockgroup_info;
-
- if (used != lockgroup_info_size)
- bzero((char *) lockgroup_info, lockgroup_info_size - used);
+ if (lockgroup_info_size != lockgroup_info_vmsize)
+ bzero((char *)lockgroup_info, lockgroup_info_vmsize - lockgroup_info_size);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)lockgroup_info_addr,
(vm_map_size_t)lockgroup_info_size, TRUE, ©);
bzero((char *) (names_addr + used), names_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
- (vm_map_size_t)names_size, TRUE, ©);
+ (vm_map_size_t)used, TRUE, ©);
assert(kr == KERN_SUCCESS);
*namesp = (mach_zone_name_t *) copy;
bzero((char *) (info_addr + used), info_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
- (vm_map_size_t)info_size, TRUE, ©);
+ (vm_map_size_t)used, TRUE, ©);
assert(kr == KERN_SUCCESS);
*infop = (task_zone_info_t *) copy;
mach_memory_info_t *memory_info;
vm_offset_t memory_info_addr;
vm_size_t memory_info_size;
+ vm_size_t memory_info_vmsize;
unsigned int num_sites;
unsigned int max_zones, i;
if (memoryInfop && memoryInfoCntp)
{
num_sites = VM_KERN_MEMORY_COUNT + VM_KERN_COUNTER_COUNT;
- memory_info_size = round_page(num_sites * sizeof *info);
+ memory_info_size = num_sites * sizeof(*info);
+ memory_info_vmsize = round_page(memory_info_size);
kr = kmem_alloc_pageable(ipc_kernel_map,
- &memory_info_addr, memory_info_size, VM_KERN_MEMORY_IPC);
+ &memory_info_addr, memory_info_vmsize, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS) {
kmem_free(ipc_kernel_map,
names_addr, names_size);
return kr;
}
- kr = vm_map_wire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_size,
+ kr = vm_map_wire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), FALSE);
assert(kr == KERN_SUCCESS);
memory_info = (mach_memory_info_t *) memory_info_addr;
vm_page_diagnose(memory_info, num_sites);
- kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_size, FALSE);
+ kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
assert(kr == KERN_SUCCESS);
}
bzero((char *) (names_addr + used), names_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
- (vm_map_size_t)names_size, TRUE, ©);
+ (vm_map_size_t)used, TRUE, ©);
assert(kr == KERN_SUCCESS);
*namesp = (mach_zone_name_t *) copy;
bzero((char *) (info_addr + used), info_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
- (vm_map_size_t)info_size, TRUE, ©);
+ (vm_map_size_t)used, TRUE, ©);
assert(kr == KERN_SUCCESS);
*infop = (mach_zone_info_t *) copy;
bzero((char *) (names_addr + used), names_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)names_addr,
- (vm_map_size_t)names_size, TRUE, ©);
+ (vm_map_size_t)used, TRUE, ©);
assert(kr == KERN_SUCCESS);
*namesp = (zone_name_t *) copy;
bzero((char *) (info_addr + used), info_size - used);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)info_addr,
- (vm_map_size_t)info_size, TRUE, ©);
+ (vm_map_size_t)used, TRUE, ©);
assert(kr == KERN_SUCCESS);
*infop = (zone_info_t *) copy;
#define CPUFAMILY_INTEL_IVYBRIDGE 0x1f65e835
#define CPUFAMILY_INTEL_HASWELL 0x10b282dc
#define CPUFAMILY_INTEL_BROADWELL 0x582ed09c
+#define CPUFAMILY_INTEL_SKYLAKE 0x37fc219f
#define CPUFAMILY_ARM_9 0xe73283ae
#define CPUFAMILY_ARM_11 0x8ff620d8
#define CPUFAMILY_ARM_XSCALE 0x53b005f5
#define CPUFAMILY_ARM_SWIFT 0x1e2d6381
#define CPUFAMILY_ARM_CYCLONE 0x37a09642
#define CPUFAMILY_ARM_TYPHOON 0x2c91a47e
+#define CPUFAMILY_ARM_TWISTER 0x92fb37c8
/* The following synonyms are deprecated: */
#define CPUFAMILY_INTEL_6_14 CPUFAMILY_INTEL_YONAH
return KERN_FAILURE;
#else
vm_map_copy_t copy;
- vm_offset_t addr; /* memory for OOL data */
+ vm_offset_t addr = 0; /* memory for OOL data */
vm_size_t size; /* size of the memory */
unsigned int room; /* room for this many objects */
unsigned int used; /* actually this many objects */
if (size != 0)
kmem_free(ipc_kernel_map, addr, size);
} else {
- vm_size_t size_used =
- vm_map_round_page(used * sizeof(vm_info_object_t),
+ vm_size_t size_used = (used * sizeof(vm_info_object_t));
+ vm_size_t vmsize_used = vm_map_round_page(size_used,
VM_MAP_PAGE_MASK(ipc_kernel_map));
kr = vm_map_unwire(
(vm_map_size_t)size_used, TRUE, ©);
assert(kr == KERN_SUCCESS);
- if (size != size_used)
+ if (size != vmsize_used)
kmem_free(ipc_kernel_map,
- addr + size_used, size - size_used);
+ addr + vmsize_used, size - vmsize_used);
}
*regionp = region;
return KERN_FAILURE;
#else
vm_map_copy_t copy;
- vm_offset_t addr; /* memory for OOL data */
+ vm_offset_t addr = 0; /* memory for OOL data */
vm_size_t size; /* size of the memory */
unsigned int room; /* room for this many objects */
unsigned int used; /* actually this many objects */
if (size != 0)
kmem_free(ipc_kernel_map, addr, size);
} else {
- vm_size_t size_used =
- vm_map_round_page(used * sizeof(vm_info_object_t),
+ vm_size_t size_used = (used * sizeof(vm_info_object_t));
+ vm_size_t vmsize_used = vm_map_round_page(size_used,
VM_MAP_PAGE_MASK(ipc_kernel_map));
kr = vm_map_unwire(
(vm_map_size_t)size_used, TRUE, ©);
assert(kr == KERN_SUCCESS);
- if (size != size_used)
+ if (size != vmsize_used)
kmem_free(ipc_kernel_map,
- addr + size_used, size - size_used);
+ addr + vmsize_used, size - vmsize_used);
}
*regionp = region;
vm_size_t size, size_used;
unsigned int actual, space;
page_address_array_t list;
- vm_offset_t addr;
+ vm_offset_t addr = 0;
if (map == VM_MAP_NULL)
return (KERN_INVALID_ARGUMENT);
(void) kmem_free(ipc_kernel_map, addr, size);
}
else {
+ vm_size_t vmsize_used;
*pages_count = actual;
- size_used = vm_map_round_page(actual * sizeof(vm_offset_t),
- VM_MAP_PAGE_MASK(ipc_kernel_map));
+ size_used = (actual * sizeof(vm_offset_t));
+ vmsize_used = vm_map_round_page(size_used,
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
(void) vm_map_wire(
ipc_kernel_map,
vm_map_trunc_page(addr,
(vm_map_size_t)size_used,
TRUE,
(vm_map_copy_t *)pages);
- if (size_used != size) {
+ if (vmsize_used != size) {
(void) kmem_free(ipc_kernel_map,
- addr + size_used,
- size - size_used);
+ addr + vmsize_used,
+ size - vmsize_used);
}
}
#if !MACH_VM_DEBUG
return KERN_FAILURE;
#else
- vm_offset_t addr;
+ vm_offset_t addr = 0;
vm_size_t size = 0;
hash_info_bucket_t *info;
unsigned int potential, actual;
size = vm_map_round_page(actual * sizeof *info,
VM_MAP_PAGE_MASK(ipc_kernel_map));
- kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size, VM_KERN_MEMORY_IPC);
+ kr = vm_allocate(ipc_kernel_map, &addr, size,
+ VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
*countp = 0;
} else {
vm_map_copy_t copy;
- vm_size_t used;
+ vm_size_t used, vmused;
- used = vm_map_round_page(actual * sizeof *info,
- VM_MAP_PAGE_MASK(ipc_kernel_map));
+ used = (actual * sizeof(*info));
+ vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));
- if (used != size)
- kmem_free(ipc_kernel_map, addr + used, size - used);
+ if (vmused != size)
+ kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
(vm_map_size_t)used, TRUE, ©);
assert(page->busy);
vm_object_lock_assert_exclusive(page->object);
- if (!cs_validation) {
- return;
- }
-
if (page->wpmapped && !page->cs_tainted) {
/*
* This page was mapped for "write" access sometime in the
vm_object_lock_assert_held(page->object);
- if (!cs_validation) {
- return;
- }
-
if (page->wpmapped && !page->cs_tainted) {
vm_object_lock_assert_exclusive(page->object);
assert(page->busy);
vm_object_lock_assert_exclusive(page->object);
- if (!cs_validation) {
- return;
- }
-
object = page->object;
assert(object->code_signed);
offset = page->offset;
}
}
+
+boolean_t
+vm_map_copy_validate_size(
+ vm_map_t dst_map,
+ vm_map_copy_t copy,
+ vm_map_size_t size)
+{
+ if (copy == VM_MAP_COPY_NULL)
+ return FALSE;
+ switch (copy->type) {
+ case VM_MAP_COPY_OBJECT:
+ case VM_MAP_COPY_KERNEL_BUFFER:
+ if (size == copy->size)
+ return TRUE;
+ break;
+ case VM_MAP_COPY_ENTRY_LIST:
+ /*
+ * potential page-size rounding prevents us from exactly
+ * validating this flavor of vm_map_copy, but we can at least
+ * assert that it's within a range.
+ */
+ if (copy->size >= size &&
+ copy->size <= vm_map_round_page(size,
+ VM_MAP_PAGE_MASK(dst_map)))
+ return TRUE;
+ break;
+ default:
+ break;
+ }
+ return FALSE;
+}
+
+
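+/*
+ * Expected caller pattern (a sketch mirroring the ipc_kmsg OOL descriptor
+ * change above): validate the copy against the descriptor size before
+ * handing it to vm_map_copyout().
+ *
+ *	if (!vm_map_copy_validate_size(map, copy, (vm_map_size_t)size))
+ *		panic("Inconsistent OOL/copyout size");
+ *	kr = vm_map_copyout(map, &rcv_addr, copy);
+ */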
/*
* Routine: vm_map_copyout
*
vm_object_t object;
+ if (entry->is_sub_map) {
+ return FALSE;
+ }
+
switch (VME_ALIAS(entry)) {
case VM_MEMORY_MALLOC:
case VM_MEMORY_MALLOC_SMALL:
start_offset += VME_OFFSET(entry);
end_offset += VME_OFFSET(entry);
+ assert(!entry->is_sub_map);
object = VME_OBJECT(entry);
if (object != VM_OBJECT_NULL) {
vm_object_lock(object);
start_offset += VME_OFFSET(entry);
end_offset += VME_OFFSET(entry);
+ assert(!entry->is_sub_map);
object = VME_OBJECT(entry);
if (object == VM_OBJECT_NULL)
continue;
vm_map_copy_t copy,
boolean_t interruptible);
+/* Returns TRUE if the size of the vm_map_copy equals the size parameter, FALSE otherwise */
+extern boolean_t vm_map_copy_validate_size(
+ vm_map_t dst_map,
+ vm_map_copy_t copy,
+ vm_map_size_t size);
+
/* Place a copy into a map */
extern kern_return_t vm_map_copyout(
vm_map_t dst_map,