{
switch (uap->type) {
case 1: {
- uint64_t counts[2] = {};
- uint64_t thread_counts[MT_CORE_NFIXED];
+ uint64_t counts[2] = { 0 };
+ uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };
mt_cur_thread_fixed_counts(thread_counts);
mt_sysctl SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg2)
- uint64_t start[MT_CORE_NFIXED], end[MT_CORE_NFIXED];
- uint64_t counts[2] = {};
+ uint64_t start[MT_CORE_NFIXED] = { 0 }, end[MT_CORE_NFIXED] = { 0 };
+ uint64_t counts[2] = { 0 };
switch ((enum mt_sysctl)arg1) {
case MT_SUPPORTED:
msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO,
"fpdrain", NULL);
}
-
+ if (fp->f_flags & FP_WAITEVENT) {
+ (void)waitevent_close(p, fp);
+ }
closef_locked(fp, fp->f_fglob, p);
fileproc_free(fp);
goto out;
}
kq_ids = kalloc(bufsize);
- assert(kq_ids != NULL);
+ if (!kq_ids) {
+ err = ENOMEM;
+ goto out;
+ }
+ bzero(kq_ids, bufsize);
}
kqhash_lock(p);
if (kq_ids) {
size_t copysize;
- if (os_mul_overflow(sizeof(kqueue_id_t), min(ubuflen, nkqueues), &copysize)) {
+ if (os_mul_overflow(sizeof(kqueue_id_t), min(buflen, nkqueues), &copysize)) {
err = ERANGE;
goto out;
}
proc_fdunlock(p);
return(error);
}
+ if (fp->f_type != DTYPE_PSXSEM) {
+ proc_fdunlock(p);
+ return(EBADF);
+ }
procfdtbl_markclosefd(p, fd);
fileproc_drain(p, fp);
fdrelse(p, fd);
vm_map_offset_t user_start_addr;
vm_map_size_t map_size, mapped_size;
int prot = uap->prot;
+ int max_prot = VM_PROT_DEFAULT;
int flags = uap->flags;
vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
vm_object_offset_t map_pos;
return(EINVAL);
- if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {
- return(EPERM);
+ /* Can't allow write permission if the shm_open() didn't */
+ if (!(fp->f_flag & FWRITE)) {
+ if (prot & VM_PROT_WRITE) {
+ return EPERM;
+ }
+ max_prot &= ~VM_PROT_WRITE;
}
if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
file_pos - map_pos,
docow,
prot,
- VM_PROT_DEFAULT,
+ max_prot,
VM_INHERIT_SHARE);
if (kret != KERN_SUCCESS)
goto out;
}
if (IS_64BIT_PROCESS(p)) {
- struct user_shmid_ds shmid_ds;
+ struct user_shmid_ds shmid_ds = {};
memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));
/* Clear kernel reserved pointer before copying to user space */
return ENOMEM;
}
- MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK);
+ MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK | M_ZERO);
if (shmsegs == NULL) {
return ENOMEM;
}
if (return_route_rule_id) {
*return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
}
+ if (return_skip_policy_id) {
+ *return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
+ }
}
lck_rw_done(&necp_kernel_policy_lock);
goto done;
if (fd_cb->local_address != NULL) {
/* socket is bound. */
error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_LOCAL_ADDR,
- sizeof(struct sockaddr_storage), fd_cb->local_address);
+ fd_cb->local_address->sa_len, fd_cb->local_address);
if (error) {
goto done;
}
}
}
socket_unlock(fd_cb->so, 0);
-
- if (data != NULL) {
- mbuf_freem(data);
- }
}
FDUNLOCK(fd_cb);
}
uuid_string_t uuidstr;
int err;
+ mpte_unlock(mpte);
err = necp_client_assert_bb_radio_manager(mpsotomppcb(mp_so)->necp_client_uuid,
TRUE);
+ mpte_lock(mpte);
if (err == 0)
mpte->mpte_triggered_cell = 1;
# FILESYS_DEV = [ FILESYS_BASE fdesc ]
# FILESYS_DEBUG = [ FILESYS_BASE fdesc ]
# NFS = [ nfsclient nfsserver ]
-# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto packet_mangler if_fake ]
+# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake ]
+# NETWORKING_RELEASE = [ NETWORKING ]
+# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ]
+# NETWORKING_DEBUG = [ NETWORKING_DEV ]
# VPN = [ ipsec flow_divert necp content_filter ]
# PF = [ pf ]
# MULTIPATH = [ multipath mptcp ]
# VM_DEV = [ VM_BASE dynamic_codesigning ]
# VM_DEBUG = [ VM_BASE dynamic_codesigning ]
# SECURITY = [ config_macf kernel_integrity ]
-# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
-# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ]
-# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ]
+# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
+# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ]
+# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ]
# KASAN = [ DEVELOPMENT ]
#
######################################################################
# FILESYS_DEV = [ FILESYS_BASE fdesc ]
# FILESYS_DEBUG = [ FILESYS_BASE fdesc ]
# NFS = [ nfsclient nfsserver ]
-# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto packet_mangler if_fake ]
+# NETWORKING = [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake ]
+# NETWORKING_RELEASE = [ NETWORKING ]
+# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ]
+# NETWORKING_DEBUG = [ NETWORKING_DEV ]
# VPN = [ ipsec flow_divert necp content_filter ]
# PF = [ pf ]
# MULTIPATH = [ multipath mptcp ]
# VM_DEV = [ VM_BASE dynamic_codesigning ]
# VM_DEBUG = [ VM_BASE dynamic_codesigning ]
# SECURITY = [ config_macf kernel_integrity ]
-# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
-# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ]
-# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ]
+# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
+# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV LIBKERN_DEV PERF_DBG_DEV MACH_DEV SCHED_DEV VM_DEV SECURITY ]
+# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG SKYWALK_DEBUG NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG_DEBUG MACH_DEBUG SCHED_DEBUG VM_DEBUG SECURITY ]
# KASAN = [ DEVELOPMENT ]
#
######################################################################
# FILESYS_DEV = [ FILESYS_BASE ]
# FILESYS_DEBUG = [ FILESYS_BASE ]
# NFS = [ nfsclient nfsserver ]
-# NETWORKING = [ inet inet6 ipv6send tcpdrop_synfin bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge ipcomp_zlib MULTIPATH packet_mangler if_fake ]
+# NETWORKING = [ inet inet6 ipv6send tcpdrop_synfin bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge ipcomp_zlib MULTIPATH if_fake ]
+# NETWORKING_RELEASE = [ NETWORKING ]
+# NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ]
+# NETWORKING_DEBUG = [ NETWORKING_DEV ]
# VPN = [ ipsec flow_divert necp content_filter ]
# PF = [ pf pflog ]
# MULTIPATH = [ multipath mptcp ]
# SCHED_DEBUG = [ SCHED_BASE config_sched_grrr config_sched_proto ]
# VM = [ vm_pressure_events memorystatus dynamic_codesigning config_code_decryption encrypted_swap phantom_cache config_background_queue]
# SECURITY = [ config_macf config_audit config_csr ]
-# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG MACH_RELEASE SCHED_RELEASE VM SECURITY ]
-# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING PF VPN IOKIT_DEV LIBKERN_DEV PERF_DBG MACH_DEV SCHED_DEV VM SECURITY ]
-# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING PF VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG MACH_DEBUG SCHED_DEBUG VM SECURITY ]
+# RELEASE = [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING_RELEASE PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG MACH_RELEASE SCHED_RELEASE VM SECURITY ]
+# DEVELOPMENT = [ KERNEL_DEV BSD_DEV FILESYS_DEV NFS SKYWALK_DEV NETWORKING_DEV PF VPN IOKIT_DEV LIBKERN_DEV PERF_DBG MACH_DEV SCHED_DEV VM SECURITY ]
+# DEBUG = [ KERNEL_DEBUG BSD_DEBUG FILESYS_DEBUG NFS SKYWALK_DEBUG NETWORKING_DEBUG PF VPN IOKIT_DEBUG LIBKERN_DEBUG PERF_DBG MACH_DEBUG SCHED_DEBUG VM SECURITY ]
# KASAN = [ DEVELOPMENT ]
#
######################################################################
_mach_memory_entry_page_op
_mach_memory_entry_range_op
_mach_msg_rpc_from_kernel_proper
+_mach_msg_destroy_from_kernel_proper
_mach_vm_region
_max_mem
_mem_size
static const OSSymbol *sleepSupportedPEFunction = NULL;
static const OSSymbol *sleepMessagePEFunction = NULL;
+static const OSSymbol * gIOPMPSExternalConnectedKey;
+static const OSSymbol * gIOPMPSExternalChargeCapableKey;
+static const OSSymbol * gIOPMPSBatteryInstalledKey;
+static const OSSymbol * gIOPMPSIsChargingKey;
+static const OSSymbol * gIOPMPSAtWarnLevelKey;
+static const OSSymbol * gIOPMPSAtCriticalLevelKey;
+static const OSSymbol * gIOPMPSCurrentCapacityKey;
+static const OSSymbol * gIOPMPSMaxCapacityKey;
+static const OSSymbol * gIOPMPSDesignCapacityKey;
+static const OSSymbol * gIOPMPSTimeRemainingKey;
+static const OSSymbol * gIOPMPSAmperageKey;
+static const OSSymbol * gIOPMPSVoltageKey;
+static const OSSymbol * gIOPMPSCycleCountKey;
+static const OSSymbol * gIOPMPSMaxErrKey;
+static const OSSymbol * gIOPMPSAdapterInfoKey;
+static const OSSymbol * gIOPMPSLocationKey;
+static const OSSymbol * gIOPMPSErrorConditionKey;
+static const OSSymbol * gIOPMPSManufacturerKey;
+static const OSSymbol * gIOPMPSManufactureDateKey;
+static const OSSymbol * gIOPMPSModelKey;
+static const OSSymbol * gIOPMPSSerialKey;
+static const OSSymbol * gIOPMPSLegacyBatteryInfoKey;
+static const OSSymbol * gIOPMPSBatteryHealthKey;
+static const OSSymbol * gIOPMPSHealthConfidenceKey;
+static const OSSymbol * gIOPMPSCapacityEstimatedKey;
+static const OSSymbol * gIOPMPSBatteryChargeStatusKey;
+static const OSSymbol * gIOPMPSBatteryTemperatureKey;
+static const OSSymbol * gIOPMPSAdapterDetailsKey;
+static const OSSymbol * gIOPMPSChargerConfigurationKey;
+static const OSSymbol * gIOPMPSAdapterDetailsIDKey;
+static const OSSymbol * gIOPMPSAdapterDetailsWattsKey;
+static const OSSymbol * gIOPMPSAdapterDetailsRevisionKey;
+static const OSSymbol * gIOPMPSAdapterDetailsSerialNumberKey;
+static const OSSymbol * gIOPMPSAdapterDetailsFamilyKey;
+static const OSSymbol * gIOPMPSAdapterDetailsAmperageKey;
+static const OSSymbol * gIOPMPSAdapterDetailsDescriptionKey;
+static const OSSymbol * gIOPMPSAdapterDetailsPMUConfigurationKey;
+static const OSSymbol * gIOPMPSAdapterDetailsSourceIDKey;
+static const OSSymbol * gIOPMPSAdapterDetailsErrorFlagsKey;
+static const OSSymbol * gIOPMPSAdapterDetailsSharedSourceKey;
+static const OSSymbol * gIOPMPSAdapterDetailsCloakedKey;
+static const OSSymbol * gIOPMPSInvalidWakeSecondsKey;
+static const OSSymbol * gIOPMPSPostChargeWaitSecondsKey;
+static const OSSymbol * gIOPMPSPostDishargeWaitSecondsKey;
+
#define kIOSleepSupportedKey "IOSleepSupported"
#define kIOPMSystemCapabilitiesKey "System Capabilities"
void IORootParent::initialize( void )
{
+
+ gIOPMPSExternalConnectedKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalConnectedKey);
+ gIOPMPSExternalChargeCapableKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalChargeCapableKey);
+ gIOPMPSBatteryInstalledKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryInstalledKey);
+ gIOPMPSIsChargingKey = OSSymbol::withCStringNoCopy(kIOPMPSIsChargingKey);
+ gIOPMPSAtWarnLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtWarnLevelKey);
+ gIOPMPSAtCriticalLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtCriticalLevelKey);
+ gIOPMPSCurrentCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSCurrentCapacityKey);
+ gIOPMPSMaxCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxCapacityKey);
+ gIOPMPSDesignCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSDesignCapacityKey);
+ gIOPMPSTimeRemainingKey = OSSymbol::withCStringNoCopy(kIOPMPSTimeRemainingKey);
+ gIOPMPSAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAmperageKey);
+ gIOPMPSVoltageKey = OSSymbol::withCStringNoCopy(kIOPMPSVoltageKey);
+ gIOPMPSCycleCountKey = OSSymbol::withCStringNoCopy(kIOPMPSCycleCountKey);
+ gIOPMPSMaxErrKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxErrKey);
+ gIOPMPSAdapterInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterInfoKey);
+ gIOPMPSLocationKey = OSSymbol::withCStringNoCopy(kIOPMPSLocationKey);
+ gIOPMPSErrorConditionKey = OSSymbol::withCStringNoCopy(kIOPMPSErrorConditionKey);
+ gIOPMPSManufacturerKey = OSSymbol::withCStringNoCopy(kIOPMPSManufacturerKey);
+ gIOPMPSManufactureDateKey = OSSymbol::withCStringNoCopy(kIOPMPSManufactureDateKey);
+ gIOPMPSModelKey = OSSymbol::withCStringNoCopy(kIOPMPSModelKey);
+ gIOPMPSSerialKey = OSSymbol::withCStringNoCopy(kIOPMPSSerialKey);
+ gIOPMPSLegacyBatteryInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSLegacyBatteryInfoKey);
+ gIOPMPSBatteryHealthKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryHealthKey);
+ gIOPMPSHealthConfidenceKey = OSSymbol::withCStringNoCopy(kIOPMPSHealthConfidenceKey);
+ gIOPMPSCapacityEstimatedKey = OSSymbol::withCStringNoCopy(kIOPMPSCapacityEstimatedKey);
+ gIOPMPSBatteryChargeStatusKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryChargeStatusKey);
+ gIOPMPSBatteryTemperatureKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryTemperatureKey);
+ gIOPMPSAdapterDetailsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsKey);
+ gIOPMPSChargerConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSChargerConfigurationKey);
+ gIOPMPSAdapterDetailsIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsIDKey);
+ gIOPMPSAdapterDetailsWattsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsWattsKey);
+ gIOPMPSAdapterDetailsRevisionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsRevisionKey);
+ gIOPMPSAdapterDetailsSerialNumberKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSerialNumberKey);
+ gIOPMPSAdapterDetailsFamilyKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsFamilyKey);
+ gIOPMPSAdapterDetailsAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsAmperageKey);
+ gIOPMPSAdapterDetailsDescriptionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsDescriptionKey);
+ gIOPMPSAdapterDetailsPMUConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsPMUConfigurationKey);
+ gIOPMPSAdapterDetailsSourceIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSourceIDKey);
+ gIOPMPSAdapterDetailsErrorFlagsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsErrorFlagsKey);
+ gIOPMPSAdapterDetailsSharedSourceKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSharedSourceKey);
+ gIOPMPSAdapterDetailsCloakedKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsCloakedKey);
+ gIOPMPSInvalidWakeSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSInvalidWakeSecondsKey);
+ gIOPMPSPostChargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostChargeWaitSecondsKey);
+ gIOPMPSPostDishargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostDishargeWaitSecondsKey);
}
bool IORootParent::start( IOService * nub )
}
}
+void ml_delay_on_yield(void) {}
+
boolean_t ml_thread_is64bit(thread_t thread)
{
return (thread_is_64bit_addr(thread));
boolean_t ml_delay_should_spin(uint64_t interval);
+void ml_delay_on_yield(void);
+
uint32_t ml_get_decrementer(void);
#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
uint64_t MutexSpin;
boolean_t is_clock_configured = FALSE;
+uint32_t yield_delay_us = 42; /* Less than cpu_idle_latency to ensure ml_delay_should_spin is true */
+
extern int mach_assert;
extern volatile uint32_t debug_enabled;
default_bg_preemption_rate = boot_arg;
}
+ PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof (yield_delay_us));
+
machine_conf();
/*
}
}
+void
+ml_delay_on_yield(void)
+{
+}
+
boolean_t ml_thread_is64bit(thread_t thread) {
return (thread_is_64bit_addr(thread));
}
static kern_return_t atm_listener_delete(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard);
static void atm_link_get_reference(atm_link_object_t link_object) __unused;
static void atm_link_dealloc(atm_link_object_t link_object);
-kern_return_t atm_invoke_collection(atm_value_t atm_value, mach_atm_subaid_t subaid, uint32_t flags);
-kern_return_t atm_send_user_notification(aid_t aid, mach_atm_subaid_t sub_aid, mach_port_t *buffers_array, uint64_t *sizes_array, mach_msg_type_number_t count, uint32_t flags);
kern_return_t
atm_release_value(
uint32_t aid_array_count = 0;
atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL;
task_t task;
- uint32_t collection_flags = ATM_ACTION_LOGFAIL;
kern_return_t kr = KERN_SUCCESS;
atm_guard_t guard;
switch (command) {
case ATM_ACTION_COLLECT:
- collection_flags = ATM_ACTION_COLLECT;
/* Fall through */
- case ATM_ACTION_LOGFAIL: {
- mach_atm_subaid_t sub_aid = 0;
-
- if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE))
- return KERN_NOT_SUPPORTED;
-
- /* find the first non-default atm_value */
- for (i = 0; i < value_count; i++) {
- atm_value = HANDLE_TO_ATM_VALUE(values[i]);
- if (atm_value != VAM_DEFAULT_VALUE)
- break;
- }
-
- /* if we are not able to find any atm values
- * in stack then this call was made in error
- */
- if (atm_value == NULL) {
- return KERN_FAILURE;
- }
-
- if (in_content_size >= sizeof(mach_atm_subaid_t)) {
- sub_aid = *(mach_atm_subaid_t *)(void *)in_content;
- }
-
- *out_content_size = 0;
- kr = atm_invoke_collection(atm_value, sub_aid, collection_flags);
- break;
- }
+ case ATM_ACTION_LOGFAIL:
+ return KERN_NOT_SUPPORTED;
case ATM_FIND_MIN_SUB_AID:
if ((in_content_size/sizeof(aid_t)) > (*out_content_size/sizeof(mach_atm_subaid_t)))
}
-/*
- * Routine: atm_invoke_collection
- * Purpose: Sends a notification with array of memory buffer.
- * Note: may block till user daemon responds.
- */
-kern_return_t
-atm_invoke_collection(
- atm_value_t atm_value,
- mach_atm_subaid_t sub_aid,
- uint32_t flags)
-{
- aid_t aid = atm_value->aid;
- kern_return_t kr = KERN_SUCCESS;
- uint32_t array_count = 0, i = 0, j = 0, requestor_index = 0;
- uint64_t *sizes_array = NULL;
- atm_link_object_t link_object = NULL;
- mach_port_t *mem_array = NULL;
- boolean_t need_swap_first = FALSE;
- atm_task_descriptor_t requesting_descriptor = current_task()->atm_context;
-
- lck_mtx_lock(&atm_value->listener_lock);
- array_count = atm_value->listener_count;
- lck_mtx_unlock(&atm_value->listener_lock);
-
- if (array_count == 0){
- return KERN_SUCCESS;
- }
-
- mem_array = kalloc(sizeof(mach_port_t) * array_count);
- if (mem_array == NULL){
- return KERN_NO_SPACE;
- }
-
- sizes_array = kalloc(sizeof(uint64_t) * array_count);
- if (sizes_array == NULL){
- kfree(mem_array, sizeof(mach_port_t) * array_count);
- return KERN_NO_SPACE;
- }
-
- lck_mtx_lock(&atm_value->listener_lock);
- queue_iterate(&atm_value->listeners, link_object, atm_link_object_t, listeners_element) {
- if (i >= array_count){
- break;
- }
-
- if (!need_swap_first && requesting_descriptor == link_object->descriptor){
- assert(requesting_descriptor != NULL);
- requestor_index = i;
- need_swap_first = TRUE;
- }
-
- sizes_array[i] = link_object->descriptor->trace_buffer_size;
- mem_array[i] = ipc_port_copy_send(link_object->descriptor->trace_buffer);
- if (!IPC_PORT_VALID(mem_array[i])){
- mem_array[i] = NULL;
- }
- i++;
- }
- lck_mtx_unlock(&atm_value->listener_lock);
-
- /*
- * Swap the position of requesting task ahead, diagnostics can
- * process its buffers the first.
- */
- if (need_swap_first && requestor_index != 0){
- assert(requestor_index < array_count);
- mach_port_t tmp_port = mem_array[0];
- uint64_t tmp_size = sizes_array[0];
- mem_array[0] = mem_array[requestor_index];
- sizes_array[0] = sizes_array[requestor_index];
- mem_array[requestor_index] = tmp_port;
- sizes_array[requestor_index] = tmp_size;
- }
-
- if (i > 0) {
- kr = atm_send_user_notification(aid, sub_aid, mem_array, sizes_array, i, flags);
- }
-
- for (j = 0; j < i; j++) {
- if (mem_array[j] != NULL)
- ipc_port_release_send(mem_array[j]);
- }
-
- kfree(mem_array, sizeof(mach_port_t) * array_count);
- kfree(sizes_array, sizeof(uint64_t) * array_count);
-
- return kr;
-}
-
-/*
- * Routine: atm_send_user_notification
- * Purpose: Make an upcall to user space daemon if its listening for atm notifications.
- * Returns: KERN_SUCCESS for successful delivery.
- * KERN_FAILURE if port is dead or NULL.
- */
-kern_return_t
-atm_send_user_notification(
- aid_t aid,
- mach_atm_subaid_t sub_aid,
- mach_port_t *buffers_array,
- uint64_t *sizes_array,
- mach_msg_type_number_t count,
- uint32_t flags)
-{
- mach_port_t user_port;
- int error;
- thread_t th = current_thread();
- kern_return_t kr;
-
- error = host_get_atm_notification_port(host_priv_self(), &user_port);
- if ((error != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
- return KERN_FAILURE;
- }
-
- thread_set_honor_qlimit(th);
- kr = atm_collect_trace_info(user_port, aid, sub_aid, flags, buffers_array, count, sizes_array, count);
- thread_clear_honor_qlimit(th);
-
- if (kr != KERN_SUCCESS) {
- ipc_port_release_send(user_port);
-
- if (kr == MACH_SEND_TIMED_OUT) {
- kr = KERN_SUCCESS;
- }
- }
-
- return kr;
-}
-
-/*
- * Routine: atm_send_proc_inspect_notification
- * Purpose: Make an upcall to user space daemon if its listening for trace
- * notifications for per process inspection.
- * Returns: KERN_SUCCESS for successful delivery.
- * KERN_FAILURE if port is dead or NULL.
- */
-
-kern_return_t
-atm_send_proc_inspect_notification(
- task_t task,
- int32_t traced_pid,
- uint64_t traced_uniqueid)
-{
- mach_port_t user_port = MACH_PORT_NULL;
- mach_port_t memory_port = MACH_PORT_NULL;
- kern_return_t kr;
- atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL;
- uint64_t buffer_size = 0;
- int error;
- thread_t th = current_thread();
-
- if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE))
- return KERN_NOT_SUPPORTED;
-
- /* look for the requested memory in target task */
- if (!task)
- return KERN_INVALID_TASK;
-
- task_lock(task);
- if (task->atm_context){
- task_descriptor = task->atm_context;
- atm_descriptor_get_reference(task_descriptor);
- }
- task_unlock(task);
-
- if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL){
- return KERN_FAILURE;
- }
-
- memory_port = ipc_port_copy_send(task_descriptor->trace_buffer);
- buffer_size = task_descriptor->trace_buffer_size;
- atm_task_descriptor_dealloc(task_descriptor);
-
- /* get the communication port */
- error = host_get_atm_notification_port(host_priv_self(), &user_port);
- if ((error != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
- ipc_port_release_send(memory_port);
- return KERN_FAILURE;
- }
-
- thread_set_honor_qlimit(th);
- kr = atm_inspect_process_buffer(user_port, traced_pid, traced_uniqueid, buffer_size, memory_port);
- thread_clear_honor_qlimit(th);
-
- if (kr != KERN_SUCCESS) {
- ipc_port_release_send(user_port);
-
- if (kr == MACH_SEND_TIMED_OUT) {
- kr = KERN_SUCCESS;
- }
- }
-
- ipc_port_release_send(memory_port);
- return kr;
-}
-
/*
* Routine: atm_value_alloc_init
* Purpose: Allocates an atm value struct and initialize it.
return (interval < delay_spin_threshold) ? TRUE : FALSE;
}
+void ml_delay_on_yield(void) {}
+
/*
* This is called from the machine-independent layer
* to perform machine-dependent info updates. Defer to cpu_thread_init().
boolean_t ml_delay_should_spin(uint64_t interval);
+extern void ml_delay_on_yield(void);
+
vm_offset_t
ml_static_ptovirt(
vm_offset_t);
ipc_space_t space)
{
ipc_object_t dest;
- ipc_object_t reply;
+ mach_port_t reply;
mach_msg_type_name_t dest_type;
mach_msg_type_name_t reply_type;
- mach_port_name_t dest_name, reply_name;
+ mach_port_name_t dest_name;
dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
- reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
+ reply = kmsg->ikm_header->msgh_local_port;
dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);
dest_name = MACH_PORT_DEAD;
}
- reply_name = CAST_MACH_PORT_TO_NAME(reply);
-
kmsg->ikm_header->msgh_bits =
(MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
MACH_MSGH_BITS(reply_type, dest_type));
kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
- kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
+ kmsg->ikm_header->msgh_remote_port = reply;
}
#if IKM_SUPPORT_LEGACY
assert(task != TASK_NULL);
- uint64_t counts[MT_CORE_NFIXED] = {};
+ uint64_t counts[MT_CORE_NFIXED] = { 0 };
mt_fixed_task_counts(task, counts);
#ifdef MT_CORE_INSTRS
ri->ri_instructions = counts[MT_CORE_INSTRS];
panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
{
va_list panic_str_args;
- __assert_only uint32_t th_ref_count;
assert_thread_magic(thread);
- th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire);
- assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);
/* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
thread_reference(thread);
return mr;
}
+/*
+ * Routine: mach_msg_destroy_from_kernel_proper
+ * Purpose:
+ * mach_msg_destroy_from_kernel_proper is used to destroy
+ * an unwanted/unexpected reply message from a MIG
+ * kernel-specific user-side stub. It is like ipc_kmsg_destroy(),
+ * except we no longer have the kmsg - just the contents.
+ */
+void
+mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg)
+{
+ mach_msg_bits_t mbits = msg->msgh_bits;
+ ipc_object_t object;
+
+ object = (ipc_object_t) msg->msgh_remote_port;
+ if (IO_VALID(object)) {
+ ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+ }
+
+ /*
+ * The destination (now in msg->msgh_local_port via
+ * ipc_kmsg_copyout_to_kernel) has been consumed with
+ * ipc_object_copyout_dest.
+ */
+
+ /* MIG kernel users don't receive vouchers */
+ assert(!MACH_PORT_VALID(msg->msgh_voucher_port));
+
+ /* For simple messages, we're done */
+ if ((mbits & MACH_MSGH_BITS_COMPLEX) == 0) {
+ return;
+ }
+
+ /* Discard descriptor contents */
+ mach_msg_body_t *body = (mach_msg_body_t *)(msg + 1);
+ mach_msg_descriptor_t *daddr = (mach_msg_descriptor_t *)(body + 1);
+ mach_msg_size_t i;
+
+ for (i = 0 ; i < body->msgh_descriptor_count; i++, daddr++ ) {
+ switch (daddr->type.type) {
+
+ case MACH_MSG_PORT_DESCRIPTOR: {
+ mach_msg_port_descriptor_t *dsc = &daddr->port;
+ if (IO_VALID((ipc_object_t) dsc->name)) {
+ ipc_object_destroy((ipc_object_t) dsc->name, dsc->disposition);
+ }
+ break;
+ }
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_DESCRIPTOR : {
+ mach_msg_ool_descriptor_t *dsc =
+ (mach_msg_ool_descriptor_t *)&daddr->out_of_line;
+
+ if (dsc->size > 0) {
+ vm_map_copy_discard((vm_map_copy_t) dsc->address);
+ } else {
+ assert(dsc->address == (void *) 0);
+ }
+ break;
+ }
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR : {
+ ipc_object_t *objects;
+ mach_msg_type_number_t j;
+ mach_msg_ool_ports_descriptor_t *dsc;
+
+ dsc = (mach_msg_ool_ports_descriptor_t *)&daddr->ool_ports;
+ objects = (ipc_object_t *) dsc->address;
+
+ if (dsc->count == 0) {
+ break;
+ }
+ assert(objects != 0);
+ for (j = 0; j < dsc->count; j++) {
+ object = objects[j];
+ if (IO_VALID(object)) {
+ ipc_object_destroy(object, dsc->disposition);
+ }
+ }
+ kfree(dsc->address, (vm_size_t) dsc->count * sizeof(mach_port_t));
+ break;
+ }
+ default :
+ break;
+ }
+ }
+}
/************** These Calls are set up for kernel-loaded tasks/threads **************/
#define mach_msg_rpc_from_kernel mach_msg_rpc_from_kernel_proper
+extern void
+mach_msg_destroy_from_kernel_proper(
+ mach_msg_header_t *msg);
+
+#define mach_msg_destroy_from_kernel mach_msg_destroy_from_kernel_proper
+
#ifdef XNU_KERNEL_PRIVATE
extern mach_msg_return_t mach_msg_send_from_kernel_with_options_legacy(
mach_msg_header_t *msg,
assert(task != TASK_NULL);
assert(counts_out != NULL);
- uint64_t counts[MT_CORE_NFIXED];
if (!mt_core_supported) {
- for (int i = 0; i < MT_CORE_NFIXED; i++) {
- counts[i] = 0;
- }
- return 0;
+ memset(counts_out, 0, sizeof(*counts_out) * MT_CORE_NFIXED);
+ return 1;
}
task_lock(task);
+ uint64_t counts[MT_CORE_NFIXED] = { 0 };
for (int i = 0; i < MT_CORE_NFIXED; i++) {
counts[i] = task->task_monotonic.mtk_counts[i];
}
- uint64_t thread_counts[MT_CORE_NFIXED] = {};
+ uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };
thread_t thread = THREAD_NULL;
thread_t curthread = current_thread();
bool needs_current = false;
mt_cur_thread_fixed_counts(uint64_t *counts)
{
if (!mt_core_supported) {
- for (int i = 0; i < MT_CORE_NFIXED; i++) {
- counts[i] = 0;
- }
+ memset(counts, 0, sizeof(*counts) * MT_CORE_NFIXED);
return;
}
ledger->l_template = template;
ledger->l_id = ledger_cnt++;
- ledger->l_refs = 1;
+ os_ref_init(&ledger->l_refs, NULL);
ledger->l_size = (int32_t)cnt;
template_lock(template);
{
if (!LEDGER_VALID(ledger))
return (KERN_INVALID_ARGUMENT);
- OSIncrementAtomic(&ledger->l_refs);
+ os_ref_retain(&ledger->l_refs);
return (KERN_SUCCESS);
}
if (!LEDGER_VALID(ledger))
return (-1);
- return (ledger->l_refs);
+ return os_ref_get_count(&ledger->l_refs);
}
/*
kern_return_t
ledger_dereference(ledger_t ledger)
{
- int v;
-
if (!LEDGER_VALID(ledger))
return (KERN_INVALID_ARGUMENT);
- v = OSDecrementAtomic(&ledger->l_refs);
- ASSERT(v >= 1);
-
- /* Just released the last reference. Free it. */
- if (v == 1) {
+ if (os_ref_release(&ledger->l_refs) == 0) {
if (ledger->l_template->lt_zone) {
zfree(ledger->l_template->lt_zone, ledger);
} else {
#include <mach/mach_types.h> /* ledger_t */
+#ifdef MACH_KERNEL_PRIVATE
+#include <os/refcnt.h>
+#endif /* MACH_KERNEL_PRIVATE */
+
#define LEDGER_INFO 0
#define LEDGER_ENTRY_INFO 1
#define LEDGER_TEMPLATE_INFO 2
struct ledger {
uint64_t l_id;
- int32_t l_refs;
+ struct os_refcnt l_refs;
int32_t l_size;
struct ledger_template *l_template;
struct ledger_entry l_entries[0] __attribute__((aligned(8)));
result = SCHED(thread_should_yield)(myprocessor, current_thread());
enable_preemption();
+ ml_delay_on_yield();
+
thread_syscall_return(result);
/*NOTREACHED*/
}
result = SCHED(thread_should_yield)(myprocessor, current_thread());
mp_enable_preemption();
+ ml_delay_on_yield();
+
thread_syscall_return(result);
/*NOTREACHED*/
}
if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS)
thread_depress_abort(self);
+ ml_delay_on_yield();
+
thread_syscall_return(KERN_SUCCESS);
/*NOTREACHED*/
}
thread_deallocate(thread);
}
- if (wait_option)
+ if (wait_option) {
assert_wait_timeout((event_t)assert_wait_timeout, interruptible, option_time, scale_factor);
- else if (depress_option)
- thread_depress_ms(option_time);
+ } else {
+ disable_preemption();
+ bool should_yield = SCHED(thread_should_yield)(current_processor(), current_thread());
+ enable_preemption();
+
+ if (should_yield == false) {
+ /* Early-return if yielding to the scheduler will not be beneficial */
+ return KERN_SUCCESS;
+ }
+
+ if (depress_option) {
+ thread_depress_ms(option_time);
+ }
+ }
thread_yield_with_continuation(thread_switch_continue, (void *)(intptr_t)option);
__builtin_unreachable();
void * bt[TASK_REF_BTDEPTH];
int numsaved = 0;
+ os_ref_retain(&task->ref_count);
+
numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
-
- (void)hw_atomic_add(&(task)->ref_count, 1);
btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR,
bt, numsaved);
}
-uint32_t
+os_ref_count_t
task_deallocate_internal(task_t task)
{
void * bt[TASK_REF_BTDEPTH];
int numsaved = 0;
numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
-
btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR,
bt, numsaved);
- return hw_atomic_sub(&(task)->ref_count, 1);
+
+ return os_ref_release(&task->ref_count);
}
#endif /* TASK_REFERENCE_LEAK_DEBUG */
task_ledger_template = t;
}
+os_refgrp_decl(static, task_refgrp, "task", NULL);
+
kern_return_t
task_create_internal(
task_t parent_task,
return(KERN_RESOURCE_SHORTAGE);
/* one ref for just being alive; one for our caller */
- new_task->ref_count = 2;
+ os_ref_init_count(&new_task->ref_count, &task_refgrp, 2);
/* allocate with active entries */
assert(task_ledger_template != NULL);
task_t task)
{
ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
- uint32_t refs;
+ os_ref_count_t refs;
if (task == TASK_NULL)
return;
refs = task_deallocate_internal(task);
#if IMPORTANCE_INHERITANCE
- if (refs > 1)
- return;
-
- atomic_load_explicit(&task->ref_count, memory_order_acquire);
-
if (refs == 1) {
/*
* If last ref potentially comes from the task's importance,
* disconnect it. But more task refs may be added before
* that completes, so wait for the reference to go to zero
- * naturually (it may happen on a recursive task_deallocate()
+ * naturally (it may happen on a recursive task_deallocate()
* from the ipc_importance_disconnect_task() call).
*/
if (IIT_NULL != task->task_imp_base)
ipc_importance_disconnect_task(task);
return;
}
-#else
- if (refs > 0)
- return;
-
- atomic_load_explicit(&task->ref_count, memory_order_acquire);
-
#endif /* IMPORTANCE_INHERITANCE */
+ if (refs > 0) {
+ return;
+ }
+
lck_mtx_lock(&tasks_threads_lock);
queue_remove(&terminated_tasks, task, task_t, tasks);
terminated_tasks_count--;
kern_return_t
task_send_trace_memory(
- task_t target_task,
+ __unused task_t target_task,
__unused uint32_t pid,
__unused uint64_t uniqueid)
{
- kern_return_t kr = KERN_INVALID_ARGUMENT;
- if (target_task == TASK_NULL)
- return (KERN_INVALID_ARGUMENT);
-
-#if CONFIG_ATM
- kr = atm_send_proc_inspect_notification(target_task,
- pid,
- uniqueid);
-
-#endif
- return (kr);
+ return KERN_INVALID_ARGUMENT;
}
+
/*
* This routine was added, pretty much exclusively, for registering the
* RPC glue vector for in-kernel short circuited tasks. Rather than
switch (flavor) {
case TASK_INSPECT_BASIC_COUNTS: {
struct task_inspect_basic_counts *bc;
- uint64_t task_counts[MT_CORE_NFIXED];
+ uint64_t task_counts[MT_CORE_NFIXED] = { 0 };
if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
kr = KERN_INVALID_ARGUMENT;
#include <kern/thread.h>
#include <mach/coalition.h>
#include <stdatomic.h>
+#include <os/refcnt.h>
#ifdef CONFIG_ATM
#include <atm/atm_internal.h>
struct task {
/* Synchronization/destruction information */
decl_lck_mtx_data(,lock) /* Task's lock */
- _Atomic uint32_t ref_count; /* Number of references to me */
+ os_refcnt_t ref_count; /* Number of references to me */
boolean_t active; /* Task has not been terminated */
boolean_t halting; /* Task is being halted */
/* Virtual timers */
#if TASK_REFERENCE_LEAK_DEBUG
extern void task_reference_internal(task_t task);
-extern uint32_t task_deallocate_internal(task_t task);
+extern os_ref_count_t task_deallocate_internal(task_t task);
#else
-#define task_reference_internal(task) \
- (void)atomic_fetch_add_explicit(&(task)->ref_count, 1, memory_order_relaxed)
-
-#define task_deallocate_internal(task) \
- (atomic_fetch_sub_explicit(&task->ref_count, 1, memory_order_release) - 1)
+#define task_reference_internal(task) os_ref_retain(&(task)->ref_count)
+#define task_deallocate_internal(task) os_ref_release(&(task)->ref_count)
#endif
#define task_reference(task) \
*/
#define MINIMUM_CPULIMIT_INTERVAL_MS 1
+os_refgrp_decl(static, thread_refgrp, "thread", NULL);
+
void
thread_bootstrap(void)
{
thread_template.runq = PROCESSOR_NULL;
- thread_template.ref_count = 2;
-
thread_template.reason = AST_NONE;
thread_template.at_safe_point = FALSE;
thread_template.wait_event = NO_EVENT64;
thread_template.th_work_interval = NULL;
init_thread = thread_template;
+
machine_set_current_thread(&init_thread);
}
/*NOTREACHED*/
}
-/* Drop a thread refcount safely without triggering a zfree */
-void
-thread_deallocate_safe(thread_t thread)
+static bool
+thread_ref_release(thread_t thread)
{
- __assert_only uint32_t th_ref_count;
-
- if (thread == THREAD_NULL)
- return;
+ if (thread == THREAD_NULL) {
+ return false;
+ }
assert_thread_magic(thread);
- if (__probable(atomic_fetch_sub_explicit(&thread->ref_count, 1,
- memory_order_release) - 1 > 0)) {
- return;
- }
-
- th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire);
- assert(th_ref_count == 0);
-
- /* enqueue the thread for thread deallocate daemon to call thread_deallocate_complete */
- thread_deallocate_enqueue(thread);
+ return os_ref_release(&thread->ref_count) == 0;
}
+/* Drop a thread refcount safely without triggering a zfree */
void
-thread_deallocate(
- thread_t thread)
+thread_deallocate_safe(thread_t thread)
{
- __assert_only uint32_t th_ref_count;
-
- if (thread == THREAD_NULL)
- return;
-
- assert_thread_magic(thread);
-
- if (__probable(atomic_fetch_sub_explicit(&thread->ref_count, 1,
- memory_order_release) - 1 > 0)) {
- return;
- }
-
- th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire);
- assert(th_ref_count == 0);
+ if (__improbable(thread_ref_release(thread))) {
+ /* enqueue the thread for thread deallocate daemon to call thread_deallocate_complete */
+ thread_deallocate_enqueue(thread);
+ }
+}
- thread_deallocate_complete(thread);
+void
+thread_deallocate(thread_t thread)
+{
+ if (__improbable(thread_ref_release(thread))) {
+ thread_deallocate_complete(thread);
+ }
}
void
assert_thread_magic(thread);
- assert(thread->ref_count == 0);
+ assert(os_ref_get_count(&thread->ref_count) == 0);
assert(thread_owned_workloops_count(thread) == 0);
if (new_thread != first_thread)
*new_thread = thread_template;
+ os_ref_init_count(&new_thread->ref_count, &thread_refgrp, 2);
+
#ifdef MACH_BSD
new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
if (new_thread->uthread == NULL) {
#include <kern/waitq.h>
#include <san/kasan.h>
+#include <os/refcnt.h>
#include <ipc/ipc_kmsg.h>
int16_t promotions; /* level of promotion */
int iotier_override; /* atomic operations to set, cleared on ret to user */
- _Atomic uint32_t ref_count; /* number of references to me */
+ struct os_refcnt ref_count; /* number of references to me */
lck_mtx_t* waiting_for_mutex; /* points to mutex we're waiting for until we acquire it */
extern void thread_daemon_init(void);
#define thread_reference_internal(thread) \
- (void)atomic_fetch_add_explicit(&(thread)->ref_count, 1, memory_order_relaxed)
+ os_ref_retain(&(thread)->ref_count);
#define thread_reference(thread) \
MACRO_BEGIN \
#define TC_LOOKUP_RESULT_MASK 0xffL
#define TC_LOOKUP_FOUND 1
-#define TC_LOOKUP_FALLBACK 2
+// #define TC_LOOKUP_FALLBACK 2 /* obsolete with removal of legacy static trust caches */
#ifdef XNU_KERNEL_PRIVATE
} __attribute__((__packed__));
-// Legacy Static Trust Cache
-
-/* This is the old legacy trust cache baked into the AMFI kext.
- * We support it for a transitionary period, until external trust caches
- * are fully established, and the AMFI trust cache can be removed. */
-
-struct legacy_trust_cache_bucket {
- uint16_t count;
- uint16_t offset;
-} __attribute__((__packed__));
-
-#define LEGACY_TRUST_CACHE_ENTRY_LEN (CS_CDHASH_LEN-1)
-#define LEGACY_TRUST_CACHE_BUCKET_COUNT (256)
-
-typedef uint8_t pmap_cs_legacy_stc_entry[CS_CDHASH_LEN-1]; // bucketized with first byte
-
void trust_cache_init(void);
uint32_t lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
}
pid_t pid = task_pid(task);
/* drop the ref taken by port_name_to_task */
- task_deallocate_internal(task);
+ (void)task_deallocate_internal(task);
return pid;
}
{
compressor_pager_t pager;
compressor_slot_t *slot_p;
+#if __arm__ || __arm64__
unsigned int prev_wimg = VM_WIMG_DEFAULT;
boolean_t set_cache_attr = FALSE;
+#endif
compressor_pager_stats.put++;
*compressed_count_delta_p -= 1;
}
+#if __arm__ || __arm64__
/*
* cacheability should be set to the system default (usually writeback)
* during compressor operations, both for performance and correctness,
pmap_set_cache_attributes(ppnum, prev_wimg);
return KERN_RESOURCE_SHORTAGE;
}
+#else
+ if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+#endif
*compressed_count_delta_p += 1;
return KERN_SUCCESS;
if (kr == KERN_SUCCESS) {
int retval;
+#if __arm__ || __arm64__
unsigned int prev_wimg = VM_WIMG_DEFAULT;
boolean_t set_cache_attr = FALSE;
set_cache_attr = TRUE;
pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT);
}
-
+#endif
/* get the page from the compressor */
retval = vm_compressor_get(ppnum, slot_p, flags);
if (retval == -1)
assert((flags & C_DONT_BLOCK));
kr = KERN_FAILURE;
}
+#if __arm__ || __arm64__
if (set_cache_attr)
pmap_set_cache_attributes(ppnum, prev_wimg);
+#endif
}
if (kr == KERN_SUCCESS) {
record_memory_pressure();
}
+extern boolean_t hibernation_vmqueues_inspection;
void
vm_page_balance_inactive(int max_to_move)
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+ if (hibernation_vmqueues_inspection == TRUE) {
+ /*
+ * It is likely that the hibernation code path is
+ * dealing with these very queues as we are about
+ * to move pages around in/from them and completely
+ * change the linkage of the pages.
+ *
+ * And so we skip the rebalancing of these queues.
+ */
+ return;
+ }
vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
vm_page_inactive_count +
vm_page_speculative_count);
int speculative_steal_index = 0;
struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
+boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
+ * Updated and checked behind the vm_page_queues_lock. */
__private_extern__ void vm_page_init_lck_grp(void);
lck_mtx_lock(&vm_page_queue_free_lock);
}
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+ hibernation_vmqueues_inspection = TRUE;
+
m = (vm_page_t) hibernate_gobble_queue;
while (m)
{
if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+ hibernation_vmqueues_inspection = FALSE;
+
#if MACH_ASSERT || DEBUG
if (!preflight)
{
dispatch_main();
}
+static void *
+spin_thread_self_counts(__unused void *arg)
+{
+ extern int thread_selfcounts(int, void *, size_t);
+ uint64_t counts[2] = { 0 };
+ while (true) {
+ (void)thread_selfcounts(1, &counts, sizeof(counts));
+ }
+}
+
+static void *
+spin_task_inspect(__unused void *arg)
+{
+ task_t task = mach_task_self();
+ uint64_t counts[2] = { 0 };
+ unsigned int size = 0;
+ while (true) {
+ size = (unsigned int)sizeof(counts);
+ (void)task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
+ (task_inspect_info_t)&counts[0], &size);
+ /*
+ * Not realistic for a process to see count values with the high bit
+ * set, but kernel pointers will be that high.
+ */
+ T_QUIET; T_ASSERT_LT(counts[0], 1ULL << 63,
+ "check for valid count entry 1");
+ T_QUIET; T_ASSERT_LT(counts[1], 1ULL << 63,
+ "check for valid count entry 2");
+ }
+}
+
+T_DECL(core_fixed_stack_leak_race,
+ "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS")
+{
+ T_SETUPBEGIN;
+
+ int ncpus = 0;
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.logicalcpu_max", &ncpus,
+ &(size_t){ sizeof(ncpus) }, NULL, 0), "get number of CPUs");
+ T_QUIET; T_ASSERT_GT(ncpus, 0, "got non-zero number of CPUs");
+ pthread_t *threads = calloc((unsigned long)ncpus, sizeof(*threads));
+
+ T_QUIET; T_ASSERT_NOTNULL(threads, "allocated space for threads");
+
+ T_LOG("creating %d threads to attempt to race around task counts", ncpus);
+ /*
+ * Have half the threads hammering thread_self_counts and the other half
+ * trying to get an error to occur inside TASK_INSPECT_BASIC_COUNTS and see
+ * uninitialized kernel memory.
+ */
+ for (int i = 0; i < ncpus; i++) {
+ T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL,
+ i & 1 ? spin_task_inspect : spin_thread_self_counts, NULL),
+ NULL);
+ }
+
+ T_SETUPEND;
+
+ sleep(10);
+ T_PASS("ending test after 10 seconds");
+}
+
static void
perf_sysctl_deltas(const char *sysctl_name, const char *stat_name)
{