git.saurik.com Git - apple/xnu.git/commitdiff
xnu-4903.231.4.tar.gz macos-10142 v4903.231.4
authorApple <opensource@apple.com>
Tue, 26 Mar 2019 22:12:40 +0000 (22:12 +0000)
committerApple <opensource@apple.com>
Tue, 26 Mar 2019 22:12:40 +0000 (22:12 +0000)
39 files changed:
bsd/dev/monotonic.c
bsd/kern/kern_descrip.c
bsd/kern/kern_event.c
bsd/kern/posix_sem.c
bsd/kern/posix_shm.c
bsd/kern/sysv_shm.c
bsd/net/necp.c
bsd/netinet/flow_divert.c
bsd/netinet/mptcp_subr.c
config/MASTER.arm64
config/MASTER.arm64.bcm2837
config/MASTER.x86_64
config/Unsupported.exports
iokit/Kernel/IOPMrootDomain.cpp
osfmk/arm/machine_routines.c
osfmk/arm/machine_routines.h
osfmk/arm64/machine_routines.c
osfmk/atm/atm.c
osfmk/i386/machine_routines.c
osfmk/i386/machine_routines.h
osfmk/ipc/ipc_kmsg.c
osfmk/kern/bsd_kern.c
osfmk/kern/debug.c
osfmk/kern/ipc_mig.c
osfmk/kern/ipc_mig.h
osfmk/kern/kern_monotonic.c
osfmk/kern/ledger.c
osfmk/kern/ledger.h
osfmk/kern/syscall_subr.c
osfmk/kern/task.c
osfmk/kern/task.h
osfmk/kern/thread.c
osfmk/kern/thread.h
osfmk/kern/trustcache.h
osfmk/kperf/kperf.c
osfmk/vm/vm_compressor_pager.c
osfmk/vm/vm_pageout.c
osfmk/vm/vm_resident.c
tests/monotonic_core.c

index 8a5d276e30ecfec558743174e4755326c088c52b..375a0ca4e670defdd3f35620cdee1dd41eac2641 100644 (file)
@@ -305,8 +305,8 @@ int thread_selfcounts(__unused struct proc *p,
 {
        switch (uap->type) {
        case 1: {
-               uint64_t counts[2] = {};
-               uint64_t thread_counts[MT_CORE_NFIXED];
+               uint64_t counts[2] = { 0 };
+               uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };
 
                mt_cur_thread_fixed_counts(thread_counts);
 
@@ -338,8 +338,8 @@ static int
 mt_sysctl SYSCTL_HANDLER_ARGS
 {
 #pragma unused(oidp, arg2)
-       uint64_t start[MT_CORE_NFIXED], end[MT_CORE_NFIXED];
-       uint64_t counts[2] = {};
+       uint64_t start[MT_CORE_NFIXED] = { 0 }, end[MT_CORE_NFIXED] = { 0 };
+       uint64_t counts[2] = { 0 };
 
        switch ((enum mt_sysctl)arg1) {
        case MT_SUPPORTED:
index efc8616f7fbeab25555248667f3dbd98771dd625..d906cf4405fb9d9235fe026666cfaf5088148fe7 100644 (file)
@@ -4749,7 +4749,9 @@ fdexec(proc_t p, short flags, int self_exec)
                                msleep(&p->p_fpdrainwait, &p->p_fdmlock, PRIBIO,
                                    "fpdrain", NULL);
                        }
-
+                       if (fp->f_flags & FP_WAITEVENT) {
+                               (void)waitevent_close(p, fp);
+                       }
                        closef_locked(fp, fp->f_fglob, p);
 
                        fileproc_free(fp);
index d8096ba03cf605d5d97eb3f9eabadd8fc0cd9784..df25f3112263dfa04f443cbb54913a9c5bcc78f0 100644 (file)
@@ -8595,7 +8595,11 @@ kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
                        goto out;
                }
                kq_ids = kalloc(bufsize);
-               assert(kq_ids != NULL);
+               if (!kq_ids) {
+                       err = ENOMEM;
+                       goto out;
+               }
+               bzero(kq_ids, bufsize);
        }
 
        kqhash_lock(p);
@@ -8618,7 +8622,7 @@ kevent_copyout_proc_dynkqids(void *proc, user_addr_t ubuf, uint32_t ubufsize,
 
        if (kq_ids) {
                size_t copysize;
-               if (os_mul_overflow(sizeof(kqueue_id_t), min(ubuflen, nkqueues), &copysize)) {
+               if (os_mul_overflow(sizeof(kqueue_id_t), min(buflen, nkqueues), &copysize)) {
                        err = ERANGE;
                        goto out;
                }
index 9dc882363251acc510b282af4cfd551f7804d441..08a9a0c0441f3170f4366ca5769e1885ab83998e 100644 (file)
@@ -819,6 +819,10 @@ sem_close(proc_t p, struct sem_close_args *uap, __unused int32_t *retval)
                proc_fdunlock(p);
                return(error);
        }
+       if (fp->f_type != DTYPE_PSXSEM) {
+               proc_fdunlock(p);
+               return(EBADF);
+       }
        procfdtbl_markclosefd(p, fd);
        fileproc_drain(p, fp);
        fdrelse(p, fd);
index 1fe7878123144c4056e1609702b8f42c17dd97f1..d220614dbe1ff7ac66c749f96ed7980ac8b73011 100644 (file)
@@ -865,6 +865,7 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct
        vm_map_offset_t user_start_addr;
        vm_map_size_t   map_size, mapped_size;
        int prot = uap->prot;
+       int max_prot = VM_PROT_DEFAULT;
        int flags = uap->flags;
        vm_object_offset_t file_pos = (vm_object_offset_t)uap->pos;
        vm_object_offset_t map_pos;
@@ -887,8 +888,12 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct
                return(EINVAL);
 
 
-       if ((prot & PROT_WRITE) && ((fp->f_flag & FWRITE) == 0)) {
-               return(EPERM);
+       /* Can't allow write permission if the shm_open() didn't */
+       if (!(fp->f_flag & FWRITE)) {
+               if (prot & VM_PROT_WRITE) {
+                       return EPERM;
+               }
+               max_prot &= ~VM_PROT_WRITE;
        }
 
        if (((pnode = (struct pshmnode *)fp->f_data)) == PSHMNODE_NULL )
@@ -1000,7 +1005,7 @@ pshm_mmap(__unused proc_t p, struct mmap_args *uap, user_addr_t *retval, struct
                        file_pos - map_pos,
                        docow,
                        prot,
-                       VM_PROT_DEFAULT, 
+                       max_prot,
                        VM_INHERIT_SHARE);
                if (kret != KERN_SUCCESS) 
                        goto out;
index 2fb45c9965378f7cc5cb5ef6af8989c9dc16a710..9a240bbf98fbc37b2d15d396f84555f89504ac19 100644 (file)
@@ -618,7 +618,7 @@ shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
                }
 
                if (IS_64BIT_PROCESS(p)) {
-                       struct user_shmid_ds shmid_ds;
+                       struct user_shmid_ds shmid_ds = {};
                        memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));
                        
                        /* Clear kernel reserved pointer before copying to user space */
@@ -1046,7 +1046,7 @@ shminit(void)
                        return ENOMEM;
                }
 
-               MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK);
+               MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK | M_ZERO);
                if (shmsegs == NULL) {
                        return ENOMEM;
                }
index 17d00fd4f7352850f63ff7d177034e0ca3e95704..3fd05ae1ba0eba416a44f686bf2c96b4e747b552 100644 (file)
@@ -8826,6 +8826,9 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr
                        if (return_route_rule_id) {
                                *return_route_rule_id = inp->inp_policyresult.results.route_rule_id;
                        }
+                       if (return_skip_policy_id) {
+                               *return_skip_policy_id = inp->inp_policyresult.skip_policy_id;
+                       }
                }
                lck_rw_done(&necp_kernel_policy_lock);
                goto done;
index b73a8617273fc4f0435c661be102de1d8282f549..b6d668440758a0bc7f7d0eb10b938b71704bffa4 100644 (file)
@@ -1186,7 +1186,7 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr
        if (fd_cb->local_address != NULL) {
                /* socket is bound. */
                error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_LOCAL_ADDR,
-                                                     sizeof(struct sockaddr_storage), fd_cb->local_address);
+                                                     fd_cb->local_address->sa_len, fd_cb->local_address);
                if (error) {
                        goto done;
                }
@@ -2017,10 +2017,6 @@ flow_divert_handle_data(struct flow_divert_pcb *fd_cb, mbuf_t packet, size_t off
                        }
                }
                socket_unlock(fd_cb->so, 0);
-
-               if (data != NULL) {
-                       mbuf_freem(data);
-               }
        }
        FDUNLOCK(fd_cb);
 }
index 1606cdb62839934c239cc6de0c2e6d05846d0ffd..c7b154796cb0db5084d5177850b9ca9c5b62b93e 100644 (file)
@@ -774,8 +774,10 @@ mptcp_trigger_cell_bringup(struct mptses *mpte)
                uuid_string_t uuidstr;
                int err;
 
+               mpte_unlock(mpte);
                err = necp_client_assert_bb_radio_manager(mpsotomppcb(mp_so)->necp_client_uuid,
                                                          TRUE);
+               mpte_lock(mpte);
 
                if (err == 0)
                        mpte->mpte_triggered_cell = 1;
index 32189c5fdf2882b3b648e0a91c0c9c7f51307453..a90486db3cfccaff2884a686971cfb9fad4aaed0 100644 (file)
 #  FILESYS_DEV =    [ FILESYS_BASE fdesc ]
 #  FILESYS_DEBUG =  [ FILESYS_BASE fdesc ]
 #  NFS =            [ nfsclient nfsserver ]
-#  NETWORKING =     [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto packet_mangler if_fake ]
+#  NETWORKING =     [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake ]
+#  NETWORKING_RELEASE = [ NETWORKING ]
+#  NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ]
+#  NETWORKING_DEBUG = [ NETWORKING_DEV ]
 #  VPN =            [ ipsec flow_divert necp content_filter ]
 #  PF =             [ pf ]
 #  MULTIPATH =      [ multipath mptcp ]
@@ -58,9 +61,9 @@
 #  VM_DEV =         [ VM_BASE dynamic_codesigning ]
 #  VM_DEBUG =       [ VM_BASE dynamic_codesigning ]
 #  SECURITY =       [ config_macf kernel_integrity ]
-#  RELEASE =        [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
-#  DEVELOPMENT =    [ KERNEL_DEV     BSD_DEV     FILESYS_DEV NFS SKYWALK_DEV     NETWORKING PF MULTIPATH VPN IOKIT_DEV     LIBKERN_DEV     PERF_DBG_DEV     MACH_DEV     SCHED_DEV     VM_DEV     SECURITY ]
-#  DEBUG =          [ KERNEL_DEBUG   BSD_DEBUG   FILESYS_DEBUG   SKYWALK_DEBUG   NETWORKING PF MULTIPATH VPN IOKIT_DEBUG   LIBKERN_DEBUG   PERF_DBG_DEBUG   MACH_DEBUG   SCHED_DEBUG   VM_DEBUG   SECURITY ]
+#  RELEASE =        [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
+#  DEVELOPMENT =    [ KERNEL_DEV     BSD_DEV     FILESYS_DEV NFS SKYWALK_DEV     NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV     LIBKERN_DEV     PERF_DBG_DEV     MACH_DEV     SCHED_DEV     VM_DEV     SECURITY ]
+#  DEBUG =          [ KERNEL_DEBUG   BSD_DEBUG   FILESYS_DEBUG   SKYWALK_DEBUG   NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG   LIBKERN_DEBUG   PERF_DBG_DEBUG   MACH_DEBUG   SCHED_DEBUG   VM_DEBUG   SECURITY ]
 #  KASAN =          [ DEVELOPMENT ]
 #
 ######################################################################
index 65dd4861b665a5b81553a0b1015f85af87979926..f6c35b27b975fb04fd51755ec71dbf8869e62fe7 100644 (file)
 #  FILESYS_DEV =    [ FILESYS_BASE fdesc ]
 #  FILESYS_DEBUG =  [ FILESYS_BASE fdesc ]
 #  NFS =            [ nfsclient nfsserver ]
-#  NETWORKING =     [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto packet_mangler if_fake ]
+#  NETWORKING =     [ inet tcpdrop_synfin bpfilter inet6 ipv6send if_bridge traffic_mgt dummynet ah_all_crypto if_fake ]
+#  NETWORKING_RELEASE = [ NETWORKING ]
+#  NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ]
+#  NETWORKING_DEBUG = [ NETWORKING_DEV ]
 #  VPN =            [ ipsec flow_divert necp content_filter ]
 #  PF =             [ pf ]
 #  MULTIPATH =      [ multipath mptcp ]
@@ -58,9 +61,9 @@
 #  VM_DEV =         [ VM_BASE dynamic_codesigning ]
 #  VM_DEBUG =       [ VM_BASE dynamic_codesigning ]
 #  SECURITY =       [ config_macf kernel_integrity ]
-#  RELEASE =        [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
-#  DEVELOPMENT =    [ KERNEL_DEV     BSD_DEV     FILESYS_DEV NFS SKYWALK_DEV     NETWORKING PF MULTIPATH VPN IOKIT_DEV     LIBKERN_DEV     PERF_DBG_DEV     MACH_DEV     SCHED_DEV     VM_DEV     SECURITY ]
-#  DEBUG =          [ KERNEL_DEBUG   BSD_DEBUG   FILESYS_DEBUG   SKYWALK_DEBUG   NETWORKING PF MULTIPATH VPN IOKIT_DEBUG   LIBKERN_DEBUG   PERF_DBG_DEBUG   MACH_DEBUG   SCHED_DEBUG   VM_DEBUG   SECURITY ]
+#  RELEASE =        [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE SKYWALK_RELEASE NETWORKING_RELEASE PF MULTIPATH VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG_RELEASE MACH_RELEASE SCHED_RELEASE VM_RELEASE SECURITY ]
+#  DEVELOPMENT =    [ KERNEL_DEV     BSD_DEV     FILESYS_DEV NFS SKYWALK_DEV     NETWORKING_DEV PF MULTIPATH VPN IOKIT_DEV     LIBKERN_DEV     PERF_DBG_DEV     MACH_DEV     SCHED_DEV     VM_DEV     SECURITY ]
+#  DEBUG =          [ KERNEL_DEBUG   BSD_DEBUG   FILESYS_DEBUG   SKYWALK_DEBUG   NETWORKING_DEBUG PF MULTIPATH VPN IOKIT_DEBUG   LIBKERN_DEBUG   PERF_DBG_DEBUG   MACH_DEBUG   SCHED_DEBUG   VM_DEBUG   SECURITY ]
 #  KASAN =          [ DEVELOPMENT ]
 #
 ######################################################################
index b14a338d615cfa9cc1c80450527bf7e39b0287cb..7f128cc616fa81915a2eecaa458d74c8ed0824b1 100644 (file)
 #  FILESYS_DEV =    [ FILESYS_BASE ]
 #  FILESYS_DEBUG =  [ FILESYS_BASE ]
 #  NFS =            [ nfsclient nfsserver ]
-#  NETWORKING =     [ inet inet6 ipv6send tcpdrop_synfin bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge ipcomp_zlib MULTIPATH packet_mangler if_fake ]
+#  NETWORKING =     [ inet inet6 ipv6send tcpdrop_synfin bpfilter dummynet traffic_mgt sendfile ah_all_crypto bond vlan gif stf ifnet_input_chk config_mbuf_jumbo if_bridge ipcomp_zlib MULTIPATH if_fake ]
+#  NETWORKING_RELEASE = [ NETWORKING ]
+#  NETWORKING_DEV = [ NETWORKING_RELEASE packet_mangler ]
+#  NETWORKING_DEBUG = [ NETWORKING_DEV ]
 #  VPN =            [ ipsec flow_divert necp content_filter ]
 #  PF =             [ pf pflog ]
 #  MULTIPATH =      [ multipath mptcp ]
@@ -52,9 +55,9 @@
 #  SCHED_DEBUG =    [ SCHED_BASE config_sched_grrr config_sched_proto ]
 #  VM =             [ vm_pressure_events memorystatus dynamic_codesigning config_code_decryption encrypted_swap phantom_cache config_background_queue]
 #  SECURITY =       [ config_macf config_audit config_csr ]
-#  RELEASE =        [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG MACH_RELEASE SCHED_RELEASE VM SECURITY ]
-#  DEVELOPMENT =    [ KERNEL_DEV     BSD_DEV     FILESYS_DEV     NFS SKYWALK_DEV     NETWORKING PF VPN IOKIT_DEV     LIBKERN_DEV     PERF_DBG MACH_DEV     SCHED_DEV     VM SECURITY ]
-#  DEBUG =          [ KERNEL_DEBUG   BSD_DEBUG   FILESYS_DEBUG   NFS SKYWALK_DEBUG   NETWORKING PF VPN IOKIT_DEBUG   LIBKERN_DEBUG   PERF_DBG MACH_DEBUG   SCHED_DEBUG   VM SECURITY ]
+#  RELEASE =        [ KERNEL_RELEASE BSD_RELEASE FILESYS_RELEASE NFS SKYWALK_RELEASE NETWORKING_RELEASE PF VPN IOKIT_RELEASE LIBKERN_RELEASE PERF_DBG MACH_RELEASE SCHED_RELEASE VM SECURITY ]
+#  DEVELOPMENT =    [ KERNEL_DEV     BSD_DEV     FILESYS_DEV     NFS SKYWALK_DEV     NETWORKING_DEV PF VPN IOKIT_DEV     LIBKERN_DEV     PERF_DBG MACH_DEV     SCHED_DEV     VM SECURITY ]
+#  DEBUG =          [ KERNEL_DEBUG   BSD_DEBUG   FILESYS_DEBUG   NFS SKYWALK_DEBUG   NETWORKING_DEBUG PF VPN IOKIT_DEBUG   LIBKERN_DEBUG   PERF_DBG MACH_DEBUG   SCHED_DEBUG   VM SECURITY ]
 #  KASAN =          [ DEVELOPMENT ]
 #
 ######################################################################
index 7938c5da99033121e2fbc3ee65bf1a5608a3ae9a..8853251cad2d9387a154221ba65a998349fcdc84 100644 (file)
@@ -119,6 +119,7 @@ _mach_make_memory_entry_64
 _mach_memory_entry_page_op
 _mach_memory_entry_range_op
 _mach_msg_rpc_from_kernel_proper
+_mach_msg_destroy_from_kernel_proper
 _mach_vm_region
 _max_mem
 _mem_size
index 40a11e05eb85688a369bb73d1865ce7cae39f4cc..85669860fc75624237e331270f2d44fc48f108a4 100644 (file)
@@ -196,6 +196,51 @@ static void pmEventTimeStamp(uint64_t *recordTS);
 static const OSSymbol *sleepSupportedPEFunction = NULL;
 static const OSSymbol *sleepMessagePEFunction   = NULL;
 
+static const OSSymbol *                gIOPMPSExternalConnectedKey;
+static const OSSymbol *                gIOPMPSExternalChargeCapableKey;
+static const OSSymbol *                gIOPMPSBatteryInstalledKey;
+static const OSSymbol *                gIOPMPSIsChargingKey;
+static const OSSymbol *                gIOPMPSAtWarnLevelKey;
+static const OSSymbol *                gIOPMPSAtCriticalLevelKey;
+static const OSSymbol *                gIOPMPSCurrentCapacityKey;
+static const OSSymbol *                gIOPMPSMaxCapacityKey;
+static const OSSymbol *                gIOPMPSDesignCapacityKey;
+static const OSSymbol *                gIOPMPSTimeRemainingKey;
+static const OSSymbol *                gIOPMPSAmperageKey;
+static const OSSymbol *                gIOPMPSVoltageKey;
+static const OSSymbol *                gIOPMPSCycleCountKey;
+static const OSSymbol *                gIOPMPSMaxErrKey;
+static const OSSymbol *                gIOPMPSAdapterInfoKey;
+static const OSSymbol *                gIOPMPSLocationKey;
+static const OSSymbol *                gIOPMPSErrorConditionKey;
+static const OSSymbol *                gIOPMPSManufacturerKey;
+static const OSSymbol *                gIOPMPSManufactureDateKey;
+static const OSSymbol *                gIOPMPSModelKey;
+static const OSSymbol *                gIOPMPSSerialKey;
+static const OSSymbol *                gIOPMPSLegacyBatteryInfoKey;
+static const OSSymbol *                gIOPMPSBatteryHealthKey;
+static const OSSymbol *                gIOPMPSHealthConfidenceKey;
+static const OSSymbol *                gIOPMPSCapacityEstimatedKey;
+static const OSSymbol *                gIOPMPSBatteryChargeStatusKey;
+static const OSSymbol *                gIOPMPSBatteryTemperatureKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsKey;
+static const OSSymbol *                gIOPMPSChargerConfigurationKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsIDKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsWattsKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsRevisionKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsSerialNumberKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsFamilyKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsAmperageKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsDescriptionKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsPMUConfigurationKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsSourceIDKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsErrorFlagsKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsSharedSourceKey;
+static const OSSymbol *                gIOPMPSAdapterDetailsCloakedKey;
+static const OSSymbol *                gIOPMPSInvalidWakeSecondsKey;
+static const OSSymbol *                gIOPMPSPostChargeWaitSecondsKey;
+static const OSSymbol *                gIOPMPSPostDishargeWaitSecondsKey;
+
 #define kIOSleepSupportedKey        "IOSleepSupported"
 #define kIOPMSystemCapabilitiesKey  "System Capabilities"
 
@@ -9591,6 +9636,51 @@ static IOPMPowerState patriarchPowerStates[2] =
 
 void IORootParent::initialize( void )
 {
+
+    gIOPMPSExternalConnectedKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalConnectedKey);
+    gIOPMPSExternalChargeCapableKey = OSSymbol::withCStringNoCopy(kIOPMPSExternalChargeCapableKey);
+    gIOPMPSBatteryInstalledKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryInstalledKey);
+    gIOPMPSIsChargingKey = OSSymbol::withCStringNoCopy(kIOPMPSIsChargingKey);
+    gIOPMPSAtWarnLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtWarnLevelKey);
+    gIOPMPSAtCriticalLevelKey = OSSymbol::withCStringNoCopy(kIOPMPSAtCriticalLevelKey);
+    gIOPMPSCurrentCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSCurrentCapacityKey);
+    gIOPMPSMaxCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxCapacityKey);
+    gIOPMPSDesignCapacityKey = OSSymbol::withCStringNoCopy(kIOPMPSDesignCapacityKey);
+    gIOPMPSTimeRemainingKey = OSSymbol::withCStringNoCopy(kIOPMPSTimeRemainingKey);
+    gIOPMPSAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAmperageKey);
+    gIOPMPSVoltageKey = OSSymbol::withCStringNoCopy(kIOPMPSVoltageKey);
+    gIOPMPSCycleCountKey = OSSymbol::withCStringNoCopy(kIOPMPSCycleCountKey);
+    gIOPMPSMaxErrKey = OSSymbol::withCStringNoCopy(kIOPMPSMaxErrKey);
+    gIOPMPSAdapterInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterInfoKey);
+    gIOPMPSLocationKey = OSSymbol::withCStringNoCopy(kIOPMPSLocationKey);
+    gIOPMPSErrorConditionKey = OSSymbol::withCStringNoCopy(kIOPMPSErrorConditionKey);
+    gIOPMPSManufacturerKey = OSSymbol::withCStringNoCopy(kIOPMPSManufacturerKey);
+    gIOPMPSManufactureDateKey = OSSymbol::withCStringNoCopy(kIOPMPSManufactureDateKey);
+    gIOPMPSModelKey = OSSymbol::withCStringNoCopy(kIOPMPSModelKey);
+    gIOPMPSSerialKey = OSSymbol::withCStringNoCopy(kIOPMPSSerialKey);
+    gIOPMPSLegacyBatteryInfoKey = OSSymbol::withCStringNoCopy(kIOPMPSLegacyBatteryInfoKey);
+    gIOPMPSBatteryHealthKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryHealthKey);
+    gIOPMPSHealthConfidenceKey = OSSymbol::withCStringNoCopy(kIOPMPSHealthConfidenceKey);
+    gIOPMPSCapacityEstimatedKey = OSSymbol::withCStringNoCopy(kIOPMPSCapacityEstimatedKey);
+    gIOPMPSBatteryChargeStatusKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryChargeStatusKey);
+    gIOPMPSBatteryTemperatureKey = OSSymbol::withCStringNoCopy(kIOPMPSBatteryTemperatureKey);
+    gIOPMPSAdapterDetailsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsKey);
+    gIOPMPSChargerConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSChargerConfigurationKey);
+    gIOPMPSAdapterDetailsIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsIDKey);
+    gIOPMPSAdapterDetailsWattsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsWattsKey);
+    gIOPMPSAdapterDetailsRevisionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsRevisionKey);
+    gIOPMPSAdapterDetailsSerialNumberKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSerialNumberKey);
+    gIOPMPSAdapterDetailsFamilyKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsFamilyKey);
+    gIOPMPSAdapterDetailsAmperageKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsAmperageKey);
+    gIOPMPSAdapterDetailsDescriptionKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsDescriptionKey);
+    gIOPMPSAdapterDetailsPMUConfigurationKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsPMUConfigurationKey);
+    gIOPMPSAdapterDetailsSourceIDKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSourceIDKey);
+    gIOPMPSAdapterDetailsErrorFlagsKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsErrorFlagsKey);
+    gIOPMPSAdapterDetailsSharedSourceKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsSharedSourceKey);
+    gIOPMPSAdapterDetailsCloakedKey = OSSymbol::withCStringNoCopy(kIOPMPSAdapterDetailsCloakedKey);
+    gIOPMPSInvalidWakeSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSInvalidWakeSecondsKey);
+    gIOPMPSPostChargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostChargeWaitSecondsKey);
+    gIOPMPSPostDishargeWaitSecondsKey = OSSymbol::withCStringNoCopy(kIOPMPSPostDishargeWaitSecondsKey);
 }
 
 bool IORootParent::start( IOService * nub )
index 0a6777ea588a1cf9000603397d1dc088cfebe4d3..94fc76bf42efa3756f655b47c3eb57b3d2eb1aad 100644 (file)
@@ -1009,6 +1009,8 @@ ml_delay_should_spin(uint64_t interval)
        }
 }
 
+void ml_delay_on_yield(void) {}
+
 boolean_t ml_thread_is64bit(thread_t thread)
 {
        return (thread_is_64bit_addr(thread));
index 4a7061b652e8bee53074fa5f0255e7eb76ad76e1..d5a77dd9b80579284b908bbeeba17882eaf8dbf7 100644 (file)
@@ -444,6 +444,8 @@ void ml_init_lock_timeout(void);
 
 boolean_t ml_delay_should_spin(uint64_t interval);
 
+void ml_delay_on_yield(void);
+
 uint32_t ml_get_decrementer(void);
 
 #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
index e9a22430d930cc0b31daebf392371a9d9ba14eee..bf9633ed58f9ed41c8a03e8ab556b4ce06c0cdfc 100644 (file)
@@ -72,6 +72,8 @@ uint32_t LockTimeOutUsec;
 uint64_t MutexSpin;
 boolean_t is_clock_configured = FALSE;
 
+uint32_t yield_delay_us = 42; /* Less than cpu_idle_latency to ensure ml_delay_should_spin is true */
+
 extern int mach_assert;
 extern volatile uint32_t debug_enabled;
 
@@ -438,6 +440,8 @@ machine_startup(__unused boot_args * args)
                default_bg_preemption_rate = boot_arg;
        }
 
+       PE_parse_boot_argn("yield_delay_us", &yield_delay_us, sizeof (yield_delay_us));
+
        machine_conf();
 
        /*
@@ -1811,6 +1815,11 @@ ml_delay_should_spin(uint64_t interval)
        }
 }
 
+void
+ml_delay_on_yield(void)
+{
+}
+
 boolean_t ml_thread_is64bit(thread_t thread) {
        return (thread_is_64bit_addr(thread));
 }
index fcf59e3d3ba88856fe70a599474e5bc79c8ce600..0c90d585b8e9dd50716dc0e2c6e720252241d396 100644 (file)
@@ -87,8 +87,6 @@ static kern_return_t atm_value_register(atm_value_t atm_value, atm_task_descript
 static kern_return_t atm_listener_delete(atm_value_t atm_value, atm_task_descriptor_t task_descriptor, atm_guard_t guard);
 static void atm_link_get_reference(atm_link_object_t link_object) __unused;
 static void atm_link_dealloc(atm_link_object_t link_object);
-kern_return_t atm_invoke_collection(atm_value_t atm_value, mach_atm_subaid_t subaid, uint32_t flags);
-kern_return_t atm_send_user_notification(aid_t aid, mach_atm_subaid_t sub_aid, mach_port_t *buffers_array, uint64_t *sizes_array, mach_msg_type_number_t count, uint32_t flags);
 
 kern_return_t
 atm_release_value(
@@ -479,43 +477,15 @@ atm_command(
        uint32_t aid_array_count = 0;
        atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL;
        task_t task;
-       uint32_t collection_flags = ATM_ACTION_LOGFAIL;
        kern_return_t kr = KERN_SUCCESS;
        atm_guard_t guard;
        
        switch (command) {
        case ATM_ACTION_COLLECT:
-               collection_flags = ATM_ACTION_COLLECT;
                /* Fall through */
 
-       case ATM_ACTION_LOGFAIL: {
-               mach_atm_subaid_t sub_aid = 0;
-
-               if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE))
-                       return KERN_NOT_SUPPORTED;
-
-               /* find the first non-default atm_value */
-               for (i = 0; i < value_count; i++) {
-                       atm_value = HANDLE_TO_ATM_VALUE(values[i]);
-                       if (atm_value != VAM_DEFAULT_VALUE)
-                               break;
-               }
-               
-               /* if we are not able to find any atm values
-                * in stack then this call was made in error
-                */
-               if (atm_value == NULL) {
-                       return KERN_FAILURE;
-               }
-
-               if (in_content_size >= sizeof(mach_atm_subaid_t)) {
-                       sub_aid = *(mach_atm_subaid_t *)(void *)in_content;
-               }
-
-               *out_content_size = 0;
-               kr = atm_invoke_collection(atm_value, sub_aid, collection_flags);
-               break;
-       }
+       case ATM_ACTION_LOGFAIL:
+               return KERN_NOT_SUPPORTED;
 
        case ATM_FIND_MIN_SUB_AID:
                if ((in_content_size/sizeof(aid_t)) > (*out_content_size/sizeof(mach_atm_subaid_t)))
@@ -611,202 +581,6 @@ atm_release(
 }
 
 
-/*
- * Routine: atm_invoke_collection
- * Purpose: Sends a notification with array of memory buffer.
- * Note: may block till user daemon responds.
- */
-kern_return_t
-atm_invoke_collection(
-       atm_value_t atm_value,
-       mach_atm_subaid_t sub_aid,
-       uint32_t flags)
-{
-       aid_t aid = atm_value->aid;
-       kern_return_t kr = KERN_SUCCESS;
-       uint32_t array_count = 0, i = 0, j = 0, requestor_index = 0;
-       uint64_t *sizes_array = NULL;
-       atm_link_object_t link_object = NULL;
-       mach_port_t *mem_array = NULL;
-       boolean_t need_swap_first = FALSE;
-       atm_task_descriptor_t requesting_descriptor = current_task()->atm_context;
-
-       lck_mtx_lock(&atm_value->listener_lock);
-       array_count = atm_value->listener_count;
-       lck_mtx_unlock(&atm_value->listener_lock);
-
-       if (array_count == 0){
-               return KERN_SUCCESS;
-       }
-
-       mem_array = kalloc(sizeof(mach_port_t) * array_count);
-       if (mem_array == NULL){
-               return KERN_NO_SPACE;
-       }
-
-       sizes_array = kalloc(sizeof(uint64_t) * array_count);
-       if (sizes_array == NULL){
-               kfree(mem_array, sizeof(mach_port_t) * array_count);
-               return KERN_NO_SPACE;
-       }
-
-       lck_mtx_lock(&atm_value->listener_lock);
-       queue_iterate(&atm_value->listeners, link_object, atm_link_object_t, listeners_element) {
-               if (i >= array_count){
-                       break;
-               }
-
-               if (!need_swap_first && requesting_descriptor == link_object->descriptor){
-                       assert(requesting_descriptor != NULL);
-                       requestor_index = i;
-                       need_swap_first = TRUE;
-               }
-
-               sizes_array[i] = link_object->descriptor->trace_buffer_size;
-               mem_array[i] = ipc_port_copy_send(link_object->descriptor->trace_buffer);
-               if (!IPC_PORT_VALID(mem_array[i])){
-                       mem_array[i] = NULL;
-               }
-               i++;
-       }
-       lck_mtx_unlock(&atm_value->listener_lock);
-
-       /*
-        * Swap the position of requesting task ahead, diagnostics can 
-        * process its buffers the first.
-        */
-       if (need_swap_first && requestor_index != 0){
-               assert(requestor_index < array_count);
-               mach_port_t tmp_port = mem_array[0];
-               uint64_t tmp_size = sizes_array[0];
-               mem_array[0] = mem_array[requestor_index];
-               sizes_array[0] = sizes_array[requestor_index];
-               mem_array[requestor_index] = tmp_port;
-               sizes_array[requestor_index] = tmp_size;
-       }
-
-       if (i > 0) {
-               kr = atm_send_user_notification(aid, sub_aid, mem_array, sizes_array, i, flags);
-       }
-
-       for (j = 0; j < i; j++) {
-               if (mem_array[j] != NULL)
-                       ipc_port_release_send(mem_array[j]);
-       }
-
-       kfree(mem_array, sizeof(mach_port_t) * array_count);
-       kfree(sizes_array, sizeof(uint64_t) * array_count);
-
-       return kr;
-}
-
-/*
- * Routine: atm_send_user_notification
- * Purpose: Make an upcall to user space daemon if its listening for atm notifications.
- * Returns: KERN_SUCCESS for successful delivery.
- *                     KERN_FAILURE if port is dead or NULL.
- */
-kern_return_t
-atm_send_user_notification(
-       aid_t aid,
-       mach_atm_subaid_t sub_aid,
-       mach_port_t *buffers_array,
-       uint64_t *sizes_array,
-       mach_msg_type_number_t count,
-       uint32_t flags)
-{
-       mach_port_t user_port;
-       int                     error;
-       thread_t th = current_thread();
-       kern_return_t kr;
-
-       error = host_get_atm_notification_port(host_priv_self(), &user_port);
-       if ((error != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
-               return KERN_FAILURE;
-       }
-
-       thread_set_honor_qlimit(th);
-       kr = atm_collect_trace_info(user_port, aid, sub_aid, flags, buffers_array, count, sizes_array, count);
-       thread_clear_honor_qlimit(th);
-
-       if (kr != KERN_SUCCESS) {
-               ipc_port_release_send(user_port);
-
-               if (kr == MACH_SEND_TIMED_OUT) {
-                       kr = KERN_SUCCESS;
-               }
-       }
-
-       return kr;
-}
-
-/*
- * Routine: atm_send_proc_inspect_notification
- * Purpose: Make an upcall to user space daemon if its listening for trace 
- *          notifications for per process inspection.
- * Returns: KERN_SUCCESS for successful delivery.
- *                     KERN_FAILURE if port is dead or NULL.
- */
-
-kern_return_t
-atm_send_proc_inspect_notification(
-       task_t task,
-       int32_t traced_pid,
-       uint64_t traced_uniqueid)
-{
-       mach_port_t user_port = MACH_PORT_NULL;
-       mach_port_t memory_port = MACH_PORT_NULL;
-       kern_return_t kr;
-       atm_task_descriptor_t task_descriptor = ATM_TASK_DESCRIPTOR_NULL;
-       uint64_t buffer_size = 0;
-       int                     error;
-       thread_t th = current_thread();
-
-       if (disable_atm || (atm_get_diagnostic_config() & ATM_TRACE_DISABLE))
-               return KERN_NOT_SUPPORTED;
-
-       /* look for the requested memory in target task */
-       if (!task)
-               return KERN_INVALID_TASK;
-
-       task_lock(task);
-       if (task->atm_context){
-               task_descriptor = task->atm_context;
-               atm_descriptor_get_reference(task_descriptor);
-       }
-       task_unlock(task);
-
-       if (task_descriptor == ATM_TASK_DESCRIPTOR_NULL){
-               return KERN_FAILURE;
-       }
-
-       memory_port = ipc_port_copy_send(task_descriptor->trace_buffer);
-       buffer_size =  task_descriptor->trace_buffer_size;
-       atm_task_descriptor_dealloc(task_descriptor);
-
-       /* get the communication port */
-       error = host_get_atm_notification_port(host_priv_self(), &user_port);
-       if ((error != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) {
-               ipc_port_release_send(memory_port);
-               return KERN_FAILURE;
-       }
-
-       thread_set_honor_qlimit(th);
-       kr =  atm_inspect_process_buffer(user_port, traced_pid, traced_uniqueid, buffer_size, memory_port);
-       thread_clear_honor_qlimit(th);
-
-       if (kr != KERN_SUCCESS) {
-               ipc_port_release_send(user_port);
-
-               if (kr == MACH_SEND_TIMED_OUT) {
-                       kr = KERN_SUCCESS;
-               }
-       }
-
-       ipc_port_release_send(memory_port);
-       return kr;
-}
-
 /*
  * Routine: atm_value_alloc_init
  * Purpose: Allocates an atm value struct and initialize it.
index e62e821c6b91a38eacbac26f522b08a3b6f42154..611470eaa4f910c88f9ce6c6b7808f0f9ad513ac 100644 (file)
@@ -760,6 +760,8 @@ ml_delay_should_spin(uint64_t interval)
        return (interval < delay_spin_threshold) ? TRUE : FALSE;
 }
 
+void ml_delay_on_yield(void) {}
+
 /*
  * This is called from the machine-independent layer
  * to perform machine-dependent info updates. Defer to cpu_thread_init().
index 8020990521059e34a61a712cb95acb6ac7d7220f..487cc6b6166fa5e90e680c35fb0c23512a9d25ce 100644 (file)
@@ -88,6 +88,8 @@ void ml_init_delay_spin_threshold(int);
 
 boolean_t ml_delay_should_spin(uint64_t interval);
 
+extern void ml_delay_on_yield(void);
+
 vm_offset_t
 ml_static_ptovirt(
        vm_offset_t);
index 81776c7291b4b6a9d52ed768d4e0067ae19bf35c..7955cf45de23d3250d65dab8a6031ad813de394f 100644 (file)
@@ -4615,13 +4615,13 @@ ipc_kmsg_copyout_to_kernel(
        ipc_space_t     space)
 {
        ipc_object_t dest;
-       ipc_object_t reply;
+       mach_port_t reply;
        mach_msg_type_name_t dest_type;
        mach_msg_type_name_t reply_type;
-       mach_port_name_t dest_name, reply_name;
+       mach_port_name_t dest_name;
 
        dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
-       reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
+       reply = kmsg->ikm_header->msgh_local_port;
        dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
        reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);
 
@@ -4637,13 +4637,11 @@ ipc_kmsg_copyout_to_kernel(
                dest_name = MACH_PORT_DEAD;
        }
 
-       reply_name = CAST_MACH_PORT_TO_NAME(reply);
-
        kmsg->ikm_header->msgh_bits =
                (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
                                        MACH_MSGH_BITS(reply_type, dest_type));
        kmsg->ikm_header->msgh_local_port =  CAST_MACH_NAME_TO_PORT(dest_name);
-       kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
+       kmsg->ikm_header->msgh_remote_port = reply;
 }
 
 #if IKM_SUPPORT_LEGACY
index d017ae5203fb76f2eb623143b8d3e42493aef797..b28d396c281f05e749a38a4babf2421896a2f0ae 100644 (file)
@@ -1104,7 +1104,7 @@ fill_task_monotonic_rusage(task_t task, rusage_info_current *ri)
 
        assert(task != TASK_NULL);
 
-       uint64_t counts[MT_CORE_NFIXED] = {};
+       uint64_t counts[MT_CORE_NFIXED] = { 0 };
        mt_fixed_task_counts(task, counts);
 #ifdef MT_CORE_INSTRS
        ri->ri_instructions = counts[MT_CORE_INSTRS];
index 0d15f8f8eee1f33697e927e2fa663332484d68e0..d58ac47f7b0f0a902651fc21a3596ca26d55d09a 100644 (file)
@@ -645,11 +645,8 @@ void
 panic_with_thread_context(unsigned int reason, void *ctx, uint64_t debugger_options_mask, thread_t thread, const char *str, ...)
 {
        va_list panic_str_args;
-       __assert_only uint32_t th_ref_count;
 
        assert_thread_magic(thread);
-       th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire);
-       assertf(th_ref_count > 0, "panic_with_thread_context called with invalid thread %p with refcount %u", thread, th_ref_count);
 
        /* Take a reference on the thread so it doesn't disappear by the time we try to backtrace it */
        thread_reference(thread);
index ddbfa0e5aa6f8129c572730ec6a065deff3fc46b..818854bf6f1b0f7b95ebb189a7eec5b814dc7a85 100644 (file)
@@ -500,6 +500,92 @@ mach_msg_rpc_from_kernel_body(
        return mr;
 }
 
+/*
+ *     Routine:        mach_msg_destroy_from_kernel_proper
+ *     Purpose:
+ *             mach_msg_destroy_from_kernel_proper is used to destroy
+ *             an unwanted/unexpected reply message from a MIG
+ *             kernel-specific user-side stub. It is like ipc_kmsg_destroy(),
+ *             except we no longer have the kmsg - just the contents.
+ */
+void
+mach_msg_destroy_from_kernel_proper(mach_msg_header_t *msg)
+{
+	mach_msg_bits_t mbits = msg->msgh_bits;
+       ipc_object_t object;
+
+       object = (ipc_object_t) msg->msgh_remote_port;
+       if (IO_VALID(object)) {
+               ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+       }
+
+       /*
+        * The destination (now in msg->msgh_local_port via
+        * ipc_kmsg_copyout_to_kernel) has been consumed with
+        * ipc_object_copyout_dest.
+        */
+
+       /* MIG kernel users don't receive vouchers */
+       assert(!MACH_PORT_VALID(msg->msgh_voucher_port));
+
+       /* For simple messages, we're done */
+       if ((mbits & MACH_MSGH_BITS_COMPLEX) == 0) {
+               return;
+       }
+
+       /* Discard descriptor contents */
+       mach_msg_body_t *body = (mach_msg_body_t *)(msg + 1);
+       mach_msg_descriptor_t *daddr = (mach_msg_descriptor_t *)(body + 1);
+       mach_msg_size_t i;
+
+       for (i = 0 ; i < body->msgh_descriptor_count; i++, daddr++ ) {
+               switch (daddr->type.type) {
+
+               case MACH_MSG_PORT_DESCRIPTOR: {
+                       mach_msg_port_descriptor_t *dsc = &daddr->port;
+                       if (IO_VALID((ipc_object_t) dsc->name)) {
+                               ipc_object_destroy((ipc_object_t) dsc->name, dsc->disposition);
+                       }
+                       break;
+               }
+               case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+               case MACH_MSG_OOL_DESCRIPTOR : {
+                       mach_msg_ool_descriptor_t *dsc =
+                           (mach_msg_ool_descriptor_t *)&daddr->out_of_line;
+
+                       if (dsc->size > 0) {
+                               vm_map_copy_discard((vm_map_copy_t) dsc->address);
+                       } else {
+                               assert(dsc->address == (void *) 0);
+                       }
+                       break;
+               }
+               case MACH_MSG_OOL_PORTS_DESCRIPTOR : {
+                       ipc_object_t                    *objects;
+                       mach_msg_type_number_t          j;
+                       mach_msg_ool_ports_descriptor_t *dsc;
+
+                       dsc = (mach_msg_ool_ports_descriptor_t  *)&daddr->ool_ports;
+                       objects = (ipc_object_t *) dsc->address;
+
+                       if (dsc->count == 0) {
+                               break;
+                       }
+                       assert(objects != 0);
+                       for (j = 0; j < dsc->count; j++) {
+                               object = objects[j];
+                               if (IO_VALID(object)) {
+                                       ipc_object_destroy(object, dsc->disposition);
+                               }
+                       }
+                       kfree(dsc->address, (vm_size_t) dsc->count * sizeof(mach_port_t));
+                       break;
+               }
+               default :
+                       break;
+               }
+       }
+}
 
 /************** These Calls are set up for kernel-loaded tasks/threads **************/
 
index 92fe442c5600997d116e4c6ac5675640cb5d240e..cf1af4da48ff83e76ebabb0a62da2269b4026ac7 100644 (file)
@@ -155,6 +155,12 @@ mach_msg_rpc_from_kernel_proper(
 
 #define mach_msg_rpc_from_kernel mach_msg_rpc_from_kernel_proper
 
+extern void
+mach_msg_destroy_from_kernel_proper(
+       mach_msg_header_t       *msg);
+
+#define mach_msg_destroy_from_kernel mach_msg_destroy_from_kernel_proper
+
 #ifdef XNU_KERNEL_PRIVATE
 extern mach_msg_return_t mach_msg_send_from_kernel_with_options_legacy(
        mach_msg_header_t       *msg,
index 0c9d825e93be769e61f4fa5ab60d35e45877c503..75315a7648c9edf8488d6651d46e80b9e905a1e4 100644 (file)
@@ -199,21 +199,19 @@ mt_fixed_task_counts(task_t task, uint64_t *counts_out)
        assert(task != TASK_NULL);
        assert(counts_out != NULL);
 
-       uint64_t counts[MT_CORE_NFIXED];
        if (!mt_core_supported) {
-               for (int i = 0; i < MT_CORE_NFIXED; i++) {
-                       counts[i] = 0;
-               }
-               return 0;
+               memset(counts_out, 0, sizeof(*counts_out) * MT_CORE_NFIXED);
+               return 1;
        }
 
        task_lock(task);
 
+       uint64_t counts[MT_CORE_NFIXED] = { 0 };
        for (int i = 0; i < MT_CORE_NFIXED; i++) {
                counts[i] = task->task_monotonic.mtk_counts[i];
        }
 
-       uint64_t thread_counts[MT_CORE_NFIXED] = {};
+       uint64_t thread_counts[MT_CORE_NFIXED] = { 0 };
        thread_t thread = THREAD_NULL;
        thread_t curthread = current_thread();
        bool needs_current = false;
@@ -357,9 +355,7 @@ void
 mt_cur_thread_fixed_counts(uint64_t *counts)
 {
        if (!mt_core_supported) {
-               for (int i = 0; i < MT_CORE_NFIXED; i++) {
-                       counts[i] = 0;
-               }
+               memset(counts, 0, sizeof(*counts) * MT_CORE_NFIXED);
                return;
        }
 
index 001cad83b4e3ea2e95fc492a654b6d6ecaa9e564..481378d2f3f3a5c2389dfa32943032e919b45dec 100644 (file)
@@ -378,7 +378,7 @@ ledger_instantiate(ledger_template_t template, int entry_type)
 
        ledger->l_template = template;
        ledger->l_id = ledger_cnt++;
-       ledger->l_refs = 1;
+       os_ref_init(&ledger->l_refs, NULL);
        ledger->l_size = (int32_t)cnt;
 
        template_lock(template);
@@ -429,7 +429,7 @@ ledger_reference(ledger_t ledger)
 {
        if (!LEDGER_VALID(ledger))
                return (KERN_INVALID_ARGUMENT);
-       OSIncrementAtomic(&ledger->l_refs);
+       os_ref_retain(&ledger->l_refs);
        return (KERN_SUCCESS);
 }
 
@@ -439,7 +439,7 @@ ledger_reference_count(ledger_t ledger)
        if (!LEDGER_VALID(ledger))
                return (-1);
 
-       return (ledger->l_refs);
+       return os_ref_get_count(&ledger->l_refs);
 }
 
 /*
@@ -449,16 +449,10 @@ ledger_reference_count(ledger_t ledger)
 kern_return_t
 ledger_dereference(ledger_t ledger)
 {
-       int v;
-
        if (!LEDGER_VALID(ledger))
                return (KERN_INVALID_ARGUMENT);
 
-       v = OSDecrementAtomic(&ledger->l_refs);
-       ASSERT(v >= 1);
-
-       /* Just released the last reference.  Free it. */
-       if (v == 1) {
+       if (os_ref_release(&ledger->l_refs) == 0) {
                if (ledger->l_template->lt_zone) {
                        zfree(ledger->l_template->lt_zone, ledger);
                } else {
index 78eb4f8484f005996836be1cb642d33281273ca7..55faa7f5286d1e17e952710f71048187772b46c6 100644 (file)
 
 #include <mach/mach_types.h>   /* ledger_t */
 
+#ifdef MACH_KERNEL_PRIVATE
+#include <os/refcnt.h>
+#endif /* MACH_KERNEL_PRIVATE */
+
 #define        LEDGER_INFO             0
 #define        LEDGER_ENTRY_INFO       1
 #define        LEDGER_TEMPLATE_INFO    2
@@ -92,7 +96,7 @@ struct ledger_entry {
 
 struct ledger {
        uint64_t                l_id;
-       int32_t                 l_refs;
+       struct os_refcnt        l_refs;
        int32_t                 l_size;
        struct ledger_template *l_template;
        struct ledger_entry     l_entries[0] __attribute__((aligned(8)));
index 1732d7ab23e51e526824eb44bb4c61cc60a7cea3..507d5ec353317f5de90ec1c27e46237e5d24b2c2 100644 (file)
@@ -110,6 +110,8 @@ swtch_continue(void)
        result = SCHED(thread_should_yield)(myprocessor, current_thread());
        enable_preemption();
 
+       ml_delay_on_yield();
+
        thread_syscall_return(result);
        /*NOTREACHED*/
 }
@@ -147,6 +149,8 @@ swtch_pri_continue(void)
        result = SCHED(thread_should_yield)(myprocessor, current_thread());
        mp_enable_preemption();
 
+       ml_delay_on_yield();
+
        thread_syscall_return(result);
        /*NOTREACHED*/
 }
@@ -182,6 +186,8 @@ thread_switch_continue(void *parameter, __unused int ret)
        if (option == SWITCH_OPTION_DEPRESS || option == SWITCH_OPTION_OSLOCK_DEPRESS)
                thread_depress_abort(self);
 
+       ml_delay_on_yield();
+
        thread_syscall_return(KERN_SUCCESS);
        /*NOTREACHED*/
 }
@@ -314,10 +320,22 @@ thread_switch(
                thread_deallocate(thread);
        }
 
-       if (wait_option)
+       if (wait_option) {
                assert_wait_timeout((event_t)assert_wait_timeout, interruptible, option_time, scale_factor);
-       else if (depress_option)
-               thread_depress_ms(option_time);
+       } else {
+               disable_preemption();
+               bool should_yield = SCHED(thread_should_yield)(current_processor(), current_thread());
+               enable_preemption();
+
+               if (should_yield == false) {
+                       /* Early-return if yielding to the scheduler will not be beneficial */
+                       return KERN_SUCCESS;
+               }
+
+               if (depress_option) {
+                       thread_depress_ms(option_time);
+               }
+       }
 
        thread_yield_with_continuation(thread_switch_continue, (void *)(intptr_t)option);
        __builtin_unreachable();
index 20eef5136946dd1f58dac660c95f0950230b27bf..c80e30d300b2be3a73867afd091bf97153521d74 100644 (file)
@@ -655,24 +655,24 @@ task_reference_internal(task_t task)
        void *       bt[TASK_REF_BTDEPTH];
        int             numsaved = 0;
 
+       os_ref_retain(&task->ref_count);
+
        numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
-       
-       (void)hw_atomic_add(&(task)->ref_count, 1);
        btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR,
                                        bt, numsaved);
 }
 
-uint32_t
+os_ref_count_t
 task_deallocate_internal(task_t task)
 {
        void *       bt[TASK_REF_BTDEPTH];
        int             numsaved = 0;
 
        numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
-
        btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR,
                                        bt, numsaved);
-       return hw_atomic_sub(&(task)->ref_count, 1);
+
+       return os_ref_release(&task->ref_count);
 }
 
 #endif /* TASK_REFERENCE_LEAK_DEBUG */
@@ -1115,6 +1115,8 @@ init_task_ledgers(void)
        task_ledger_template = t;
 }
 
+os_refgrp_decl(static, task_refgrp, "task", NULL);
+
 kern_return_t
 task_create_internal(
        task_t          parent_task,
@@ -1136,7 +1138,7 @@ task_create_internal(
                return(KERN_RESOURCE_SHORTAGE);
 
        /* one ref for just being alive; one for our caller */
-       new_task->ref_count = 2;
+       os_ref_init_count(&new_task->ref_count, &task_refgrp, 2);
 
        /* allocate with active entries */
        assert(task_ledger_template != NULL);
@@ -1530,7 +1532,7 @@ task_deallocate(
        task_t          task)
 {
        ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
-       uint32_t refs;
+       os_ref_count_t refs;
 
        if (task == TASK_NULL)
            return;
@@ -1538,31 +1540,24 @@ task_deallocate(
        refs = task_deallocate_internal(task);
 
 #if IMPORTANCE_INHERITANCE
-       if (refs > 1)
-               return;
-
-       atomic_load_explicit(&task->ref_count, memory_order_acquire);
-       
        if (refs == 1) {
                /*
                 * If last ref potentially comes from the task's importance,
                 * disconnect it.  But more task refs may be added before
                 * that completes, so wait for the reference to go to zero
-                * naturually (it may happen on a recursive task_deallocate()
+                * naturally (it may happen on a recursive task_deallocate()
                 * from the ipc_importance_disconnect_task() call).
                 */
                if (IIT_NULL != task->task_imp_base)
                        ipc_importance_disconnect_task(task);
                return;
        }
-#else
-       if (refs > 0)
-               return;
-
-       atomic_load_explicit(&task->ref_count, memory_order_acquire);
-
 #endif /* IMPORTANCE_INHERITANCE */
 
+       if (refs > 0) {
+               return;
+       }
+
        lck_mtx_lock(&tasks_threads_lock);
        queue_remove(&terminated_tasks, task, task_t, tasks);
        terminated_tasks_count--;
@@ -3653,22 +3648,13 @@ host_security_set_task_token(
 
 kern_return_t
 task_send_trace_memory(
-       task_t        target_task,
+       __unused task_t   target_task,
        __unused uint32_t pid,
        __unused uint64_t uniqueid)
 {
-       kern_return_t kr = KERN_INVALID_ARGUMENT;
-       if (target_task == TASK_NULL)
-               return (KERN_INVALID_ARGUMENT);
-
-#if CONFIG_ATM
-       kr = atm_send_proc_inspect_notification(target_task,
-                                 pid,
-                                 uniqueid);
-
-#endif
-       return (kr);
+       return KERN_INVALID_ARGUMENT;
 }
+
 /*
  * This routine was added, pretty much exclusively, for registering the
  * RPC glue vector for in-kernel short circuited tasks.  Rather than
@@ -6202,7 +6188,7 @@ task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
        switch (flavor) {
        case TASK_INSPECT_BASIC_COUNTS: {
                struct task_inspect_basic_counts *bc;
-               uint64_t task_counts[MT_CORE_NFIXED];
+               uint64_t task_counts[MT_CORE_NFIXED] = { 0 };
 
                if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
                        kr = KERN_INVALID_ARGUMENT;
index fe43b2db1098a9a9485d6b416d4ed3a30b6ae1c7..12a2162300b381444dc6151a73227aecda7415a0 100644 (file)
 #include <kern/thread.h>
 #include <mach/coalition.h>
 #include <stdatomic.h>
+#include <os/refcnt.h>
 
 #ifdef CONFIG_ATM
 #include <atm/atm_internal.h>
@@ -147,7 +148,7 @@ struct _cpu_time_qos_stats {
 struct task {
        /* Synchronization/destruction information */
        decl_lck_mtx_data(,lock)                /* Task's lock */
-       _Atomic uint32_t        ref_count;      /* Number of references to me */
+       os_refcnt_t     ref_count;      /* Number of references to me */
        boolean_t       active;         /* Task has not been terminated */
        boolean_t       halting;        /* Task is being halted */
        /* Virtual timers */
@@ -477,13 +478,10 @@ task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
 
 #if TASK_REFERENCE_LEAK_DEBUG
 extern void task_reference_internal(task_t task);
-extern uint32_t task_deallocate_internal(task_t task);
+extern os_ref_count_t task_deallocate_internal(task_t task);
 #else
-#define task_reference_internal(task)          \
-                       (void)atomic_fetch_add_explicit(&(task)->ref_count, 1, memory_order_relaxed)
-
-#define task_deallocate_internal(task)         \
-                       (atomic_fetch_sub_explicit(&task->ref_count, 1, memory_order_release) - 1)
+#define task_reference_internal(task) os_ref_retain(&(task)->ref_count)
+#define task_deallocate_internal(task) os_ref_release(&(task)->ref_count)
 #endif
 
 #define task_reference(task)                                   \
index 81f934a17cc048044670970847ab6332a3f85d1d..84e0277b0796ecefbfc2852a45d7855584f89670 100644 (file)
@@ -244,6 +244,8 @@ void __attribute__((noinline)) SENDING_NOTIFICATION__TASK_HAS_TOO_MANY_THREADS(t
  */
 #define MINIMUM_CPULIMIT_INTERVAL_MS 1
 
+os_refgrp_decl(static, thread_refgrp, "thread", NULL);
+
 void
 thread_bootstrap(void)
 {
@@ -257,8 +259,6 @@ thread_bootstrap(void)
 
        thread_template.runq = PROCESSOR_NULL;
 
-       thread_template.ref_count = 2;
-
        thread_template.reason = AST_NONE;
        thread_template.at_safe_point = FALSE;
        thread_template.wait_event = NO_EVENT64;
@@ -413,6 +413,7 @@ thread_bootstrap(void)
        thread_template.th_work_interval = NULL;
 
        init_thread = thread_template;
+
        machine_set_current_thread(&init_thread);
 }
 
@@ -687,49 +688,34 @@ thread_terminate_self(void)
        /*NOTREACHED*/
 }
 
-/* Drop a thread refcount safely without triggering a zfree */
-void
-thread_deallocate_safe(thread_t thread)
+static bool
+thread_ref_release(thread_t thread)
 {
-       __assert_only uint32_t          th_ref_count;
-
-       if (thread == THREAD_NULL)
-               return;
+       if (thread == THREAD_NULL) {
+               return false;
+       }
 
        assert_thread_magic(thread);
 
-       if (__probable(atomic_fetch_sub_explicit(&thread->ref_count, 1,
-                       memory_order_release) - 1 > 0)) {
-                return;
-        }
-
-       th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire);
-       assert(th_ref_count == 0);
-
-       /* enqueue the thread for thread deallocate deamon to call thread_deallocate_complete */
-       thread_deallocate_enqueue(thread);
+       return os_ref_release(&thread->ref_count) == 0;
 }
 
+/* Drop a thread refcount safely without triggering a zfree */
 void
-thread_deallocate(
-       thread_t                        thread)
+thread_deallocate_safe(thread_t thread)
 {
-       __assert_only uint32_t          th_ref_count;
-
-       if (thread == THREAD_NULL)
-               return;
-
-       assert_thread_magic(thread);
-
-       if (__probable(atomic_fetch_sub_explicit(&thread->ref_count, 1,
-                       memory_order_release) - 1 > 0)) {
-                return;
-        }
-
-       th_ref_count = atomic_load_explicit(&thread->ref_count, memory_order_acquire);
-       assert(th_ref_count == 0);
+       if (__improbable(thread_ref_release(thread))) {
+		/* enqueue the thread for thread deallocate daemon to call thread_deallocate_complete */
+               thread_deallocate_enqueue(thread);
+       }
+}
 
-       thread_deallocate_complete(thread);
+void
+thread_deallocate(thread_t thread)
+{
+       if (__improbable(thread_ref_release(thread))) {
+               thread_deallocate_complete(thread);
+       }
 }
 
 void
@@ -740,7 +726,7 @@ thread_deallocate_complete(
 
        assert_thread_magic(thread);
 
-       assert(thread->ref_count == 0);
+       assert(os_ref_get_count(&thread->ref_count) == 0);
 
        assert(thread_owned_workloops_count(thread) == 0);
 
@@ -1332,6 +1318,8 @@ thread_create_internal(
        if (new_thread != first_thread)
                *new_thread = thread_template;
 
+       os_ref_init_count(&new_thread->ref_count, &thread_refgrp, 2);
+
 #ifdef MACH_BSD
        new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
        if (new_thread->uthread == NULL) {
index d2cf4278e7065fab322d12625e8a7bef9a4916f4..d0a5212a26ad2ff76e6d86be1c9d658b20051bd8 100644 (file)
 
 #include <kern/waitq.h>
 #include <san/kasan.h>
+#include <os/refcnt.h>
 
 #include <ipc/ipc_kmsg.h>
 
@@ -287,7 +288,7 @@ struct thread {
 
        int16_t                         promotions;                     /* level of promotion */
        int                             iotier_override; /* atomic operations to set, cleared on ret to user */
-       _Atomic uint32_t                ref_count;              /* number of references to me */
+       struct os_refcnt        ref_count;              /* number of references to me */
 
        lck_mtx_t*                      waiting_for_mutex;      /* points to mutex we're waiting for until we acquire it */
 
@@ -646,7 +647,7 @@ extern void                 thread_init(void);
 extern void                    thread_daemon_init(void);
 
 #define        thread_reference_internal(thread)       \
-                       (void)atomic_fetch_add_explicit(&(thread)->ref_count, 1, memory_order_relaxed)
+                       os_ref_retain(&(thread)->ref_count);
 
 #define thread_reference(thread)                                       \
 MACRO_BEGIN                                                                                    \
index 4fd57d53abf3bf51fb288d62b7d95c0c5d10e77b..889e407d9196fc811aa9439bef756ed7f0099137 100644 (file)
@@ -73,7 +73,7 @@ struct trust_cache_module1 {
 #define TC_LOOKUP_RESULT_MASK                   0xffL
 
 #define TC_LOOKUP_FOUND         1
-#define TC_LOOKUP_FALLBACK      2
+// #define TC_LOOKUP_FALLBACK      2 /* obsolete with removal of legacy static trust caches */
 
 #ifdef XNU_KERNEL_PRIVATE
 
@@ -86,22 +86,6 @@ struct serialized_trust_caches {
 } __attribute__((__packed__));
 
 
-// Legacy Static Trust Cache
-
-/* This is the old legacy trust cache baked into the AMFI kext.
- * We support it for a transitionary period, until external trust caches
- * are fully established, and the AMFI trust cache can be removed. */
-
-struct legacy_trust_cache_bucket {
-       uint16_t count;
-       uint16_t offset;
-} __attribute__((__packed__));
-
-#define LEGACY_TRUST_CACHE_ENTRY_LEN (CS_CDHASH_LEN-1)
-#define LEGACY_TRUST_CACHE_BUCKET_COUNT (256)
-
-typedef uint8_t pmap_cs_legacy_stc_entry[CS_CDHASH_LEN-1]; // bucketized with first byte
-
 void trust_cache_init(void);
 
 uint32_t lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
index 831f3afd86563b8b641abc6a545694a38dfdc31a..7d33d9f1b96a788c255d55b3b26126b7c5944633 100644 (file)
@@ -344,7 +344,7 @@ kperf_port_to_pid(mach_port_name_t portname)
        }
        pid_t pid = task_pid(task);
        /* drop the ref taken by port_name_to_task */
-       task_deallocate_internal(task);
+       (void)task_deallocate_internal(task);
 
        return pid;
 }
index c8ec4fed006213c26c63c8faf0fbb908ceb889ab..aa9d4662e3f7590096b8fcd55745e4853be68007 100644 (file)
@@ -707,8 +707,10 @@ vm_compressor_pager_put(
 {
        compressor_pager_t      pager;
        compressor_slot_t       *slot_p;
+#if __arm__ || __arm64__
        unsigned int            prev_wimg = VM_WIMG_DEFAULT;
        boolean_t               set_cache_attr = FALSE;
+#endif
 
        compressor_pager_stats.put++;
 
@@ -749,6 +751,7 @@ vm_compressor_pager_put(
                *compressed_count_delta_p -= 1;
        }
 
+#if __arm__ || __arm64__
        /*
         * cacheability should be set to the system default (usually writeback)
         * during compressor operations, both for performance and correctness,
@@ -772,6 +775,11 @@ vm_compressor_pager_put(
                        pmap_set_cache_attributes(ppnum, prev_wimg);
                return KERN_RESOURCE_SHORTAGE;
        }
+#else
+       if (vm_compressor_put(ppnum, slot_p, current_chead, scratch_buf)) {
+               return KERN_RESOURCE_SHORTAGE;
+       }
+#endif
        *compressed_count_delta_p += 1;
 
        return KERN_SUCCESS;
@@ -820,6 +828,7 @@ vm_compressor_pager_get(
                
        if (kr == KERN_SUCCESS) {
                int     retval;
+#if __arm__ || __arm64__
                unsigned int prev_wimg = VM_WIMG_DEFAULT;
                boolean_t set_cache_attr = FALSE;
 
@@ -835,7 +844,7 @@ vm_compressor_pager_get(
                        set_cache_attr = TRUE;
                        pmap_set_cache_attributes(ppnum, VM_WIMG_DEFAULT);
                }
-
+#endif
                /* get the page from the compressor */
                retval = vm_compressor_get(ppnum, slot_p, flags);
                if (retval == -1)
@@ -846,8 +855,10 @@ vm_compressor_pager_get(
                        assert((flags & C_DONT_BLOCK));
                        kr = KERN_FAILURE;
                }
+#if __arm__ || __arm64__
                if (set_cache_attr)
                        pmap_set_cache_attributes(ppnum, prev_wimg);
+#endif
        }
 
        if (kr == KERN_SUCCESS) {
index bf722548b023d5a64d32add02f84b4ebf62529df..7aaef6cdd3b584db98ad2c9dc4d5dc6e34081e2e 100644 (file)
@@ -1742,6 +1742,7 @@ void update_vm_info(void)
        record_memory_pressure();
 }
 
+extern boolean_t hibernation_vmqueues_inspection;
 
 void
 vm_page_balance_inactive(int max_to_move)
@@ -1750,6 +1751,17 @@ vm_page_balance_inactive(int max_to_move)
 
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
 
+       if (hibernation_vmqueues_inspection == TRUE) {
+               /*
+                * It is likely that the hibernation code path is
+                * dealing with these very queues as we are about
+                * to move pages around in/from them and completely
+                * change the linkage of the pages.
+                *
+                * And so we skip the rebalancing of these queues.
+                */
+               return;
+       }
        vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
                                                          vm_page_inactive_count +
                                                          vm_page_speculative_count);
index 748c427546b09e9f0ff73ee29c2bfa5cf0478fc1..860bde4d5314d1739ea8d9288f426ea122be1ab2 100644 (file)
@@ -134,6 +134,8 @@ int         speculative_age_index = 0;
 int            speculative_steal_index = 0;
 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
 
+boolean_t      hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
+                                                         * Updated and checked behind the vm_page_queues_lock. */
 
 __private_extern__ void                vm_page_init_lck_grp(void);
 
@@ -7043,6 +7045,10 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
        lck_mtx_lock(&vm_page_queue_free_lock);
     }
 
+    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+    hibernation_vmqueues_inspection = TRUE;
+
     m = (vm_page_t) hibernate_gobble_queue;
     while (m)
     {
@@ -7325,6 +7331,8 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
 
     if (preflight && will_discard) *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
 
+    hibernation_vmqueues_inspection = FALSE;
+
 #if MACH_ASSERT || DEBUG
     if (!preflight)
     {
index 3feaeba94d9851fff384d2a7f4cc813fb2c710c6..9c04f491a7bdafb88b11bcc01dac8a65162dbee4 100644 (file)
@@ -155,6 +155,68 @@ T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work",
        dispatch_main();
 }
 
+static void *
+spin_thread_self_counts(__unused void *arg)
+{
+       extern int thread_selfcounts(int, void *, size_t);
+       uint64_t counts[2] = { 0 };
+       while (true) {
+               (void)thread_selfcounts(1, &counts, sizeof(counts));
+       }
+}
+
+static void *
+spin_task_inspect(__unused void *arg)
+{
+       task_t task = mach_task_self();
+       uint64_t counts[2] = { 0 };
+       unsigned int size = 0;
+       while (true) {
+               size = (unsigned int)sizeof(counts);
+               (void)task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
+                               (task_inspect_info_t)&counts[0], &size);
+               /*
+                * Not realistic for a process to see count values with the high bit
+                * set, but kernel pointers will be that high.
+                */
+               T_QUIET; T_ASSERT_LT(counts[0], 1ULL << 63,
+                               "check for valid count entry 1");
+               T_QUIET; T_ASSERT_LT(counts[1], 1ULL << 63,
+                               "check for valid count entry 2");
+       }
+}
+
+T_DECL(core_fixed_stack_leak_race,
+               "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS")
+{
+       T_SETUPBEGIN;
+
+       int ncpus = 0;
+       T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.logicalcpu_max", &ncpus,
+                       &(size_t){ sizeof(ncpus) }, NULL, 0), "get number of CPUs");
+       T_QUIET; T_ASSERT_GT(ncpus, 0, "got non-zero number of CPUs");
+       pthread_t *threads = calloc((unsigned long)ncpus, sizeof(*threads));
+
+       T_QUIET; T_ASSERT_NOTNULL(threads, "allocated space for threads");
+
+       T_LOG("creating %d threads to attempt to race around task counts", ncpus);
+       /*
+        * Have half the threads hammering thread_self_counts and the other half
+        * trying to get an error to occur inside TASK_INSPECT_BASIC_COUNTS and see
+        * uninitialized kernel memory.
+        */
+       for (int i = 0; i < ncpus; i++) {
+               T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL,
+                               i & 1 ? spin_task_inspect : spin_thread_self_counts, NULL),
+                               NULL);
+       }
+
+       T_SETUPEND;
+
+       sleep(10);
+       T_PASS("ending test after 10 seconds");
+}
+
 static void
 perf_sysctl_deltas(const char *sysctl_name, const char *stat_name)
 {