git.saurik.com Git - apple/xnu.git/commitdiff
xnu-3789.60.24.tar.gz macos-10125 v3789.60.24
authorApple <opensource@apple.com>
Wed, 7 Jun 2017 22:31:55 +0000 (22:31 +0000)
committerApple <opensource@apple.com>
Wed, 7 Jun 2017 22:31:55 +0000 (22:31 +0000)
44 files changed:
bsd/conf/files
bsd/kern/imageboot.c
bsd/kern/kern_descrip.c
bsd/kern/kern_ktrace.c
bsd/kern/kern_memorystatus.c
bsd/kern/kern_symfile.c
bsd/kern/uipc_usrreq.c
bsd/libkern/libkern.h
bsd/libkern/url_encode.c [new file with mode: 0644]
bsd/net/network_agent.c
bsd/net/rtsock.c
bsd/netinet6/in6.c
bsd/netinet6/nd6.c
bsd/security/audit/audit_arg.c
bsd/vfs/kpi_vfs.c
bsd/vfs/vfs_attrlist.c
bsd/vfs/vfs_cluster.c
bsd/vfs/vfs_syscalls.c
bsd/vfs/vfs_vnops.c
bsd/vm/vm_compressor_backing_file.c
config/MasterVersion
iokit/IOKit/pwr_mgt/RootDomain.h
iokit/Kernel/IOHibernateIO.cpp
iokit/Kernel/IONVRAM.cpp
iokit/Kernel/IOPMrootDomain.cpp
osfmk/i386/commpage/commpage.h
osfmk/i386/commpage/commpage_asm.s
osfmk/i386/commpage/fifo_queues.s
osfmk/i386/cpu_capabilities.h
osfmk/i386/mp_desc.c
osfmk/i386/pcb_native.c
osfmk/i386/trap.c
osfmk/ipc/ipc_kmsg.c
osfmk/ipc/ipc_object.c
osfmk/ipc/ipc_port.h
osfmk/ipc/ipc_right.c
osfmk/ipc/mach_port.c
osfmk/kern/kern_stackshot.c
osfmk/kern/mk_timer.c
osfmk/kern/task.c
osfmk/kern/zalloc.c
osfmk/x86_64/idt64.s
osfmk/x86_64/locore.s
tools/tests/darwintests/mktimer_kobject.c [new file with mode: 0644]

index 89d1de965ccafa011da70713dd072bad7c4ff495..3eaa5a6a31cd7106a5b95883d98fd2b9d33b19f1 100644 (file)
@@ -151,6 +151,7 @@ bsd/libkern/skpc.c                  standard
 bsd/libkern/strsep.c                   standard
 bsd/libkern/bcd.c                      standard
 bsd/libkern/memchr.c                   standard
+bsd/libkern/url_encode.c               standard
 
 bsd/vfs/vfs_attrlist.c                 standard
 bsd/vfs/vfs_bio.c                      standard
index 8f492566b87fa4aedcd1698ed7f48db38ff6adc1..6ba54a69fa4da9c8549d0555249f69bda4c1c0e8 100644 (file)
@@ -872,11 +872,20 @@ imageboot_setup_new()
 #endif
 
        if (auth_root) {
-               const char *path = root_path;
+               /* Copy the path to use locally */
+               char *path_alloc = kalloc(MAXPATHLEN);
+               if (path_alloc == NULL) {
+                       panic("imageboot path allocation failed\n");
+               }
+
+               char *path = path_alloc;
+               strlcpy(path, root_path, MAXPATHLEN);
+
                size_t len = strlen(kIBFilePrefix);
                if (strncmp(kIBFilePrefix, path, len) == 0) {
-                       /* remove the file:// prefix */
+                       /* it's a URL - remove the file:// prefix and percent-decode */
                        path += len;
+                       url_decode(path);
                }
 
                AUTHDBG("authenticating root image at %s", path);
@@ -885,6 +894,8 @@ imageboot_setup_new()
                        panic("root image authentication failed (err = %d)\n", error);
                }
                AUTHDBG("successfully authenticated %s", path);
+
+               kfree_safe(path_alloc);
        }
 
        error = imageboot_mount_image(root_path, height);
index 8630c7cd4eb653aaeaccbc7f4ba09ab064b83803..b93ab3bbfd195c9be69b04c41291f2f7b7ba1631 100644 (file)
@@ -2547,7 +2547,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval)
 
                if ( (error = vnode_getwithref(vp)) == 0 ) {
 #define STK_PARAMS 128
-                       char stkbuf[STK_PARAMS];
+                       char stkbuf[STK_PARAMS] = {0};
                        unsigned int size;
                        caddr_t data, memp;
                        /*
index 9295b5de427188c17b694faf7ec97a0dd1bdf445..af4573ef80e8f5169c38d64d281dc5bb303e79b1 100644 (file)
@@ -135,19 +135,9 @@ static void ktrace_set_invalid_owning_pid(void);
  */
 int ktrace_root_set_owner_allowed = 0;
 
-void
-ktrace_reset(uint32_t reset_mask)
+static void
+ktrace_reset_internal(uint32_t reset_mask)
 {
-       lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
-       assert(reset_mask != 0);
-
-       if (ktrace_active_mask == 0) {
-               if (!ktrace_keep_ownership_on_reset) {
-                       assert(ktrace_state == KTRACE_STATE_OFF);
-               }
-               return;
-       }
-
        if (!ktrace_keep_ownership_on_reset) {
                ktrace_active_mask &= ~reset_mask;
        }
@@ -172,6 +162,21 @@ ktrace_reset(uint32_t reset_mask)
        }
 }
 
+void
+ktrace_reset(uint32_t reset_mask)
+{
+       lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);
+
+       if (ktrace_active_mask == 0) {
+               if (!ktrace_keep_ownership_on_reset) {
+                       assert(ktrace_state == KTRACE_STATE_OFF);
+               }
+               return;
+       }
+
+       ktrace_reset_internal(reset_mask);
+}
+
 static void
 ktrace_promote_background(void)
 {
@@ -365,7 +370,7 @@ ktrace_set_invalid_owning_pid(void)
 {
        if (ktrace_keep_ownership_on_reset) {
                ktrace_keep_ownership_on_reset = FALSE;
-               ktrace_reset(ktrace_active_mask);
+               ktrace_reset_internal(ktrace_active_mask);
        }
 }
 
index 0817ac1fa14b7c3e8c03d253e6694c77f2113173..46b037abadb862ebff4af4820f94ca06d69a8abf 100644 (file)
@@ -635,6 +635,14 @@ unsigned int memorystatus_frozen_count = 0;
 unsigned int memorystatus_suspended_count = 0;
 unsigned int memorystatus_policy_more_free_offset_pages = 0;
 
+#if CONFIG_JETSAM
+#if DEVELOPMENT || DEBUG
+SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_available_pages, 0, "");
+#else
+SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages, CTLFLAG_RD| CTLFLAG_MASKED | CTLFLAG_LOCKED, &memorystatus_available_pages, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+#endif /* CONFIG_JETSAM */
+
 /*
  * We use this flag to signal if we have any HWM offenders
  * on the system. This way we can reduce the number of wakeups
@@ -983,7 +991,6 @@ SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_send, CTLTYPE_QUAD|CTLFLAG
 
 SYSCTL_INT(_kern, OID_AUTO, memorystatus_idle_snapshot, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_idle_snapshot, 0, "");
 
-SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_available_pages, 0, "");
 SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical, CTLFLAG_RD|CTLFLAG_LOCKED, &memorystatus_available_pages_critical, 0, "");
 SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_base, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_available_pages_critical_base, 0, "");
 SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical_idle_offset, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_available_pages_critical_idle_offset, 0, "");
index a612b23fc41f9a756be03564769a00634305e5d7..f1586ef1f71e457eada85246404a6476a13cd24f 100644 (file)
@@ -251,7 +251,7 @@ kern_open_file_for_direct_io(const char * name,
 
     bzero(ref, sizeof(*ref));
     p = kernproc;
-    ref->ctx = vfs_context_create(vfs_context_kernel());
+    ref->ctx = vfs_context_kernel();
 
     fmode  = (create_file) ? (O_CREAT | FWRITE) : FWRITE;
     cmode =  S_IRUSR | S_IWUSR;
@@ -261,7 +261,10 @@ kern_open_file_for_direct_io(const char * name,
     VATTR_SET(&va, va_mode, cmode);
     VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
     VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D);
-    if ((error = vn_open_auth(&nd, &fmode, &va))) goto out;
+    if ((error = vn_open_auth(&nd, &fmode, &va))) {
+       kprintf("vn_open_auth(fmode: %d, cmode: %d) failed with error: %d\n", fmode, cmode, error);
+       goto out;
+    }
 
     ref->vp = nd.ni_vp;
     if (ref->vp->v_type == VREG)
@@ -273,7 +276,10 @@ kern_open_file_for_direct_io(const char * name,
 
     if (write_file_addr && write_file_len)
     {
-       if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, 0))) goto out;
+       if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) {
+               kprintf("kern_write_file() failed with error: %d\n", error);
+               goto out;
+       }
     }
 
     VATTR_INIT(&va);
@@ -508,13 +514,13 @@ kern_open_file_for_direct_io(const char * name,
     {
         vnode_close(ref->vp, FWRITE, ref->ctx);
         ref->vp = NULLVP;
-       vfs_context_rele(ref->ctx);
        ref->ctx = NULL;
     }
 
 out:
     printf("kern_open_file_for_direct_io(%p, %d)\n", ref, error);
 
+
     if (error && locked)
     {
         p1 = &device;
@@ -529,7 +535,7 @@ out:
            vnode_close(ref->vp, FWRITE, ref->ctx);
            ref->vp = NULLVP;
        }
-       vfs_context_rele(ref->ctx);
+       ref->ctx = NULL;
        kfree(ref, sizeof(struct kern_direct_file_io_ref_t));
        ref = NULL;
     }
@@ -612,18 +618,17 @@ kern_close_file_for_direct_io(struct kern_direct_file_io_ref_t * ref,
 
         if (addr && write_length)
         {
-            (void) kern_write_file(ref, write_offset, addr, write_length, 0);
+            (void) kern_write_file(ref, write_offset, addr, write_length, IO_SKIP_ENCRYPTION);
         }
 
         error = vnode_close(ref->vp, FWRITE, ref->ctx);
 
         ref->vp = NULLVP;
         kprintf("vnode_close(%d)\n", error);
+
     }
-    if (ref->ctx)
-    {
-       vfs_context_rele(ref->ctx);
-       ref->ctx = NULL;
-    }
+
+    ref->ctx = NULL;
+
     kfree(ref, sizeof(struct kern_direct_file_io_ref_t));
 }
index 995f96a138b7d284fcfcab73fc5302a6ed9b1f17..4f31897bdeec0a9fb41d2c10351bfba9fc862d1c 100644 (file)
@@ -1892,13 +1892,13 @@ unp_externalize(struct mbuf *rights)
        struct fileglob **rp = (struct fileglob **)(cm + 1);
        int *fds = (int *)(cm + 1);
        struct fileproc *fp;
-       struct fileglob **fgl;
+       struct fileproc **fileproc_l;
        int newfds = (cm->cmsg_len - sizeof (*cm)) / sizeof (int);
        int f, error = 0;
 
-       MALLOC(fgl, struct fileglob **, newfds * sizeof (struct fileglob *),
-               M_TEMP, M_WAITOK);
-       if (fgl == NULL) {
+       MALLOC(fileproc_l, struct fileproc **,
+           newfds * sizeof (struct fileproc *), M_TEMP, M_WAITOK);
+       if (fileproc_l == NULL) {
                error = ENOMEM;
                goto discard;
        }
@@ -1942,27 +1942,40 @@ unp_externalize(struct mbuf *rights)
                        panic("unp_externalize: MALLOC_ZONE");
                fp->f_iocount = 0;
                fp->f_fglob = rp[i];
-               if (fg_removeuipc_mark(rp[i]))
-                       fgl[i] = rp[i];
-               else
-                       fgl[i] = NULL;
+               if (fg_removeuipc_mark(rp[i])) {
+
+                       /*
+                        * Take an iocount on the fp for completing the
+                        * removal from the global msg queue
+                        */
+                       fp->f_iocount++;
+                       fileproc_l[i] = fp;
+               } else {
+                       fileproc_l[i] = NULL;
+               }
                procfdtbl_releasefd(p, f, fp);
                fds[i] = f;
        }
        proc_fdunlock(p);
 
        for (i = 0; i < newfds; i++) {
-               if (fgl[i] != NULL) {
-                       VERIFY(fgl[i]->fg_lflags & FG_RMMSGQ);
-                       fg_removeuipc(fgl[i]);
+               if (fileproc_l[i] != NULL) {
+                       VERIFY(fileproc_l[i]->f_fglob != NULL &&
+                           (fileproc_l[i]->f_fglob->fg_lflags & FG_RMMSGQ));
+                       VERIFY(fds[i] > 0);
+                       fg_removeuipc(fileproc_l[i]->f_fglob);
+
+                       /* Drop the iocount */
+                       fp_drop(p, fds[i], fileproc_l[i], 0);
+                       fileproc_l[i] = NULL;
                }
-               if (fds[i])
+               if (fds[i] != 0)
                        (void) OSAddAtomic(-1, &unp_rights);
        }
 
 discard:
-       if (fgl)
-               FREE(fgl, M_TEMP);
+       if (fileproc_l != NULL)
+               FREE(fileproc_l, M_TEMP);
        if (error) {
                for (i = 0; i < newfds; i++) {
                        unp_discard(*rp, p);
index 4e660600792e516ab39cbe6bc8de4b2b083a69be..bfef07c6a89bb09861c4f7ac4cd20f6019896358 100644 (file)
@@ -149,6 +149,7 @@ extern quad_t       strtoq(const char *, char **, int);
 extern u_quad_t strtouq(const char *, char **, int);
 extern char    *strsep(char **, const char *);
 extern void    *memchr(const void *, int, size_t);
+extern void    url_decode(char *str);
 
 int    snprintf(char *, size_t, const char *, ...) __printflike(3,4);
 
diff --git a/bsd/libkern/url_encode.c b/bsd/libkern/url_encode.c
new file mode 100644 (file)
index 0000000..28534ad
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <libkern/libkern.h>
+
+static int
+hex2int(int c)
+{
+       if (c >= '0' && c <= '9') {
+               return c - '0';
+       } else if (c >= 'A' && c <= 'F') {
+               return c - 'A' + 10;
+       } else if (c >= 'a' && c <= 'f') {
+               return c - 'a' + 10;
+       }
+       return 0;
+}
+
+static bool
+isprint(int ch)
+{
+       return ch >= 0x20 && ch <= 0x7e;
+}
+
+/*
+ * In-place decode of URL percent-encoded str
+ */
+void
+url_decode(char *str)
+{
+       if (!str) {
+               return;
+       }
+
+       while (*str) {
+               if (*str == '%') {
+                       char c = 0;
+                       char *esc = str++; /* remember the start of the escape sequence */
+
+                       if (*str) {
+                               c += hex2int(*str++);
+                       }
+                       if (*str) {
+                               c = (c << 4) + hex2int(*str++);
+                       }
+
+                       if (isprint(c)) {
+                               /* overwrite the '%' with the new char, and bump the rest of the
+                                * string down a few characters */
+                               *esc++ = c;
+                               str = memmove(esc, str, strlen(str)+1);
+                       } else {
+                               str++;
+                       }
+
+               } else {
+                       str++;
+               }
+       }
+}
index 14c14c66ebf38a1c7970a01a7df9e7037400def1..a51eacfb88cd331a0ac1d1da98d0d2a359bacd86 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2014, 2016, 2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -746,12 +746,17 @@ netagent_handle_register_setopt(struct netagent_session *session, u_int8_t *payl
 
        data_size = register_netagent->netagent_data_size;
        if (data_size < 0 || data_size > NETAGENT_MAX_DATA_SIZE) {
-               NETAGENTLOG(LOG_ERR, "Register message size could not be read, data_size %d",
-                                       data_size);
+               NETAGENTLOG(LOG_ERR, "Register message size could not be read, data_size %d", data_size);
                response_error = EINVAL;
                goto done;
        }
 
+       if (payload_length != (sizeof(struct netagent) + data_size)) {
+               NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%u != %u)", (sizeof(struct netagent) + data_size), payload_length);
+               response_error = EINVAL;
+               goto done;
+    }
+
        MALLOC(new_wrapper, struct netagent_wrapper *, sizeof(*new_wrapper) + data_size, M_NETAGENT, M_WAITOK);
        if (new_wrapper == NULL) {
                NETAGENTLOG0(LOG_ERR, "Failed to allocate agent");
@@ -1025,6 +1030,12 @@ netagent_handle_update_setopt(struct netagent_session *session, u_int8_t *payloa
                goto done;
        }
 
+       if (payload_length != (sizeof(struct netagent) + data_size)) {
+               NETAGENTLOG(LOG_ERR, "Mismatch between data size and payload length (%u != %u)", (sizeof(struct netagent) + data_size), payload_length);
+               response_error = EINVAL;
+               goto done;
+    }
+
        MALLOC(new_wrapper, struct netagent_wrapper *, sizeof(*new_wrapper) + data_size, M_NETAGENT, M_WAITOK);
        if (new_wrapper == NULL) {
                NETAGENTLOG0(LOG_ERR, "Failed to allocate agent");
index c729343d2b92197c65f3d69eead084a9ba213515..f666398774a3717b6c10c7f407e1a9e3f5b640d7 100644 (file)
@@ -420,6 +420,22 @@ route_output(struct mbuf *m, struct socket *so)
                        senderr(EINVAL);
                ifscope = rtm->rtm_index;
        }
+       /*
+        * Block changes on INTCOPROC interfaces.
+        */
+       if (ifscope) {
+               unsigned int intcoproc_scope = 0;
+               ifnet_head_lock_shared();
+               TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
+                       if (IFNET_IS_INTCOPROC(ifp)) {
+                               intcoproc_scope = ifp->if_index;
+                               break;
+                       }
+               }
+               ifnet_head_done();
+               if (intcoproc_scope == ifscope && current_proc()->p_pid != 0)
+                       senderr(EINVAL);
+       }
 
        /*
         * RTF_PROXY can only be set internally from within the kernel.
index f5e206f03275a3741557b413c876a0e06360fbb7..976839dd160cc1cbfb96c0e2583e303460e8d95d 100644 (file)
@@ -1791,6 +1791,12 @@ in6_to_kamescope(struct sockaddr_in6 *sin6, struct ifnet *ifp)
        return (0);
 }
 
+/*
+ * When the address is being configured we should clear out certain flags
+ * coming in from the caller.
+ */
+#define        IN6_IFF_CLR_ADDR_FLAG_MASK      (~(IN6_IFF_DEPRECATED | IN6_IFF_DETACHED | IN6_IFF_DUPLICATED))
+
 static int
 in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
 {
@@ -1813,23 +1819,35 @@ in6_ifaupdate_aux(struct in6_ifaddr *ia, struct ifnet *ifp, int ifaupflags)
            ia->ia6_flags,
            ifaupflags));
 
+       /*
+        * Just to be safe, always clear certain flags when address
+        * is being configured
+        */
+       ia->ia6_flags &= IN6_IFF_CLR_ADDR_FLAG_MASK;
+
        /*
         * Mark the address as tentative before joining multicast addresses,
         * so that corresponding MLD responses would not have a tentative
         * source address.
         */
-       ia->ia6_flags &= ~IN6_IFF_DUPLICATED;   /* safety */
-       if (in6if_do_dad(ifp))
+       if (in6if_do_dad(ifp)) {
                in6_ifaddr_set_dadprogress(ia);
-
-       /*
-        * Do not delay sending neighbor solicitations when using optimistic
-        * duplicate address detection, c.f. RFC 4429.
-        */
-       if (ia->ia6_flags & IN6_IFF_OPTIMISTIC)
-               ifaupflags &= ~IN6_IFAUPDATE_DADDELAY;
-       else
-               ifaupflags |= IN6_IFAUPDATE_DADDELAY;
+               /*
+                * Do not delay sending neighbor solicitations when using optimistic
+                * duplicate address detection, c.f. RFC 4429.
+                */
+               if (ia->ia6_flags & IN6_IFF_OPTIMISTIC)
+                       ifaupflags &= ~IN6_IFAUPDATE_DADDELAY;
+               else
+                       ifaupflags |= IN6_IFAUPDATE_DADDELAY;
+       } else {
+               /*
+                * If the interface has been marked to not perform
+                * DAD, make sure to reset DAD in progress flags
+                * that may come in from the caller.
+                */
+               ia->ia6_flags &= ~IN6_IFF_DADPROGRESS;
+       }
 
        /* Join necessary multicast groups */
        if ((ifp->if_flags & IFF_MULTICAST) != 0) {
index a9d4463151dfc0473126e54b43e293c59d6e68da..2aef3803eaa4f31f55e3b7bfdbcf563ae00381dd 100644 (file)
@@ -4427,10 +4427,10 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia)
 
        if (optdad) {
                if ((optdad & ND6_OPTIMISTIC_DAD_LINKLOCAL) &&
-                               IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
+                   IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr))
                        flags = IN6_IFF_OPTIMISTIC;
                else if ((optdad & ND6_OPTIMISTIC_DAD_AUTOCONF) &&
-                               (ia->ia6_flags & IN6_IFF_AUTOCONF)) {
+                   (ia->ia6_flags & IN6_IFF_AUTOCONF)) {
                        if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
                                if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
                                        flags = IN6_IFF_OPTIMISTIC;
@@ -4452,7 +4452,7 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia)
                                flags = IN6_IFF_OPTIMISTIC;
                        }
                } else if ((optdad & ND6_OPTIMISTIC_DAD_DYNAMIC) &&
-                               (ia->ia6_flags & IN6_IFF_DYNAMIC)) {
+                   (ia->ia6_flags & IN6_IFF_DYNAMIC)) {
                        if (ia->ia6_flags & IN6_IFF_TEMPORARY) {
                                if (optdad & ND6_OPTIMISTIC_DAD_TEMPORARY)
                                        flags = IN6_IFF_OPTIMISTIC;
@@ -4460,15 +4460,15 @@ in6_ifaddr_set_dadprogress(struct in6_ifaddr *ia)
                                flags = IN6_IFF_OPTIMISTIC;
                        }
                } else if ((optdad & ND6_OPTIMISTIC_DAD_MANUAL) &&
-                               (ia->ia6_flags & IN6_IFF_OPTIMISTIC)) {
+                   (ia->ia6_flags & IN6_IFF_OPTIMISTIC)) {
                        /*
                         * rdar://17483438
                         * Bypass tentative for address assignments
                         * not covered above (e.g. manual) upon request
                         */
                        if (!IN6_IS_ADDR_LINKLOCAL(&ia->ia_addr.sin6_addr) &&
-                                       !(ia->ia6_flags & IN6_IFF_AUTOCONF) &&
-                                       !(ia->ia6_flags & IN6_IFF_DYNAMIC))
+                           !(ia->ia6_flags & IN6_IFF_AUTOCONF) &&
+                           !(ia->ia6_flags & IN6_IFF_DYNAMIC))
                                flags = IN6_IFF_OPTIMISTIC;
                }
        }
index 71f3e0716a5e1d27cb6bb9f8a9532ca1c2f221fb..7e338fd2ba77ba029dab393f4e64cc0502bdfb4b 100644 (file)
@@ -354,8 +354,9 @@ void
 audit_arg_sockaddr(struct kaudit_record *ar, struct vnode *cwd_vp,
     struct sockaddr *sa)
 {
+       char path[SOCK_MAXADDRLEN - offsetof(struct sockaddr_un, sun_path) + 1] = "";
        struct sockaddr_un *sun;
-       char path[SOCK_MAXADDRLEN - offsetof(struct sockaddr_un, sun_path) + 1];
+       ssize_t namelen;
 
        KASSERT(sa != NULL, ("audit_arg_sockaddr: sa == NULL"));
 
@@ -378,11 +379,13 @@ audit_arg_sockaddr(struct kaudit_record *ar, struct vnode *cwd_vp,
 
        case AF_UNIX:
                sun = (struct sockaddr_un *)sa;
-               if (sun->sun_len > offsetof(struct sockaddr_un, sun_path)) {
+               namelen = sun->sun_len - offsetof(struct sockaddr_un, sun_path);
+               if (namelen > 0 && (size_t)namelen < sizeof(path)) {
                        /*
-                        * Make sure the path is NULL-terminated
+                        * Make sure the path is NUL-terminated
                         */
-                       strlcpy(path, sun->sun_path, sizeof(path));
+                       bcopy(sun->sun_path, path, namelen);
+                       path[namelen] = 0;
                        audit_arg_upath(ar, cwd_vp, path, ARG_UPATH1);
                }
                ARG_SET_VALID(ar, ARG_SADDRUNIX);
index 8e9e760149fbffc382fa3ea07f3edbc2505aa7a8..dd3560fedcc3733e67cdf4895b9ed65f2f9d6bdc 100644 (file)
@@ -2566,6 +2566,19 @@ vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
                goto out;
        }
 
+#if DEVELOPMENT || DEBUG
+       /*
+        * XXX VSWAP: Check for entitlements or special flag here
+        * so we can restrict access appropriately.
+        */
+#else /* DEVELOPMENT || DEBUG */
+
+       if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
+               error = EPERM;
+               goto out;
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
 #if NAMEDSTREAMS
        /* For streams, va_data_size is the only setable attribute. */
        if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
index 38df5a58daa33e69a9b51adddc7b4671a605c6e4..e9f323164e4c64d3b7b91d804fe9046d7d462d0d 100644 (file)
@@ -3912,6 +3912,19 @@ setattrlist_internal(vnode_t vp, struct setattrlist_args *uap, proc_t p, vfs_con
                goto out;
        }
 
+#if DEVELOPMENT || DEBUG
+       /*
+        * XXX VSWAP: Check for entitlements or special flag here
+        * so we can restrict access appropriately.
+        */
+#else /* DEVELOPMENT || DEBUG */
+
+       if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
+               error = EPERM;
+               goto out;
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
        VFS_DEBUG(ctx, vp, "%p  ATTRLIST - %s set common %08x vol %08x file %08x dir %08x fork %08x %sfollow on '%s'",
            vp, p->p_comm, al.commonattr, al.volattr, al.fileattr, al.dirattr, al.forkattr,
            (uap->options & FSOPT_NOFOLLOW) ? "no":"", vp->v_name);
index d6b22ff84909a39c05b048607d93c4e4bc3468e6..70eecc5ff44759d87818dc61b6ea475697195ea0 100644 (file)
@@ -1360,47 +1360,69 @@ cluster_io(vnode_t vp, upl_t upl, vm_offset_t upl_offset, off_t f_offset, int no
                                pageout_flags |= UPL_NOCOMMIT;
 
                        if (cbp_head) {
-                               buf_t last_cbp;
+                               buf_t prev_cbp;
+                               int   bytes_in_last_page;
 
                                /*
                                * first we have to wait for the current outstanding I/Os
                                 * to complete... EOT hasn't been set yet on this transaction
-                                * so the pages won't be released just because all of the current
-                                * I/O linked to this transaction has completed...
+                                * so the pages won't be released
                                 */
                                cluster_wait_IO(cbp_head, (flags & CL_ASYNC));
 
-                               /*
-                                * we've got a transcation that
-                                * includes the page we're about to push out through vnode_pageout...
-                                * find the last bp in the list which will be the one that
-                                * includes the head of this page and round it's iosize down
-                                * to a page boundary...
-                                */
-                                for (last_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next)
-                                       last_cbp = cbp;
-
-                               cbp->b_bcount &= ~PAGE_MASK;
-
-                               if (cbp->b_bcount == 0) {
-                                       /*
-                                        * this buf no longer has any I/O associated with it
+                               bytes_in_last_page = cbp_head->b_uploffset & PAGE_MASK;
+                               for (cbp = cbp_head; cbp; cbp = cbp->b_trans_next)
+                                       bytes_in_last_page += cbp->b_bcount;
+                               bytes_in_last_page &= PAGE_MASK;
+                               
+                               while (bytes_in_last_page) {
+                                       /*
+                                        * we've got a transaction that
+                                        * includes the page we're about to push out through vnode_pageout...
+                                        * find the bp's in the list which intersect this page and either
+                                        * remove them entirely from the transaction (there could be multiple bp's), or
+                                        * round its iosize down to the page boundary (there can only be one)...
+                                        *
+                                        * find the last bp in the list and act on it
                                         */
-                                       free_io_buf(cbp);
+                                       for (prev_cbp = cbp = cbp_head; cbp->b_trans_next; cbp = cbp->b_trans_next)
+                                               prev_cbp = cbp;
 
-                                       if (cbp == cbp_head) {
-                                               /*
-                                                * the buf we just freed was the only buf in
-                                                * this transaction... so there's no I/O to do
+                                       if (bytes_in_last_page >= cbp->b_bcount) {
+                                               /*
+                                                * this buf no longer has any I/O associated with it
                                                 */
-                                               cbp_head = NULL;
+                                               bytes_in_last_page -= cbp->b_bcount;
+                                               cbp->b_bcount = 0;
+
+                                               free_io_buf(cbp);
+
+                                               if (cbp == cbp_head) {
+                                                       assert(bytes_in_last_page == 0);
+                                                       /*
+                                                        * the buf we just freed was the only buf in
+                                                        * this transaction... so there's no I/O to do
+                                                        */
+                                                       cbp_head = NULL;
+                                                       cbp_tail = NULL;
+                                               } else {
+                                                       /*
+                                                        * remove the buf we just freed from
+                                                        * the transaction list
+                                                        */
+                                                       prev_cbp->b_trans_next = NULL;
+                                                       cbp_tail = prev_cbp;
+                                               }
                                        } else {
-                                               /*
-                                                * remove the buf we just freed from
-                                                * the transaction list
+                                               /*
+                                                * this is the last bp that has I/O
+                                                * intersecting the page of interest
+                                                * only some of the I/O is in the intersection
+                                                * so clip the size but keep it in the transaction list
                                                 */
-                                               last_cbp->b_trans_next = NULL;
-                                               cbp_tail = last_cbp;
+                                               cbp->b_bcount -= bytes_in_last_page;
+                                               cbp_tail = cbp;
+                                               bytes_in_last_page = 0;
                                        }
                                }
                                if (cbp_head) {
index 70f798e860bb3effc1b16050ffef63e3159e89de..ee016dac6405997b7736adf85b243f4074e85bc9 100644 (file)
@@ -3438,6 +3438,20 @@ open1(vfs_context_t ctx, struct nameidata *ndp, int uflags,
                fp->f_fglob->fg_flag |= FHASLOCK;
        }
 
+#if DEVELOPMENT || DEBUG
+       /*
+        * XXX VSWAP: Check for entitlements or special flag here
+        * so we can restrict access appropriately.
+        */
+#else /* DEVELOPMENT || DEBUG */
+
+       if (vnode_isswap(vp) && (flags & (FWRITE | O_TRUNC)) && (ctx != vfs_context_kernel())) {
+               /* block attempt to write/truncate swapfile */
+               error = EPERM;
+               goto bad;
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
        /* try to truncate by setting the size attribute */
        if ((flags & O_TRUNC) && ((error = vnode_setsize(vp, (off_t)0, 0, ctx)) != 0))
                goto bad;
@@ -4636,6 +4650,19 @@ continue_lookup:
                        error = EBUSY;
                }
 
+#if DEVELOPMENT || DEBUG
+       /*
+        * XXX VSWAP: Check for entitlements or special flag here
+        * so we can restrict access appropriately.
+        */
+#else /* DEVELOPMENT || DEBUG */
+
+               if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
+                       error = EPERM;
+                       goto out;
+               }
+#endif /* DEVELOPMENT || DEBUG */
+
                if (!batched) {
                        error = vn_authorize_unlink(dvp, vp, cnp, ctx, NULL);
                        if (error) {
@@ -7166,6 +7193,24 @@ continue_lookup:
                tvp  = tond->ni_vp;
        }
 
+#if DEVELOPMENT || DEBUG
+       /*
+        * XXX VSWAP: Check for entitlements or special flag here
+        * so we can restrict access appropriately.
+        */
+#else /* DEVELOPMENT || DEBUG */
+
+       if (fromnd->ni_vp && vnode_isswap(fromnd->ni_vp) && (ctx != vfs_context_kernel())) {
+               error = EPERM;
+               goto out1;
+       }
+
+       if (tond->ni_vp && vnode_isswap(tond->ni_vp) && (ctx != vfs_context_kernel())) {
+               error = EPERM;
+               goto out1;
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
        if (!tvp && ISSET(flags, VFS_RENAME_SWAP)) {
                error = ENOENT;
                goto out1;
@@ -7915,6 +7960,19 @@ continue_lookup:
                                goto out;
                        }
 
+#if DEVELOPMENT || DEBUG
+                       /*
+                        * XXX VSWAP: Check for entitlements or special flag here
+                        * so we can restrict access appropriately.
+                        */
+#else /* DEVELOPMENT || DEBUG */
+
+                       if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
+                               error = EPERM;
+                               goto out;
+                       }
+#endif /* DEVELOPMENT || DEBUG */
+
                        /*
                         * Removed a check here; we used to abort if vp's vid
                         * was not the same as what we'd seen the last time around.
@@ -9870,6 +9928,17 @@ fsctl_internal(proc_t p, vnode_t *arg_vp, u_long cmd, user_addr_t udata, u_long
                /* Round up to MAXPATHLEN regardless of user input */
                size = MAXPATHLEN;
        }
+       else if (vp->v_tag == VT_CIFS) {
+               /*
+                * XXX Until fsctl's length encoding can be
+                * XXX fixed properly.
+                */
+               if (IOCBASECMD(cmd) == _IOWR('z', 19, 0) && size < 1432) {
+                       size = 1432; /* sizeof(struct UniqueSMBShareID) */
+               } else if (IOCBASECMD(cmd) == _IOWR('z', 28, 0) && size < 308) {
+                       size = 308; /* sizeof(struct smbDebugTestPB) */
+               }
+       }
 
        if (size > sizeof (stkbuf)) {
                if ((memp = (caddr_t)kalloc(size)) == 0) return ENOMEM;
index 27f61a448b3f6438eaf3f664e5472d94aca98b6b..7a1a2b21618b5aa00b2592c1b86042c64ba0d22a 100644 (file)
@@ -567,6 +567,19 @@ continue_create_lookup:
                panic("Haven't cleaned up adequately in vn_open_auth()");
        }
 
+#if DEVELOPMENT || DEBUG
+       /*
+        * XXX VSWAP: Check for entitlements or special flag here
+        * so we can restrict access appropriately.
+        */
+#else /* DEVELOPMENT || DEBUG */
+
+       if (vnode_isswap(vp) && (fmode & (FWRITE | O_TRUNC)) && (ctx != vfs_context_kernel())) {
+               error = EPERM;
+               goto bad;
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
        /*
         * Expect to use this code for filesystems without compound VNOPs, for the root 
         * of a filesystem, which can't be "looked up" in the sense of VNOP_LOOKUP(),
@@ -922,7 +935,21 @@ vn_rdwr_64(
                                error = VNOP_READ(vp, auio, ioflg, &context);
                        }
                } else {
+
+#if DEVELOPMENT || DEBUG
+                       /*
+                        * XXX VSWAP: Check for entitlements or special flag here
+                        * so we can restrict access appropriately.
+                        */
                        error = VNOP_WRITE(vp, auio, ioflg, &context);
+#else /* DEVELOPMENT || DEBUG */
+
+                       if (vnode_isswap(vp) && ((ioflg & (IO_SWAP_DISPATCH | IO_SKIP_ENCRYPTION)) == 0)) {
+                               error = EPERM;
+                       } else {
+                               error = VNOP_WRITE(vp, auio, ioflg, &context);
+                       }
+#endif /* DEVELOPMENT || DEBUG */
                }
        }
 
@@ -1017,11 +1044,13 @@ vn_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
        count = uio_resid(uio);
 
        if (vnode_isswap(vp) && !(IO_SKIP_ENCRYPTION & ioflag)) {
+
                /* special case for swap files */
                error = vn_read_swapfile(vp, uio);
        } else {
                error = VNOP_READ(vp, uio, ioflag, ctx);
        }
+
        if ((flags & FOF_OFFSET) == 0) {
                fp->f_fglob->fg_offset += count - uio_resid(uio);
                if (offset_locked) {
@@ -1056,6 +1085,21 @@ vn_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
                return(error);
        }
 
+#if DEVELOPMENT || DEBUG
+       /*
+        * XXX VSWAP: Check for entitlements or special flag here
+        * so we can restrict access appropriately.
+        */
+#else /* DEVELOPMENT || DEBUG */
+
+       if (vnode_isswap(vp)) {
+               (void)vnode_put(vp);
+               error = EPERM;
+               return (error);
+       }
+#endif /* DEVELOPMENT || DEBUG */
+
+
 #if CONFIG_MACF
        error = mac_vnode_check_write(ctx, vfs_context_ucred(ctx), vp);
        if (error) {
index c0014b606d06c86a21ca21ba67c19376a7638142..9a83330fb5be6328095e4153f298a0b2f27b9b67 100644 (file)
@@ -53,7 +53,7 @@ void
 vm_swapfile_open(const char *path, vnode_t *vp)
 {
        int error = 0;
-       vfs_context_t   ctx = vfs_context_current();
+       vfs_context_t   ctx = vfs_context_kernel();
 
        if ((error = vnode_open(path, (O_CREAT | O_TRUNC | FREAD | FWRITE), S_IRUSR | S_IWUSR, 0, vp, ctx))) {
                printf("Failed to open swap file %d\n", error);
@@ -93,7 +93,7 @@ int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int);
 void
 vm_swapfile_close(uint64_t path_addr, vnode_t vp)
 {
-       vfs_context_t context = vfs_context_current();
+       vfs_context_t context = vfs_context_kernel();
        int error;
 
        vnode_getwithref(vp);
@@ -117,7 +117,7 @@ vm_swapfile_preallocate(vnode_t vp, uint64_t *size, boolean_t *pin)
        vfs_context_t   ctx = NULL;
 
 
-       ctx = vfs_context_current();
+       ctx = vfs_context_kernel();
 
        error = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
 
index cdb274063f9458d9f57c55e7158dc0248e498444..89b86ecd6d0767e8389e135031fe88aa7fed69f6 100644 (file)
@@ -1,4 +1,4 @@
-16.5.0
+16.6.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
index 2a6f8dd011dc2edcaf9c9cec6ec927db3b93ddf9..c417c9a3487f06db0dc1f80629fb45a80015febf 100644 (file)
@@ -545,6 +545,7 @@ public:
                     uint32_t *  hibernateMode,
                     uint32_t *  hibernateFreeRatio,
                     uint32_t *  hibernateFreeTime );
+    bool        mustHibernate( void );
 #endif
     void        takeStackshot(bool restart, bool isOSXWatchdog, bool isSpinDump);
     void        sleepWakeDebugTrig(bool restart);
index 8a867abce75717e6f4813331a3ae28776a8322b9..953c6d44530a4a6fed9776f99cae9509ae474b50 100644 (file)
@@ -1556,7 +1556,9 @@ hibernate_write_image(void)
     
         HIBLOG("IOHibernatePollerOpen, ml_get_interrupts_enabled %d\n", 
                 ml_get_interrupts_enabled());
-        err = IOPolledFilePollersOpen(vars->fileVars, kIOPolledBeforeSleepState, true);
+        err = IOPolledFilePollersOpen(vars->fileVars, kIOPolledBeforeSleepState,
+                // abortable if not low battery
+                !IOService::getPMRootDomain()->mustHibernate());
         HIBLOG("IOHibernatePollerOpen(%x)\n", err);
         pollerOpen = (kIOReturnSuccess == err);
         if (!pollerOpen)
index ebaa3087d4a4370b5113738c244678eaa8282bde..6a819b459c7fab720e4a6ea938d05c0399ae254c 100644 (file)
@@ -94,7 +94,7 @@ void IODTNVRAM::initProxyData(void)
       data = OSDynamicCast(OSData, prop);
       if (data != 0) {
         bytes = data->getBytesNoCopy();
-        if (bytes != 0) {
+        if ((bytes != 0) && (data->getLength() <= kIODTNVRAMImageSize)) {
           bcopy(bytes, _nvramImage, data->getLength());
           initNVRAMImage();
           _isProxied = true;
index 90ece1e80343ebfe4b1e2103b4f9889efcfffa35..103a073c3243f580d985a476ac7f78bd6aef9377 100644 (file)
@@ -6245,6 +6245,19 @@ bool IOPMrootDomain::checkSystemCanSustainFullWake( void )
     return true;
 }
 
+//******************************************************************************
+// mustHibernate
+//******************************************************************************
+
+#if HIBERNATION
+
+bool IOPMrootDomain::mustHibernate( void )
+{
+    return (lowBatteryCondition || thermalWarningState);
+}
+
+#endif /* HIBERNATION */
+
 //******************************************************************************
 // adjustPowerState
 //
index 45c001b414fdf8ec5b11aac95fb4d31e51f38569..ac600307e3ec40cc49b056980a4a4b35120dd838 100644 (file)
@@ -106,6 +106,14 @@ COMMPAGE_DESCRIPTOR_NAME(label) ## :                               ;\
 
 #define UNIQUEID(name) L ## name
 
+/* COMMPAGE_JMP(target,from,start)
+ *
+ * This macro perform a jump to another commpage routine.
+ * Used to return from the PFZ by jumping via a return outside the PFZ.
+ */
+#define COMMPAGE_JMP(target,from,start)                                \
+       jmp      L ## start - from + target
+
 #else /* __ASSEMBLER__ */
 
 /* Each potential commpage routine is described by one of these.
index 89278b279cebe1d9c9f5281995dbe5c36ad3e273..1a695f0bda20e86ea3950962f396ba661e0d9adf 100644 (file)
@@ -67,6 +67,7 @@ _commpage_sched_gen_inc:
 _commpage_32_routines:
        COMMPAGE_DESCRIPTOR_REFERENCE(preempt)
        COMMPAGE_DESCRIPTOR_REFERENCE(backoff)
+       COMMPAGE_DESCRIPTOR_REFERENCE(ret)
        COMMPAGE_DESCRIPTOR_REFERENCE(pfz_enqueue)
        COMMPAGE_DESCRIPTOR_REFERENCE(pfz_dequeue)
        .quad   0
@@ -80,6 +81,7 @@ _commpage_32_routines:
 _commpage_64_routines:
        COMMPAGE_DESCRIPTOR_REFERENCE(preempt_64)
        COMMPAGE_DESCRIPTOR_REFERENCE(backoff_64)
+       COMMPAGE_DESCRIPTOR_REFERENCE(ret_64)
        COMMPAGE_DESCRIPTOR_REFERENCE(pfz_enqueue_64)
        COMMPAGE_DESCRIPTOR_REFERENCE(pfz_dequeue_64)
        .quad   0
index 81c041ae406480509ab808d5d8a99df86144b87b..8163e2c3eef8a4bbcb88b006e29d7c0343a2386a 100644 (file)
@@ -141,17 +141,17 @@ COMMPAGE_FUNCTION_START(pfz_enqueue, 32, 4)
        COMMPAGE_CALL(_COMM_PAGE_BACKOFF,_COMM_PAGE_PFZ_ENQUEUE,pfz_enqueue)
        jmp         1b              // loop to try again
 2:
-       movl        4(%edi),%ecx    // get ptr to last element in q
-       testl       %ecx,%ecx       // q null?
+       movl        4(%edi),%eax    // get ptr to last element in q
+       testl       %eax,%eax       // q null?
        jnz         3f              // no
        movl        %esi,(%edi)     // q empty so this is first element
        jmp         4f
 3:
-       movl        %esi,(%edx,%ecx) // point to new element from last
+       movl        %esi,(%edx,%eax) // point to new element from last
 4:
        movl        %esi,4(%edi)    // new element becomes last in q
        movl        $0,8(%edi)      // unlock spinlock
-       ret
+       COMMPAGE_JMP(_COMM_PAGE_RET,_COMM_PAGE_PFZ_ENQUEUE,pfz_enqueue)
 COMMPAGE_DESCRIPTOR(pfz_enqueue,_COMM_PAGE_PFZ_ENQUEUE)
 
 
@@ -197,10 +197,14 @@ COMMPAGE_FUNCTION_START(pfz_dequeue, 32, 4)
        movl        %esi,(%edi)     // update "first" field of q head
 4:
        movl        $0,8(%edi)      // unlock spinlock
-       ret
+       COMMPAGE_JMP(_COMM_PAGE_RET,_COMM_PAGE_PFZ_DEQUEUE,pfz_dequeue)
 COMMPAGE_DESCRIPTOR(pfz_dequeue,_COMM_PAGE_PFZ_DEQUEUE)
 
 
+COMMPAGE_FUNCTION_START(ret, 32, 4)
+       ret
+COMMPAGE_DESCRIPTOR(ret,_COMM_PAGE_RET)
+
 
 
 /************************* x86_64 versions follow **************************/
@@ -286,17 +290,17 @@ COMMPAGE_FUNCTION_START(pfz_enqueue_64, 64, 4)
        COMMPAGE_CALL(_COMM_PAGE_BACKOFF,_COMM_PAGE_PFZ_ENQUEUE,pfz_enqueue_64)
        jmp         1b              // loop to try again
 2:
-       movq        8(%rdi),%rcx    // get ptr to last element in q
-       testq       %rcx,%rcx       // q null?
+       movq        8(%rdi),%rax    // get ptr to last element in q
+       testq       %rax,%rax       // q null?
        jnz         3f              // no
        movq        %rsi,(%rdi)     // q empty so this is first element
        jmp         4f
 3:
-       movq        %rsi,(%rdx,%rcx) // point to new element from last
+       movq        %rsi,(%rdx,%rax) // point to new element from last
 4:
        movq        %rsi,8(%rdi)    // new element becomes last in q
        movl        $0,16(%rdi)     // unlock spinlock
-       ret
+       COMMPAGE_JMP(_COMM_PAGE_RET,_COMM_PAGE_PFZ_ENQUEUE,pfz_enqueue_64)
 COMMPAGE_DESCRIPTOR(pfz_enqueue_64,_COMM_PAGE_PFZ_ENQUEUE)
 
 
@@ -343,5 +347,9 @@ COMMPAGE_FUNCTION_START(pfz_dequeue_64, 64, 4)
        movq        %rsi,(%rdi)     // update "first" field of q head
 4:
        movl        $0,16(%rdi)     // unlock spinlock
-       ret
+       COMMPAGE_JMP(_COMM_PAGE_RET,_COMM_PAGE_PFZ_DEQUEUE,pfz_dequeue_64)
 COMMPAGE_DESCRIPTOR(pfz_dequeue_64,_COMM_PAGE_PFZ_DEQUEUE)
+
+COMMPAGE_FUNCTION_START(ret_64, 64, 4)
+       ret
+COMMPAGE_DESCRIPTOR(ret_64,_COMM_PAGE_RET)
index 868e1b4fd7a4b5158737680ee2957c7f53737022..0f1b7dadde186b2c515417da97ca46762f85ebac 100644 (file)
@@ -241,15 +241,17 @@ int _NumCPUs( void )
 
 #define _COMM_TEXT_PREEMPT_OFFSET              (0x5a0) /* called from withing pfz */
 #define _COMM_TEXT_BACKOFF_OFFSET              (0x600) /* called from PFZ */
+#define _COMM_TEXT_RET_OFFSET                  (0x680) /* called from PFZ */
 #define _COMM_TEXT_PFZ_START_OFFSET            (0xc00) /* offset for Preemption Free Zone */
 #define _COMM_TEXT_PFZ_ENQUEUE_OFFSET          (0xc00) /* internal FIFO enqueue */
 #define _COMM_TEXT_PFZ_DEQUEUE_OFFSET          (0xc80) /* internal FIFO dequeue */
-#define _COMM_TEXT_UNUSED_OFFSET               (0xd80) /* end of routines in text page */
-#define _COMM_TEXT_PFZ_END_OFFSET              (0xfff) /* offset for end of PFZ */
+#define _COMM_TEXT_UNUSED_OFFSET               (0xd00) /* end of routines in text page */
+#define _COMM_TEXT_PFZ_END_OFFSET              (0xd00) /* offset for end of PFZ */
 
 
 #define _COMM_PAGE_PREEMPT             (_COMM_PAGE_TEXT_START+_COMM_TEXT_PREEMPT_OFFSET)
 #define _COMM_PAGE_BACKOFF             (_COMM_PAGE_TEXT_START+_COMM_TEXT_BACKOFF_OFFSET)       
+#define _COMM_PAGE_RET                 (_COMM_PAGE_TEXT_START+_COMM_TEXT_RET_OFFSET)   
 
 #define _COMM_PAGE_PFZ_START           (_COMM_PAGE_TEXT_START+_COMM_PAGE_PFZ_START_OFFSET)
 
index a3dbca3b107ba8638c748b30415c1747087f87d3..a886ae73671aa6219b6192f3556d8bba68587670 100644 (file)
@@ -202,43 +202,6 @@ extern void hi64_syscall(void);
  * Allocate and initialize the per-processor descriptor tables.
  */
 
-struct fake_descriptor ldt_desc_pattern = {
-       (unsigned int) 0,
-       LDTSZ_MIN * sizeof(struct fake_descriptor) - 1,
-       0,
-       ACC_P|ACC_PL_K|ACC_LDT
-};
-
-struct fake_descriptor tss_desc_pattern = {
-       (unsigned int) 0,
-       sizeof(struct i386_tss) - 1,
-       0,
-       ACC_P|ACC_PL_K|ACC_TSS
-};
-
-struct fake_descriptor cpudata_desc_pattern = {
-       (unsigned int) 0,
-       sizeof(cpu_data_t)-1,
-       SZ_32,
-       ACC_P|ACC_PL_K|ACC_DATA_W
-};
-
-#if    NCOPY_WINDOWS > 0
-struct fake_descriptor userwindow_desc_pattern = {
-       (unsigned int) 0,
-       ((NBPDE * NCOPY_WINDOWS) / PAGE_SIZE) - 1,
-       SZ_32 | SZ_G,
-       ACC_P|ACC_PL_U|ACC_DATA_W
-};
-#endif
-
-struct fake_descriptor physwindow_desc_pattern = {
-       (unsigned int) 0,
-       PAGE_SIZE - 1,
-       SZ_32,
-       ACC_P|ACC_PL_K|ACC_DATA_W
-};
-
 /*
  * This is the expanded, 64-bit variant of the kernel LDT descriptor.
  * When switching to 64-bit mode this replaces KERNEL_LDT entry
@@ -715,6 +678,13 @@ valid_user_code_selector(uint16_t selector)
     else if (sel.index < GDTSZ && sel.rpl == USER_PRIV) {
        if ((gdt_desc_p(selector)->access & ACC_PL_U) == ACC_PL_U)
            return (TRUE);
+       /* Explicitly validate the system code selectors
+        * even if not instantaneously privileged,
+        * since they are dynamically re-privileged
+        * at context switch
+        */
+       if ((selector == USER_CS) || (selector == USER64_CS))
+               return (TRUE);
     }
 
     return (FALSE);
index d77d20134cb3f8d47e2b57c776c93dd5f1b4f951..b26756da129e44616c9a216f7b66afdcbafeb46f 100644 (file)
@@ -177,8 +177,8 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
                 * Enable the 64-bit user code segment, USER64_CS.
                 * Disable the 32-bit user code segment, USER_CS.
                 */
-               ldt_desc_p(USER64_CS)->access |= ACC_PL_U;
-               ldt_desc_p(USER_CS)->access &= ~ACC_PL_U;
+               gdt_desc_p(USER64_CS)->access |= ACC_PL_U;
+               gdt_desc_p(USER_CS)->access &= ~ACC_PL_U;
 
                /*
                 * Switch user's GS base if necessary
@@ -190,7 +190,8 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
                 * in the event it was altered in user space.
                 */
                if ((pcb->cthread_self != 0) || (new->task != kernel_task)) {
-                       if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) || (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
+                       if ((cdp->cpu_uber.cu_user_gs_base != pcb->cthread_self) ||
+                           (pcb->cthread_self != rdmsr64(MSR_IA32_KERNEL_GS_BASE))) {
                                cdp->cpu_uber.cu_user_gs_base = pcb->cthread_self;
                                wrmsr64(MSR_IA32_KERNEL_GS_BASE, pcb->cthread_self);
                        }
@@ -204,8 +205,14 @@ act_machine_switch_pcb(__unused thread_t old, thread_t new)
                 * Disable USER64_CS
                 * Enable USER_CS
                 */
-               ldt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
-               ldt_desc_p(USER_CS)->access |= ACC_PL_U;
+
+               /* It's possible that writing to the GDT areas
+                * is expensive, if the processor intercepts those
+                * writes to invalidate its internal segment caches
+                * TODO: perhaps only do this if switching bitness
+                */
+               gdt_desc_p(USER64_CS)->access &= ~ACC_PL_U;
+               gdt_desc_p(USER_CS)->access |= ACC_PL_U;
 
                /*
                 * Set the thread`s cthread (a.k.a pthread)
@@ -359,7 +366,7 @@ machine_thread_create(
        }
 
        /*
-        * Assure that the synthesized 32-bit state including
+        * Ensure that the synthesized 32-bit state including
         * the 64-bit interrupt state can be acommodated in the 
         * 64-bit state we allocate for both 32-bit and 64-bit threads.
         */
index 5cdae79ada1ab2ecf6dfc304364805de3da1475a..7924f4f7dd5f0ce543e13640e96759c0de1fe40d 100644 (file)
@@ -1270,3 +1270,27 @@ sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) {
                __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
        }
 }
+
+#if DEBUG
+extern void    thread_exception_return_internal(void) __dead2;
+
+void thread_exception_return(void) {
+       thread_t thread = current_thread();
+       ml_set_interrupts_enabled(FALSE);
+       if (thread_is_64bit(thread) != task_has_64BitAddr(thread->task)) {
+               panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit(thread),  task_has_64BitAddr(thread->task));
+       }
+
+       if (thread_is_64bit(thread)) {
+               if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
+                       panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
+               }
+       } else {
+                       if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
+                               panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
+
+               }
+       }
+       thread_exception_return_internal();
+}
+#endif
index cd5b37f5e2c24c9e41fff0d1d0be15ef8bf5bb86..ef7e61a4c4de89635428425e5fbe0bece9087a0e 100644 (file)
@@ -3937,6 +3937,7 @@ ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
     {
         mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
         user_dsc--; // point to the start of this port descriptor
+        bzero((void *)user_dsc, sizeof(*user_dsc));
         user_dsc->name = CAST_MACH_NAME_TO_PORT(name);
         user_dsc->disposition = disp;
         user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
@@ -3944,6 +3945,7 @@ ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
     } else {
         mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
         user_dsc--; // point to the start of this port descriptor
+        bzero((void *)user_dsc, sizeof(*user_dsc));
         user_dsc->name = CAST_MACH_PORT_TO_NAME(name);
         user_dsc->disposition = disp;
         user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
@@ -4004,6 +4006,7 @@ ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descrip
     {
         mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
         user_ool_dsc--;
+        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));
 
         user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
         user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
@@ -4016,6 +4019,7 @@ ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descrip
     } else if (is_64bit) {
         mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
         user_ool_dsc--;
+        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));
 
         user_ool_dsc->address = rcv_addr;
         user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
@@ -4028,6 +4032,7 @@ ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descrip
     } else {
         mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
         user_ool_dsc--;
+        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));
 
         user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
         user_ool_dsc->size = (mach_msg_size_t)size;
@@ -4155,6 +4160,7 @@ ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
     if(current_task() == kernel_task) {
         mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
         user_ool_dsc--;
+        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));
 
         user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
         user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
@@ -4168,6 +4174,7 @@ ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
     } if (is_64bit) {
         mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
         user_ool_dsc--;
+        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));
 
         user_ool_dsc->address = rcv_addr;
         user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
@@ -4181,6 +4188,7 @@ ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
     } else {
         mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
         user_ool_dsc--;
+        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));
 
         user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
         user_ool_dsc->count = count;
index f972fcd91af0e81024b8ca91c1fa54340b276a30..c308fa0fc9020d8c368fb968b6ae33e4477b7801 100644 (file)
@@ -134,10 +134,9 @@ ipc_object_release(
  *     Returns:
  *             KERN_SUCCESS            Object returned locked.
  *             KERN_INVALID_TASK       The space is dead.
- *             KERN_INVALID_NAME       The name doesn't denote a right.
- *             KERN_INVALID_RIGHT      Name doesn't denote the correct right.
+ *             KERN_INVALID_NAME       The name doesn't denote a right
+ *             KERN_INVALID_RIGHT      Name doesn't denote the correct right
  */
-
 kern_return_t
 ipc_object_translate(
        ipc_space_t             space,
index 966fcc4487e078651d9ff8e7f434e799ebe317b3..f7f2cede7a4459bdb0ce6649721f59d49e9ea5e9 100644 (file)
@@ -129,7 +129,6 @@ struct ipc_port {
        union {
                ipc_kobject_t kobject;
                ipc_importance_task_t imp_task;
-               uintptr_t alias;
        } kdata;
                
        struct ipc_port *ip_nsrequest;
@@ -176,7 +175,6 @@ struct ipc_port {
 
 #define ip_kobject             kdata.kobject
 #define ip_imp_task            kdata.imp_task
-#define ip_alias               kdata.alias
 
 #define IP_NULL                        IPC_PORT_NULL
 #define IP_DEAD                        IPC_PORT_DEAD
index e444f392b3ab77f2cc16792e2f418442331e9b0e..7a937f5bef66d0d1666847f89703dfe977b89aa5 100644 (file)
@@ -75,6 +75,7 @@
 #include <mach/port.h>
 #include <mach/message.h>
 #include <kern/assert.h>
+#include <kern/ipc_kobject.h>
 #include <kern/misc_protos.h>
 #include <ipc/port.h>
 #include <ipc/ipc_entry.h>
@@ -1718,6 +1719,8 @@ ipc_right_copyin_check(
            case MACH_MSG_TYPE_MOVE_RECEIVE:
                if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
                        return FALSE;
+               if (io_kotype(entry->ie_object) != IKOT_NONE)
+                       return FALSE;
                break;
 
            case MACH_MSG_TYPE_COPY_SEND:
@@ -1857,6 +1860,23 @@ ipc_right_copyin(
                if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
                        goto invalid_right;
 
+               /*
+                * Disallow moving receive-right kobjects, e.g. mk_timer ports
+                * The ipc_port structure uses the kdata union of kobject and
+                * imp_task exclusively. Thus, general use of a kobject port as
+                * a receive right can cause type confusion in the importance
+                * code.
+                */
+               if (io_kotype(entry->ie_object) != IKOT_NONE) {
+                       /*
+                        * Distinguish an invalid right, e.g., trying to move
+                        * a send right as a receive right, from this
+                        * situation which is, "This is a valid receive right,
+                        * but it's also a kobject and you can't move it."
+                        */
+                       return KERN_INVALID_CAPABILITY;
+               }
+
                port = (ipc_port_t) entry->ie_object;
                assert(port != IP_NULL);
 
index 9bff072b173253b975ba9baf8523ee105a8806b4..851a6a1ee95ddca247a44507821b982c73c1b735 100644 (file)
@@ -1550,6 +1550,12 @@ mach_port_request_notification(
                        return kr;
                /* port is locked and active */
 
+               /* you cannot register for port death notifications on a kobject */
+               if (ip_kotype(port) != IKOT_NONE) {
+                       ip_unlock(port);
+                       return KERN_INVALID_RIGHT;
+               }
+
                ipc_port_pdrequest(port, notify, &previous);
                /* port is unlocked */
 
index b877ebf814cd87245317244365505a32b1b37ae5..41bdf29622b289eef51f4953bf5f65399b8d08dc 100644 (file)
@@ -1176,7 +1176,7 @@ kcdata_record_thread_snapshot(
 {
        boolean_t dispatch_p              = ((trace_flags & STACKSHOT_GET_DQ) != 0);
        boolean_t active_kthreads_only_p  = ((trace_flags & STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY) != 0);
-       boolean_t trace_fp_p              = ((trace_flags & STACKSHOT_TAILSPIN) == 0);
+       boolean_t trace_fp_p              = false;
        boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0);
        boolean_t collect_iostats         = !collect_delta_stackshot && !(trace_flags & STACKSHOT_TAILSPIN) && !(trace_flags & STACKSHOT_NO_IO_STATS);
 
index 7ca764dac6a03e950a4bdc7fdc78b553e628bc96..f968d1898e58cd9721f0abb3aced183aa6319553 100644 (file)
@@ -70,7 +70,7 @@ mk_timer_create_trap(
                return (MACH_PORT_NULL);
 
        result = mach_port_allocate_qos(myspace, MACH_PORT_RIGHT_RECEIVE,
-                                                                                                               &mk_timer_qos, &name);
+                                       &mk_timer_qos, &name);
        if (result == KERN_SUCCESS)
                result = ipc_port_translate_receive(myspace, name, &port);
 
index e39cdf7294e2763eb83e545928a7f8d48836364a..133f6b718e1c119984a3792ea97fa4239ecb71eb 100644 (file)
@@ -330,7 +330,7 @@ task_set_64bit(
         * state with respect to its task's 64-bitness.
         */
 
-#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)     
+#if defined(__x86_64__) || defined(__arm64__)
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
                thread_mtx_lock(thread);
                machine_thread_switch_addrmode(thread);
@@ -356,7 +356,7 @@ task_set_64bit(
                        splx(spl);
                }
        }
-#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */    
+#endif /* defined(__x86_64__) || defined(__arm64__) */
 
 out:
        task_unlock(task);
index f9649749f7d545854c8ebbfeeeb5fd9f4ca434ba..be40d82600f487c1b5b1e8ad906703c88b4db863 100644 (file)
@@ -356,14 +356,13 @@ struct zone_free_element {
 };
 
 /*
- *      Protects num_zones, zone_array and zone_array_index
+ *      Protects num_zones and zone_array
  */
 decl_simple_lock_data(, all_zones_lock)
 unsigned int            num_zones;
 
 #define MAX_ZONES       256
 struct zone             zone_array[MAX_ZONES];
-static int              zone_array_index = 0;
 
 #define MULTIPAGE_METADATA_MAGIC               (0xff)
 
@@ -1565,9 +1564,10 @@ zinit(
        zone_t          z;
 
        simple_lock(&all_zones_lock);
-       z = &(zone_array[zone_array_index]);
-       zone_array_index++;
-       assert(zone_array_index != MAX_ZONES);
+       assert(num_zones < MAX_ZONES);
+       z = &(zone_array[num_zones]);
+       z->index = num_zones;
+       num_zones++;
        simple_unlock(&all_zones_lock);
 
        /* Zone elements must fit both a next pointer and a backup pointer */
@@ -1640,14 +1640,6 @@ zinit(
 
        lock_zone_init(z);
 
-       /*
-        *      Add the zone to the all-zones list.
-        */
-       simple_lock(&all_zones_lock);
-       z->index = num_zones;
-       num_zones++;
-       simple_unlock(&all_zones_lock);
-
        /*
         * Check for and set up zone leak detection if requested via boot-args.  We recognized two
         * boot-args:
index 3ab3d89bc4f1bf3968e4f8d7b49a20b3a5f81e33..78b07486b17416ffdd22ff6c013e307a7830bce0 100644 (file)
@@ -445,6 +445,16 @@ L_32bit_return:
         */
        mov     %r15, %rsp              /* Set the PCB as the stack */
        swapgs
+
+       xor     %r8, %r8
+       xor     %r9, %r9
+       xor     %r10, %r10
+       xor     %r11, %r11
+       xor     %r12, %r12
+       xor     %r13, %r13
+       xor     %r14, %r14
+       xor     %r15, %r15
+
 EXT(ret32_set_ds):     
        movl    R32_DS(%rsp), %ds
 EXT(ret32_set_es):
index 8620c19fc7c1e445adbedc1c6e8e629d7302c62d..88c3372aa42eeb96910069a37a3824e9c08c6388 100644 (file)
@@ -53,7 +53,7 @@
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
-
+#include <debug.h>
 #include <mach_rt.h>
 #include <mach_kdp.h>
 #include <mach_assert.h>
@@ -157,14 +157,22 @@ wrmsr_fail:
        movl    $1, %eax
        ret
 
+#if DEBUG
+.globl EXT(thread_exception_return_internal)
+#else
 .globl EXT(thread_exception_return)
+#endif
 .globl EXT(thread_bootstrap_return)
 LEXT(thread_bootstrap_return)
 #if CONFIG_DTRACE
        call EXT(dtrace_thread_bootstrap)
 #endif
 
+#if DEBUG
+LEXT(thread_exception_return_internal)
+#else
 LEXT(thread_exception_return)
+#endif
        cli
        xorl    %ecx, %ecx              /* don't check if we're in the PFZ */
        jmp     EXT(return_from_trap)
diff --git a/tools/tests/darwintests/mktimer_kobject.c b/tools/tests/darwintests/mktimer_kobject.c
new file mode 100644 (file)
index 0000000..54b24a0
--- /dev/null
@@ -0,0 +1,50 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <mach/mach.h>
+#include <mach/mk_timer.h>
+
+#include <darwintest.h>
+
+T_DECL(mktimer_kobject, "mktimer_kobject()", T_META_ALL_VALID_ARCHS(true))
+{
+       mach_port_t timer_port = MACH_PORT_NULL;
+       mach_port_t notify_port = MACH_PORT_NULL;
+
+       kern_return_t kr = KERN_SUCCESS;
+
+       // timer port
+       // This is a receive right which is also a kobject
+       timer_port = mk_timer_create();
+       T_ASSERT_NE(timer_port, (mach_port_t)MACH_PORT_NULL, "mk_timer_create: %s", mach_error_string(kr));
+
+       mach_port_set_context(mach_task_self(), timer_port, (mach_port_context_t) 0x1);
+       T_ASSERT_EQ(kr, KERN_SUCCESS, "mach_port_set_context(timer_port): %s", mach_error_string(kr));
+
+       // notification port for the mk_timer port to come back on
+       kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &notify_port);
+       T_ASSERT_EQ(kr, KERN_SUCCESS, "mach_port_allocate(notify_port): %s", mach_error_string(kr));
+
+       kr = mach_port_set_context(mach_task_self(), notify_port, (mach_port_context_t) 0x2);
+       T_ASSERT_EQ(kr, KERN_SUCCESS, "mach_port_set_context(notify_port): %s", mach_error_string(kr));
+
+       T_LOG("timer: 0x%x, notify: 0x%x", timer_port, notify_port);
+
+       mach_port_t previous = MACH_PORT_NULL;
+
+       // request a port-destroyed notification on the timer port
+       kr = mach_port_request_notification(mach_task_self(), timer_port, MACH_NOTIFY_PORT_DESTROYED,
+                                           0, notify_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
+       // this should fail!
+       T_ASSERT_NE(kr, KERN_SUCCESS, "notifications should NOT work on mk_timer ports!");
+
+       // destroy the timer port to send the notification
+       mach_port_mod_refs(mach_task_self(), timer_port, MACH_PORT_RIGHT_RECEIVE, -1);
+
+       // destroy the notification port
+       mach_port_mod_refs(mach_task_self(), notify_port, MACH_PORT_RIGHT_RECEIVE, -1);
+
+       T_LOG("done");
+}
+