From c18c124eaa464aaaa5549e99e5a70fc9cbb50944 Mon Sep 17 00:00:00 2001
From: Apple
Date: Fri, 25 Sep 2015 16:05:27 +0000
Subject: [PATCH] xnu-2782.40.9.tar.gz

---
 bsd/dev/dtrace/fasttrap.c | 3 +-
 bsd/dev/i386/kern_machdep.c | 6 +
 bsd/hfs/hfs.h | 2 +
 bsd/hfs/hfs_vfsops.c | 40 +++++
 bsd/hfs/hfs_vfsutils.c | 119 +++++++++----
 bsd/kern/kern_descrip.c | 39 ++--
 bsd/kern/kern_exec.c | 158 +++++++----------
 bsd/kern/mach_fat.c | 187 ++++++++++++++------
 bsd/kern/mach_fat.h | 11 +-
 bsd/kern/mach_loader.c | 161 +++++++++++------
 bsd/kern/mach_loader.h | 1 +
 bsd/kern/policy_check.c | 2 +-
 bsd/kern/ubc_subr.c | 32 +++-
 bsd/machine/exec.h | 1 +
 bsd/sys/codesign.h | 3 +
 bsd/sys/fcntl.h | 2 +
 bsd/sys/ubc.h | 2 +-
 bsd/sys/ubc_internal.h | 2 +-
 bsd/sys/vnode.h | 1 +
 bsd/vfs/vfs_cache.c | 10 +-
 bsd/vfs/vfs_subr.c | 13 +-
 bsd/vfs/vfs_syscalls.c | 172 ++++++++++++------
 bsd/vm/vm_compressor_backing_file.c | 15 +-
 config/MasterVersion | 2 +-
 iokit/IOKit/IOLib.h | 5 +-
 iokit/Kernel/IOService.cpp | 9 +-
 libsyscall/wrappers/cancelable/fcntl-base.c | 1 +
 osfmk/ipc/mach_debug.c | 8 +-
 osfmk/mach/memory_object_types.h | 4 +
 osfmk/mach/vm_statistics.h | 1 +
 osfmk/vm/vm_apple_protect.c | 2 +
 osfmk/vm/vm_fault.c | 15 +-
 osfmk/vm/vm_map.c | 2 +
 osfmk/vm/vm_page.h | 3 +-
 osfmk/vm/vm_pageout.c | 3 +
 osfmk/vm/vm_protos.h | 5 +-
 osfmk/vm/vm_resident.c | 5 +-
 security/mac_framework.h | 2 +-
 security/mac_policy.h | 4 +-
 security/mac_vfs.c | 5 +-
 tools/lldbmacros/memory.py | 1 -
 41 files changed, 709 insertions(+), 350 deletions(-)

diff --git a/bsd/dev/dtrace/fasttrap.c b/bsd/dev/dtrace/fasttrap.c
index f8cbeb4c1..9bcdf28b5 100644
--- a/bsd/dev/dtrace/fasttrap.c
+++ b/bsd/dev/dtrace/fasttrap.c
@@ -2230,7 +2230,8 @@ fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *
 	if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
 		if (p != PROC_NULL)
 			proc_rele(p);
-		return (ESRCH);
+		ret = ESRCH;
+		goto err;
 	}
 	// proc_lock(p); // FIXME! How is this done on OS X?
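
The fasttrap_ioctl hunk above swaps a bare return (ESRCH) for ret = ESRCH; goto err, so the failure leaves through the function's common cleanup label instead of skipping it (the label's body sits outside the hunk). A compilable userspace sketch of that goto-cleanup idiom; make_probe and find_proc are hypothetical stand-ins, not the kernel code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in lookup: NULL means "no such process". */
static void *
find_proc(int pid)
{
	static int dummy;
	return (pid == 1) ? (void *)&dummy : NULL;
}

static int
make_probe(int pid)
{
	int ret = 0;
	char *probe = malloc(64);	/* resource acquired before the checks */

	if (probe == NULL)
		return (ENOMEM);

	if (find_proc(pid) == NULL) {
		ret = ESRCH;		/* record the error... */
		goto err;		/* ...and take the shared exit path */
	}
	/* ...install the probe using the buffer... */
err:
	free(probe);			/* cleanup now runs on every path */
	return (ret);
}

int
main(void)
{
	printf("pid 1 -> %d, pid 7 -> %d\n", make_probe(1), make_probe(7));
	return (0);
}

An early return before the label leaks whatever was acquired up front, which is exactly the bug the hunk fixes.
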
diff --git a/bsd/dev/i386/kern_machdep.c b/bsd/dev/i386/kern_machdep.c index cd7dbb1d7..8627c26ba 100644 --- a/bsd/dev/i386/kern_machdep.c +++ b/bsd/dev/i386/kern_machdep.c @@ -87,3 +87,9 @@ md_prepare_for_shutdown( __unused char * command) { } + +boolean_t +pie_required(cpu_type_t exectype __unused, cpu_subtype_t execsubtype __unused) +{ + return FALSE; +} diff --git a/bsd/hfs/hfs.h b/bsd/hfs/hfs.h index e3898a3a1..52b8faca1 100644 --- a/bsd/hfs/hfs.h +++ b/bsd/hfs/hfs.h @@ -786,6 +786,8 @@ OSErr hfs_MountHFSVolume(struct hfsmount *hfsmp, HFSMasterDirectoryBlock *mdb, OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, off_t embeddedOffset, u_int64_t disksize, struct proc *p, void *args, kauth_cred_t cred); +OSErr hfs_ValidateHFSPlusVolumeHeader(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp); + extern int hfsUnmount(struct hfsmount *hfsmp, struct proc *p); extern bool overflow_extents(struct filefork *fp); diff --git a/bsd/hfs/hfs_vfsops.c b/bsd/hfs/hfs_vfsops.c index b96e07904..9df531ab8 100644 --- a/bsd/hfs/hfs_vfsops.c +++ b/bsd/hfs/hfs_vfsops.c @@ -1620,6 +1620,12 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size; mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize)); + + if (bp) { + buf_markinvalid(bp); + buf_brelse(bp); + bp = NULL; + } retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys), phys_blksize, cred, &bp); if (retval) { @@ -1639,6 +1645,40 @@ hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, vhp = (HFSPlusVolumeHeader*) mdbp; } + retval = hfs_ValidateHFSPlusVolumeHeader(hfsmp, vhp); + if (retval) + goto error_exit; + + /* + * If allocation block size is less than the physical block size, + * invalidate the buffer read in using native physical block size + * to ensure data consistency. + * + * HFS Plus reserves one allocation block for the Volume Header. + * If the physical size is larger, then when we read the volume header, + * we will also end up reading in the next allocation block(s). + * If those other allocation block(s) is/are modified, and then the volume + * header is modified, the write of the volume header's buffer will write + * out the old contents of the other allocation blocks. + * + * We assume that the physical block size is same as logical block size. + * The physical block size value is used to round down the offsets for + * reading and writing the primary and alternate volume headers. + * + * The same logic is also in hfs_MountHFSPlusVolume to ensure that + * hfs_mountfs, hfs_MountHFSPlusVolume and later are doing the I/Os + * using same block size. + */ + if (SWAP_BE32(vhp->blockSize) < hfsmp->hfs_physical_block_size) { + phys_blksize = hfsmp->hfs_logical_block_size; + hfsmp->hfs_physical_block_size = hfsmp->hfs_logical_block_size; + hfsmp->hfs_log_per_phys = 1; + // There should be one bp associated with devvp in buffer cache. 
+ retval = buf_invalidateblks(devvp, 0, 0, 0); + if (retval) + goto error_exit; + } + if (isroot) { hfs_root_unmounted_cleanly = ((SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) != 0); } diff --git a/bsd/hfs/hfs_vfsutils.c b/bsd/hfs/hfs_vfsutils.c index 9e2adb7d9..ca011c652 100644 --- a/bsd/hfs/hfs_vfsutils.c +++ b/bsd/hfs/hfs_vfsutils.c @@ -320,6 +320,62 @@ MtVolErr: #endif +//******************************************************************************* +// +// Sanity check Volume Header Block: +// Input argument *vhp is a pointer to a HFSPlusVolumeHeader block that has +// not been endian-swapped and represents the on-disk contents of this sector. +// This routine will not change the endianness of vhp block. +// +//******************************************************************************* +OSErr hfs_ValidateHFSPlusVolumeHeader(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp) +{ + u_int16_t signature; + u_int16_t hfs_version; + u_int32_t blockSize; + + signature = SWAP_BE16(vhp->signature); + hfs_version = SWAP_BE16(vhp->version); + + if (signature == kHFSPlusSigWord) { + if (hfs_version != kHFSPlusVersion) { + printf("hfs_ValidateHFSPlusVolumeHeader: invalid HFS+ version: %x\n", hfs_version); + return (EINVAL); + } + } else if (signature == kHFSXSigWord) { + if (hfs_version != kHFSXVersion) { + printf("hfs_ValidateHFSPlusVolumeHeader: invalid HFSX version: %x\n", hfs_version); + return (EINVAL); + } + } else { + /* Removed printf for invalid HFS+ signature because it gives + * false error for UFS root volume + */ + if (HFS_MOUNT_DEBUG) { + printf("hfs_ValidateHFSPlusVolumeHeader: unknown Volume Signature : %x\n", signature); + } + return (EINVAL); + } + + /* Block size must be at least 512 and a power of 2 */ + blockSize = SWAP_BE32(vhp->blockSize); + if (blockSize < 512 || !powerof2(blockSize)) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_ValidateHFSPlusVolumeHeader: invalid blocksize (%d) \n", blockSize); + } + return (EINVAL); + } + + if (blockSize < hfsmp->hfs_logical_block_size) { + if (HFS_MOUNT_DEBUG) { + printf("hfs_ValidateHFSPlusVolumeHeader: invalid physical blocksize (%d), hfs_logical_blocksize (%d) \n", + blockSize, hfsmp->hfs_logical_block_size); + } + return (EINVAL); + } + return 0; +} + //******************************************************************************* // Routine: hfs_MountHFSPlusVolume // @@ -348,38 +404,17 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, signature = SWAP_BE16(vhp->signature); hfs_version = SWAP_BE16(vhp->version); - if (signature == kHFSPlusSigWord) { - if (hfs_version != kHFSPlusVersion) { - printf("hfs_mount: invalid HFS+ version: %x\n", hfs_version); - return (EINVAL); - } - } else if (signature == kHFSXSigWord) { - if (hfs_version != kHFSXVersion) { - printf("hfs_mount: invalid HFSX version: %x\n", hfs_version); - return (EINVAL); - } + retval = hfs_ValidateHFSPlusVolumeHeader(hfsmp, vhp); + if (retval) + return retval; + + if (signature == kHFSXSigWord) { /* The in-memory signature is always 'H+'. 
*/ signature = kHFSPlusSigWord; hfsmp->hfs_flags |= HFS_X; - } else { - /* Removed printf for invalid HFS+ signature because it gives - * false error for UFS root volume - */ - if (HFS_MOUNT_DEBUG) { - printf("hfs_mounthfsplus: unknown Volume Signature : %x\n", signature); - } - return (EINVAL); } - /* Block size must be at least 512 and a power of 2 */ blockSize = SWAP_BE32(vhp->blockSize); - if (blockSize < 512 || !powerof2(blockSize)) { - if (HFS_MOUNT_DEBUG) { - printf("hfs_mounthfsplus: invalid blocksize (%d) \n", blockSize); - } - return (EINVAL); - } - /* don't mount a writable volume if its dirty, it must be cleaned by fsck_hfs */ if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0 && hfsmp->jnl == NULL && (SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) == 0) { @@ -391,22 +426,32 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, /* Make sure we can live with the physical block size. */ if ((disksize & (hfsmp->hfs_logical_block_size - 1)) || - (embeddedOffset & (hfsmp->hfs_logical_block_size - 1)) || - (blockSize < hfsmp->hfs_logical_block_size)) { + (embeddedOffset & (hfsmp->hfs_logical_block_size - 1))) { if (HFS_MOUNT_DEBUG) { - printf("hfs_mounthfsplus: invalid physical blocksize (%d), hfs_logical_blocksize (%d) \n", - blockSize, hfsmp->hfs_logical_block_size); + printf("hfs_mounthfsplus: hfs_logical_blocksize (%d) \n", + hfsmp->hfs_logical_block_size); } return (ENXIO); } - /* If allocation block size is less than the physical - * block size, we assume that the physical block size - * is same as logical block size. The physical block - * size value is used to round down the offsets for - * reading and writing the primary and alternate volume - * headers at physical block boundary and will cause - * problems if it is less than the block size. + /* + * If allocation block size is less than the physical block size, + * same data could be cached in two places and leads to corruption. + * + * HFS Plus reserves one allocation block for the Volume Header. + * If the physical size is larger, then when we read the volume header, + * we will also end up reading in the next allocation block(s). + * If those other allocation block(s) is/are modified, and then the volume + * header is modified, the write of the volume header's buffer will write + * out the old contents of the other allocation blocks. + * + * We assume that the physical block size is same as logical block size. + * The physical block size value is used to round down the offsets for + * reading and writing the primary and alternate volume headers. + * + * The same logic to ensure good hfs_physical_block_size is also in + * hfs_mountfs so that hfs_mountfs, hfs_MountHFSPlusVolume and + * later are doing the I/Os using same block size. 
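+ * For example: with 512-byte allocation blocks on a disk reporting
+ * 4K physical sectors, reading the volume header at 4K granularity
+ * drags in the seven neighboring allocation blocks, and a later write
+ * of the header's buffer would push stale copies of those blocks over
+ * any newer contents, which is why both mount paths drop back to the
+ * 512-byte logical block size here.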
*/ if (blockSize < hfsmp->hfs_physical_block_size) { hfsmp->hfs_physical_block_size = hfsmp->hfs_logical_block_size; diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c index 722fcfdff..54faaeb13 100644 --- a/bsd/kern/kern_descrip.c +++ b/bsd/kern/kern_descrip.c @@ -144,7 +144,7 @@ void fileport_releasefg(struct fileglob *fg); /* We don't want these exported */ __private_extern__ -int unlink1(vfs_context_t, struct nameidata *, int); +int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int); static void _fdrelse(struct proc * p, int fd); @@ -1579,7 +1579,6 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) * SPI (private) for unlinking a file starting from a dir fd */ case F_UNLINKFROM: { - struct nameidata nd; user_addr_t pathname; /* Check if this isn't a valid file descriptor */ @@ -1611,11 +1610,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } /* Start the lookup relative to the file descriptor's vnode. */ - NDINIT(&nd, DELETE, OP_UNLINK, USEDVP | AUDITVNPATH1, UIO_USERSPACE, - pathname, &context); - nd.ni_dvp = vp; - - error = unlink1(&context, &nd, 0); + error = unlink1(&context, vp, pathname, UIO_USERSPACE, 0); vnode_put(vp); break; @@ -1624,11 +1619,13 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) case F_ADDSIGS: case F_ADDFILESIGS: + case F_ADDFILESIGS_FOR_DYLD_SIM: { struct user_fsignatures fs; kern_return_t kr; vm_offset_t kernel_blob_addr; vm_size_t kernel_blob_size; + int blob_add_flags = 0; if (fp->f_type != DTYPE_VNODE) { error = EBADF; @@ -1636,6 +1633,16 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) } vp = (struct vnode *)fp->f_data; proc_fdunlock(p); + + if (uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) { + blob_add_flags |= MAC_VNODE_CHECK_DYLD_SIM; + if ((p->p_csflags & CS_KILL) == 0) { + proc_lock(p); + p->p_csflags |= CS_KILL; + proc_unlock(p); + } + } + error = vnode_getwithref(vp); if (error) goto outdrop; @@ -1656,8 +1663,13 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) goto outdrop; } - if(ubc_cs_blob_get(vp, CPU_TYPE_ANY, fs.fs_file_start)) + struct cs_blob * existing_blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, fs.fs_file_start); + if (existing_blob != NULL) { + /* If this is for dyld_sim revalidate the blob */ + if (uap->cmd == F_ADDFILESIGS_FOR_DYLD_SIM) { + error = ubc_cs_blob_revalidate(vp, existing_blob, blob_add_flags); + } vnode_put(vp); goto outdrop; } @@ -1690,6 +1702,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) (void *) kernel_blob_addr, kernel_blob_size); } else /* F_ADDFILESIGS */ { + int resid; + error = vn_rdwr(UIO_READ, vp, (caddr_t) kernel_blob_addr, @@ -1698,8 +1712,12 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) UIO_SYSSPACE, 0, kauth_cred_get(), - 0, + &resid, p); + if ((error == 0) && resid) { + /* kernel_blob_size rounded to a page size, but signature may be at end of file */ + memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid); + } } if (error) { @@ -1714,7 +1732,8 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval) CPU_TYPE_ANY, /* not for a specific architecture */ fs.fs_file_start, kernel_blob_addr, - kernel_blob_size); + kernel_blob_size, + blob_add_flags); if (error) { ubc_cs_blob_deallocate(kernel_blob_addr, kernel_blob_size); diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c index 4816a4891..fc270ae21 100644 --- a/bsd/kern/kern_exec.c +++ 
b/bsd/kern/kern_exec.c @@ -217,7 +217,7 @@ __attribute__((noinline)) int __EXEC_WAITING_ON_TASKGATED_CODE_SIGNATURE_UPCALL_ * activator in exec_activate_image() before treating * it as malformed/corrupt. */ -#define EAI_ITERLIMIT 10 +#define EAI_ITERLIMIT 3 /* * For #! interpreter parsing @@ -402,14 +402,14 @@ exec_reset_save_path(struct image_params *imgp) /* * exec_shell_imgact * - * Image activator for interpreter scripts. If the image begins with the - * characters "#!", then it is an interpreter script. Verify that we are - * not already executing in PowerPC mode, and that the length of the script - * line indicating the interpreter is not in excess of the maximum allowed - * size. If this is the case, then break out the arguments, if any, which - * are separated by white space, and copy them into the argument save area - * as if they were provided on the command line before all other arguments. - * The line ends when we encounter a comment character ('#') or newline. + * Image activator for interpreter scripts. If the image begins with + * the characters "#!", then it is an interpreter script. Verify the + * length of the script line indicating the interpreter is not in + * excess of the maximum allowed size. If this is the case, then + * break out the arguments, if any, which are separated by white + * space, and copy them into the argument save area as if they were + * provided on the command line before all other arguments. The line + * ends when we encounter a comment character ('#') or newline. * * Parameters; struct image_params * image parameter block * @@ -435,10 +435,6 @@ exec_shell_imgact(struct image_params *imgp) /* * Make sure it's a shell script. If we've already redirected * from an interpreted file once, don't do it again. - * - * Note: We disallow PowerPC, since the expectation is that we - * may run a PowerPC interpreter, but not an interpret a PowerPC - * image. This is consistent with historical behaviour. */ if (vdata[0] != '#' || vdata[1] != '!' 
|| @@ -446,6 +442,11 @@ exec_shell_imgact(struct image_params *imgp) return (-1); } + if (imgp->ip_origcputype != 0) { + /* Fat header previously matched, don't allow shell script inside */ + return (-1); + } + imgp->ip_flags |= IMGPF_INTERPRET; imgp->ip_interp_sugid_fd = -1; imgp->ip_interp_buffer[0] = '\0'; @@ -590,64 +591,28 @@ exec_fat_imgact(struct image_params *imgp) int resid, error; load_return_t lret; + if (imgp->ip_origcputype != 0) { + /* Fat header previously matched, don't allow another fat file inside */ + return (-1); + } + /* Make sure it's a fat binary */ - if ((fat_header->magic != FAT_MAGIC) && - (fat_header->magic != FAT_CIGAM)) { - error = -1; + if (OSSwapBigToHostInt32(fat_header->magic) != FAT_MAGIC) { + error = -1; /* not claimed */ goto bad; } -#if DEVELOPMENT || DEBUG - if (cpu_type() == CPU_TYPE_ARM64) { - uint32_t fat_nfat_arch = OSSwapBigToHostInt32(fat_header->nfat_arch); - struct fat_arch *archs; - int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC); - int spawn = (imgp->ip_flags & IMGPF_SPAWN); - - archs = (struct fat_arch *)(imgp->ip_vdata + sizeof(struct fat_header)); - - /* ip_vdata always has PAGE_SIZE of data */ - if (PAGE_SIZE >= (sizeof(struct fat_header) + (fat_nfat_arch + 1) * sizeof(struct fat_arch))) { - if (fat_nfat_arch > 0 - && OSSwapBigToHostInt32(archs[fat_nfat_arch].cputype) == CPU_TYPE_ARM64) { - - /* rdar://problem/15001727 */ - printf("Attempt to execute malformed binary %s\n", imgp->ip_strings); - - proc_lock(p); - p->p_csflags |= CS_KILLED; - proc_unlock(p); - - /* - * We can't stop the system call, so make sure the child never executes - * For vfork exec, the current implementation has not set up the thread in the - * child process, so we cannot signal it. Return an error code in that case. - */ - if (!vfexec && !spawn) { - psignal(p, SIGKILL); - error = 0; - } else { - error = EBADEXEC; - } - goto bad; - } - } + /* imgp->ip_vdata has PAGE_SIZE, zerofilled if the file is smaller */ + lret = fatfile_validate_fatarches((vm_offset_t)fat_header, PAGE_SIZE); + if (lret != LOAD_SUCCESS) { + error = load_return_to_errno(lret); + goto bad; } -#endif /* If posix_spawn binprefs exist, respect those prefs. */ psa = (struct _posix_spawnattr *) imgp->ip_px_sa; if (psa != NULL && psa->psa_binprefs[0] != 0) { - struct fat_arch *arches = (struct fat_arch *) (fat_header + 1); - int nfat_arch = 0, pr = 0, f = 0; - - nfat_arch = OSSwapBigToHostInt32(fat_header->nfat_arch); - - /* make sure bogus nfat_arch doesn't cause chaos - 19376072 */ - if ( (sizeof(struct fat_header) + (nfat_arch * sizeof(struct fat_arch))) > PAGE_SIZE ) { - error = EBADEXEC; - goto bad; - } + uint32_t pr = 0; /* Check each preference listed against all arches in header */ for (pr = 0; pr < NBINPREFS; pr++) { @@ -660,36 +625,28 @@ exec_fat_imgact(struct image_params *imgp) if (pref == CPU_TYPE_ANY) { /* Fall through to regular grading */ - break; + goto regular_grading; } - for (f = 0; f < nfat_arch; f++) { - cpu_type_t archtype = OSSwapBigToHostInt32( - arches[f].cputype); - cpu_type_t archsubtype = OSSwapBigToHostInt32( - arches[f].cpusubtype) & ~CPU_SUBTYPE_MASK; - if (pref == archtype && - grade_binary(archtype, archsubtype)) { - /* We have a winner! 
*/ - fat_arch.cputype = archtype; - fat_arch.cpusubtype = archsubtype; - fat_arch.offset = OSSwapBigToHostInt32( - arches[f].offset); - fat_arch.size = OSSwapBigToHostInt32( - arches[f].size); - fat_arch.align = OSSwapBigToHostInt32( - arches[f].align); - goto use_arch; - } + lret = fatfile_getbestarch_for_cputype(pref, + (vm_offset_t)fat_header, + PAGE_SIZE, + &fat_arch); + if (lret == LOAD_SUCCESS) { + goto use_arch; } } + + /* Requested binary preference was not honored */ + error = EBADEXEC; + goto bad; } +regular_grading: /* Look up our preferred architecture in the fat file. */ - lret = fatfile_getarch_affinity(imgp->ip_vp, - (vm_offset_t)fat_header, - &fat_arch, - (p->p_flag & P_AFFINITY)); + lret = fatfile_getbestarch((vm_offset_t)fat_header, + PAGE_SIZE, + &fat_arch); if (lret != LOAD_SUCCESS) { error = load_return_to_errno(lret); goto bad; @@ -705,16 +662,16 @@ use_arch: goto bad; } - /* Did we read a complete header? */ if (resid) { - error = EBADEXEC; - goto bad; + memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid); } /* Success. Indicate we have identified an encapsulated binary */ error = -2; imgp->ip_arch_offset = (user_size_t)fat_arch.offset; imgp->ip_arch_size = (user_size_t)fat_arch.size; + imgp->ip_origcputype = fat_arch.cputype; + imgp->ip_origcpusubtype = fat_arch.cpusubtype; bad: kauth_cred_unref(&cred); @@ -780,14 +737,19 @@ exec_mach_imgact(struct image_params *imgp) goto bad; } - switch (mach_header->filetype) { - case MH_DYLIB: - case MH_BUNDLE: + if (mach_header->filetype != MH_EXECUTE) { error = -1; goto bad; } - if (!imgp->ip_origcputype) { + if (imgp->ip_origcputype != 0) { + /* Fat header previously had an idea about this thin file */ + if (imgp->ip_origcputype != mach_header->cputype || + imgp->ip_origcpusubtype != mach_header->cpusubtype) { + error = EBADARCH; + goto bad; + } + } else { imgp->ip_origcputype = mach_header->cputype; imgp->ip_origcpusubtype = mach_header->cpusubtype; } @@ -1260,7 +1222,7 @@ exec_activate_image(struct image_params *imgp) int resid; int once = 1; /* save SGUID-ness for interpreted files */ int i; - int iterlimit = EAI_ITERLIMIT; + int itercount = 0; proc_t p = vfs_context_proc(imgp->ip_vfs_context); error = execargs_alloc(imgp); @@ -1324,10 +1286,14 @@ again: &resid, vfs_context_proc(imgp->ip_vfs_context)); if (error) goto bad; - + + if (resid) { + memset(imgp->ip_vdata + (PAGE_SIZE - resid), 0x0, resid); + } + encapsulated_binary: /* Limit the number of iterations we will attempt on each binary */ - if (--iterlimit == 0) { + if (++itercount > EAI_ITERLIMIT) { error = EBADEXEC; goto bad; } @@ -1338,7 +1304,7 @@ encapsulated_binary: switch (error) { /* case -1: not claimed: continue */ - case -2: /* Encapsulated binary */ + case -2: /* Encapsulated binary, imgp->ip_XXX set for next iteration */ goto encapsulated_binary; case -3: /* Interpreter */ diff --git a/bsd/kern/mach_fat.c b/bsd/kern/mach_fat.c index acac3b819..7af7c6580 100644 --- a/bsd/kern/mach_fat.c +++ b/bsd/kern/mach_fat.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1991-2005 Apple Computer, Inc. All rights reserved. + * Copyright (c) 1991-2015 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -45,8 +45,8 @@ * Function: Locate the architecture-dependant contents of a fat * file that match this CPU. * - * Args: vp: The vnode for the fat file. - * header: A pointer to the fat file header. + * Args: header: A pointer to the fat file header. 
+ * size: How large the fat file header is (including fat_arch array) * req_cpu_type: The required cpu type. * mask_bits: Bits to mask from the sub-image type when * grading it vs. the req_cpu_type @@ -58,75 +58,40 @@ **********************************************************************/ static load_return_t fatfile_getarch( -#if 0 - struct vnode *vp, -#else - __unused struct vnode *vp, -#endif vm_offset_t data_ptr, + vm_size_t data_size, cpu_type_t req_cpu_type, cpu_type_t mask_bits, struct fat_arch *archret) { - /* vm_pager_t pager; */ - vm_offset_t addr; - vm_size_t size; load_return_t lret; struct fat_arch *arch; struct fat_arch *best_arch; int grade; int best_grade; - int nfat_arch; - off_t end_of_archs; + uint32_t nfat_arch, max_nfat_arch; cpu_type_t testtype; cpu_type_t testsubtype; struct fat_header *header; -#if 0 - off_t filesize; -#endif - /* - * Get the pager for the file. - */ + if (sizeof(struct fat_header) > data_size) { + return (LOAD_FAILURE); + } header = (struct fat_header *)data_ptr; - - /* - * Map portion that must be accessible directly into - * kernel's map. - */ nfat_arch = OSSwapBigToHostInt32(header->nfat_arch); - end_of_archs = (off_t)nfat_arch * sizeof(struct fat_arch) + - sizeof(struct fat_header); -#if 0 - filesize = ubc_getsize(vp); - if (end_of_archs > (int)filesize) { - return(LOAD_BADMACHO); + max_nfat_arch = (data_size - sizeof(struct fat_header)) / sizeof(struct fat_arch); + if (nfat_arch > max_nfat_arch) { + /* nfat_arch would cause us to read off end of buffer */ + return (LOAD_BADMACHO); } -#endif - - /* - * This check is limited on the top end because we are reading - * only PAGE_SIZE bytes - */ - if (end_of_archs > PAGE_SIZE || - end_of_archs < (off_t)(sizeof(struct fat_header)+sizeof(struct fat_arch))) - return(LOAD_BADMACHO); - - /* - * Round size of fat_arch structures up to page boundry. - */ - size = round_page(end_of_archs); - if (size == 0) - return(LOAD_BADMACHO); /* * Scan the fat_arch's looking for the best one. */ - addr = data_ptr; best_arch = NULL; best_grade = 0; - arch = (struct fat_arch *) (addr + sizeof(struct fat_header)); + arch = (struct fat_arch *) (data_ptr + sizeof(struct fat_header)); for (; nfat_arch-- > 0; arch++) { testtype = OSSwapBigToHostInt32(arch->cputype); testsubtype = OSSwapBigToHostInt32(arch->cpusubtype) & ~CPU_SUBTYPE_MASK; @@ -179,17 +144,29 @@ fatfile_getarch( } load_return_t -fatfile_getarch_affinity( - struct vnode *vp, +fatfile_getbestarch( vm_offset_t data_ptr, - struct fat_arch *archret, - int affinity __unused) + vm_size_t data_size, + struct fat_arch *archret) { /* * Ignore all architectural bits when determining if an image * in a fat file should be skipped or graded. 
*/ - return fatfile_getarch(vp, data_ptr, cpu_type(), CPU_ARCH_MASK, archret); + return fatfile_getarch(data_ptr, data_size, cpu_type(), CPU_ARCH_MASK, archret); +} + +load_return_t +fatfile_getbestarch_for_cputype( + cpu_type_t cputype, + vm_offset_t data_ptr, + vm_size_t data_size, + struct fat_arch *archret) +{ + /* + * Scan the fat_arch array for exact matches for this cpu_type_t only + */ + return fatfile_getarch(data_ptr, data_size, cputype, 0, archret); } /********************************************************************** @@ -209,11 +186,111 @@ fatfile_getarch_affinity( **********************************************************************/ load_return_t fatfile_getarch_with_bits( - struct vnode *vp, integer_t archbits, vm_offset_t data_ptr, + vm_size_t data_size, struct fat_arch *archret) { - return fatfile_getarch(vp, data_ptr, archbits | (cpu_type() & ~CPU_ARCH_MASK), 0, archret); + /* + * Scan the fat_arch array for matches with the requested + * architectural bits set, and for the current hardware cpu CPU. + */ + return fatfile_getarch(data_ptr, data_size, (archbits & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK), 0, archret); } +/* + * Validate the fat_header and fat_arch array in memory. We check that: + * + * 1) arch count would not exceed the data buffer + * 2) arch list does not contain duplicate cputype/cpusubtype tuples + * 3) arch list does not have two overlapping slices. The area + * at the front of the file containing the fat headers is implicitly + * a range that a slice should also not try to cover + */ +load_return_t +fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size) +{ + uint32_t magic, nfat_arch; + uint32_t max_nfat_arch, i, j; + uint32_t fat_header_size; + + struct fat_arch *arches; + struct fat_header *header; + + if (sizeof(struct fat_header) > data_size) { + return (LOAD_FAILURE); + } + + header = (struct fat_header *)data_ptr; + magic = OSSwapBigToHostInt32(header->magic); + nfat_arch = OSSwapBigToHostInt32(header->nfat_arch); + + if (magic != FAT_MAGIC) { + /* must be FAT_MAGIC big endian */ + return (LOAD_FAILURE); + } + + max_nfat_arch = (data_size - sizeof(struct fat_header)) / sizeof(struct fat_arch); + if (nfat_arch > max_nfat_arch) { + /* nfat_arch would cause us to read off end of buffer */ + return (LOAD_BADMACHO); + } + + /* now that we know the fat_arch list fits in the buffer, how much does it use? 
*/ + fat_header_size = sizeof(struct fat_header) + nfat_arch * sizeof(struct fat_arch); + arches = (struct fat_arch *)(data_ptr + sizeof(struct fat_header)); + + for (i=0; i < nfat_arch; i++) { + uint32_t i_begin = OSSwapBigToHostInt32(arches[i].offset); + uint32_t i_size = OSSwapBigToHostInt32(arches[i].size); + uint32_t i_cputype = OSSwapBigToHostInt32(arches[i].cputype); + uint32_t i_cpusubtype = OSSwapBigToHostInt32(arches[i].cpusubtype); + + if (i_begin < fat_header_size) { + /* slice is trying to claim part of the file used by fat headers themselves */ + return (LOAD_BADMACHO); + } + + if ((UINT32_MAX - i_size) < i_begin) { + /* start + size would overflow */ + return (LOAD_BADMACHO); + } + uint32_t i_end = i_begin + i_size; + + for (j=i+1; j < nfat_arch; j++) { + uint32_t j_begin = OSSwapBigToHostInt32(arches[j].offset); + uint32_t j_size = OSSwapBigToHostInt32(arches[j].size); + uint32_t j_cputype = OSSwapBigToHostInt32(arches[j].cputype); + uint32_t j_cpusubtype = OSSwapBigToHostInt32(arches[j].cpusubtype); + + if ((i_cputype == j_cputype) && (i_cpusubtype == j_cpusubtype)) { + /* duplicate cputype/cpusubtype, results in ambiguous references */ + return (LOAD_BADMACHO); + } + + if ((UINT32_MAX - j_size) < j_begin) { + /* start + size would overflow */ + return (LOAD_BADMACHO); + } + uint32_t j_end = j_begin + j_size; + + if (i_begin <= j_begin) { + if (i_end <= j_begin) { + /* I completely precedes J */ + } else { + /* I started before J, but ends somewhere in or after J */ + return (LOAD_BADMACHO); + } + } else { + if (i_begin >= j_end) { + /* I started after J started but also after J ended */ + } else { + /* I started after J started but before it ended, so there is overlap */ + return (LOAD_BADMACHO); + } + } + } + } + + return (LOAD_SUCCESS); +} diff --git a/bsd/kern/mach_fat.h b/bsd/kern/mach_fat.h index df71ddc2a..def48fffd 100644 --- a/bsd/kern/mach_fat.h +++ b/bsd/kern/mach_fat.h @@ -34,9 +34,12 @@ #include #include -load_return_t fatfile_getarch_affinity(struct vnode *vp, vm_offset_t data_ptr, - struct fat_arch *archret, int affinity); -load_return_t fatfile_getarch_with_bits(struct vnode *vp, integer_t archbits, - vm_offset_t data_ptr, struct fat_arch *archret); +load_return_t fatfile_validate_fatarches(vm_offset_t data_ptr, vm_size_t data_size); + +load_return_t fatfile_getbestarch(vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); +load_return_t fatfile_getbestarch_for_cputype(cpu_type_t cputype, + vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); +load_return_t fatfile_getarch_with_bits(integer_t archbits, + vm_offset_t data_ptr, vm_size_t data_size, struct fat_arch *archret); #endif /* _BSD_KERN_MACH_FAT_H_ */ diff --git a/bsd/kern/mach_loader.c b/bsd/kern/mach_loader.c index 0f477bdf0..81419cb5f 100644 --- a/bsd/kern/mach_loader.c +++ b/bsd/kern/mach_loader.c @@ -110,6 +110,7 @@ static load_result_t load_result_null = { .prog_allocated_stack = 0, .prog_stack_size = 0, .validentry = 0, + .using_lcmain = 0, .csflags = 0, .uuid = { 0 }, .min_vm_addr = MACH_VM_MAX_ADDRESS, @@ -524,7 +525,7 @@ parse_machfile( /* * Break infinite recursion */ - if (depth > 6) { + if (depth > 1) { return(LOAD_FAILURE); } @@ -542,21 +543,12 @@ parse_machfile( switch (header->filetype) { - case MH_OBJECT: case MH_EXECUTE: - case MH_PRELOAD: if (depth != 1) { return (LOAD_FAILURE); } - break; - - case MH_FVMLIB: - case MH_DYLIB: - if (depth == 1) { - return (LOAD_FAILURE); - } - break; + break; case MH_DYLINKER: if (depth != 2) { return (LOAD_FAILURE); 
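
fatfile_validate_fatarches above is the heart of the hardening: after bounding nfat_arch by the buffer size, it rejects any slice that starts inside the fat headers, overflows a uint32_t, repeats a (cputype, cpusubtype) pair, or overlaps another slice. Below is a compilable restatement of those pairwise checks on a host-endian stand-in structure; slice, validate_slices, and header_size are illustrative names, not kernel API, with header_size playing the role of the computed fat_header_size:

#include <stdint.h>
#include <stdio.h>

/* Host-endian stand-in for one fat_arch entry; offset/size in bytes. */
struct slice {
	uint32_t cputype, cpusubtype, offset, size;
};

/*
 * Returns 0 when the slice list passes the same checks the patch adds:
 * no slice may claim the header area, overflow a uint32_t, duplicate a
 * (cputype, cpusubtype) pair, or overlap another slice.
 */
static int
validate_slices(const struct slice *s, uint32_t n, uint32_t header_size)
{
	for (uint32_t i = 0; i < n; i++) {
		if (s[i].offset < header_size)
			return (-1);	/* slice claims the fat headers */
		if (UINT32_MAX - s[i].size < s[i].offset)
			return (-1);	/* offset + size overflows */
		uint32_t i_end = s[i].offset + s[i].size;

		for (uint32_t j = i + 1; j < n; j++) {
			if (s[i].cputype == s[j].cputype &&
			    s[i].cpusubtype == s[j].cpusubtype)
				return (-1);	/* ambiguous duplicate arch */
			if (UINT32_MAX - s[j].size < s[j].offset)
				return (-1);
			uint32_t j_end = s[j].offset + s[j].size;
			if (s[i].offset < j_end && s[j].offset < i_end)
				return (-1);	/* slices overlap */
		}
	}
	return (0);
}

int
main(void)
{
	struct slice ok[]  = { {7, 3, 4096, 4096}, {0x01000007, 3, 8192, 4096} };
	struct slice bad[] = { {7, 3, 4096, 8192}, {0x01000007, 3, 8192, 4096} };

	printf("disjoint: %d, overlapping: %d\n",
	    validate_slices(ok, 2, 4096), validate_slices(bad, 2, 4096));
	return (0);
}

The single comparison s[i].offset < j_end && s[j].offset < i_end is the half-open-interval form of the patch's two-branch test, and like the original it tolerates slices that merely touch.
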
@@ -599,11 +591,18 @@ parse_machfile( error = vn_rdwr(UIO_READ, vp, addr, size, file_offset, UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p); if (error) { - if (kl_addr ) + if (kl_addr) kfree(kl_addr, kl_size); return(LOAD_IOERROR); } + if (resid) { + /* We must be able to read in as much as the mach_header indicated */ + if (kl_addr) + kfree(kl_addr, kl_size); + return(LOAD_BADMACHO); + } + /* * For PIE and dyld, slide everything by the ASLR offset. */ @@ -624,7 +623,7 @@ parse_machfile( /* * Check that the entry point is contained in an executable segments */ - if ((pass == 3) && (result->validentry == 0)) { + if ((pass == 3) && (!result->using_lcmain && result->validentry == 0)) { thread_state_initialize(thread); ret = LOAD_FAILURE; break; @@ -769,13 +768,19 @@ parse_machfile( printf("proc %d: load code signature error %d " "for file \"%s\"\n", p->p_pid, ret, vp->v_name); - ret = LOAD_SUCCESS; /* ignore error */ + /* + * Allow injections to be ignored on devices w/o enforcement enabled + */ + if (!cs_enforcement(NULL)) + ret = LOAD_SUCCESS; /* ignore error */ + } else { got_code_signatures = TRUE; } if (got_code_signatures) { - boolean_t valid = FALSE, tainted = TRUE; + unsigned tainted = CS_VALIDATE_TAINTED; + boolean_t valid = FALSE; struct cs_blob *blobs; vm_size_t off = 0; @@ -785,12 +790,14 @@ parse_machfile( blobs = ubc_get_cs_blobs(vp); while (off < size && ret == LOAD_SUCCESS) { + tainted = CS_VALIDATE_TAINTED; + valid = cs_validate_page(blobs, NULL, file_offset + off, addr + off, &tainted); - if (!valid || tainted) { + if (!valid || (tainted & CS_VALIDATE_TAINTED)) { if (cs_debug) printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n", vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags); @@ -847,39 +854,48 @@ parse_machfile( } if (ret == LOAD_SUCCESS) { - if (! got_code_signatures) { - struct cs_blob *blob; - /* no embedded signatures: look for detached ones */ - blob = ubc_cs_blob_get(vp, -1, file_offset); - if (blob != NULL) { - unsigned int cs_flag_data = blob->csb_flags; - if(0 != ubc_cs_generation_check(vp)) { - if (0 != ubc_cs_blob_revalidate(vp, blob)) { - /* clear out the flag data if revalidation fails */ - cs_flag_data = 0; - result->csflags &= ~CS_VALID; + if (! got_code_signatures) { + if (cs_enforcement(NULL)) { + ret = LOAD_FAILURE; + } else { + /* + * No embedded signatures: look for detached by taskgated, + * this is only done on OSX, on embedded platforms we expect everything + * to be have embedded signatures. + */ + struct cs_blob *blob; + + blob = ubc_cs_blob_get(vp, -1, file_offset); + if (blob != NULL) { + unsigned int cs_flag_data = blob->csb_flags; + if(0 != ubc_cs_generation_check(vp)) { + if (0 != ubc_cs_blob_revalidate(vp, blob, 0)) { + /* clear out the flag data if revalidation fails */ + cs_flag_data = 0; + result->csflags &= ~CS_VALID; + } + } + /* get flags to be applied to the process */ + result->csflags |= cs_flag_data; } } - /* get flags to be applied to the process */ - result->csflags |= cs_flag_data; - } - } + } /* Make sure if we need dyld, we got it */ - if (result->needs_dynlinker && !dlp) { + if ((ret == LOAD_SUCCESS) && result->needs_dynlinker && !dlp) { ret = LOAD_FAILURE; } - - if ((ret == LOAD_SUCCESS) && (dlp != 0)) { + + if ((ret == LOAD_SUCCESS) && (dlp != 0)) { /* - * load the dylinker, and slide it by the independent DYLD ASLR - * offset regardless of the PIE-ness of the main binary. 
- */ + * load the dylinker, and slide it by the independent DYLD ASLR + * offset regardless of the PIE-ness of the main binary. + */ ret = load_dylinker(dlp, dlarchbits, map, thread, depth, - dyld_aslr_offset, result); + dyld_aslr_offset, result); } - - if((ret == LOAD_SUCCESS) && (depth == 1)) { + + if((ret == LOAD_SUCCESS) && (depth == 1)) { if (result->thread_count == 0) { ret = LOAD_FAILURE; } @@ -1167,8 +1183,16 @@ load_segment( LC_SEGMENT_64 == lcp->cmd, single_section_size, (const char *)lcp + segment_command_size, slide, result); - if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size))) - result->validentry = 1; + if (result->entry_point != MACH_VM_MIN_ADDRESS) { + if ((result->entry_point >= map_addr) && (result->entry_point < (map_addr + map_size))) { + if ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) == (VM_PROT_READ|VM_PROT_EXECUTE)) { + result->validentry = 1; + } else { + /* right range but wrong protections, unset if previously validated */ + result->validentry = 0; + } + } + } return ret; } @@ -1211,7 +1235,6 @@ load_main( if (epc->cmdsize < sizeof(*epc)) return (LOAD_BADMACHO); if (result->thread_count != 0) { - printf("load_main: already have a thread!"); return (LOAD_FAILURE); } @@ -1237,9 +1260,14 @@ load_main( result->user_stack = addr; result->user_stack -= slide; + if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { + /* Already processed LC_MAIN or LC_UNIXTHREAD */ + return (LOAD_FAILURE); + } + /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */ result->needs_dynlinker = TRUE; - result->validentry = TRUE; + result->using_lcmain = TRUE; ret = thread_state_initialize( thread ); if (ret != KERN_SUCCESS) { @@ -1269,7 +1297,6 @@ load_unixthread( if (tcp->cmdsize < sizeof(*tcp)) return (LOAD_BADMACHO); if (result->thread_count != 0) { - printf("load_unixthread: already have a thread!"); return (LOAD_FAILURE); } @@ -1308,6 +1335,11 @@ load_unixthread( if (ret != LOAD_SUCCESS) return(ret); + if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { + /* Already processed LC_MAIN or LC_UNIXTHREAD */ + return (LOAD_FAILURE); + } + result->entry_point = addr; result->entry_point += slide; @@ -1490,6 +1522,8 @@ struct macho_data { } __header; }; +#define DEFAULT_DYLD_PATH "/usr/lib/dyld" + static load_return_t load_dylinker( struct dylinker_command *lcp, @@ -1529,6 +1563,12 @@ load_dylinker( return(LOAD_BADMACHO); } while (*p++); +#if !(DEVELOPMENT || DEBUG) + if (0 != strcmp(name, DEFAULT_DYLD_PATH)) { + return (LOAD_BADMACHO); + } +#endif + /* Allocate wad-of-data from heap to reduce excessively deep stacks */ MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK); @@ -1667,7 +1707,7 @@ load_code_signature( blob->csb_mem_size == lcp->datasize) { /* it matches the blob we want here, lets verify the version */ if(0 != ubc_cs_generation_check(vp)) { - if (0 != ubc_cs_blob_revalidate(vp, blob)) { + if (0 != ubc_cs_blob_revalidate(vp, blob, 0)) { ret = LOAD_FAILURE; /* set error same as from ubc_cs_blob_add */ goto out; } @@ -1707,7 +1747,8 @@ load_code_signature( cputype, macho_offset, addr, - lcp->datasize)) { + lcp->datasize, + 0)) { ret = LOAD_FAILURE; goto out; } else { @@ -1956,21 +1997,32 @@ get_macho_vnode( goto bad2; } + if (resid) { + error = LOAD_BADMACHO; + goto bad2; + } + if (header->mach_header.magic == MH_MAGIC || header->mach_header.magic == MH_MAGIC_64) { is_fat = FALSE; - } else if (header->fat_header.magic == FAT_MAGIC || - header->fat_header.magic == FAT_CIGAM) { 
- is_fat = TRUE; + } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) { + is_fat = TRUE; } else { error = LOAD_BADMACHO; goto bad2; } if (is_fat) { + + error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header), + sizeof(*header)); + if (error != LOAD_SUCCESS) { + goto bad2; + } + /* Look up our architecture in the fat file. */ - error = fatfile_getarch_with_bits(vp, archbits, - (vm_offset_t)(&header->fat_header), &fat_arch); + error = fatfile_getarch_with_bits(archbits, + (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch); if (error != LOAD_SUCCESS) goto bad2; @@ -1983,6 +2035,11 @@ get_macho_vnode( goto bad2; } + if (resid) { + error = LOAD_BADMACHO; + goto bad2; + } + /* Is this really a Mach-O? */ if (header->mach_header.magic != MH_MAGIC && header->mach_header.magic != MH_MAGIC_64) { diff --git a/bsd/kern/mach_loader.h b/bsd/kern/mach_loader.h index dc0dbfa5b..b6ab1feb1 100644 --- a/bsd/kern/mach_loader.h +++ b/bsd/kern/mach_loader.h @@ -64,6 +64,7 @@ typedef struct _load_result { prog_allocated_stack :1, prog_stack_size : 1, validentry :1, + using_lcmain :1, :0; unsigned int csflags; unsigned char uuid[16]; diff --git a/bsd/kern/policy_check.c b/bsd/kern/policy_check.c index a8acdd2b0..95ae2d593 100644 --- a/bsd/kern/policy_check.c +++ b/bsd/kern/policy_check.c @@ -118,7 +118,7 @@ common_hook(void) return rv; } -#if (MAC_POLICY_OPS_VERSION != 31) +#if (MAC_POLICY_OPS_VERSION != 32) # error "struct mac_policy_ops doesn't match definition in mac_policy.h" #endif /* diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c index 89e61f1a6..83785b788 100644 --- a/bsd/kern/ubc_subr.c +++ b/bsd/kern/ubc_subr.c @@ -52,6 +52,7 @@ #include #include #include +#include #include #include @@ -2889,7 +2890,8 @@ ubc_cs_blob_add( cpu_type_t cputype, off_t base_offset, vm_address_t addr, - vm_size_t size) + vm_size_t size, + __unused int flags) { kern_return_t kr; struct ubc_info *uip; @@ -2990,12 +2992,23 @@ ubc_cs_blob_add( * Let policy module check whether the blob's signature is accepted. 
*/ #if CONFIG_MACF - error = mac_vnode_check_signature(vp, base_offset, blob->csb_sha1, (const void*)cd, size, &is_platform_binary); + error = mac_vnode_check_signature(vp, + base_offset, + blob->csb_sha1, + (const void*)cd, + size, flags, + &is_platform_binary); if (error) { if (cs_debug) printf("check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); goto out; } + if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !is_platform_binary) { + if (cs_debug) + printf("check_signature[pid: %d], is not apple signed\n", current_proc()->p_pid); + error = EPERM; + goto out; + } #endif if (is_platform_binary) { @@ -3303,7 +3316,8 @@ ubc_cs_generation_check( int ubc_cs_blob_revalidate( struct vnode *vp, - struct cs_blob *blob + struct cs_blob *blob, + __unused int flags ) { int error = 0; @@ -3325,7 +3339,7 @@ ubc_cs_blob_revalidate( /* callout to mac_vnode_check_signature */ #if CONFIG_MACF - error = mac_vnode_check_signature(vp, blob->csb_base_offset, blob->csb_sha1, (const void*)cd, blob->csb_cpu_type, &is_platform_binary); + error = mac_vnode_check_signature(vp, blob->csb_base_offset, blob->csb_sha1, (const void*)cd, blob->csb_cpu_type, flags, &is_platform_binary); if (cs_debug && error) { printf("revalidate: check_signature[pid: %d], error = %d\n", current_proc()->p_pid, error); } @@ -3414,7 +3428,7 @@ cs_validate_page( memory_object_t pager, memory_object_offset_t page_offset, const void *data, - boolean_t *tainted) + unsigned *tainted) { SHA1_CTX sha1ctxt; unsigned char actual_hash[SHA1_RESULTLEN]; @@ -3540,15 +3554,18 @@ cs_validate_page( pager, page_offset); } validated = FALSE; - *tainted = FALSE; + *tainted = 0; } else { + *tainted = 0; + size = PAGE_SIZE_4K; const uint32_t *asha1, *esha1; if ((off_t)(offset + size) > codeLimit) { /* partial page at end of segment */ assert(offset < codeLimit); size = (size_t) (codeLimit & PAGE_MASK_4K); + *tainted |= CS_VALIDATE_NX; } /* compute the actual page's SHA1 hash */ SHA1Init(&sha1ctxt); @@ -3571,7 +3588,7 @@ cs_validate_page( esha1[3], esha1[4]); } cs_validate_page_bad_hash++; - *tainted = TRUE; + *tainted |= CS_VALIDATE_TAINTED; } else { if (cs_debug > 10) { printf("CODE SIGNING: cs_validate_page: " @@ -3579,7 +3596,6 @@ cs_validate_page( "SHA1 OK\n", pager, page_offset, size); } - *tainted = FALSE; } validated = TRUE; } diff --git a/bsd/machine/exec.h b/bsd/machine/exec.h index a38f0dd74..a5712128a 100644 --- a/bsd/machine/exec.h +++ b/bsd/machine/exec.h @@ -42,6 +42,7 @@ struct exec_info { }; int grade_binary(cpu_type_t, cpu_subtype_t); +boolean_t pie_required(cpu_type_t, cpu_subtype_t); #if defined (__i386__) || defined(__x86_64__) #include "i386/exec.h" diff --git a/bsd/sys/codesign.h b/bsd/sys/codesign.h index bf908f8f8..1a23c3d0a 100644 --- a/bsd/sys/codesign.h +++ b/bsd/sys/codesign.h @@ -55,6 +55,9 @@ #define CS_ENTITLEMENT_FLAGS (CS_GET_TASK_ALLOW | CS_INSTALLER) +/* MAC flags used by F_ADDFILESIGS_* */ +#define MAC_VNODE_CHECK_DYLD_SIM 0x1 /* tells the MAC framework that dyld-sim is being loaded */ + /* csops operations */ #define CS_OPS_STATUS 0 /* return status */ #define CS_OPS_MARKINVALID 1 /* invalidate process */ diff --git a/bsd/sys/fcntl.h b/bsd/sys/fcntl.h index 4c80591ff..e8dcd03fd 100644 --- a/bsd/sys/fcntl.h +++ b/bsd/sys/fcntl.h @@ -336,6 +336,8 @@ */ #endif +#define F_ADDFILESIGS_FOR_DYLD_SIM 83 /* Add signature from same file, only if it is signed by Apple (used by dyld for simulator) */ + // FS-specific fcntl()'s numbers begin at 0x00010000 and go up #define FCNTL_FS_SPECIFIC_BASE 0x00010000 diff --git 
a/bsd/sys/ubc.h b/bsd/sys/ubc.h index c138d2936..b3389d4b0 100644 --- a/bsd/sys/ubc.h +++ b/bsd/sys/ubc.h @@ -83,7 +83,7 @@ struct cs_blob *ubc_cs_blob_get(vnode_t, cpu_type_t, off_t); /* apis to handle generation count for cs blob */ void cs_blob_reset_cache(void); -int ubc_cs_blob_revalidate(vnode_t, struct cs_blob *); +int ubc_cs_blob_revalidate(vnode_t, struct cs_blob *, int); int ubc_cs_generation_check(vnode_t); int cs_entitlements_blob_get(proc_t, void **, size_t *); diff --git a/bsd/sys/ubc_internal.h b/bsd/sys/ubc_internal.h index 093b4a895..90424d745 100644 --- a/bsd/sys/ubc_internal.h +++ b/bsd/sys/ubc_internal.h @@ -195,7 +195,7 @@ int UBCINFOEXISTS(const struct vnode *); /* code signing */ struct cs_blob; -int ubc_cs_blob_add(vnode_t, cpu_type_t, off_t, vm_address_t, vm_size_t); +int ubc_cs_blob_add(vnode_t, cpu_type_t, off_t, vm_address_t, vm_size_t, int); int ubc_cs_sigpup_add(vnode_t, vm_address_t, vm_size_t); struct cs_blob *ubc_get_cs_blobs(vnode_t); void ubc_get_cs_mtime(vnode_t, struct timespec *); diff --git a/bsd/sys/vnode.h b/bsd/sys/vnode.h index 72e1a8532..309842aee 100644 --- a/bsd/sys/vnode.h +++ b/bsd/sys/vnode.h @@ -734,6 +734,7 @@ extern int vttoif_tab[]; /* VNOP_REMOVE/unlink flags */ #define VNODE_REMOVE_NODELETEBUSY 0x0001 /* Don't delete busy files (Carbon) */ #define VNODE_REMOVE_SKIP_NAMESPACE_EVENT 0x0002 /* Do not upcall to userland handlers */ +#define VNODE_REMOVE_NO_AUDIT_PATH 0x0004 /* Do not audit the path */ /* VNOP_READDIR flags: */ #define VNODE_READDIR_EXTENDED 0x0001 /* use extended directory entries */ diff --git a/bsd/vfs/vfs_cache.c b/bsd/vfs/vfs_cache.c index a08ffe235..1575aafea 100644 --- a/bsd/vfs/vfs_cache.c +++ b/bsd/vfs/vfs_cache.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2012 Apple Inc. All rights reserved. + * Copyright (c) 2000-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -342,8 +342,12 @@ again: * and disallow further path construction */ if ((vp->v_parent == NULLVP) && (rootvnode != vp)) { - /* Only '/' is allowed to have a NULL parent pointer */ - ret = EINVAL; + /* + * Only '/' is allowed to have a NULL parent + * pointer. Upper level callers should ideally + * re-drive name lookup on receiving a ENOENT. + */ + ret = ENOENT; /* The code below will exit early if 'tvp = vp' == NULL */ } diff --git a/bsd/vfs/vfs_subr.c b/bsd/vfs/vfs_subr.c index a4e732070..898319ab0 100644 --- a/bsd/vfs/vfs_subr.c +++ b/bsd/vfs/vfs_subr.c @@ -187,7 +187,8 @@ extern kern_return_t adjust_vm_object_cache(vm_size_t oval, vm_size_t nval); __private_extern__ void vntblinit(void); __private_extern__ kern_return_t reset_vmobjectcache(unsigned int val1, unsigned int val2); -__private_extern__ int unlink1(vfs_context_t, struct nameidata *, int); +__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t, + enum uio_seg, int); extern int system_inshutdown; @@ -8118,7 +8119,6 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * char *rbuf = NULL; void *dir_pos; void *dir_end; - struct nameidata nd_temp; struct dirent *dp; errno_t error; @@ -8259,11 +8259,10 @@ errno_t rmdir_remove_orphaned_appleDouble(vnode_t vp , vfs_context_t ctx, int * (dp->d_namlen == 2 && dp->d_name[0] == '.' 
&& dp->d_name[1] == '.')) ) { - NDINIT(&nd_temp, DELETE, OP_UNLINK, USEDVP, - UIO_SYSSPACE, CAST_USER_ADDR_T(dp->d_name), - ctx); - nd_temp.ni_dvp = vp; - error = unlink1(ctx, &nd_temp, VNODE_REMOVE_SKIP_NAMESPACE_EVENT); + error = unlink1(ctx, vp, + CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE, + VNODE_REMOVE_SKIP_NAMESPACE_EVENT | + VNODE_REMOVE_NO_AUDIT_PATH); if (error && error != ENOENT) { goto outsc; diff --git a/bsd/vfs/vfs_syscalls.c b/bsd/vfs/vfs_syscalls.c index ba37a4e38..e2b135a7b 100644 --- a/bsd/vfs/vfs_syscalls.c +++ b/bsd/vfs/vfs_syscalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1995-2014 Apple Inc. All rights reserved. + * Copyright (c) 1995-2015 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -166,6 +166,18 @@ int prepare_coveredvp(vnode_t vp, vfs_context_t ctx, struct componentname *cnp, struct fd_vn_data * fg_vn_data_alloc(void); +/* + * Max retries for ENOENT returns from vn_authorize_{rmdir, unlink, rename} + * Concurrent lookups (or lookups by ids) on hard links can cause the + * vn_getpath (which does not re-enter the filesystem as vn_getpath_fsenter + * does) to return ENOENT as the path cannot be returned from the name cache + * alone. We have no option but to retry and hope to get one namei->reverse path + * generation done without an intervening lookup, lookup by id on the hard link + * item. This is only an issue for MAC hooks which cannot reenter the filesystem + * which currently are the MAC hooks for rename, unlink and rmdir. + */ +#define MAX_AUTHORIZE_ENOENT_RETRIES 1024 + static int rmdirat_internal(vfs_context_t, int, user_addr_t, enum uio_seg); static int fsgetpath_internal(vfs_context_t, int, uint64_t, vm_size_t, caddr_t, int *); @@ -185,7 +197,7 @@ __private_extern__ int sync_internal(void); __private_extern__ -int unlink1(vfs_context_t, struct nameidata *, int); +int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int); extern lck_grp_t *fd_vn_lck_grp; extern lck_grp_attr_t *fd_vn_lck_grp_attr; @@ -4452,8 +4464,10 @@ undelete(__unused proc_t p, __unused struct undelete_args *uap, __unused int32_t */ /* ARGSUSED */ static int -unlink1at(vfs_context_t ctx, struct nameidata *ndp, int unlink_flags, int fd) +unlinkat_internal(vfs_context_t ctx, int fd, vnode_t start_dvp, + user_addr_t path_arg, enum uio_seg segflg, int unlink_flags) { + struct nameidata nd; vnode_t vp, dvp; int error; struct componentname *cnp; @@ -4463,29 +4477,49 @@ unlink1at(vfs_context_t ctx, struct nameidata *ndp, int unlink_flags, int fd) fse_info finfo; struct vnode_attr va; #endif - int flags = 0; - int need_event = 0; - int has_listeners = 0; - int truncated_path=0; + int flags; + int need_event; + int has_listeners; + int truncated_path; int batched; - struct vnode_attr *vap = NULL; + struct vnode_attr *vap; + int do_retry; + int retry_count = 0; + int cn_flags; + + cn_flags = LOCKPARENT; + if (!(unlink_flags & VNODE_REMOVE_NO_AUDIT_PATH)) + cn_flags |= AUDITVNPATH1; + /* If a starting dvp is passed, it trumps any fd passed. 
*/ + if (start_dvp) + cn_flags |= USEDVP; #if NAMEDRSRCFORK /* unlink or delete is allowed on rsrc forks and named streams */ - ndp->ni_cnd.cn_flags |= CN_ALLOWRSRCFORK; + cn_flags |= CN_ALLOWRSRCFORK; #endif - ndp->ni_cnd.cn_flags |= LOCKPARENT; - ndp->ni_flag |= NAMEI_COMPOUNDREMOVE; - cnp = &ndp->ni_cnd; +retry: + do_retry = 0; + flags = 0; + need_event = 0; + has_listeners = 0; + truncated_path = 0; + vap = NULL; + + NDINIT(&nd, DELETE, OP_UNLINK, cn_flags, segflg, path_arg, ctx); + + nd.ni_dvp = start_dvp; + nd.ni_flag |= NAMEI_COMPOUNDREMOVE; + cnp = &nd.ni_cnd; lookup_continue: - error = nameiat(ndp, fd); + error = nameiat(&nd, fd); if (error) return (error); - dvp = ndp->ni_dvp; - vp = ndp->ni_vp; + dvp = nd.ni_dvp; + vp = nd.ni_vp; /* With Carbon delete semantics, busy files cannot be deleted */ @@ -4510,6 +4544,11 @@ lookup_continue: if (!batched) { error = vn_authorize_unlink(dvp, vp, cnp, ctx, NULL); if (error) { + if (error == ENOENT && + retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) { + do_retry = 1; + retry_count++; + } goto out; } } @@ -4548,23 +4587,23 @@ lookup_continue: goto out; } } - len = safe_getpath(dvp, ndp->ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path); + len = safe_getpath(dvp, nd.ni_cnd.cn_nameptr, path, MAXPATHLEN, &truncated_path); } #if NAMEDRSRCFORK - if (ndp->ni_cnd.cn_flags & CN_WANTSRSRCFORK) + if (nd.ni_cnd.cn_flags & CN_WANTSRSRCFORK) error = vnode_removenamedstream(dvp, vp, XATTR_RESOURCEFORK_NAME, 0, ctx); else #endif { - error = vn_remove(dvp, &ndp->ni_vp, ndp, flags, vap, ctx); - vp = ndp->ni_vp; + error = vn_remove(dvp, &nd.ni_vp, &nd, flags, vap, ctx); + vp = nd.ni_vp; if (error == EKEEPLOOKING) { if (!batched) { panic("EKEEPLOOKING, but not a filesystem that supports compound VNOPs?"); } - if ((ndp->ni_flag & NAMEI_CONTLOOKUP) == 0) { + if ((nd.ni_flag & NAMEI_CONTLOOKUP) == 0) { panic("EKEEPLOOKING, but continue flag not set?"); } @@ -4573,6 +4612,16 @@ lookup_continue: goto out; } goto lookup_continue; + } else if (error == ENOENT && batched && + retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) { + /* + * For compound VNOPs, the authorization callback may + * return ENOENT in case of racing hardlink lookups + * hitting the name cache, redrive the lookup. + */ + do_retry = 1; + retry_count += 1; + goto out; } } @@ -4635,39 +4684,45 @@ out: * nameidone has to happen before we vnode_put(dvp) * since it may need to release the fs_nodelock on the dvp */ - nameidone(ndp); + nameidone(&nd); vnode_put(dvp); if (vp) { vnode_put(vp); } + + if (do_retry) { + goto retry; + } + return (error); } int -unlink1(vfs_context_t ctx, struct nameidata *ndp, int unlink_flags) +unlink1(vfs_context_t ctx, vnode_t start_dvp, user_addr_t path_arg, + enum uio_seg segflg, int unlink_flags) { - return (unlink1at(ctx, ndp, unlink_flags, AT_FDCWD)); + return (unlinkat_internal(ctx, AT_FDCWD, start_dvp, path_arg, segflg, + unlink_flags)); } /* - * Delete a name from the filesystem using POSIX semantics. + * Delete a name from the filesystem using Carbon semantics. */ -static int -unlinkat_internal(vfs_context_t ctx, int fd, user_addr_t path, - enum uio_seg segflg) +int +delete(__unused proc_t p, struct delete_args *uap, __unused int32_t *retval) { - struct nameidata nd; - - NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, segflg, - path, ctx); - return (unlink1at(ctx, &nd, 0, fd)); + return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP, + uap->path, UIO_USERSPACE, VNODE_REMOVE_NODELETEBUSY)); } +/* + * Delete a name from the filesystem using POSIX semantics. 
+ */ int unlink(__unused proc_t p, struct unlink_args *uap, __unused int32_t *retval) { - return (unlinkat_internal(vfs_context_current(), AT_FDCWD, uap->path, - UIO_USERSPACE)); + return (unlinkat_internal(vfs_context_current(), AT_FDCWD, NULLVP, + uap->path, UIO_USERSPACE, 0)); } int @@ -4681,21 +4736,7 @@ unlinkat(__unused proc_t p, struct unlinkat_args *uap, __unused int32_t *retval) uap->path, UIO_USERSPACE)); else return (unlinkat_internal(vfs_context_current(), uap->fd, - uap->path, UIO_USERSPACE)); -} - -/* - * Delete a name from the filesystem using Carbon semantics. - */ -int -delete(__unused proc_t p, struct delete_args *uap, __unused int32_t *retval) -{ - struct nameidata nd; - vfs_context_t ctx = vfs_context_current(); - - NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, UIO_USERSPACE, - uap->path, ctx); - return unlink1(ctx, &nd, VNODE_REMOVE_NODELETEBUSY); + NULLVP, uap->path, UIO_USERSPACE, 0)); } /* @@ -6584,6 +6625,7 @@ renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from, struct nameidata *fromnd, *tond; int error; int do_retry; + int retry_count; int mntrename; int need_event; const char *oname = NULL; @@ -6610,6 +6652,7 @@ renameat_internal(vfs_context_t ctx, int fromfd, user_addr_t from, holding_mntlock = 0; do_retry = 0; + retry_count = 0; retry: fvp = tvp = NULL; fdvp = tdvp = NULL; @@ -6670,13 +6713,15 @@ continue_lookup: if (!batched) { error = vn_authorize_rename(fdvp, fvp, &fromnd->ni_cnd, tdvp, tvp, &tond->ni_cnd, ctx, NULL); if (error) { - if (error == ENOENT) { + if (error == ENOENT && + retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) { /* * We encountered a race where after doing the namei, tvp stops * being valid. If so, simply re-drive the rename call from the * top. */ do_retry = 1; + retry_count += 1; } goto out1; } @@ -6944,6 +6989,17 @@ skipped_lookup: do_retry = 1; } + /* + * For compound VNOPs, the authorization callback may return + * ENOENT in case of racing hardlink lookups hitting the name + * cache, redrive the lookup. + */ + if (batched && error == ENOENT && + retry_count < MAX_AUTHORIZE_ENOENT_RETRIES) { + do_retry = 1; + retry_count += 1; + } + goto out1; } @@ -7318,6 +7374,7 @@ rmdirat_internal(vfs_context_t ctx, int fd, user_addr_t dirpath, struct vnode_attr va; #endif /* CONFIG_FSE */ struct vnode_attr *vap = NULL; + int restart_count = 0; int batched; int restart_flag; @@ -7367,6 +7424,11 @@ continue_lookup: if (!batched) { error = vn_authorize_rmdir(dvp, vp, &nd.ni_cnd, ctx, NULL); if (error) { + if (error == ENOENT && + restart_count < MAX_AUTHORIZE_ENOENT_RETRIES) { + restart_flag = 1; + restart_count += 1; + } goto out; } } @@ -7422,6 +7484,16 @@ continue_lookup: if (error == EKEEPLOOKING) { goto continue_lookup; + } else if (batched && error == ENOENT && + restart_count < MAX_AUTHORIZE_ENOENT_RETRIES) { + /* + * For compound VNOPs, the authorization callback + * may return ENOENT in case of racing hard link lookups + * redrive the lookup. 
diff --git a/bsd/vm/vm_compressor_backing_file.c b/bsd/vm/vm_compressor_backing_file.c
index 580cdc4c3..9a663e9e8 100644
--- a/bsd/vm/vm_compressor_backing_file.c
+++ b/bsd/vm/vm_compressor_backing_file.c
@@ -74,22 +74,25 @@ vm_swapfile_get_transfer_size(vnode_t vp)
 	return((uint64_t)vp->v_mount->mnt_vfsstat.f_iosize);
 }
 
-int	unlink1(vfs_context_t, struct nameidata *, int);
+int	unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int);
 
 void
 vm_swapfile_close(uint64_t path_addr, vnode_t vp)
 {
-	struct nameidata nd;
 	vfs_context_t context = vfs_context_current();
-	int error = 0;
+	int error;
 
 	vnode_getwithref(vp);
 	vnode_close(vp, 0, context);
 
-	NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, UIO_SYSSPACE,
-	    path_addr, context);
+	error = unlink1(context, NULLVP, CAST_USER_ADDR_T(path_addr),
+	    UIO_SYSSPACE, 0);
 
-	error = unlink1(context, &nd, 0);
+#if DEVELOPMENT || DEBUG
+	if (error)
+		printf("%s : unlink of %s failed with error %d\n", __FUNCTION__,
+		    (char *)path_addr, error);
+#endif
 }
 
 int
diff --git a/config/MasterVersion b/config/MasterVersion
index 7c7dd96b6..a2049c677 100644
--- a/config/MasterVersion
+++ b/config/MasterVersion
@@ -1,4 +1,4 @@
-14.4.0
+14.5.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
diff --git a/iokit/IOKit/IOLib.h b/iokit/IOKit/IOLib.h
index 3b5103218..a290e4d91 100644
--- a/iokit/IOKit/IOLib.h
+++ b/iokit/IOKit/IOLib.h
@@ -149,7 +149,10 @@ void IOFreePageable(void * address, vm_size_t size);
 /*
  * Typed memory allocation macros. Both may block.
  */
-#define IONew(type,number)        (type*)IOMalloc(sizeof(type) * (number) )
+#define IONew(type,number) \
+( ((number) != 0 && ((vm_size_t) ((sizeof(type) * (number) / (number))) != sizeof(type)) /* overflow check 21532969 */ \
+? 0 \
+: ((type*)IOMalloc(sizeof(type) * (number)))) )
 #define IODelete(ptr,type,number) IOFree( (ptr) , sizeof(type) * (number) )
 
/////////////////////////////////////////////////////////////////////////////
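The rewritten IONew() rejects requests where sizeof(type) * number would wrap: if the multiplication overflows, dividing the truncated product by number no longer recovers sizeof(type), so the macro yields 0 instead of under-allocating. A standalone illustration of the same check (plain C, not kernel code; uint32_t is used so the wrap is easy to demonstrate):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint32_t size = 0x10000;   /* stands in for sizeof(type) */
            uint32_t n = 0x10001;      /* element count chosen to overflow */
            uint32_t prod = size * n;  /* wraps modulo 2^32 to 0x10000 */

            if (n != 0 && prod / n != size)
                    printf("overflow: %#x * %#x wrapped to %#x\n",
                        size, n, prod);
            return (0);
    }
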
diff --git a/iokit/Kernel/IOService.cpp b/iokit/Kernel/IOService.cpp
index a23e683ec..96bb4fc6c 100644
--- a/iokit/Kernel/IOService.cpp
+++ b/iokit/Kernel/IOService.cpp
@@ -1686,15 +1686,18 @@ IOReturn IOService::registerInterestForNotifer( IONotifier *svcNotify, const OSS
     LOCKWRITENOTIFY();
 
     // Get the head of the notifier linked list
-    IOCommand *notifyList = (IOCommand *) getProperty( typeOfInterest );
-    if (!notifyList || !OSDynamicCast(IOCommand, notifyList)) {
+    IOCommand * notifyList;
+    OSObject * obj = copyProperty( typeOfInterest );
+    if (!(notifyList = OSDynamicCast(IOCommand, obj))) {
         notifyList = OSTypeAlloc(IOCommand);
         if (notifyList) {
             notifyList->init();
-            setProperty( typeOfInterest, notifyList);
+            bool ok = setProperty( typeOfInterest, notifyList);
             notifyList->release();
+            if (!ok) notifyList = 0;
         }
     }
+    if (obj) obj->release();
 
     if (notifyList) {
         enqueue(&notifyList->fCommandChain, &notify->chain);
diff --git a/libsyscall/wrappers/cancelable/fcntl-base.c b/libsyscall/wrappers/cancelable/fcntl-base.c
index 7bbded99e..c8808f3ae 100644
--- a/libsyscall/wrappers/cancelable/fcntl-base.c
+++ b/libsyscall/wrappers/cancelable/fcntl-base.c
@@ -56,6 +56,7 @@ fcntl(int fd, int cmd, ...)
 	case F_UNLINKFROM:
 	case F_ADDSIGS:
 	case F_ADDFILESIGS:
+	case F_ADDFILESIGS_FOR_DYLD_SIM:
 	case F_FINDSIGS:
 	case F_TRANSCODEKEY:
 		arg = va_arg(ap, void *);
diff --git a/osfmk/ipc/mach_debug.c b/osfmk/ipc/mach_debug.c
index cc0acd912..d66481376 100644
--- a/osfmk/ipc/mach_debug.c
+++ b/osfmk/ipc/mach_debug.c
@@ -319,7 +319,6 @@ mach_port_space_basic_info(
 	if (space == IS_NULL)
 		return KERN_INVALID_TASK;
 
-
 	is_read_lock(space);
 	if (!is_active(space)) {
 		is_read_unlock(space);
@@ -480,15 +479,20 @@ mach_port_kobject(
 	kaddr = (mach_vm_address_t)port->ip_kobject;
 	ip_unlock(port);
 
-
+#if !(DEVELOPMENT || DEBUG)
+	/* disable this interface on release kernels */
+	*addrp = 0;
+#else
 	if (0 != kaddr && is_ipc_kobject(*typep))
 		*addrp = VM_KERNEL_UNSLIDE_OR_PERM(kaddr);
 	else
 		*addrp = 0;
+#endif
 
 	return KERN_SUCCESS;
 }
 #endif /* MACH_IPC_DEBUG */
+
 /*
  *	Routine:	mach_port_kernel_object [Legacy kernel call]
  *	Purpose:
diff --git a/osfmk/mach/memory_object_types.h b/osfmk/mach/memory_object_types.h
index ab6361793..9e000c6af 100644
--- a/osfmk/mach/memory_object_types.h
+++ b/osfmk/mach/memory_object_types.h
@@ -418,6 +418,7 @@ struct upl_page_info {
 			speculative:1,	/* page is valid, but not yet accessed */
 			cs_validated:1,	/* CODE SIGNING: page was validated */
 			cs_tainted:1,	/* CODE SIGNING: page is tainted */
+			cs_nx:1,	/* CODE SIGNING: page is NX */
 			needed:1,	/* page should be left in cache on abort */
 			:0;		/* force to long boundary */
 #else
@@ -683,6 +684,9 @@ typedef uint32_t	upl_size_t;	/* page-aligned byte size */
 #define UPL_SET_CS_TAINTED(upl, index, value) \
 	((upl)[(index)].cs_tainted = ((value) ? TRUE : FALSE))
 
+#define UPL_SET_CS_NX(upl, index, value) \
+	((upl)[(index)].cs_nx = ((value) ? TRUE : FALSE))
+
 #define UPL_SET_REPRIO_INFO(upl, index, blkno, len) \
 	((upl)->upl_reprio_info[(index)]) = (((uint64_t)(blkno) & UPL_REPRIO_INFO_MASK) | \
 	(((uint64_t)(len) & UPL_REPRIO_INFO_MASK) << UPL_REPRIO_INFO_SHIFT))
diff --git a/osfmk/mach/vm_statistics.h b/osfmk/mach/vm_statistics.h
index b496747d0..4f9e7172d 100644
--- a/osfmk/mach/vm_statistics.h
+++ b/osfmk/mach/vm_statistics.h
@@ -222,6 +222,7 @@ typedef struct vm_purgeable_info	*vm_purgeable_info_t;
 #define VM_PAGE_QUERY_PAGE_EXTERNAL	0x80
 #define VM_PAGE_QUERY_PAGE_CS_VALIDATED	0x100
 #define VM_PAGE_QUERY_PAGE_CS_TAINTED	0x200
+#define VM_PAGE_QUERY_PAGE_CS_NX	0x400
 
 #ifdef	MACH_KERNEL_PRIVATE
diff --git a/osfmk/vm/vm_apple_protect.c b/osfmk/vm/vm_apple_protect.c
index 7cb6b93cf..e5945fbad 100644
--- a/osfmk/vm/vm_apple_protect.c
+++ b/osfmk/vm/vm_apple_protect.c
@@ -578,6 +578,8 @@ apple_protect_pager_data_request(
 					   src_page->cs_validated);
 			UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
 					   src_page->cs_tainted);
+			UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
+				      src_page->cs_nx);
 		}
 
 		/*
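The new cs_nx bit rides alongside cs_validated and cs_tainted at every point where a page's code-signing state is recorded or copied: the pager above stamps it into the UPL with UPL_SET_CS_NX, and the vm_fault, vm_pageout, and vm_resident hunks that follow propagate it the same way. The invariant is that the three bits always move as a group; a sketch of that pattern with stand-in types (not the kernel's structures):

    /* One-bit code-signing state, mirroring the vm_page/upl_page_info
     * fields touched by this patch. */
    struct cs_state {
            unsigned cs_validated:1;  /* page hash checked against blob */
            unsigned cs_tainted:1;    /* page modified after validation */
            unsigned cs_nx:1;         /* page must never be mapped executable */
    };

    /* Copy the group; forgetting cs_nx at any one site would let a
     * page shed its NX marking across a UPL round trip. */
    static void
    cs_state_copy(struct cs_state *dst, const struct cs_state *src)
    {
            dst->cs_validated = src->cs_validated;
            dst->cs_tainted = src->cs_tainted;
            dst->cs_nx = src->cs_nx;
    }
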
diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c
index bb506cedd..381f69b3a 100644
--- a/osfmk/vm/vm_fault.c
+++ b/osfmk/vm/vm_fault.c
@@ -735,6 +735,7 @@ vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
 
 	m->cs_validated = FALSE;
 	m->cs_tainted = FALSE;
+	m->cs_nx = FALSE;
 
 	if (no_zero_fill == TRUE) {
 		my_fault = DBG_NZF_PAGE_FAULT;
@@ -2653,6 +2654,7 @@ vm_fault_enter(vm_page_t m,
 	}
 
 #define page_immutable(m,prot) ((m)->cs_validated /*&& ((prot) & VM_PROT_EXECUTE)*/)
+#define page_nx(m) ((m)->cs_nx)
 
 	map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
 			   (pmap == vm_map_pmap(current_thread()->map)));
@@ -2677,6 +2679,12 @@ vm_fault_enter(vm_page_t m,
 		return KERN_CODESIGN_ERROR;
 	}
 
+	if (cs_enforcement_enabled && page_nx(m) && (prot & VM_PROT_EXECUTE)) {
+		if (cs_debug)
+			printf("page marked to be NX, not letting it be mapped EXEC\n");
+		return KERN_CODESIGN_ERROR;
+	}
+
 	/* A page could be tainted, or pose a risk of being tainted later.
 	 * Check whether the receiving process wants it, and make it feel
 	 * the consequences (that happens in cs_invalid_page()).
@@ -5807,7 +5815,8 @@ vm_page_validate_cs_mapped(
 	kern_return_t		kr;
 	memory_object_t		pager;
 	void			*blobs;
-	boolean_t		validated, tainted;
+	boolean_t		validated;
+	unsigned		tainted;
 
 	assert(page->busy);
 	vm_object_lock_assert_exclusive(page->object);
@@ -5869,6 +5878,7 @@ vm_page_validate_cs_mapped(
 	}
 
 	/* verify the SHA1 hash for this page */
+	tainted = 0;
 	validated = cs_validate_page(blobs,
 				     pager,
 				     offset + object->paging_offset,
@@ -5877,7 +5887,8 @@ vm_page_validate_cs_mapped(
 
 	page->cs_validated = validated;
 	if (validated) {
-		page->cs_tainted = tainted;
+		page->cs_tainted = !!(tainted & CS_VALIDATE_TAINTED);
+		page->cs_nx = !!(tainted & CS_VALIDATE_NX);
 	}
 }
diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c
index ca11e1bae..58a8c7828 100644
--- a/osfmk/vm/vm_map.c
+++ b/osfmk/vm/vm_map.c
@@ -13735,6 +13735,8 @@ vm_map_page_info(
 			disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED;
 		if (m->cs_tainted)
 			disposition |= VM_PAGE_QUERY_PAGE_CS_TAINTED;
+		if (m->cs_nx)
+			disposition |= VM_PAGE_QUERY_PAGE_CS_NX;
 
 done_with_object:
 		vm_object_unlock(object);
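VM_PAGE_QUERY_PAGE_CS_NX makes the new bit visible through the page-query interface alongside the existing CS bits. A hedged userspace sketch of probing it (assumes the caller holds its own task port; mach_vm_page_query() and the disposition constants come from the standard Mach headers):

    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <mach/vm_statistics.h>
    #include <stdio.h>

    static void
    report_page_nx(mach_vm_address_t addr)
    {
            integer_t disposition = 0, ref_count = 0;
            kern_return_t kr;

            kr = mach_vm_page_query(mach_task_self(), addr,
                &disposition, &ref_count);
            if (kr != KERN_SUCCESS)
                    return;
            if (disposition & VM_PAGE_QUERY_PAGE_CS_NX)  /* 0x400, added above */
                    printf("page at 0x%llx is CS-marked non-executable\n",
                        (unsigned long long)addr);
    }
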
diff --git a/osfmk/vm/vm_page.h b/osfmk/vm/vm_page.h
index c0330f0dd..9c3d9f0ae 100644
--- a/osfmk/vm/vm_page.h
+++ b/osfmk/vm/vm_page.h
@@ -278,12 +278,13 @@ struct vm_page {
 			encrypted_cleaning:1,	/* encrypting page */
 			cs_validated:1,		/* code-signing: page was checked */
 			cs_tainted:1,		/* code-signing: page is tainted */
+			cs_nx:1,		/* code-signing: page is nx */
 			reusable:1,
 			lopage:1,
 			slid:1,
 			compressor:1,		/* page owned by compressor pool */
 			written_by_kernel:1,	/* page was written by kernel (i.e. decompressed) */
-			__unused_object_bits:5;	/* 5 bits available here */
+			__unused_object_bits:4;	/* 4 bits available here */
 	};
 
 #define DEBUG_ENCRYPTED_SWAP 1
diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c
index 080ffb5e7..d2fb0aaf7 100644
--- a/osfmk/vm/vm_pageout.c
+++ b/osfmk/vm/vm_pageout.c
@@ -5339,6 +5339,7 @@ check_busy:
 			user_page_list[entry].speculative = FALSE;
 			user_page_list[entry].cs_validated = dst_page->cs_validated;
 			user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+			user_page_list[entry].cs_nx = dst_page->cs_nx;
 		}
 		/*
 		 * if UPL_RET_ONLY_ABSENT is set, then
@@ -6361,6 +6362,7 @@ process_upl_to_commit:
 			 */
 			m->cs_validated = page_list[entry].cs_validated;
 			m->cs_tainted = page_list[entry].cs_tainted;
+			m->cs_nx = page_list[entry].cs_nx;
 		}
 		if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL)
 			m->written_by_kernel = TRUE;
@@ -8014,6 +8016,7 @@ record_phys_addr:
 			user_page_list[entry].speculative = FALSE;
 			user_page_list[entry].cs_validated = dst_page->cs_validated;
 			user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+			user_page_list[entry].cs_nx = dst_page->cs_nx;
 		}
 		if (object != kernel_object && object != compressor_object) {
 			/*
diff --git a/osfmk/vm/vm_protos.h b/osfmk/vm/vm_protos.h
index d241abafb..82704811e 100644
--- a/osfmk/vm/vm_protos.h
+++ b/osfmk/vm/vm_protos.h
@@ -464,11 +464,14 @@ extern void log_unnest_badness(vm_map_t, vm_map_offset_t, vm_map_offset_t);
 struct proc;
 extern int cs_allow_invalid(struct proc *p);
 extern int cs_invalid_page(addr64_t vaddr);
+
+#define CS_VALIDATE_TAINTED	0x00000001
+#define CS_VALIDATE_NX		0x00000002
 extern boolean_t cs_validate_page(void *blobs,
 				  memory_object_t pager,
 				  memory_object_offset_t offset,
 				  const void *data,
-				  boolean_t *tainted);
+				  unsigned *result);
 
 extern kern_return_t mach_memory_entry_purgable_control(
 	ipc_port_t	entry_port,
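cs_validate_page() now reports its result through an unsigned flag word instead of a single boolean, so one call can say both "tainted" and "must not be executable". Callers pre-zero the word (the callee may bail out before writing it) and collapse each flag into a one-bit field with !!, as the vm_fault.c hunk above does. A standalone sketch of the decoding (stand-in struct, not the kernel's vm_page):

    #include <stdio.h>

    #define CS_VALIDATE_TAINTED 0x00000001
    #define CS_VALIDATE_NX      0x00000002

    struct page_bits {
            unsigned cs_tainted:1;
            unsigned cs_nx:1;
    };

    int
    main(void)
    {
            unsigned tainted = 0;          /* pre-zeroed on purpose */
            struct page_bits pg = { 0, 0 };

            tainted = CS_VALIDATE_NX;      /* pretend validation reported NX */
            pg.cs_tainted = !!(tainted & CS_VALIDATE_TAINTED);
            pg.cs_nx = !!(tainted & CS_VALIDATE_NX);
            printf("tainted=%u nx=%u\n", pg.cs_tainted, pg.cs_nx);
            return (0);
    }
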
"" : "!")); printf("phys_page=0x%x\n", p->phys_page); @@ -4494,6 +4496,7 @@ did_consider: assert(!m1->encrypted_cleaning); m2->cs_validated = m1->cs_validated; m2->cs_tainted = m1->cs_tainted; + m2->cs_nx = m1->cs_nx; /* * If m1 had really been reusable, diff --git a/security/mac_framework.h b/security/mac_framework.h index c81d4ec41..0ac2572f5 100644 --- a/security/mac_framework.h +++ b/security/mac_framework.h @@ -476,7 +476,7 @@ int mac_vnode_check_exec(vfs_context_t ctx, struct vnode *vp, int mac_vnode_check_fsgetpath(vfs_context_t ctx, struct vnode *vp); int mac_vnode_check_signature(struct vnode *vp, off_t macho_offset, unsigned char *sha1, const void * signature, size_t size, - int *is_platform_binary); + int flags, int *is_platform_binary); int mac_vnode_check_getattrlist(vfs_context_t ctx, struct vnode *vp, struct attrlist *alist); int mac_vnode_check_getextattr(vfs_context_t ctx, struct vnode *vp, diff --git a/security/mac_policy.h b/security/mac_policy.h index 31507419b..7d5d50e63 100644 --- a/security/mac_policy.h +++ b/security/mac_policy.h @@ -4589,7 +4589,7 @@ typedef int mpo_vnode_check_fsgetpath_t( typedef int mpo_vnode_check_signature_t(struct vnode *vp, struct label *label, off_t macho_offset, unsigned char *sha1, const void *signature, int size, - int *is_platform_binary); + int flags, int *is_platform_binary); /** @brief Access control check for retrieving file attributes @@ -5812,7 +5812,7 @@ typedef void mpo_reserved_hook_t(void); * Please note that this should be kept in sync with the check assumptions * policy in bsd/kern/policy_check.c (policy_ops struct). */ -#define MAC_POLICY_OPS_VERSION 31 /* inc when new reserved slots are taken */ +#define MAC_POLICY_OPS_VERSION 32 /* inc when new reserved slots are taken */ struct mac_policy_ops { mpo_audit_check_postselect_t *mpo_audit_check_postselect; mpo_audit_check_preselect_t *mpo_audit_check_preselect; diff --git a/security/mac_vfs.c b/security/mac_vfs.c index b318e3b75..0d31d86c4 100644 --- a/security/mac_vfs.c +++ b/security/mac_vfs.c @@ -838,7 +838,7 @@ int mac_vnode_check_signature(struct vnode *vp, off_t macho_offset, unsigned char *sha1, const void *signature, size_t size, - int *is_platform_binary) + int flags, int *is_platform_binary) { int error; @@ -846,7 +846,8 @@ mac_vnode_check_signature(struct vnode *vp, off_t macho_offset, return (0); MAC_CHECK(vnode_check_signature, vp, vp->v_label, macho_offset, sha1, - signature, size, is_platform_binary); + signature, size, + flags, is_platform_binary); return (error); } diff --git a/tools/lldbmacros/memory.py b/tools/lldbmacros/memory.py index 9f9c30729..ff5ce0ae2 100644 --- a/tools/lldbmacros/memory.py +++ b/tools/lldbmacros/memory.py @@ -2440,4 +2440,3 @@ def ShowTaskLoadInfo(cmd_args=None, cmd_options={}): #print "Load address: %s" % hex(m[1]) print print_format.format(load_addr, end_addr, libname, uuid_out_string, filepath) return None - -- 2.45.2