case CS_OPS_ENTITLEMENTS_BLOB:
case CS_OPS_IDENTITY:
case CS_OPS_BLOB:
- break; /* unrestricted */
+ break; /* not restricted to root */
default:
if (forself == 0 && kauth_cred_issuser(kauth_cred_get()) != TRUE)
return(EPERM);
}
}
+#if CONFIG_MACF
+ switch (ops) {
+ case CS_OPS_MARKINVALID:
+ case CS_OPS_MARKHARD:
+ case CS_OPS_MARKKILL:
+ case CS_OPS_MARKRESTRICT:
+ case CS_OPS_SET_STATUS:
+ if ((error = mac_proc_check_set_cs_info(current_proc(), pt, ops)))
+ goto out;
+ break;
+ default:
+ if ((error = mac_proc_check_get_cs_info(current_proc(), pt, ops)))
+ goto out;
+ }
+#endif
+
switch (ops) {
case CS_OPS_STATUS: {
(uint64_t) map_addr, (uint64_t) (map_addr + map_size),
__FUNCTION__, vp->v_name);
#endif /* DEVELOPMENT || DEBUG */
+
+ /* The DSMOS pager can only be used by apple signed code */
+ struct cs_blob * blob = csvnode_get_blob(vp, file_off);
+ if( blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path)
+ {
+ return LOAD_FAILURE;
+ }
+
kr = vm_map_apple_protected(map,
map_addr,
map_addr + map_size,
return rv;
}
-#if (MAC_POLICY_OPS_VERSION != 37)
+#if (MAC_POLICY_OPS_VERSION != 39)
# error "struct mac_policy_ops doesn't match definition in mac_policy.h"
#endif
/*
.mpo_reserved26 = (mpo_reserved_hook_t *)common_hook,
.mpo_reserved27 = (mpo_reserved_hook_t *)common_hook,
.mpo_reserved28 = (mpo_reserved_hook_t *)common_hook,
- .mpo_reserved29 = (mpo_reserved_hook_t *)common_hook,
- .mpo_reserved30 = (mpo_reserved_hook_t *)common_hook,
+ CHECK_SET_HOOK(proc_check_get_cs_info)
+ CHECK_SET_HOOK(proc_check_set_cs_info)
CHECK_SET_HOOK(iokit_check_hid_control)
-15.5.0
+15.6.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
@abstract Provides a basis for communication between client applications and I/O Kit objects.
*/
-
class IOUserClient : public IOService
{
OSDeclareAbstractStructors(IOUserClient)
bool reserve();
#ifdef XNU_KERNEL_PRIVATE
+
public:
-#else
-private:
-#endif
OSSet * mappings;
UInt8 sharedInstance;
UInt8 closed;
UInt8 __ipcFinal;
UInt8 __reservedA[1];
volatile SInt32 __ipc;
+ queue_head_t owners;
#if __LP64__
- void * __reserved[7];
+ void * __reserved[5];
#else
- void * __reserved[6];
+ void * __reserved[4];
#endif
+#else /* XNU_KERNEL_PRIVATE */
+private:
+ void * __reserved[9];
+#endif /* XNU_KERNEL_PRIVATE */
+
public:
virtual IOReturn externalMethod( uint32_t selector, IOExternalMethodArguments * arguments,
IOExternalMethodDispatch * dispatch = 0, OSObject * target = 0, void * reference = 0 );
OSMetaClassDeclareReservedUnused(IOUserClient, 15);
#ifdef XNU_KERNEL_PRIVATE
+
/* Available within xnu source only */
public:
static void initialize( void );
task_t task,
IOOptionBits mapFlags = kIOMapAnywhere,
mach_vm_address_t atAddress = 0 );
-#endif
+ IOReturn registerOwner(task_t task);
+ void noMoreSenders(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
protected:
static IOReturn sendAsyncResult(OSAsyncReference reference,
if (rc != kIOReturnSuccess) {
notifier->release();
notifier = 0;
+
+ return NULL;
}
if (pmPowerStateQueue)
{
}
}
- IOLockLock(reserved->interruptStatisticsLock);
-
- /* The array count is signed (because the interrupt indices are signed), hence the cast */
- for (cnt = 0; cnt < (unsigned) reserved->interruptStatisticsArrayCount; cnt++) {
- if (reserved->interruptStatisticsArray[cnt].reporter) {
- /*
- * If the reporter is currently associated with the statistics
- * for an event source, we may need to update the reporter.
- */
- if (reserved->interruptStatisticsArray[cnt].statistics)
- interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[cnt].statistics, reserved->interruptStatisticsArray[cnt].reporter);
-
- reserved->interruptStatisticsArray[cnt].reporter->configureReport(channelList, action, result, destination);
- }
- }
+ /* 24241819: SU fix for NULL 'reserved' field */
+ if (reserved) {
+ IOLockLock(reserved->interruptStatisticsLock);
+
+ /* The array count is signed (because the interrupt indices are signed), hence the cast */
+ for (cnt = 0; cnt < (unsigned) reserved->interruptStatisticsArrayCount; cnt++) {
+ if (reserved->interruptStatisticsArray[cnt].reporter) {
+ /*
+ * If the reporter is currently associated with the statistics
+ * for an event source, we may need to update the reporter.
+ */
+ if (reserved->interruptStatisticsArray[cnt].statistics)
+ interruptAccountingDataUpdateChannels(reserved->interruptStatisticsArray[cnt].statistics, reserved->interruptStatisticsArray[cnt].reporter);
+
+ reserved->interruptStatisticsArray[cnt].reporter->configureReport(channelList, action, result, destination);
+ }
+ }
- IOLockUnlock(reserved->interruptStatisticsLock);
+ IOLockUnlock(reserved->interruptStatisticsLock);
+ }
+ #if DEVELOPMENT || DEBUG
+ else {
+ IOLog("ALERT: why is %s's 'reserved' field NULL?!\n", getName());
+ }
+ #endif
return kIOReturnSuccess;
}
{
OSDictionary * dict;
IOMachPort * machPort;
+ IOUserClient * uc;
bool destroyed = true;
IOTakeLock( gIOObjectPortLock);
machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
if( machPort) {
destroyed = (machPort->mscount <= *mscount);
- if( destroyed)
+ if (!destroyed) *mscount = machPort->mscount;
+ else
+ {
+ if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
+ {
+ uc->noMoreSenders();
+ }
dict->removeObject( (const OSSymbol *) obj );
- else
- *mscount = machPort->mscount;
+ }
}
obj->release();
}
OSDictionary * dict;
IOMachPort * machPort;
+ assert(IKOT_IOKIT_CONNECT != type);
+
IOTakeLock( gIOObjectPortLock);
if( (dict = dictForType( type ))) {
if (port)
{
IOUserClient * uc;
- if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
+ if ((uc = OSDynamicCast(IOUserClient, obj)))
{
- dict->setObject((const OSSymbol *) uc->mappings, port);
- iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
-
- uc->mappings->release();
- uc->mappings = 0;
- }
+ uc->noMoreSenders();
+ if (uc->mappings)
+ {
+ dict->setObject((const OSSymbol *) uc->mappings, port);
+ iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
+
+ uc->mappings->release();
+ uc->mappings = 0;
+ }
+ }
dict->removeObject( (const OSSymbol *) obj );
}
}
if( IKOT_IOKIT_CONNECT == type)
{
- if( (client = OSDynamicCast( IOUserClient, obj ))) {
- IOStatisticsClientCall();
+ if( (client = OSDynamicCast( IOUserClient, obj )))
+ {
+ IOStatisticsClientCall();
client->clientDied();
- }
+ }
}
else if( IKOT_IOKIT_OBJECT == type)
{
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
+IOLock * gIOUserClientOwnersLock;
+
void IOUserClient::initialize( void )
{
-    gIOObjectPortLock = IOLockAlloc();
-
-    assert( gIOObjectPortLock );
+    gIOObjectPortLock = IOLockAlloc();
+    /* protects every IOUserClientOwner list (per-task and per-client) */
+    gIOUserClientOwnersLock = IOLockAlloc();
+    assert(gIOObjectPortLock && gIOUserClientOwnersLock);
}
void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
return true;
}
+/*
+ * Records that one task holds this user client open.  Each record is
+ * linked on two lists: the owning task's list (taskLink, see
+ * task_io_user_clients) and the user client's own list (ucLink).
+ * All list manipulation is serialized by gIOUserClientOwnersLock.
+ */
+struct IOUserClientOwner
+{
+    task_t task;
+    queue_chain_t taskLink;
+    IOUserClient * uc;
+    queue_chain_t ucLink;
+};
+
+/*
+ * Register 'task' as an owner of this user client so the connection can
+ * be torn down by iokit_task_terminate() if the task dies without
+ * closing it.  Idempotent per task.  Returns kIOReturnSuccess, or
+ * kIOReturnNoMemory if the owner record cannot be allocated.
+ */
+IOReturn
+IOUserClient::registerOwner(task_t task)
+{
+    IOUserClientOwner * owner;
+    IOReturn ret;
+    bool newOwner;
+
+    IOLockLock(gIOUserClientOwnersLock);
+
+    newOwner = true;
+    ret = kIOReturnSuccess;
+
+    /* owners is lazily initialized; .next == NULL means "never used" */
+    if (!owners.next) queue_init(&owners);
+    else
+    {
+        queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
+        {
+            if (task != owner->task) continue;
+            newOwner = false;
+            break;
+        }
+    }
+    if (newOwner)
+    {
+        owner = IONew(IOUserClientOwner, 1);
+        /* test the allocation itself, not the newOwner flag (which is
+         * always true on this path) — otherwise an IONew failure would
+         * go unnoticed and the stores below would dereference NULL */
+        if (!owner) ret = kIOReturnNoMemory;
+        else
+        {
+            owner->task = task;
+            owner->uc = this;
+            queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
+            queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
+        }
+    }
+
+    IOLockUnlock(gIOUserClientOwnersLock);
+
+    return (ret);
+}
+
+/*
+ * Drop every owner record for this user client.  Called (with
+ * gIOObjectPortLock conventions of the callers) when the last send
+ * right to the connect port goes away, so a later task death no longer
+ * needs to tear this client down.
+ */
+void
+IOUserClient::noMoreSenders(void)
+{
+    IOUserClientOwner * owner;
+
+    IOLockLock(gIOUserClientOwnersLock);
+
+    /* .next == NULL means the owners queue was never initialized */
+    if (owners.next)
+    {
+        while (!queue_empty(&owners))
+        {
+            owner = (IOUserClientOwner *)(void *) queue_first(&owners);
+            queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
+            queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
+            IODelete(owner, IOUserClientOwner, 1);
+        }
+        /* return the head to the "uninitialized" marker state */
+        owners.next = owners.prev = NULL;
+    }
+
+    IOLockUnlock(gIOUserClientOwnersLock);
+}
+
+/*
+ * Task-death hook (called from task_terminate paths in osfmk): unlink
+ * every user client the dying task owned.  Clients left with no owners
+ * are collected on a private singly linked list (reusing their queue
+ * head) and killed via clientDied() AFTER the lock is dropped, since
+ * clientDied() can re-enter IOKit.
+ */
+extern "C" kern_return_t
+iokit_task_terminate(task_t task)
+{
+    IOUserClientOwner * owner;
+    IOUserClient * dead;
+    IOUserClient * uc;
+    queue_head_t * taskque;
+
+    IOLockLock(gIOUserClientOwnersLock);
+
+    taskque = task_io_user_clients(task);
+    dead = NULL;
+    while (!queue_empty(taskque))
+    {
+        owner = (IOUserClientOwner *)(void *) queue_first(taskque);
+        uc = owner->uc;
+        queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
+        queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
+        if (queue_empty(&uc->owners))
+        {
+            /* retained here; released below after clientDied() */
+            uc->retain();
+            IOLog("destroying out of band connect for %s\n", uc->getName());
+            // now using the uc queue head as a singly linked queue,
+            // leaving .next as NULL to mark it empty
+            uc->owners.next = NULL;
+            uc->owners.prev = (queue_entry_t) dead;
+            dead = uc;
+        }
+        IODelete(owner, IOUserClientOwner, 1);
+    }
+
+    IOLockUnlock(gIOUserClientOwnersLock);
+
+    /* second phase: kill orphaned clients outside the owners lock */
+    while (dead)
+    {
+        uc = dead;
+        dead = (IOUserClient *)(void *) dead->owners.prev;
+        uc->owners.prev = NULL;
+        if (uc->sharedInstance || !uc->closed) uc->clientDied();
+        uc->release();
+    }
+
+    return (KERN_SUCCESS);
+}
+
void IOUserClient::free()
{
-    if( mappings)
-        mappings->release();
+    if( mappings) mappings->release();
    IOStatisticsUnregisterCounter();
-    if (reserved)
-        IODelete(reserved, ExpansionData, 1);
+    /* by now noMoreSenders()/iokit_task_terminate must have emptied and
+     * reset the owners list, or we are leaking owner records */
+    assert(!owners.next);
+    assert(!owners.prev);
+
+    if (reserved) IODelete(reserved, ExpansionData, 1);
    super::free();
}
CHECK( IOService, _service, service );
- if (!owningTask) return (kIOReturnBadArgument);
+ if (!owningTask) return (kIOReturnBadArgument);
+ assert(owningTask == current_task());
+ if (owningTask != current_task()) return (kIOReturnBadArgument);
do
{
{
assert( OSDynamicCast(IOUserClient, client) );
+ client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
+ client->closed = false;
+
disallowAccess = (crossEndian
&& (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
&& (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
res = kIOReturnNotPermitted;
#endif
+
+ if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
+
if (kIOReturnSuccess != res)
{
IOStatisticsClientCall();
client = 0;
break;
}
- client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
- client->closed = false;
OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
if (creatorName)
{
* nuke the entry in the page table
*/
/* remember reference and change */
- pmap_phys_attributes[pai] |=
- (char) (*cpte & (PHYS_MODIFIED | PHYS_REFERENCED));
+ if (!is_ept) {
+ pmap_phys_attributes[pai] |=
+ *cpte & (PHYS_MODIFIED | PHYS_REFERENCED);
+ } else {
+ pmap_phys_attributes[pai] |=
+ ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
+ }
/*
* Remove the mapping from the pvlist for this physical page.
#define MSR_IA32_KERNEL_GS_BASE 0xC0000102
#define MSR_IA32_TSC_AUX 0xC0000103
+#define HV_VMX_EPTP_MEMORY_TYPE_UC 0x0
+#define HV_VMX_EPTP_MEMORY_TYPE_WB 0x6
+#define HV_VMX_EPTP_WALK_LENGTH(wl) (0ULL | ((((wl) - 1) & 0x7) << 3))
+#define HV_VMX_EPTP_ENABLE_AD_FLAGS (1ULL << 6)
+
#endif /* _I386_PROC_REG_H_ */
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
+extern kern_return_t iokit_task_terminate(task_t task);
void proc_init_cpumon_params(void);
extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
new_task->task_purgeable_disowning = FALSE;
new_task->task_purgeable_disowned = FALSE;
+ queue_init(&new_task->io_user_clients);
+
ipc_task_enable(new_task);
lck_mtx_lock(&tasks_threads_lock);
ipc_task_terminate(task);
+ /* let iokit know */
+ iokit_task_terminate(task);
+
if (task->affinity_space)
task_affinity_deallocate(task);
task_t task,
processor_set_t *pset)
{
- if (!task->active)
- return(KERN_FAILURE);
+ if (!task || !task->active)
+ return KERN_FAILURE;
*pset = &pset0;
- return (KERN_SUCCESS);
+ return KERN_SUCCESS;
}
uint64_t
}
return;
}
+
+/*
+ * Accessor for the task's list of IOUserClientOwner records; used only
+ * by the IOKit owner-tracking code, which serializes access with its
+ * own lock (gIOUserClientOwnersLock).
+ */
+queue_head_t *
+task_io_user_clients(task_t task)
+{
+    return (&task->io_user_clients);
+}
#include <kern/kern_cdata.h>
#include <mach/sfi_class.h>
+#include <kern/queue.h>
/* defns for task->rsu_controldata */
#define TASK_POLICY_CPU_RESOURCE_USAGE 0
#if HYPERVISOR
void *hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */
+
+ queue_head_t io_user_clients;
};
#define task_lock(task) lck_mtx_lock(&(task)->lock)
#define TASK_WRITE_METADATA 0x8
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags);
+extern queue_head_t * task_io_user_clients(task_t task);
+
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
{
boolean_t empty;
+ if (upl == UPL_NULL)
+ return KERN_INVALID_ARGUMENT;
+
return upl_abort_range(upl, 0, upl->size, error, &empty);
}
{
boolean_t empty;
+ if (upl == UPL_NULL)
+ return KERN_INVALID_ARGUMENT;
+
return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
extern long __stack_chk_guard[];
+static uint64_t pmap_eptp_flags = 0;
boolean_t pmap_ept_support_ad = FALSE;
#if CONFIG_VMX
pmap_ept_support_ad = vmx_hv_support() && (VMX_CAP(MSR_IA32_VMX_EPT_VPID_CAP, MSR_IA32_VMX_EPT_VPID_CAP_AD_SHIFT, 1) ? TRUE : FALSE);
-#else
- pmap_ept_support_ad = FALSE;
+ pmap_eptp_flags = HV_VMX_EPTP_MEMORY_TYPE_WB | HV_VMX_EPTP_WALK_LENGTH(4) | (pmap_ept_support_ad ? HV_VMX_EPTP_ENABLE_AD_FLAGS : 0);
#endif /* CONFIG_VMX */
}
memset((char *)p->pm_pml4, 0, PAGE_SIZE);
if (flags & PMAP_CREATE_EPT) {
- p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4);
+ p->pm_eptp = (pmap_paddr_t)kvtophys((vm_offset_t)p->pm_pml4) | pmap_eptp_flags;
p->pm_cr3 = 0;
} else {
p->pm_eptp = 0;
int mac_proc_check_debug(proc_t proc1, proc_t proc2);
int mac_proc_check_cpumon(proc_t curp);
int mac_proc_check_proc_info(proc_t curp, proc_t target, int callnum, int flavor);
+int mac_proc_check_get_cs_info(proc_t curp, proc_t target, unsigned int op);
+int mac_proc_check_set_cs_info(proc_t curp, proc_t target, unsigned int op);
int mac_proc_check_fork(proc_t proc);
int mac_proc_check_suspend_resume(proc_t proc, int sr);
int mac_proc_check_get_task_name(kauth_cred_t cred, struct proc *p);
int callnum,
int flavor
);
+/**
+ @brief Access control check for retrieving code signing information.
+ @param cred Subject credential
+ @param target Target process
+ @param op Code signing operation being performed
+
+ Determine whether the subject identified by the credential should be
+ allowed to get code signing information about the target process.
+
+ @return Return 0 if access is granted, otherwise an appropriate value for
+ errno should be returned.
+*/
+typedef int mpo_proc_check_get_cs_info_t(
+ kauth_cred_t cred,
+ struct proc *target,
+ unsigned int op
+);
+/**
+ @brief Access control check for setting code signing information.
+ @param cred Subject credential
+ @param target Target process
+ @param op Code signing operation being performed.
+
+ Determine whether the subject identified by the credential should be
+ allowed to set code signing information about the target process.
+
+ @return Return 0 if permission is granted, otherwise an appropriate
+ value of errno should be returned.
+*/
+typedef int mpo_proc_check_set_cs_info_t(
+ kauth_cred_t cred,
+ struct proc *target,
+ unsigned int op
+);
/**
@brief Access control check for mmap MAP_ANON
@param proc User process requesting the memory
* Please note that this should be kept in sync with the check assumptions
* policy in bsd/kern/policy_check.c (policy_ops struct).
*/
-#define MAC_POLICY_OPS_VERSION 37 /* inc when new reserved slots are taken */
+#define MAC_POLICY_OPS_VERSION 39 /* inc when new reserved slots are taken */
struct mac_policy_ops {
mpo_audit_check_postselect_t *mpo_audit_check_postselect;
mpo_audit_check_preselect_t *mpo_audit_check_preselect;
mpo_reserved_hook_t *mpo_reserved26;
mpo_reserved_hook_t *mpo_reserved27;
mpo_reserved_hook_t *mpo_reserved28;
- mpo_reserved_hook_t *mpo_reserved29;
- mpo_reserved_hook_t *mpo_reserved30;
+ mpo_proc_check_get_cs_info_t *mpo_proc_check_get_cs_info;
+ mpo_proc_check_set_cs_info_t *mpo_proc_check_set_cs_info;
mpo_iokit_check_hid_control_t *mpo_iokit_check_hid_control;
return (error);
}
+
+/*
+ * MAC hook: may 'curp' read code-signing information (csops() get-type
+ * operation 'op') about 'target'?  Returns 0 to allow, else errno.
+ */
+int
+mac_proc_check_get_cs_info(proc_t curp, proc_t target, unsigned int op)
+{
+    kauth_cred_t cred;
+    int error = 0;
+
+#if SECURITY_MAC_CHECK_ENFORCE
+    /* 21167099 - skip the check entirely when MAC process enforcement is off */
+    if (!mac_proc_enforce)
+        return 0;
+#endif
+    if (!mac_proc_check_enforce(curp, MAC_PROC_ENFORCE))
+        return 0;
+
+    cred = kauth_cred_proc_ref(curp);
+    MAC_CHECK(proc_check_get_cs_info, cred, target, op);
+    kauth_cred_unref(&cred);
+
+    return (error);
+}
+
+/*
+ * MAC hook: may 'curp' change code-signing state (csops() set-type
+ * operation 'op') of 'target'?  Returns 0 to allow, else errno.
+ */
+int
+mac_proc_check_set_cs_info(proc_t curp, proc_t target, unsigned int op)
+{
+    kauth_cred_t cred;
+    int error = 0;
+
+#if SECURITY_MAC_CHECK_ENFORCE
+    /* 21167099 - only check if we allow write */
+    if (!mac_proc_enforce)
+        return 0;
+#endif
+    if (!mac_proc_check_enforce(curp, MAC_PROC_ENFORCE))
+        return 0;
+
+    cred = kauth_cred_proc_ref(curp);
+    MAC_CHECK(proc_check_set_cs_info, cred, target, op);
+    kauth_cred_unref(&cred);
+
+    return (error);
+}
+