+kern_return_t
+mach_memory_entry_access_tracking(
+    ipc_port_t      entry_port,
+    int             *access_tracking,
+    uint32_t        *access_tracking_reads,
+    uint32_t        *access_tracking_writes)
+{
+    return memory_entry_access_tracking_internal(entry_port,
+        access_tracking,
+        access_tracking_reads,
+        access_tracking_writes);
+}
+
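+/*
+ * memory_entry_access_tracking_internal:
+ *
+ * Backend for the mach_memory_entry_access_tracking() wrapper above.
+ * Resolves the named-entry port to its backing VM object and, on
+ * kernels built with VM_OBJECT_ACCESS_TRACKING, gets/sets the object's
+ * access-tracking state and reports the read/write counts; otherwise
+ * it returns KERN_NOT_SUPPORTED.
+ */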
+kern_return_t
+memory_entry_access_tracking_internal(
+    ipc_port_t      entry_port,
+    int             *access_tracking,
+    uint32_t        *access_tracking_reads,
+    uint32_t        *access_tracking_writes)
+{
+    vm_named_entry_t mem_entry;
+    vm_object_t      object;
+    kern_return_t    kr;
+
+    if (!IP_VALID(entry_port) ||
+        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
+
+    named_entry_lock(mem_entry);
+
+    if (mem_entry->is_sub_map ||
+        mem_entry->is_copy) {
+        named_entry_unlock(mem_entry);
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    object = mem_entry->backing.object;
+    if (object == VM_OBJECT_NULL) {
+        named_entry_unlock(mem_entry);
+        return KERN_INVALID_ARGUMENT;
+    }
+
+#if VM_OBJECT_ACCESS_TRACKING
+    vm_object_access_tracking(object,
+        access_tracking,
+        access_tracking_reads,
+        access_tracking_writes);
+    kr = KERN_SUCCESS;
+#else /* VM_OBJECT_ACCESS_TRACKING */
+    (void) access_tracking;
+    (void) access_tracking_reads;
+    (void) access_tracking_writes;
+    kr = KERN_NOT_SUPPORTED;
+#endif /* VM_OBJECT_ACCESS_TRACKING */
+
+    named_entry_unlock(mem_entry);
+
+    return kr;
+}
+
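+/*
+ * mach_memory_entry_ownership:
+ *
+ * Attribute the VM object backing a named entry to "owner" for ledger
+ * accounting purposes, tagged with "ledger_tag".  Unless called by the
+ * kernel, transferring ownership to another task, opting out of
+ * footprint accounting, or using the "network" tag requires the
+ * "com.apple.private.memory.ownership_transfer" entitlement.  The
+ * named entry must cover its entire backing object.
+ */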
+kern_return_t
+mach_memory_entry_ownership(
+    ipc_port_t      entry_port,
+    task_t          owner,
+    int             ledger_tag,
+    int             ledger_flags)
+{
+    task_t           cur_task;
+    kern_return_t    kr;
+    vm_named_entry_t mem_entry;
+    vm_object_t      object;
+
+    cur_task = current_task();
+    if (cur_task != kernel_task &&
+        (owner != cur_task ||
+        (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) ||
+        ledger_tag == VM_LEDGER_TAG_NETWORK)) {
+        /*
+         * An entitlement is required to:
+         * + transfer memory ownership to someone else,
+         * + request that the memory not count against the footprint,
+         * + tag as "network" (since that implies "no footprint").
+         */
+        if (!cur_task->task_can_transfer_memory_ownership &&
+            IOTaskHasEntitlement(cur_task,
+                "com.apple.private.memory.ownership_transfer")) {
+            cur_task->task_can_transfer_memory_ownership = TRUE;
+        }
+        if (!cur_task->task_can_transfer_memory_ownership) {
+            return KERN_NO_ACCESS;
+        }
+    }
+
+    if (ledger_flags & ~VM_LEDGER_FLAGS) {
+        return KERN_INVALID_ARGUMENT;
+    }
+    if (ledger_tag <= 0 ||
+        ledger_tag > VM_LEDGER_TAG_MAX) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    if (!IP_VALID(entry_port) ||
+        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+        return KERN_INVALID_ARGUMENT;
+    }
+    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
+
+    named_entry_lock(mem_entry);
+
+    if (mem_entry->is_sub_map ||
+        mem_entry->is_copy) {
+        named_entry_unlock(mem_entry);
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    object = mem_entry->backing.object;
+    if (object == VM_OBJECT_NULL) {
+        named_entry_unlock(mem_entry);
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    vm_object_lock(object);
+
+    /* reject unless the named entry covers the entire object */
+    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
+        vm_object_unlock(object);
+        named_entry_unlock(mem_entry);
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    named_entry_unlock(mem_entry);
+
+    kr = vm_object_ownership_change(object,
+        ledger_tag,
+        owner,
+        ledger_flags,
+        FALSE); /* task_objq_locked */
+    vm_object_unlock(object);
+
+    return kr;
+}
+
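+/*
+ * mach_memory_entry_get_page_counts:
+ *
+ * Report how many pages of the VM object backing a named entry are
+ * resident and how many are dirty, over the range covered by the
+ * entry.
+ */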
+kern_return_t
+mach_memory_entry_get_page_counts(
+    ipc_port_t      entry_port,
+    unsigned int    *resident_page_count,
+    unsigned int    *dirty_page_count)
+{
+    kern_return_t      kr;
+    vm_named_entry_t   mem_entry;
+    vm_object_t        object;
+    vm_object_offset_t offset;
+    vm_object_size_t   size;
+
+    if (!IP_VALID(entry_port) ||
+        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);
+
+    named_entry_lock(mem_entry);
+
+    if (mem_entry->is_sub_map ||
+        mem_entry->is_copy) {
+        named_entry_unlock(mem_entry);
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    object = mem_entry->backing.object;
+    if (object == VM_OBJECT_NULL) {
+        named_entry_unlock(mem_entry);
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    vm_object_lock(object);
+
+    offset = mem_entry->offset;
+    size = mem_entry->size;
+
+    named_entry_unlock(mem_entry);
+
+    kr = vm_object_get_page_counts(object, offset, size,
+        resident_page_count, dirty_page_count);
+
+    vm_object_unlock(object);
+
+    return kr;
+}
+
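+/*
+ * Illustrative user-space sequence (a sketch only; it assumes the MIG
+ * stubs for these routines are exposed via <mach/mach.h> with
+ * signatures mirroring the kernel-side ones above, and the sizes and
+ * tag below are examples, not requirements): create a named entry,
+ * attribute its backing object to the calling task's ledger, then
+ * query its page counts.
+ *
+ *    memory_object_size_t size = 16 * vm_page_size;
+ *    mach_port_t entry = MACH_PORT_NULL;
+ *    unsigned int resident = 0, dirty = 0;
+ *    kern_return_t kr;
+ *
+ *    // Create a named entry backed by freshly allocated memory.
+ *    kr = mach_make_memory_entry_64(mach_task_self(), &size, 0,
+ *        MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+ *        &entry, MACH_PORT_NULL);
+ *
+ *    // Attribute the backing object to this task's default ledger.
+ *    // No entitlement is needed here: owner == current task, no
+ *    // VM_LEDGER_FLAG_NO_FOOTPRINT, and not the "network" tag.
+ *    if (kr == KERN_SUCCESS) {
+ *        kr = mach_memory_entry_ownership(entry, mach_task_self(),
+ *            VM_LEDGER_TAG_DEFAULT, 0);
+ *    }
+ *
+ *    // Ask how many of the entry's pages are resident/dirty.
+ *    if (kr == KERN_SUCCESS) {
+ *        kr = mach_memory_entry_get_page_counts(entry,
+ *            &resident, &dirty);
+ *    }
+ */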