diff --git a/bsd/dev/dtrace/dtrace_ptss.c b/bsd/dev/dtrace/dtrace_ptss.c
index 8e2ec272e66e6861fa9251ad5daa5b5156a6b025..c09b8f32e68b9540f50e8182aa12cc8b05287da2 100644
--- a/bsd/dev/dtrace/dtrace_ptss.c
+++ b/bsd/dev/dtrace/dtrace_ptss.c
@@ -33,6 +33,7 @@
 #include <sys/user.h>
 #include <sys/dtrace_ptss.h>
 
+#include <mach/vm_map.h>
 #include <mach/vm_param.h>
 #include <mach/mach_vm.h>
 
@@ -70,16 +71,16 @@ dtrace_ptss_claim_entry_locked(struct proc* p) {
                        // CAS the entries onto the free list.
                        do {
                                page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE-1].next = p->p_dtrace_ptss_free_list;
-                       } while (!OSCompareAndSwap((UInt32)page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE-1].next,
-                                                  (UInt32)&page->entries[0],
-                                                  (volatile UInt32 *)&p->p_dtrace_ptss_free_list));
+                       } while (!OSCompareAndSwapPtr((void *)page->entries[DTRACE_PTSS_ENTRIES_PER_PAGE-1].next,
+                                                  (void *)&page->entries[0],
+                                                  (void * volatile *)&p->p_dtrace_ptss_free_list));
                                 
                        // Now that we've added to the free list, try again.
                        continue;
                }
 
                // Claim temp
-               if (!OSCompareAndSwap((UInt32)temp, (UInt32)temp->next, (volatile UInt32 *)&p->p_dtrace_ptss_free_list))
+               if (!OSCompareAndSwapPtr((void *)temp, (void *)temp->next, (void * volatile *)&p->p_dtrace_ptss_free_list))
                        continue;
 
                // At this point, we own temp.
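Note: the hunk above switches the free-list CAS from the 32-bit OSCompareAndSwap() to OSCompareAndSwapPtr(), so the list head is compared and swapped at full pointer width on LP64 kernels instead of being truncated to 32 bits. A minimal sketch of the push half of that pattern, with hypothetical names (node, push_head) standing in for the dtrace_ptss structures:

#include <libkern/OSAtomic.h>

struct node {
	struct node *next;
};

/* Push n onto a lock-free singly linked free list headed by *head. */
static void
push_head(struct node * volatile *head, struct node *n)
{
	do {
		/* Snapshot the current head into the new node's next link. */
		n->next = *head;
	} while (!OSCompareAndSwapPtr((void *)n->next,   /* expected old head */
	                              (void *)n,         /* new head */
	                              (void * volatile *)head));
}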
@@ -113,7 +114,7 @@ dtrace_ptss_claim_entry(struct proc* p) {
                }
 
                // Claim temp
-               if (!OSCompareAndSwap((UInt32)temp, (UInt32)temp->next, (volatile UInt32 *)&p->p_dtrace_ptss_free_list))
+               if (!OSCompareAndSwapPtr((void *)temp, (void *)temp->next, (void * volatile *)&p->p_dtrace_ptss_free_list))
                        continue;
 
                // At this point, we own temp.
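Note: the same pointer-width fix is applied to the claim path here and in dtrace_ptss_claim_entry_locked() above. Sketched below as a generic pop, again with hypothetical names; a Treiber-style pop like this has the usual ABA caveats and is shown only to illustrate the OSCompareAndSwapPtr() usage:

/* Pop the head entry from the lock-free free list, or return NULL if empty. */
static struct node *
pop_head(struct node * volatile *head)
{
	struct node *n;

	do {
		n = *head;
		if (n == NULL)
			return NULL;              /* free list is empty */
	} while (!OSCompareAndSwapPtr((void *)n,          /* expected old head */
	                              (void *)n->next,    /* new head */
	                              (void * volatile *)head));

	return n;                                 /* the caller now owns n */
}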
@@ -127,13 +128,17 @@ dtrace_ptss_claim_entry(struct proc* p) {
 
 /*
  * This function does not require any locks to be held on entry.
+ *
+ * (PR-11138709) A NULL p->p_dtrace_ptss_pages means the entry can
+ * no longer be referenced safely. When found in this state, the chore
+ * of releasing an entry to the free list is ignored.
  */
 void
 dtrace_ptss_release_entry(struct proc* p, struct dtrace_ptss_page_entry* e) {
-       if (p && e) {
+       if (p && p->p_dtrace_ptss_pages && e) {
                do {
                        e->next = p->p_dtrace_ptss_free_list;
-               } while (!OSCompareAndSwap((UInt32)e->next, (UInt32)e, (volatile UInt32 *)&p->p_dtrace_ptss_free_list));
+               } while (!OSCompareAndSwapPtr((void *)e->next, (void *)e, (void * volatile *)&p->p_dtrace_ptss_free_list));
        }
 }
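Note: an illustrative caller-side view (not from the source) of the new guard: once p->p_dtrace_ptss_pages has been torn down, the release above is silently skipped instead of pushing onto a free list that can no longer be referenced safely.

/*
 * Hypothetical usage sketch: claim a scratch-space entry for process p,
 * use it, then hand it back.  If p is tearing down its ptss pages, the
 * release call is a no-op under the guard added above.
 */
struct dtrace_ptss_page_entry *entry = dtrace_ptss_claim_entry(p);
if (entry != NULL) {
	/* ... use the entry's user-space scratch address ... */
	dtrace_ptss_release_entry(p, entry);
}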
 
@@ -157,21 +162,18 @@ dtrace_ptss_allocate_page(struct proc* p)
        // Now allocate a page in user space and set its protections to allow execute.
        task_t task = p->task;
        vm_map_t map = get_task_map_reference(task);
+       if (map == NULL)
+         goto err;
 
-       mach_vm_address_t addr = 0LL;
-       mach_vm_size_t size = PAGE_SIZE; // We need some way to assert that this matches vm_map_round_page() !!!
+       mach_vm_size_t size = PAGE_MAX_SIZE;
+       mach_vm_offset_t addr = 0;
+       vm_prot_t cur_protection = VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE;
+       vm_prot_t max_protection = VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE;
 
-       kern_return_t kr = mach_vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
+       kern_return_t kr = mach_vm_map(map, &addr, size, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, FALSE, cur_protection, max_protection, VM_INHERIT_DEFAULT);
        if (kr != KERN_SUCCESS) {
                goto err;
        }
-
-       kr = mach_vm_protect(map, addr, size, 0, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
-       if (kr != KERN_SUCCESS) {
-               mach_vm_deallocate(map, addr, size);
-               goto err;
-       }       
-
        // Chain the page entries.
        int i;
        for (i=0; i<DTRACE_PTSS_ENTRIES_PER_PAGE; i++) {
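Note: the hunk above folds the old mach_vm_allocate() + mach_vm_protect() pair into a single mach_vm_map() call that creates anonymous memory (IPC_PORT_NULL backing) with both the current and maximum protections set to RWX, and adds a NULL check on the map returned by get_task_map_reference(). A minimal sketch of that allocation under the same assumptions (kernel context, kernel-internal get_task_map_reference() available); map_user_rwx_page is a hypothetical name:

#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>

/* Map one anonymous, user-visible RWX page into task's address space. */
static mach_vm_offset_t
map_user_rwx_page(task_t task)
{
	vm_map_t map = get_task_map_reference(task);
	if (map == NULL)
		return 0;               /* task has no map (e.g. it is exiting) */

	mach_vm_offset_t addr = 0;
	vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;

	/* IPC_PORT_NULL => anonymous memory; protections set in one call. */
	kern_return_t kr = mach_vm_map(map, &addr, PAGE_MAX_SIZE, 0,
	    VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, FALSE,
	    prot /* cur */, prot /* max */, VM_INHERIT_DEFAULT);

	vm_map_deallocate(map);         /* drop the reference taken above */
	return (kr == KERN_SUCCESS) ? addr : 0;
}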
@@ -189,7 +191,8 @@ dtrace_ptss_allocate_page(struct proc* p)
 err:
        _FREE(ptss_page, M_TEMP);
 
-       vm_map_deallocate(map);
+       if (map)
+         vm_map_deallocate(map);
 
        return NULL;
 }
@@ -214,6 +217,7 @@ dtrace_ptss_free_page(struct proc* p, struct dtrace_ptss_page* ptss_page)
        // Silent failures, no point in checking return code.
        mach_vm_deallocate(map, addr, size);
 
+
        vm_map_deallocate(map);
 }
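Note: for completeness, the matching teardown in dtrace_ptss_free_page() (context above) removes the mapping and drops the map reference. A hedged sketch with a hypothetical name (unmap_user_page), ignoring the mach_vm_deallocate() return code as the original does:

/* Remove a previously mapped page from task's address space. */
static void
unmap_user_page(task_t task, mach_vm_address_t addr)
{
	vm_map_t map = get_task_map_reference(task);
	if (map == NULL)
		return;

	/* Silent failure, matching the original: no point checking the result. */
	(void)mach_vm_deallocate(map, addr, PAGE_MAX_SIZE);
	vm_map_deallocate(map);
}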