]> git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/default_pager/dp_memory_object.c
xnu-2050.48.11.tar.gz
[apple/xnu.git] / osfmk / default_pager / dp_memory_object.c
index 2664f3e06c35a80dbbf4b0629bb4dfb601535642..83c24fe6f655eac1aadefeb08d4879815cd601a2 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
@@ -73,7 +73,7 @@
 #include <vm/vm_protos.h>
 
 /* forward declaration */
-vstruct_t vs_object_create(vm_size_t size);
+vstruct_t vs_object_create(dp_size_t size);
 
 /*
  * List of all vstructs.  A specific vstruct is
@@ -301,7 +301,7 @@ vs_finish_write(
 
 vstruct_t
 vs_object_create(
-       vm_size_t size)
+       dp_size_t size)
 {
        vstruct_t       vs;
 
@@ -350,18 +350,34 @@ default_pager_add(
                pset = default_pager_external_set;
        }
 
-       ipc_port_make_sonce(mem_obj);
        ip_lock(mem_obj);  /* unlocked in nsrequest below */
+       ipc_port_make_sonce_locked(mem_obj);
        ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
 }
 
 #endif
 
/*
 * Pager-operations vtable for the default (anonymous-memory) pager.
 * The VM layer dispatches generic memory_object_* calls through this
 * table; default_pager_memory_object_create() installs a pointer to it
 * in each vstruct's vs_pager_ops.
 *
 * NOTE(review): entries are positional initializers -- the order must
 * match the field order of struct memory_object_pager_ops exactly;
 * confirm against that structure's declaration before reordering.
 */
const struct memory_object_pager_ops default_pager_ops = {
	dp_memory_object_reference,
	dp_memory_object_deallocate,
	dp_memory_object_init,
	dp_memory_object_terminate,
	dp_memory_object_data_request,
	dp_memory_object_data_return,
	dp_memory_object_data_initialize,
	dp_memory_object_data_unlock,
	dp_memory_object_synchronize,
	dp_memory_object_map,
	dp_memory_object_last_unmap,
	dp_memory_object_data_reclaim,
	"default pager"		/* human-readable pager name for diagnostics */
};
+
 kern_return_t
 dp_memory_object_init(
        memory_object_t         mem_obj,
        memory_object_control_t control,
-       __unused vm_size_t pager_page_size)
+       __unused memory_object_cluster_size_t pager_page_size)
 {
        vstruct_t               vs;
 
@@ -385,7 +401,7 @@ kern_return_t
 dp_memory_object_synchronize(
        memory_object_t         mem_obj,
        memory_object_offset_t  offset,
-       vm_size_t               length,
+       memory_object_size_t            length,
        __unused vm_sync_t              flags)
 {
        vstruct_t       vs;
@@ -400,14 +416,49 @@ dp_memory_object_synchronize(
 }
 
/*
 * dp_memory_object_map: memory_object_pager_ops entry invoked when a
 * memory object is mapped.  The default pager never expects this call
 * for its objects (hence the unconditional panic); the return statement
 * exists only to satisfy the function's signature.
 */
kern_return_t
dp_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("dp_memory_object_map");
	return KERN_FAILURE;	/* not reached */
}
 
/*
 * dp_memory_object_last_unmap: memory_object_pager_ops entry invoked
 * when the last mapping of a memory object is removed.  Like
 * dp_memory_object_map, this is never expected for default-pager
 * objects, so it panics; the return only satisfies the signature.
 */
kern_return_t
dp_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("dp_memory_object_last_unmap");
	return KERN_FAILURE;	/* not reached */
}
 
/*
 * dp_memory_object_data_reclaim: release the backing store held by the
 * default pager for this memory object.  Serializes against concurrent
 * transfers by waiting until vs_xfer_pending is clear, then claiming
 * the vstruct (vs_xfer_pending = TRUE) for the duration of the reclaim.
 *
 * mem_obj:               the memory object whose backing store to reclaim
 * reclaim_backing_store: passed through to ps_vstruct_reclaim();
 *                        presumably selects whether the underlying
 *                        paging-segment space is freed -- confirm against
 *                        ps_vstruct_reclaim().
 * Returns KERN_SUCCESS unconditionally.
 */
kern_return_t
dp_memory_object_data_reclaim(
	memory_object_t		mem_obj,
	boolean_t		reclaim_backing_store)
{
	vstruct_t		vs;

	vs_lookup(mem_obj, vs);
	for (;;) {
		vs_lock(vs);
		/*
		 * Wait for outstanding asynchronous operations to drain.
		 * NOTE(review): the loop re-enters vs_lock() without a
		 * visible vs_unlock() on the retry path -- presumably
		 * vs_async_wait() drops the vstruct lock while blocking;
		 * verify against vs_async_wait()'s implementation.
		 */
		vs_async_wait(vs);
		if (!vs->vs_xfer_pending) {
			/* no transfer in flight: we hold the lock and may
			 * claim the object */
			break;
		}
	}
	/* mark a transfer pending so other threads stay out while we reclaim */
	vs->vs_xfer_pending = TRUE;
	vs_unlock(vs);

	ps_vstruct_reclaim(vs, TRUE, reclaim_backing_store);

	/* reclaim done: release our claim on the vstruct */
	vs_lock(vs);
	vs->vs_xfer_pending = FALSE;
	vs_unlock(vs);

	return KERN_SUCCESS;
}
+
 kern_return_t
 dp_memory_object_terminate(
        memory_object_t         mem_obj)
@@ -567,10 +618,12 @@ kern_return_t
 dp_memory_object_data_request(
        memory_object_t         mem_obj,
        memory_object_offset_t  offset,
-       vm_size_t               length,
-       __unused vm_prot_t              protection_required)
+       memory_object_cluster_size_t            length,
+       __unused vm_prot_t      protection_required,
+        memory_object_fault_info_t     fault_info)
 {
        vstruct_t               vs;
+       kern_return_t           kr = KERN_SUCCESS;
 
        GSTAT(global_stats.gs_pagein_calls++);
 
@@ -619,11 +672,23 @@ dp_memory_object_data_request(
        if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0)
                Panic("bad alignment");
 
-       pvs_cluster_read(vs, (vm_offset_t)offset, length);
-
+       assert((dp_offset_t) offset == offset);
+       kr = pvs_cluster_read(vs, (dp_offset_t) offset, length, fault_info);
+
+	/* Regular data requests have a non-zero length and always return
+	   KERN_SUCCESS.  Their actual success is determined by whether they
+	   provide a page or not, i.e. whether we call upl_commit() or
+	   upl_abort().  A length of 0 means the caller is only asking whether
+	   the pager has a copy of that page; the answer to that question is
+	   given by the return value, and KERN_SUCCESS means the pager does
+	   have the page.
+	*/
+       if(length) {
+               kr = KERN_SUCCESS;
+       }
+       
        vs_finish_read(vs);
 
-       return KERN_SUCCESS;
+       return kr;
 }
 
 /*
@@ -642,7 +707,7 @@ kern_return_t
 dp_memory_object_data_initialize(
        memory_object_t         mem_obj,
        memory_object_offset_t  offset,
-       vm_size_t               size)
+       memory_object_cluster_size_t            size)
 {
        vstruct_t       vs;
 
@@ -661,7 +726,8 @@ dp_memory_object_data_initialize(
         * loop if the address range specified crosses cluster
         * boundaries.
         */
-       vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);
+       assert((upl_offset_t) offset == offset);
+       vs_cluster_write(vs, 0, (upl_offset_t)offset, size, FALSE, 0);
 
        vs_finish_write(vs);
 
@@ -672,7 +738,7 @@ kern_return_t
 dp_memory_object_data_unlock(
        __unused memory_object_t                mem_obj,
        __unused memory_object_offset_t offset,
-       __unused vm_size_t              size,
+       __unused memory_object_size_t           size,
        __unused vm_prot_t              desired_access)
 {
        Panic("dp_memory_object_data_unlock: illegal");
@@ -685,7 +751,7 @@ kern_return_t
 dp_memory_object_data_return(
        memory_object_t         mem_obj,
        memory_object_offset_t  offset,
-       vm_size_t                       size,
+       memory_object_cluster_size_t                    size,
        __unused memory_object_offset_t *resid_offset,
        __unused int            *io_error,
        __unused boolean_t      dirty,
@@ -715,7 +781,7 @@ dp_memory_object_data_return(
                /* a synchronous interface */
                /* return KERN_LOCK_OWNED; */
                upl_t           upl;
-               int             page_list_count = 0;
+               unsigned int    page_list_count = 0;
                memory_object_super_upl_request(vs->vs_control,
                                        (memory_object_offset_t)offset,
                                        size, size,
@@ -730,8 +796,8 @@ dp_memory_object_data_return(
        if ((vs->vs_seqno != vs->vs_next_seqno++)
                        || (vs->vs_readers)
                        || (vs->vs_xfer_pending)) {
-               upl_t   upl;
-               int     page_list_count = 0;
+               upl_t           upl;
+               unsigned int    page_list_count = 0;
 
                vs->vs_next_seqno--;
                 VS_UNLOCK(vs);
@@ -764,7 +830,8 @@ dp_memory_object_data_return(
         * loop if the address range specified crosses cluster
         * boundaries.
         */
-       vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);
+       assert((upl_offset_t) offset == offset);
+       vs_cluster_write(vs, 0, (upl_offset_t) offset, size, FALSE, 0);
 
        vs_finish_write(vs);
 
@@ -804,7 +871,12 @@ default_pager_memory_object_create(
 
        assert(dmm == default_pager_object);
 
-       vs = vs_object_create(new_size);
+       if ((dp_size_t) new_size != new_size) {
+               /* 32-bit overflow */
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       vs = vs_object_create((dp_size_t) new_size);
        if (vs == VSTRUCT_NULL)
                return KERN_RESOURCE_SHORTAGE;
 
@@ -815,8 +887,8 @@ default_pager_memory_object_create(
         * and this default_pager structure
         */
 
-       vs->vs_mem_obj = ISVS;
-       vs->vs_mem_obj_ikot = IKOT_MEMORY_OBJECT;
+       vs->vs_pager_ops = &default_pager_ops;
+       vs->vs_pager_header.io_bits = IKOT_MEMORY_OBJECT;
 
        /*
         * After this, other threads might receive requests
@@ -842,7 +914,12 @@ default_pager_object_create(
        if (default_pager != default_pager_object)
                return KERN_INVALID_ARGUMENT;
 
-       vs = vs_object_create(size);
+       if ((dp_size_t) size != size) {
+               /* 32-bit overflow */
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       vs = vs_object_create((dp_size_t) size);
        if (vs == VSTRUCT_NULL)
                return KERN_RESOURCE_SHORTAGE;
 
@@ -850,7 +927,7 @@ default_pager_object_create(
         * Set up associations between the default pager
         * and this vstruct structure
         */
-       vs->vs_mem_obj = ISVS;
+       vs->vs_pager_ops = &default_pager_ops;
        vstruct_list_insert(vs);
        *mem_objp = vs_to_mem_obj(vs);
        return KERN_SUCCESS;
@@ -890,8 +967,8 @@ default_pager_objects(
        /*
         * Our out-of-line port arrays are simply kalloc'ed.
         */
-       psize = round_page(actual * sizeof * pagers);
-       ppotential = psize / sizeof * pagers;
+       psize = round_page(actual * sizeof (*pagers));
+       ppotential = (unsigned int) (psize / sizeof (*pagers));
        pagers = (memory_object_t *)kalloc(psize);
        if (0 == pagers)
                return KERN_RESOURCE_SHORTAGE;
@@ -902,8 +979,8 @@ default_pager_objects(
         * then "copied in" as if it had been sent by a
         * user process.
         */
-       osize = round_page(actual * sizeof * objects);
-       opotential = osize / sizeof * objects;
+       osize = round_page(actual * sizeof (*objects));
+       opotential = (unsigned int) (osize / sizeof (*objects));
        kr = kmem_alloc(ipc_kernel_map, &oaddr, osize);
        if (KERN_SUCCESS != kr) {
                kfree(pagers, psize);
@@ -1071,13 +1148,13 @@ default_pager_object_pages(
                if (0 != addr)
                        kmem_free(ipc_kernel_map, addr, size);
 
-               size = round_page(actual * sizeof * pages);
+               size = round_page(actual * sizeof (*pages));
                kr = kmem_alloc(ipc_kernel_map, &addr, size);
                if (KERN_SUCCESS != kr)
                        return KERN_RESOURCE_SHORTAGE;
 
                pages = (default_pager_page_t *)addr;
-               potential = size / sizeof * pages;
+               potential = (unsigned int) (size / sizeof (*pages));
        }
 
        /*