/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Memory Object Management.
 */
#include "default_pager_internal.h"
#include <default_pager/default_pager_object_server.h>
#include <mach/memory_object_default_server.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <mach/upl.h>
#include <mach/vm_map.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
/* forward declaration */
vstruct_t vs_object_create(vm_size_t size);
/*
 * List of all vstructs.  A specific vstruct is
 * found directly via its port; this list is
 * only used for monitoring purposes by the
 * default_pager_object* calls and by ps_delete
 * when abstract memory objects must be scanned
 * to remove any live storage on a segment which
 * is to be removed.
 */
struct vstruct_list_head vstruct_list;
__private_extern__ void
vstruct_list_insert(
    vstruct_t vs)
{
    VSL_LOCK();
    queue_enter(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
    vstruct_list.vsl_count++;
    VSL_UNLOCK();
}
__private_extern__ void
vstruct_list_delete(
    vstruct_t vs)
{
    queue_remove(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
    vstruct_list.vsl_count--;
}
/*
 * We use the sequence numbers on requests to regulate
 * our parallelism.  In general, we allow multiple reads and writes
 * to proceed in parallel, with the exception that reads must
 * wait for previous writes to finish.  (Because the kernel might
 * generate a data-request for a page on the heels of a data-write
 * for the same page, and we must avoid returning stale data.)
 * Terminate requests wait for preceding reads and writes to finish.
 */

static unsigned int default_pager_total = 0;        /* debugging */
static unsigned int default_pager_wait_seqno = 0;   /* debugging */
static unsigned int default_pager_wait_read = 0;    /* debugging */
static unsigned int default_pager_wait_write = 0;   /* debugging */
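/*
 * Illustrative sketch (added commentary, not part of the original pager):
 * the read/write gating described above, restated as a self-contained
 * user-space analogy using pthreads.  The names gate_t, gate_start_write,
 * gate_finish_write and gate_wait_for_writers are hypothetical; the pager
 * itself expresses the same idea with VS_LOCK, assert_wait, thread_block
 * and thread_wakeup on the vs_writers/vs_readers counters below.
 */
#if 0   /* analogy only -- never compiled */
#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  idle;
    int             writers;    /* analogous to vs_writers */
} gate_t;

static void
gate_start_write(gate_t *g)
{
    pthread_mutex_lock(&g->lock);
    g->writers++;
    pthread_mutex_unlock(&g->lock);
}

static void
gate_finish_write(gate_t *g)
{
    pthread_mutex_lock(&g->lock);
    if (--g->writers == 0)
        pthread_cond_broadcast(&g->idle);   /* like thread_wakeup(&vs->vs_writers) */
    pthread_mutex_unlock(&g->lock);
}

/* A read must not start until every earlier write has drained. */
static void
gate_wait_for_writers(gate_t *g)
{
    pthread_mutex_lock(&g->lock);
    while (g->writers != 0)
        pthread_cond_wait(&g->idle, &g->lock);   /* like assert_wait + thread_block */
    pthread_mutex_unlock(&g->lock);
}
#endif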
__private_extern__ void
vs_async_wait(
    vstruct_t vs)
{
    ASSERT(vs->vs_async_pending >= 0);
    while (vs->vs_async_pending > 0) {
        vs->vs_waiting_async = TRUE;
        assert_wait(&vs->vs_async_pending, THREAD_UNINT);
        VS_UNLOCK(vs);
        thread_block(THREAD_CONTINUE_NULL);
        VS_LOCK(vs);
    }
    ASSERT(vs->vs_async_pending == 0);
}
#if PARALLEL
/*
 * Waits for correct sequence number.  Leaves pager locked.
 *
 * JMM - Sequence numbers guarantee ordering of requests generated
 * by a single thread if the receiver is multithreaded and
 * the interfaces are asynchronous (i.e. the sender can generate
 * more than one request before the first is received in the
 * pager).  Normally, IPC would generate these numbers in that
 * case.  But we are trying to avoid using IPC for the in-kernel
 * scenario.  Since these are actually invoked synchronously
 * anyway (in-kernel), we can just fake the sequence number
 * generation here (thus avoiding the dependence on IPC).
 */
__private_extern__ void
vs_lock(
    vstruct_t vs)
{
    mach_port_seqno_t seqno;

    default_pager_total++;
    VS_LOCK(vs);

    seqno = vs->vs_next_seqno++;

    while (vs->vs_seqno != seqno) {
        default_pager_wait_seqno++;
        vs->vs_waiting_seqno = TRUE;
        assert_wait(&vs->vs_seqno, THREAD_UNINT);
        VS_UNLOCK(vs);
        thread_block(THREAD_CONTINUE_NULL);
        VS_LOCK(vs);
    }
}
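/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the "faked" sequence number generation described above, shown as a
 * self-contained ticket gate using pthreads.  The names seq_gate_t,
 * seq_lock and seq_unlock are hypothetical; vs_lock()/vs_unlock()
 * implement the same idea with VS_LOCK, assert_wait and thread_wakeup
 * on vs->vs_seqno.
 */
#if 0   /* analogy only -- never compiled */
#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  turn;
    unsigned int    next_seqno;   /* analogous to vs_next_seqno */
    unsigned int    seqno;        /* analogous to vs_seqno */
} seq_gate_t;

/* Take a ticket and wait until it is our turn; returns with the mutex held. */
static unsigned int
seq_lock(seq_gate_t *g)
{
    unsigned int seqno;

    pthread_mutex_lock(&g->lock);
    seqno = g->next_seqno++;
    while (g->seqno != seqno)
        pthread_cond_wait(&g->turn, &g->lock);
    return seqno;
}

/* Advance the sequence number and let the next ticket holder run. */
static void
seq_unlock(seq_gate_t *g)
{
    g->seqno++;
    pthread_cond_broadcast(&g->turn);
    pthread_mutex_unlock(&g->lock);
}
#endif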
/*
 * Increments sequence number and unlocks pager.
 */
__private_extern__ void
vs_unlock(vstruct_t vs)
{
    vs->vs_seqno++;
    if (vs->vs_waiting_seqno) {
        vs->vs_waiting_seqno = FALSE;
        VS_UNLOCK(vs);
        thread_wakeup(&vs->vs_seqno);
        return;
    }
    VS_UNLOCK(vs);
}
/*
 * Start a read - one more reader.  Pager must be locked.
 */
__private_extern__ void
vs_start_read(
    vstruct_t vs)
{
    vs->vs_readers++;
}

/*
 * Wait for readers.  Unlocks and relocks pager if wait needed.
 */
__private_extern__ void
vs_wait_for_readers(
    vstruct_t vs)
{
    while (vs->vs_readers != 0) {
        default_pager_wait_read++;
        vs->vs_waiting_read = TRUE;
        assert_wait(&vs->vs_readers, THREAD_UNINT);
        VS_UNLOCK(vs);
        thread_block(THREAD_CONTINUE_NULL);
        VS_LOCK(vs);
    }
}
/*
 * Finish a read.  Pager is unlocked and returns unlocked.
 */
__private_extern__ void
vs_finish_read(
    vstruct_t vs)
{
    VS_LOCK(vs);
    if (--vs->vs_readers == 0 && vs->vs_waiting_read) {
        vs->vs_waiting_read = FALSE;
        VS_UNLOCK(vs);
        thread_wakeup(&vs->vs_readers);
        return;
    }
    VS_UNLOCK(vs);
}
/*
 * Start a write - one more writer.  Pager must be locked.
 */
__private_extern__ void
vs_start_write(
    vstruct_t vs)
{
    vs->vs_writers++;
}

/*
 * Wait for writers.  Unlocks and relocks pager if wait needed.
 */
__private_extern__ void
vs_wait_for_writers(
    vstruct_t vs)
{
    while (vs->vs_writers != 0) {
        default_pager_wait_write++;
        vs->vs_waiting_write = TRUE;
        assert_wait(&vs->vs_writers, THREAD_UNINT);
        VS_UNLOCK(vs);
        thread_block(THREAD_CONTINUE_NULL);
        VS_LOCK(vs);
    }
    vs_async_wait(vs);
}
/* This is to be used for the transfer from segment code ONLY.  The  */
/* transfer code holds off vs destruction by keeping the             */
/* vs_async_wait count non-zero.  It will not conflict with          */
/* other writers on an async basis because it only writes on         */
/* a cluster basis into fresh (as of sync time) cluster locations.   */

__private_extern__ void
vs_wait_for_sync_writers(
    vstruct_t vs)
{
    while (vs->vs_writers != 0) {
        default_pager_wait_write++;
        vs->vs_waiting_write = TRUE;
        assert_wait(&vs->vs_writers, THREAD_UNINT);
        VS_UNLOCK(vs);
        thread_block(THREAD_CONTINUE_NULL);
        VS_LOCK(vs);
    }
}
/*
 * Finish a write.  Pager is unlocked and returns unlocked.
 */
__private_extern__ void
vs_finish_write(
    vstruct_t vs)
{
    VS_LOCK(vs);
    if (--vs->vs_writers == 0 && vs->vs_waiting_write) {
        vs->vs_waiting_write = FALSE;
        VS_UNLOCK(vs);
        thread_wakeup(&vs->vs_writers);
        return;
    }
    VS_UNLOCK(vs);
}
#endif /* PARALLEL */
vstruct_t
vs_object_create(
    vm_size_t size)
{
    vstruct_t vs;

    /*
     * Allocate a vstruct. If there are any problems, then report them.
     */
    vs = ps_vstruct_create(size);
    if (vs == VSTRUCT_NULL) {
        dprintf(("vs_object_create: unable to allocate %s\n",
                 "-- either run swapon command or reboot"));
    }
    return vs;
}
void default_pager_add(vstruct_t, boolean_t);   /* forward */

void
default_pager_add(
    vstruct_t vs,
    boolean_t internal)
{
    memory_object_t     mem_obj = vs->vs_mem_obj;
    mach_port_t         pset;
    mach_port_mscount_t sync;
    mach_port_t         previous;
    static char         here[] = "default_pager_add";

    /*
     * The port currently has a make-send count of zero,
     * because either we just created the port or we just
     * received the port in a memory_object_create request.
     */

    if (internal) {
        /* possibly generate an immediate no-senders notification */
        sync = 0;
        pset = default_pager_internal_set;
    } else {
        /* delay notification till send right is created */
        sync = 1;
        pset = default_pager_external_set;
    }

    ipc_port_make_sonce(mem_obj);
    ip_lock(mem_obj);   /* unlocked in nsrequest below */
    ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
}
kern_return_t
dp_memory_object_init(
    memory_object_t         mem_obj,
    memory_object_control_t control,
    __unused vm_size_t      pager_page_size)
{
    vstruct_t vs;

    assert(pager_page_size == vm_page_size);

    memory_object_control_reference(control);

    vs_lookup(mem_obj, vs);
    vs_lock(vs);

    if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
        Panic("bad request");

    vs->vs_control = control;
    vs_unlock(vs);

    return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_synchronize(
    memory_object_t        mem_obj,
    memory_object_offset_t offset,
    vm_size_t              length,
    __unused vm_sync_t     flags)
{
    vstruct_t vs;

    vs_lookup(mem_obj, vs);
    vs_lock(vs);
    vs_unlock(vs);

    memory_object_synchronize_completed(vs->vs_control, offset, length);

    return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_unmap(
    __unused memory_object_t mem_obj)
{
    panic("dp_memory_object_unmap");
    return KERN_FAILURE;
}
kern_return_t
dp_memory_object_terminate(
    memory_object_t mem_obj)
{
    memory_object_control_t control;
    vstruct_t               vs;

    /*
     * The control port is a receive right, not a send right.
     */

    vs_lookup(mem_obj, vs);
    vs_lock(vs);

    /*
     * Wait for read and write requests to terminate.
     */

    vs_wait_for_readers(vs);
    vs_wait_for_writers(vs);

    /*
     * After memory_object_terminate both memory_object_init
     * and a no-senders notification are possible, so we need
     * to clean up our reference to the memory_object_control
     * to prepare for a new init.
     */

    control = vs->vs_control;
    vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;

    /* A bit of special case ugliness here.  Wakeup any waiting reads. */
    /* These data requests had to be removed from the seqno traffic    */
    /* based on a performance bottleneck with large memory objects;    */
    /* the problem will right itself with the new component based      */
    /* synchronous interface.  The new async will be able to return    */
    /* failure during its sync phase.  In the mean time ...            */

    thread_wakeup(&vs->vs_writers);
    thread_wakeup(&vs->vs_async_pending);

    vs_unlock(vs);

    /*
     * Now we deallocate our reference on the control.
     */
    memory_object_control_deallocate(control);

    return KERN_SUCCESS;
}
void
dp_memory_object_reference(
    memory_object_t mem_obj)
{
    vstruct_t vs;

    vs_lookup_safe(mem_obj, vs);
    if (vs == VSTRUCT_NULL)
        return;

    VS_LOCK(vs);
    assert(vs->vs_references > 0);
    vs->vs_references++;
    VS_UNLOCK(vs);
}
void
dp_memory_object_deallocate(
    memory_object_t mem_obj)
{
    vstruct_t         vs;
    mach_port_seqno_t seqno;

    /*
     * Because we don't give out multiple first references
     * for a memory object, there can't be a race
     * between getting a deallocate call and creating
     * a new reference for the object.
     */

    vs_lookup_safe(mem_obj, vs);
    if (vs == VSTRUCT_NULL)
        return;

    VS_LOCK(vs);
    if (--vs->vs_references > 0) {
        VS_UNLOCK(vs);
        return;
    }

    seqno = vs->vs_next_seqno++;
    while (vs->vs_seqno != seqno) {
        default_pager_wait_seqno++;
        vs->vs_waiting_seqno = TRUE;
        assert_wait(&vs->vs_seqno, THREAD_UNINT);
        VS_UNLOCK(vs);
        thread_block(THREAD_CONTINUE_NULL);
        VS_LOCK(vs);
    }

    vs_async_wait(vs);   /* wait for pending async IO */

    /* do not delete the vs structure until the referencing pointers */
    /* in the vstruct list have been expunged */

    /* get VSL_LOCK out of order by using TRY mechanism */
    while (!VSL_LOCK_TRY()) {
        VS_UNLOCK(vs);
        VSL_LOCK();
        VSL_UNLOCK();
        VS_LOCK(vs);
        vs_async_wait(vs);   /* wait for pending async IO */
    }

    /*
     * We shouldn't get a deallocation call
     * when the kernel has the object cached.
     */
    if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
        Panic("bad request");

    /*
     * Unlock the pager (though there should be no one
     * waiting for it).
     */
    VS_UNLOCK(vs);

    /* Lock out paging segment removal for the duration of this  */
    /* call.  We are vulnerable to losing a paging segment we    */
    /* rely on as soon as we remove ourselves from the VSL and   */
    /* unlock.                                                   */

    /* Keep our thread from blocking on attempt to trigger backing */
    /* store release.                                              */
    backing_store_release_trigger_disable += 1;

    /*
     * Remove the memory object port association, and then
     * destroy the port itself.  We must remove the object
     * from the port list before deallocating the pager,
     * because of default_pager_objects.
     */
    vstruct_list_delete(vs);
    VSL_UNLOCK();

    ps_vstruct_dealloc(vs);

    VSL_LOCK();
    backing_store_release_trigger_disable -= 1;
    if (backing_store_release_trigger_disable == 0) {
        thread_wakeup((event_t)&backing_store_release_trigger_disable);
    }
    VSL_UNLOCK();
}
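/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the try-lock/back-off idiom used above to take the list lock "out of
 * order" while the pager is already locked, expressed as a self-contained
 * pthread analogy.  The function and parameter names are hypothetical.
 */
#if 0   /* analogy only -- never compiled */
#include <pthread.h>

/*
 * Normal order is: list_lock, then object_lock.  Here we already hold
 * object_lock, so we may only *try* the list lock; if that fails we drop
 * object_lock, let the other party finish (by taking and releasing the
 * list lock in the correct order), and retry.
 */
static void
lock_list_out_of_order(pthread_mutex_t *list_lock, pthread_mutex_t *object_lock)
{
    while (pthread_mutex_trylock(list_lock) != 0) {
        pthread_mutex_unlock(object_lock);
        pthread_mutex_lock(list_lock);
        pthread_mutex_unlock(list_lock);
        pthread_mutex_lock(object_lock);
    }
    /* Both locks are held on return, without risking deadlock. */
}
#endif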
kern_return_t
dp_memory_object_data_request(
    memory_object_t        mem_obj,
    memory_object_offset_t offset,
    vm_size_t              length,
    __unused vm_prot_t     protection_required)
{
    vstruct_t vs;

    GSTAT(global_stats.gs_pagein_calls++);

    /* CDY: at this moment vs_lookup panics when presented with the wrong */
    /* port.  As we are expanding this pager to support user interfaces,  */
    /* this should be changed to return KERN_FAILURE.                     */
    vs_lookup(mem_obj, vs);
    vs_lock(vs);

    /* We are going to relax the strict sequencing here for performance */
    /* reasons.  We can do this because we know that the read and       */
    /* write threads are different and we rely on synchronization       */
    /* of read and write requests at the cache memory_object level.     */
    /* Break out wait_for_writers; all of this goes away when           */
    /* we get real control of seqno with the new component interface.   */

    if (vs->vs_writers != 0) {
        /* you can't hold on to the seqno and go */
        /* to sleep like that */
        vs_unlock(vs);   /* bump internal count of seqno */
        VS_LOCK(vs);
        while (vs->vs_writers != 0) {
            default_pager_wait_write++;
            vs->vs_waiting_write = TRUE;
            assert_wait(&vs->vs_writers, THREAD_UNINT);
            VS_UNLOCK(vs);
            thread_block(THREAD_CONTINUE_NULL);
            VS_LOCK(vs);
            vs_async_wait(vs);
        }
        if (vs->vs_control == MEMORY_OBJECT_CONTROL_NULL) {
            VS_UNLOCK(vs);
            return KERN_FAILURE;
        }
        vs_start_read(vs);
        VS_UNLOCK(vs);
    } else {
        vs_start_read(vs);
        vs_unlock(vs);
    }

    /*
     * Request must be on a page boundary and a multiple of pages.
     */
    if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0)
        Panic("bad alignment");

    pvs_cluster_read(vs, (vm_offset_t)offset, length);

    vs_finish_read(vs);

    return KERN_SUCCESS;
}
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been  */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages  */
/* to write back.  As of the writing of this note, this is indeed the case: */
/* the kernel writes back one page at a time through this interface.        */
kern_return_t
dp_memory_object_data_initialize(
    memory_object_t        mem_obj,
    memory_object_offset_t offset,
    vm_size_t              size)
{
    vstruct_t vs;

    DP_DEBUG(DEBUG_MO_EXTERNAL,
             ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n",
              (int)mem_obj, (int)offset, (int)size));
    GSTAT(global_stats.gs_pages_init += atop_32(size));

    vs_lookup(mem_obj, vs);
    vs_lock(vs);
    vs_start_write(vs);
    vs_unlock(vs);

    /*
     * Write the data via clustered writes.  vs_cluster_write will
     * loop if the address range specified crosses cluster
     * boundaries.
     */
    vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);

    vs_finish_write(vs);

    return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_data_unlock(
    __unused memory_object_t        mem_obj,
    __unused memory_object_offset_t offset,
    __unused vm_size_t              size,
    __unused vm_prot_t              desired_access)
{
    Panic("dp_memory_object_data_unlock: illegal");
    return KERN_FAILURE;
}
kern_return_t
dp_memory_object_data_return(
    memory_object_t                 mem_obj,
    memory_object_offset_t          offset,
    vm_size_t                       size,
    __unused memory_object_offset_t *resid_offset,
    __unused int                    *io_error,
    __unused boolean_t              dirty,
    __unused boolean_t              kernel_copy,
    __unused int                    upl_flags)
{
    vstruct_t vs;

    DP_DEBUG(DEBUG_MO_EXTERNAL,
             ("mem_obj=0x%x,offset=0x%x,size=0x%x\n",
              (int)mem_obj, (int)offset, (int)size));
    GSTAT(global_stats.gs_pageout_calls++);

    /* This routine is called by the pageout thread.  The pageout thread  */
    /* must not be blocked by read activities, so the grant of the vs     */
    /* lock is done on a try rather than a blocking basis.  The code      */
    /* below relies on the fact that the interface is synchronous.        */
    /* Should this interface be made asynchronous again for some type of  */
    /* pager in the future, the pages will have to be returned through a  */
    /* separate, asynchronous path.                                       */

    vs_lookup(mem_obj, vs);

    default_pager_total++;
    if (!VS_TRY_LOCK(vs)) {
        /* the call below will not be done by caller when we have */
        /* a synchronous interface */
        /* return KERN_LOCK_OWNED; */
        upl_t upl;
        int   page_list_count = 0;

        memory_object_super_upl_request(vs->vs_control,
                        (memory_object_offset_t)offset,
                        size, size,
                        &upl, NULL, &page_list_count,
                        UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
                        | UPL_NO_SYNC | UPL_COPYOUT_FROM);
        upl_abort(upl, 0);
        upl_deallocate(upl);
        return KERN_SUCCESS;
    }

    if ((vs->vs_seqno != vs->vs_next_seqno++)
        || (vs->vs_readers)
        || (vs->vs_xfer_pending)) {
        upl_t upl;
        int   page_list_count = 0;

        vs->vs_next_seqno--;
        VS_UNLOCK(vs);

        /* the call below will not be done by caller when we have */
        /* a synchronous interface */
        /* return KERN_LOCK_OWNED; */
        memory_object_super_upl_request(vs->vs_control,
                        (memory_object_offset_t)offset,
                        size, size,
                        &upl, NULL, &page_list_count,
                        UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
                        | UPL_NO_SYNC | UPL_COPYOUT_FROM);
        upl_abort(upl, 0);
        upl_deallocate(upl);
        return KERN_SUCCESS;
    }

    if ((size % vm_page_size) != 0)
        Panic("bad alignment");

    vs_start_write(vs);

    vs->vs_async_pending += 1;   /* protect from backing store contraction */
    vs_unlock(vs);

    /*
     * Write the data via clustered writes.  vs_cluster_write will
     * loop if the address range specified crosses cluster
     * boundaries.
     */
    vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);

    vs_finish_write(vs);

    /* temporary, need a finer lock based on cluster */

    VS_LOCK(vs);
    vs->vs_async_pending -= 1;   /* release vs_async_wait */
    if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
        vs->vs_waiting_async = FALSE;
        VS_UNLOCK(vs);
        thread_wakeup(&vs->vs_async_pending);
    } else {
        VS_UNLOCK(vs);
    }

    return KERN_SUCCESS;
}
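/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the vs_async_pending "in-flight work" counter used above, restated as a
 * self-contained pthread analogy.  The names inflight_t, inflight_begin,
 * inflight_end and inflight_wait are hypothetical; the pager pairs the
 * increment/decrement around vs_cluster_write and wakes vs_async_wait()
 * when the count drains to zero.
 */
#if 0   /* analogy only -- never compiled */
#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  drained;
    int             pending;    /* analogous to vs_async_pending */
} inflight_t;

static void
inflight_begin(inflight_t *f)        /* like vs->vs_async_pending += 1 */
{
    pthread_mutex_lock(&f->lock);
    f->pending++;
    pthread_mutex_unlock(&f->lock);
}

static void
inflight_end(inflight_t *f)          /* like vs->vs_async_pending -= 1 + wakeup */
{
    pthread_mutex_lock(&f->lock);
    if (--f->pending == 0)
        pthread_cond_broadcast(&f->drained);
    pthread_mutex_unlock(&f->lock);
}

static void
inflight_wait(inflight_t *f)         /* like vs_async_wait() */
{
    pthread_mutex_lock(&f->lock);
    while (f->pending > 0)
        pthread_cond_wait(&f->drained, &f->lock);
    pthread_mutex_unlock(&f->lock);
}
#endif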
/*
 * Routine:	default_pager_memory_object_create
 * Purpose:
 *	Handle requests for memory objects from the
 *	kernel.
 * Notes:
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
default_pager_memory_object_create(
    __unused memory_object_default_t dmm,
    vm_size_t                        new_size,
    memory_object_t                  *new_mem_obj)
{
    vstruct_t vs;

    assert(dmm == default_pager_object);

    vs = vs_object_create(new_size);
    if (vs == VSTRUCT_NULL)
        return KERN_RESOURCE_SHORTAGE;

    vs->vs_next_seqno = 0;

    /*
     * Set up associations between this memory object
     * and this default_pager structure.
     */

    vs->vs_mem_obj = ISVS;
    vs->vs_mem_obj_ikot = IKOT_MEMORY_OBJECT;

    /*
     * After this, other threads might receive requests
     * for this memory object or find it in the port list.
     */

    vstruct_list_insert(vs);
    *new_mem_obj = vs_to_mem_obj(vs);
    return KERN_SUCCESS;
}
/*
 * Create an external object.
 */
kern_return_t
default_pager_object_create(
    default_pager_t default_pager,
    vm_size_t       size,
    memory_object_t *mem_objp)
{
    vstruct_t vs;

    if (default_pager != default_pager_object)
        return KERN_INVALID_ARGUMENT;

    vs = vs_object_create(size);
    if (vs == VSTRUCT_NULL)
        return KERN_RESOURCE_SHORTAGE;

    /*
     * Set up associations between the default pager
     * and this vstruct structure.
     */
    vs->vs_mem_obj = ISVS;
    vstruct_list_insert(vs);
    *mem_objp = vs_to_mem_obj(vs);
    return KERN_SUCCESS;
}
kern_return_t
default_pager_objects(
    default_pager_t              default_pager,
    default_pager_object_array_t *objectsp,
    mach_msg_type_number_t       *ocountp,
    mach_port_array_t            *portsp,
    mach_msg_type_number_t       *pcountp)
{
    vm_offset_t            oaddr = 0;   /* memory for objects */
    vm_size_t              osize = 0;   /* current size */
    default_pager_object_t *objects;
    unsigned int           opotential = 0;

    vm_map_copy_t          pcopy = 0;   /* copy handle for pagers */
    vm_size_t              psize = 0;   /* current size */
    memory_object_t        *pagers;
    unsigned int           ppotential = 0;

    unsigned int           actual;
    unsigned int           num_objects;
    kern_return_t          kr;
    vstruct_t              entry;

    if (default_pager != default_pager_object)
        return KERN_INVALID_ARGUMENT;

    /*
     * We will send no more than this many.
     */
    actual = vstruct_list.vsl_count;

    /*
     * Our out-of-line port arrays are simply kalloc'ed.
     */
    psize = round_page(actual * sizeof * pagers);
    ppotential = psize / sizeof * pagers;
    pagers = (memory_object_t *)kalloc(psize);
    if (0 == pagers)
        return KERN_RESOURCE_SHORTAGE;

    /*
     * Returned out-of-line data must be allocated out of
     * the ipc_kernel_map, wired down, filled in, and
     * then "copied in" as if it had been sent by a
     * user process.
     */
    osize = round_page(actual * sizeof * objects);
    opotential = osize / sizeof * objects;
    kr = kmem_alloc(ipc_kernel_map, &oaddr, osize);
    if (KERN_SUCCESS != kr) {
        kfree(pagers, psize);
        return KERN_RESOURCE_SHORTAGE;
    }
    objects = (default_pager_object_t *)oaddr;

    /*
     * Now scan the list.
     */

    VSL_LOCK();

    num_objects = 0;
    queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t, vs_links) {

        memory_object_t pager;
        vm_size_t       size;

        if ((num_objects >= opotential) ||
            (num_objects >= ppotential)) {

            /*
             * This should be rare.  In any case,
             * we will only miss recent objects,
             * because they are added at the end.
             */
            break;
        }

        /*
         * Avoid interfering with normal operations.
         */
        if (!VS_MAP_TRY_LOCK(entry))
            goto not_this_one;
        size = ps_vstruct_allocated_size(entry);
        VS_MAP_UNLOCK(entry);

        /*
         * We need a reference for our caller.  Adding this
         * reference through the linked list could race with
         * destruction of the object.  If we find the object
         * has no references, just give up on it.
         */
        VS_LOCK(entry);
        if (entry->vs_references == 0) {
            VS_UNLOCK(entry);
            goto not_this_one;
        }
        pager = vs_to_mem_obj(entry);
        dp_memory_object_reference(pager);
        VS_UNLOCK(entry);

        /* the arrays are wired, so no deadlock worries */

        objects[num_objects].dpo_object = (vm_offset_t) entry;
        objects[num_objects].dpo_size = size;
        pagers[num_objects++] = pager;
        continue;

    not_this_one:
        /*
         * Do not return garbage.
         */
        objects[num_objects].dpo_object = (vm_offset_t) 0;
        objects[num_objects].dpo_size = 0;
        pagers[num_objects++] = MEMORY_OBJECT_NULL;
    }

    VSL_UNLOCK();

    /* clear out any excess allocation */
    while (num_objects < opotential) {
        objects[--opotential].dpo_object = (vm_offset_t) 0;
        objects[opotential].dpo_size = 0;
    }
    while (num_objects < ppotential) {
        pagers[--ppotential] = MEMORY_OBJECT_NULL;
    }

    kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(oaddr),
                       vm_map_round_page(oaddr + osize), FALSE);
    assert(KERN_SUCCESS == kr);
    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)oaddr,
                       (vm_map_size_t)osize, TRUE, &pcopy);
    assert(KERN_SUCCESS == kr);

    *objectsp = (default_pager_object_array_t)objects;
    *ocountp = num_objects;
    *portsp = (mach_port_array_t)pcopy;
    *pcountp = num_objects;

    return KERN_SUCCESS;
}
kern_return_t
default_pager_object_pages(
    default_pager_t            default_pager,
    mach_port_t                memory_object,
    default_pager_page_array_t *pagesp,
    mach_msg_type_number_t     *countp)
{
    vm_offset_t          addr = 0;   /* memory for page offsets */
    vm_size_t            size = 0;   /* current memory size */
    vm_map_copy_t        copy;
    default_pager_page_t *pages = 0;
    unsigned int         potential;
    unsigned int         actual;
    kern_return_t        kr;
    memory_object_t      object;

    if (default_pager != default_pager_object)
        return KERN_INVALID_ARGUMENT;

    object = (memory_object_t) memory_object;

    potential = 0;
    for (;;) {
        vstruct_t entry;

        VSL_LOCK();
        queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t,
                      vs_links) {
            VS_LOCK(entry);
            if (vs_to_mem_obj(entry) == object) {
                VSL_UNLOCK();
                goto found_object;
            }
            VS_UNLOCK(entry);
        }
        VSL_UNLOCK();

        /* did not find the object */
        if (0 != addr)
            kmem_free(ipc_kernel_map, addr, size);

        return KERN_INVALID_ARGUMENT;

    found_object:

        if (!VS_MAP_TRY_LOCK(entry)) {
            /* oh well -- bad luck */
            int wresult;

            VS_UNLOCK(entry);

            assert_wait_timeout((event_t)assert_wait_timeout,
                                THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
            wresult = thread_block(THREAD_CONTINUE_NULL);
            assert(wresult == THREAD_TIMED_OUT);
            continue;
        }

        actual = ps_vstruct_allocated_pages(entry, pages, potential);
        VS_MAP_UNLOCK(entry);
        VS_UNLOCK(entry);

        if (actual <= potential)
            break;

        /* allocate more memory */
        if (0 != addr)
            kmem_free(ipc_kernel_map, addr, size);

        size = round_page(actual * sizeof * pages);
        kr = kmem_alloc(ipc_kernel_map, &addr, size);
        if (KERN_SUCCESS != kr)
            return KERN_RESOURCE_SHORTAGE;

        pages = (default_pager_page_t *)addr;
        potential = size / sizeof * pages;
    }

    /*
     * Clear unused memory.
     */
    while (actual < potential)
        pages[--potential].dpp_offset = 0;

    kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
                       vm_map_round_page(addr + size), FALSE);
    assert(KERN_SUCCESS == kr);
    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                       (vm_map_size_t)size, TRUE, &copy);
    assert(KERN_SUCCESS == kr);

    *pagesp = (default_pager_page_array_t)copy;
    *countp = actual;

    return KERN_SUCCESS;
}
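/*
 * Illustrative sketch (added commentary, not part of the original source):
 * the allocate/measure/retry loop used by default_pager_object_pages()
 * above, restated as self-contained user-space C.  The names collect()
 * and fill() are hypothetical; the real code asks
 * ps_vstruct_allocated_pages() how many entries it needed and grows the
 * ipc_kernel_map buffer until everything fits.
 */
#if 0   /* analogy only -- never compiled */
#include <stdlib.h>

/*
 * fill() writes at most 'room' entries into 'buf' and returns how many
 * entries it *wanted* to write; when the return value exceeds 'room',
 * the buffer is grown and the pass is repeated.
 */
static int *
collect(size_t (*fill)(int *buf, size_t room), size_t *count_out)
{
    int    *buf = NULL;
    size_t room = 0, need;

    for (;;) {
        need = fill(buf, room);
        if (need <= room)
            break;                    /* everything fit on this pass */
        free(buf);                    /* too small: grow and retry */
        room = need;
        buf = malloc(room * sizeof *buf);
        if (buf == NULL)
            return NULL;              /* allocation failure */
    }
    *count_out = need;
    return buf;
}
#endif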