/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Memory Object Management.
 */
#include "default_pager_internal.h"
#include <default_pager/default_pager_object_server.h>
#include <mach/memory_object_default_server.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <mach/vm_map.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>
/* forward declaration */
vstruct_t vs_object_create(dp_size_t size);
/*
 * List of all vstructs.  A specific vstruct is
 * found directly via its port; this list is
 * only used for monitoring purposes by the
 * default_pager_object* calls and by ps_delete
 * when abstract memory objects must be scanned
 * to remove any live storage on a segment which
 * is to be removed.
 */
struct vstruct_list_head	vstruct_list;
__private_extern__ void
vstruct_list_insert(
	vstruct_t vs)
{
	VSL_LOCK();
	queue_enter(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
	vstruct_list.vsl_count++;
	VSL_UNLOCK();
}
__private_extern__ void
vstruct_list_delete(
	vstruct_t vs)
{
	queue_remove(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
	vstruct_list.vsl_count--;
}
/*
 * We use the sequence numbers on requests to regulate
 * our parallelism.  In general, we allow multiple reads and writes
 * to proceed in parallel, with the exception that reads must
 * wait for previous writes to finish.  (Because the kernel might
 * generate a data-request for a page on the heels of a data-write
 * for the same page, and we must avoid returning stale data.)
 * Terminate requests wait for preceding reads and writes to finish.
 */
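/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * source): a read path that honors the rule above, draining writes already
 * in flight before reading, using the primitives defined in this file.
 *
 *	vs_lookup(mem_obj, vs);
 *	vs_lock(vs);			// take the next sequence number
 *	vs_wait_for_writers(vs);	// reads wait for previous writes
 *	vs_start_read(vs);
 *	vs_unlock(vs);			// bump vs_seqno, wake next request
 *	... read the data from backing store ...
 *	vs_finish_read(vs);		// wake anyone in vs_wait_for_readers()
 */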
static unsigned int	default_pager_total = 0;		/* debugging */
static unsigned int	default_pager_wait_seqno = 0;		/* debugging */
static unsigned int	default_pager_wait_read = 0;		/* debugging */
static unsigned int	default_pager_wait_write = 0;		/* debugging */
__private_extern__ void
vs_async_wait(
	vstruct_t	vs)
{
	ASSERT(vs->vs_async_pending >= 0);
	while (vs->vs_async_pending > 0) {
		vs->vs_waiting_async = TRUE;
		assert_wait(&vs->vs_async_pending, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
	ASSERT(vs->vs_async_pending == 0);
}
#if	PARALLEL
/*
 * Waits for correct sequence number.  Leaves pager locked.
 *
 * JMM - Sequence numbers guarantee ordering of requests generated
 *	 by a single thread if the receiver is multithreaded and
 *	 the interfaces are asynchronous (i.e. the sender can generate
 *	 more than one request before the first is received in the
 *	 pager).  Normally, IPC would generate these numbers in that
 *	 case.  But we are trying to avoid using IPC for the in-kernel
 *	 scenario.  Since these are actually invoked synchronously
 *	 anyway (in-kernel), we can just fake the sequence number
 *	 generation here (thus avoiding the dependence on IPC).
 */
__private_extern__ void
vs_lock(
	vstruct_t	vs)
{
	mach_port_seqno_t	seqno;

	default_pager_total++;
	VS_LOCK(vs);

	seqno = vs->vs_next_seqno++;

	while (vs->vs_seqno != seqno) {
		default_pager_wait_seqno++;
		vs->vs_waiting_seqno = TRUE;
		assert_wait(&vs->vs_seqno, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * Increments sequence number and unlocks pager.
 */
__private_extern__ void
vs_unlock(vstruct_t vs)
{
	vs->vs_seqno++;
	if (vs->vs_waiting_seqno) {
		vs->vs_waiting_seqno = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_seqno);
		return;
	}
	VS_UNLOCK(vs);
}
/*
 * Start a read - one more reader.  Pager must be locked.
 */
__private_extern__ void
vs_start_read(
	vstruct_t	vs)
{
	vs->vs_readers++;
}
/*
 * Wait for readers.  Unlocks and relocks pager if wait needed.
 */
__private_extern__ void
vs_wait_for_readers(
	vstruct_t	vs)
{
	while (vs->vs_readers != 0) {
		default_pager_wait_read++;
		vs->vs_waiting_read = TRUE;
		assert_wait(&vs->vs_readers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * Finish a read.  Pager is unlocked and returns unlocked.
 */
__private_extern__ void
vs_finish_read(
	vstruct_t	vs)
{
	VS_LOCK(vs);
	if (--vs->vs_readers == 0 && vs->vs_waiting_read) {
		vs->vs_waiting_read = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_readers);
		return;
	}
	VS_UNLOCK(vs);
}
/*
 * Start a write - one more writer.  Pager must be locked.
 */
__private_extern__ void
vs_start_write(
	vstruct_t	vs)
{
	vs->vs_writers++;
}
/*
 * Wait for writers.  Unlocks and relocks pager if wait needed.
 */
__private_extern__ void
vs_wait_for_writers(
	vstruct_t	vs)
{
	while (vs->vs_writers != 0) {
		default_pager_wait_write++;
		vs->vs_waiting_write = TRUE;
		assert_wait(&vs->vs_writers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
	vs_async_wait(vs);
}
/* This is to be used for the transfer from segment code ONLY.    */
/* The transfer code holds off vs destruction by keeping the      */
/* vs_async_wait count non-zero.  It will not conflict with       */
/* other writers on an async basis because it only writes on      */
/* a cluster basis into fresh (as of sync time) cluster locations. */
__private_extern__ void
vs_wait_for_sync_writers(
	vstruct_t	vs)
{
	while (vs->vs_writers != 0) {
		default_pager_wait_write++;
		vs->vs_waiting_write = TRUE;
		assert_wait(&vs->vs_writers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
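/*
 * Illustrative sketch only (hypothetical transfer-path caller, not part of
 * the original source): as the comment above describes, the segment-transfer
 * code keeps vs_async_pending non-zero so vs_async_wait() in the deallocate
 * path holds off vs destruction while it drains writers and copies clusters.
 *
 *	VS_LOCK(vs);
 *	vs->vs_async_pending += 1;		// hold off vs destruction
 *	vs_wait_for_sync_writers(vs);
 *	VS_UNLOCK(vs);
 *	... copy clusters into fresh (as of sync time) locations ...
 *	VS_LOCK(vs);
 *	vs->vs_async_pending -= 1;
 *	if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
 *		vs->vs_waiting_async = FALSE;
 *		thread_wakeup(&vs->vs_async_pending);
 *	}
 *	VS_UNLOCK(vs);
 */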
/*
 * Finish a write.  Pager is unlocked and returns unlocked.
 */
__private_extern__ void
vs_finish_write(
	vstruct_t	vs)
{
	VS_LOCK(vs);
	if (--vs->vs_writers == 0 && vs->vs_waiting_write) {
		vs->vs_waiting_write = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_writers);
		return;
	}
	VS_UNLOCK(vs);
}
#endif	/* PARALLEL */
vstruct_t
vs_object_create(
	dp_size_t size)
{
	vstruct_t	vs;

	/*
	 * Allocate a vstruct.  If there are any problems, then report them
	 * to the console.
	 */
	vs = ps_vstruct_create(size);
	if (vs == VSTRUCT_NULL) {
		dprintf(("vs_object_create: unable to allocate %s\n",
			 "-- either run swapon command or reboot"));
		return VSTRUCT_NULL;
	}

	return vs;
}
void default_pager_add(vstruct_t, boolean_t);	/* forward */

void
default_pager_add(
	vstruct_t vs,
	boolean_t internal)
{
	memory_object_t		mem_obj = vs->vs_mem_obj;
	mach_port_t		pset;
	mach_port_mscount_t	sync;
	mach_port_t		previous;
	static char		here[] = "default_pager_add";

	/*
	 * The port currently has a make-send count of zero,
	 * because either we just created the port or we just
	 * received the port in a memory_object_create request.
	 */

	if (internal) {
		/* possibly generate an immediate no-senders notification */
		sync = 0;
		pset = default_pager_internal_set;
	} else {
		/* delay notification till send right is created */
		sync = 1;
		pset = default_pager_external_set;
	}

	ip_lock(mem_obj);		/* unlocked in nsrequest below */
	ipc_port_make_sonce_locked(mem_obj);
	ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
}
const struct memory_object_pager_ops default_pager_ops = {
	dp_memory_object_reference,
	dp_memory_object_deallocate,
	dp_memory_object_init,
	dp_memory_object_terminate,
	dp_memory_object_data_request,
	dp_memory_object_data_return,
	dp_memory_object_data_initialize,
	dp_memory_object_data_unlock,
	dp_memory_object_synchronize,
	dp_memory_object_map,
	dp_memory_object_last_unmap,
	dp_memory_object_data_reclaim,
	"default pager"
};
kern_return_t
dp_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	__unused memory_object_cluster_size_t pager_page_size)
{
	vstruct_t		vs;

	assert(pager_page_size == vm_page_size);

	memory_object_control_reference(control);

	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
		Panic("bad request");

	vs->vs_control = control;
	vs_unlock(vs);

	return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	memory_object_size_t	length,
	__unused vm_sync_t	flags)
{
	vstruct_t	vs;

	vs_lookup(mem_obj, vs);
	vs_lock(vs);
	vs_unlock(vs);

	memory_object_synchronize_completed(vs->vs_control, offset, length);

	return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_map(
	__unused memory_object_t	mem_obj,
	__unused vm_prot_t		prot)
{
	panic("dp_memory_object_map");
	return KERN_FAILURE;
}
kern_return_t
dp_memory_object_last_unmap(
	__unused memory_object_t	mem_obj)
{
	panic("dp_memory_object_last_unmap");
	return KERN_FAILURE;
}
kern_return_t
dp_memory_object_data_reclaim(
	memory_object_t		mem_obj,
	boolean_t		reclaim_backing_store)
{
	vstruct_t		vs;

	vs_lookup(mem_obj, vs);
	for (;;) {
		vs_lock(vs);
		vs_async_wait(vs);
		if (!vs->vs_xfer_pending) {
			break;
		}
		vs_unlock(vs);
	}
	vs->vs_xfer_pending = TRUE;
	vs_unlock(vs);

	ps_vstruct_reclaim(vs, TRUE, reclaim_backing_store);

	vs_lock(vs);
	vs->vs_xfer_pending = FALSE;
	vs_unlock(vs);

	return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	vstruct_t		vs;

	/*
	 * control port is a receive right, not a send right.
	 */

	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	/*
	 * Wait for read and write requests to terminate.
	 */

	vs_wait_for_readers(vs);
	vs_wait_for_writers(vs);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = vs->vs_control;
	vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;

	/* A bit of special-case ugliness here: wake up any waiting reads. */
	/* These data requests had to be removed from the seqno traffic    */
	/* based on a performance bottleneck with large memory objects.    */
	/* The problem will right itself with the new component-based      */
	/* synchronous interface.  The new async will be able to return    */
	/* failure during its sync phase.  In the mean time ...            */

	thread_wakeup(&vs->vs_writers);
	thread_wakeup(&vs->vs_async_pending);

	vs_unlock(vs);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}
void
dp_memory_object_reference(
	memory_object_t		mem_obj)
{
	vstruct_t		vs;

	vs_lookup_safe(mem_obj, vs);
	if (vs == VSTRUCT_NULL)
		return;

	VS_LOCK(vs);
	assert(vs->vs_references > 0);
	vs->vs_references++;
	VS_UNLOCK(vs);
}
void
dp_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	vstruct_t		vs;
	mach_port_seqno_t	seqno;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	vs_lookup_safe(mem_obj, vs);
	if (vs == VSTRUCT_NULL)
		return;

	VS_LOCK(vs);
	if (--vs->vs_references > 0) {
		VS_UNLOCK(vs);
		return;
	}

	seqno = vs->vs_next_seqno++;
	while (vs->vs_seqno != seqno) {
		default_pager_wait_seqno++;
		vs->vs_waiting_seqno = TRUE;
		assert_wait(&vs->vs_seqno, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}

	vs_async_wait(vs);	/* wait for pending async IO */

	/* do not delete the vs structure until the referencing pointers */
	/* in the vstruct list have been expunged */

	/* get VSL_LOCK out of order by using TRY mechanism */
	while (!VSL_LOCK_TRY()) {
		VS_UNLOCK(vs);
		VSL_LOCK();
		VSL_UNLOCK();
		VS_LOCK(vs);
		vs_async_wait(vs);	/* wait for pending async IO */
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
		Panic("bad request");

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	VS_UNLOCK(vs);

	/* Lock out paging segment removal for the duration of this call. */
	/* We are vulnerable to losing a paging segment we rely on as     */
	/* soon as we remove ourselves from the VSL and unlock.           */

	/* Keep our thread from blocking on attempt to trigger backing */
	/* store release.                                               */
	backing_store_release_trigger_disable += 1;

	/*
	 * Remove the memory object port association, and then
	 * destroy the port itself.  We must remove the object
	 * from the port list before deallocating the pager,
	 * because of default_pager_objects.
	 */
	vstruct_list_delete(vs);
	VSL_UNLOCK();

	ps_vstruct_dealloc(vs);

	VSL_LOCK();
	backing_store_release_trigger_disable -= 1;
	if (backing_store_release_trigger_disable == 0) {
		thread_wakeup((event_t)&backing_store_release_trigger_disable);
	}
	VSL_UNLOCK();
}
kern_return_t
dp_memory_object_data_request(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	length,
	__unused vm_prot_t		protection_required,
	memory_object_fault_info_t	fault_info)
{
	vstruct_t		vs;
	kern_return_t		kr = KERN_SUCCESS;

	GSTAT(global_stats.gs_pagein_calls++);

	/* CDY at this moment vs_lookup panics when presented with the wrong */
	/* port.  As we are expanding this pager to support user interfaces, */
	/* this should be changed to return kern_failure.                    */
	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	/* We are going to relax the strict sequencing here for performance */
	/* reasons.  We can do this because we know that the read and       */
	/* write threads are different and we rely on synchronization       */
	/* of read and write requests at the cache memory_object level.     */
	/* Break out wait_for_writers; all of this goes away when           */
	/* we get real control of seqno with the new component interface.   */

	if (vs->vs_writers != 0) {
		/* you can't hold on to the seqno and go */
		/* to sleep like that */
		vs_unlock(vs);	/* bump internal count of seqno */
		VS_LOCK(vs);
		while (vs->vs_writers != 0) {
			default_pager_wait_write++;
			vs->vs_waiting_write = TRUE;
			assert_wait(&vs->vs_writers, THREAD_UNINT);
			VS_UNLOCK(vs);
			thread_block(THREAD_CONTINUE_NULL);
			VS_LOCK(vs);
		}
		if (vs->vs_control == MEMORY_OBJECT_CONTROL_NULL) {
			VS_UNLOCK(vs);
			return KERN_FAILURE;
		}
		vs_start_read(vs);
		VS_UNLOCK(vs);
	} else {
		vs_start_read(vs);
		vs_unlock(vs);
	}

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0)
		Panic("bad alignment");

	assert((dp_offset_t) offset == offset);
	kr = pvs_cluster_read(vs, (dp_offset_t) offset, length, fault_info);

	/* Regular data requests have a non-zero length and always return
	   KERN_SUCCESS.  Their actual success is determined by the fact that
	   they provide a page or not, i.e. whether we call upl_commit() or
	   upl_abort().  A length of 0 means that the caller is only asking
	   if the pager has a copy of that page or not.  The answer to that
	   question is provided by the return value.  KERN_SUCCESS means that
	   the pager ... */

	vs_finish_read(vs);

	return kr;
}
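/*
 * Illustrative sketch only (hypothetical callers, not part of the original
 * source), restating the convention described in the comment above:
 *
 *	// Zero-length probe: "does the pager hold a copy of this page?"
 *	// The answer is carried entirely in the return value.
 *	kr = dp_memory_object_data_request(mem_obj, offset, 0,
 *					   VM_PROT_READ, NULL);
 *
 *	// Regular request: the return value is KERN_SUCCESS; the real
 *	// outcome is whether a page is supplied (upl_commit) or not
 *	// (upl_abort) by pvs_cluster_read().
 *	kr = dp_memory_object_data_request(mem_obj, offset, vm_page_size,
 *					   VM_PROT_READ, fault_info);
 */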
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been  */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages  */
/* to write back.  As of the writing of this note, this is indeed the case; */
/* the kernel writes back one page at a time through this interface.        */
kern_return_t
dp_memory_object_data_initialize(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	size)
{
	vstruct_t	vs;

	DP_DEBUG(DEBUG_MO_EXTERNAL,
		 ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n",
		  (int)mem_obj, (int)offset, (int)size));
	GSTAT(global_stats.gs_pages_init += atop_32(size));

	vs_lookup(mem_obj, vs);
	vs_lock(vs);
	vs_start_write(vs);
	vs_unlock(vs);

	/*
	 * Write the data via clustered writes.  vs_cluster_write will
	 * loop if the address range specified crosses cluster
	 * boundaries.
	 */
	assert((upl_offset_t) offset == offset);
	vs_cluster_write(vs, 0, (upl_offset_t)offset, size, FALSE, 0);

	vs_finish_write(vs);

	return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_data_unlock(
	__unused memory_object_t	mem_obj,
	__unused memory_object_offset_t	offset,
	__unused memory_object_size_t	size,
	__unused vm_prot_t		desired_access)
{
	Panic("dp_memory_object_data_unlock: illegal");
	return KERN_FAILURE;
}
kern_return_t
dp_memory_object_data_return(
	memory_object_t			mem_obj,
	memory_object_offset_t		offset,
	memory_object_cluster_size_t	size,
	__unused memory_object_offset_t	*resid_offset,
	__unused int			*io_error,
	__unused boolean_t		dirty,
	__unused boolean_t		kernel_copy,
	__unused int			upl_flags)
{
	vstruct_t	vs;

	DP_DEBUG(DEBUG_MO_EXTERNAL,
		 ("mem_obj=0x%x,offset=0x%x,size=0x%x\n",
		  (int)mem_obj, (int)offset, (int)size));
	GSTAT(global_stats.gs_pageout_calls++);

	/* This routine is called by the pageout thread.  The pageout thread */
	/* must not be blocked by read activity, so the grant of the vs lock */
	/* is done on a try versus a blocking basis.  The code below relies  */
	/* on the fact that the interface is synchronous.  Should this       */
	/* interface be again async for some type of pager in the future,    */
	/* the pages will have to be returned through a separate,            */
	/* asynchronous path.                                                 */

	vs_lookup(mem_obj, vs);

	default_pager_total++;

	if (!VS_TRY_LOCK(vs)) {
		/* the call below will not be done by caller when we have */
		/* a synchronous interface */
		/* return KERN_LOCK_OWNED; */
		upl_t		upl;
		unsigned int	page_list_count = 0;

		memory_object_super_upl_request(vs->vs_control,
				(memory_object_offset_t)offset,
				size, size,
				&upl, NULL, &page_list_count,
				UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
				| UPL_NO_SYNC | UPL_COPYOUT_FROM);
		upl_abort(upl, 0);
		upl_deallocate(upl);
		return KERN_SUCCESS;
	}

	if ((vs->vs_seqno != vs->vs_next_seqno++)
	    || (vs->vs_xfer_pending)) {
		upl_t		upl;
		unsigned int	page_list_count = 0;

		vs->vs_next_seqno--;
		VS_UNLOCK(vs);

		/* the call below will not be done by caller when we have */
		/* a synchronous interface */
		/* return KERN_LOCK_OWNED; */
		memory_object_super_upl_request(vs->vs_control,
				(memory_object_offset_t)offset,
				size, size,
				&upl, NULL, &page_list_count,
				UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
				| UPL_NO_SYNC | UPL_COPYOUT_FROM);
		upl_abort(upl, 0);
		upl_deallocate(upl);
		return KERN_SUCCESS;
	}

	if ((size % vm_page_size) != 0)
		Panic("bad alignment");

	vs_start_write(vs);

	vs->vs_async_pending += 1;	/* protect from backing store contraction */
	vs_unlock(vs);

	/*
	 * Write the data via clustered writes.  vs_cluster_write will
	 * loop if the address range specified crosses cluster
	 * boundaries.
	 */
	assert((upl_offset_t) offset == offset);
	vs_cluster_write(vs, 0, (upl_offset_t) offset, size, FALSE, 0);

	vs_finish_write(vs);

	/* temporary, need a finer lock based on cluster */

	VS_LOCK(vs);
	vs->vs_async_pending -= 1;	/* release vs_async_wait */
	if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
		vs->vs_waiting_async = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_async_pending);
	} else {
		VS_UNLOCK(vs);
	}

	return KERN_SUCCESS;
}
/*
 * Routine:	default_pager_memory_object_create
 *	Handle requests for memory objects from the
 *	kernel.
 *
 *	Because we only give out the default memory
 *	manager port to the kernel, we don't have to
 *	be so paranoid about the contents.
 */
kern_return_t
default_pager_memory_object_create(
	__unused memory_object_default_t	dmm,
	vm_size_t		new_size,
	memory_object_t		*new_mem_obj)
{
	vstruct_t		vs;

	assert(dmm == default_pager_object);

	if ((dp_size_t) new_size != new_size) {
		/* 32-bit overflow */
		return KERN_INVALID_ARGUMENT;
	}

	vs = vs_object_create((dp_size_t) new_size);
	if (vs == VSTRUCT_NULL)
		return KERN_RESOURCE_SHORTAGE;

	vs->vs_next_seqno = 0;

	/*
	 * Set up associations between this memory object
	 * and this default_pager structure.
	 */

	vs->vs_pager_ops = &default_pager_ops;
	vs->vs_pager_header.io_bits = IKOT_MEMORY_OBJECT;

	/*
	 * After this, other threads might receive requests
	 * for this memory object or find it in the port list.
	 */

	vstruct_list_insert(vs);
	*new_mem_obj = vs_to_mem_obj(vs);

	return KERN_SUCCESS;
}
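/*
 * Illustrative sketch only (hypothetical kernel-side caller, not part of
 * the original source): the kernel asks the default memory manager for an
 * abstract memory object backing new_size bytes of anonymous memory.
 *
 *	memory_object_t	mem_obj;
 *	kern_return_t	kr;
 *
 *	kr = default_pager_memory_object_create(default_pager_object,
 *						 new_size, &mem_obj);
 *	if (kr != KERN_SUCCESS)
 *		... no backing store available (KERN_RESOURCE_SHORTAGE) ...
 */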
/*
 * Create an external object.
 */
kern_return_t
default_pager_object_create(
	default_pager_t	default_pager,
	vm_size_t	size,
	memory_object_t	*mem_objp)
{
	vstruct_t	vs;

	if (default_pager != default_pager_object)
		return KERN_INVALID_ARGUMENT;

	if ((dp_size_t) size != size) {
		/* 32-bit overflow */
		return KERN_INVALID_ARGUMENT;
	}

	vs = vs_object_create((dp_size_t) size);
	if (vs == VSTRUCT_NULL)
		return KERN_RESOURCE_SHORTAGE;

	/*
	 * Set up associations between the default pager
	 * and this vstruct structure.
	 */
	vs->vs_pager_ops = &default_pager_ops;
	vstruct_list_insert(vs);
	*mem_objp = vs_to_mem_obj(vs);

	return KERN_SUCCESS;
}
kern_return_t
default_pager_objects(
	default_pager_t			default_pager,
	default_pager_object_array_t	*objectsp,
	mach_msg_type_number_t		*ocountp,
	mach_port_array_t		*portsp,
	mach_msg_type_number_t		*pcountp)
{
	vm_offset_t		oaddr = 0;	/* memory for objects */
	vm_size_t		osize = 0;	/* current size */
	default_pager_object_t	*objects;
	unsigned int		opotential = 0;

	vm_map_copy_t		pcopy = 0;	/* copy handle for pagers */
	vm_size_t		psize = 0;	/* current size */
	memory_object_t		*pagers;
	unsigned int		ppotential = 0;

	unsigned int		actual;
	unsigned int		num_objects;
	kern_return_t		kr;
	vstruct_t		entry;

	if (default_pager != default_pager_object)
		return KERN_INVALID_ARGUMENT;

	/*
	 * We will send no more than this many.
	 */
	actual = vstruct_list.vsl_count;

	/*
	 * Our out-of-line port arrays are simply kalloc'ed.
	 */
	psize = round_page(actual * sizeof (*pagers));
	ppotential = (unsigned int) (psize / sizeof (*pagers));
	pagers = (memory_object_t *)kalloc(psize);
	if (0 == pagers)
		return KERN_RESOURCE_SHORTAGE;

	/*
	 * Returned out-of-line data must be allocated out of
	 * the ipc_kernel_map, wired down, filled in, and
	 * then "copied in" as if it had been sent by a
	 * user task.
	 */
	osize = round_page(actual * sizeof (*objects));
	opotential = (unsigned int) (osize / sizeof (*objects));
	kr = kmem_alloc(ipc_kernel_map, &oaddr, osize);
	if (KERN_SUCCESS != kr) {
		kfree(pagers, psize);
		return KERN_RESOURCE_SHORTAGE;
	}
	objects = (default_pager_object_t *)oaddr;

	/*
	 * Now scan the list.
	 */
	VSL_LOCK();

	num_objects = 0;
	queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t, vs_links) {

		memory_object_t		pager;
		vm_size_t		size;

		if ((num_objects >= opotential) ||
		    (num_objects >= ppotential)) {
			/*
			 * This should be rare.  In any case,
			 * we will only miss recent objects,
			 * because they are added at the end.
			 */
			break;
		}

		/*
		 * Avoid interfering with normal operations.
		 */
		if (!VS_MAP_TRY_LOCK(entry))
			goto not_this_one;
		size = ps_vstruct_allocated_size(entry);
		VS_MAP_UNLOCK(entry);

		/*
		 * We need a reference for our caller.  Adding this
		 * reference through the linked list could race with
		 * destruction of the object.  If we find the object
		 * has no references, just give up on it.
		 */
		VS_LOCK(entry);
		if (entry->vs_references == 0) {
			VS_UNLOCK(entry);
			goto not_this_one;
		}
		VS_UNLOCK(entry);
		pager = vs_to_mem_obj(entry);
		dp_memory_object_reference(pager);

		/* the arrays are wired, so no deadlock worries */

		objects[num_objects].dpo_object = (vm_offset_t) entry;
		objects[num_objects].dpo_size = size;
		pagers[num_objects++] = pager;
		continue;

	    not_this_one:
		/*
		 * Do not return garbage.
		 */
		objects[num_objects].dpo_object = (vm_offset_t) 0;
		objects[num_objects].dpo_size = 0;
		pagers[num_objects++] = MEMORY_OBJECT_NULL;
	}

	VSL_UNLOCK();

	/* clear out any excess allocation */
	while (num_objects < opotential) {
		objects[--opotential].dpo_object = (vm_offset_t) 0;
		objects[opotential].dpo_size = 0;
	}
	while (num_objects < ppotential) {
		pagers[--ppotential] = MEMORY_OBJECT_NULL;
	}

	kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(oaddr),
			   vm_map_round_page(oaddr + osize), FALSE);
	assert(KERN_SUCCESS == kr);
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)oaddr,
			   (vm_map_size_t)osize, TRUE, &pcopy);
	assert(KERN_SUCCESS == kr);

	*objectsp = (default_pager_object_array_t)objects;
	*ocountp = num_objects;
	*portsp = (mach_port_array_t)pcopy;
	*pcountp = num_objects;

	return KERN_SUCCESS;
}
kern_return_t
default_pager_object_pages(
	default_pager_t			default_pager,
	mach_port_t			memory_object,
	default_pager_page_array_t	*pagesp,
	mach_msg_type_number_t		*countp)
{
	vm_offset_t		addr = 0;	/* memory for page offsets */
	vm_size_t		size = 0;	/* current memory size */
	vm_map_copy_t		copy;
	default_pager_page_t	*pages = 0;
	unsigned int		potential;
	unsigned int		actual;
	kern_return_t		kr;
	memory_object_t		object;

	if (default_pager != default_pager_object)
		return KERN_INVALID_ARGUMENT;

	object = (memory_object_t) memory_object;

	potential = 0;
	for (;;) {
		vstruct_t	entry;

		VSL_LOCK();
		queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t,
			      vs_links) {
			if (vs_to_mem_obj(entry) == object) {
				VSL_UNLOCK();
				goto found_object;
			}
		}
		VSL_UNLOCK();

		/* did not find the object */
		if (addr != 0)
			kmem_free(ipc_kernel_map, addr, size);

		return KERN_INVALID_ARGUMENT;

	    found_object:

		if (!VS_MAP_TRY_LOCK(entry)) {
			/* oh well bad luck */
			wait_result_t	wresult;

			assert_wait_timeout((event_t)assert_wait_timeout,
					    THREAD_UNINT, 1, 1000*NSEC_PER_USEC);
			wresult = thread_block(THREAD_CONTINUE_NULL);
			assert(wresult == THREAD_TIMED_OUT);
			continue;
		}

		actual = ps_vstruct_allocated_pages(entry, pages, potential);
		VS_MAP_UNLOCK(entry);

		if (actual <= potential)
			break;

		/* allocate more memory */
		if (addr != 0)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof (*pages));
		kr = kmem_alloc(ipc_kernel_map, &addr, size);
		if (KERN_SUCCESS != kr)
			return KERN_RESOURCE_SHORTAGE;

		pages = (default_pager_page_t *)addr;
		potential = (unsigned int) (size / sizeof (*pages));
	}

	/*
	 * Clear unused memory.
	 */
	while (actual < potential)
		pages[--potential].dpp_offset = 0;

	kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
			   vm_map_round_page(addr + size), FALSE);
	assert(KERN_SUCCESS == kr);
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
			   (vm_map_size_t)size, TRUE, &copy);
	assert(KERN_SUCCESS == kr);

	*pagesp = (default_pager_page_array_t)copy;
	*countp = actual;

	return KERN_SUCCESS;
}