/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Memory Object Management.
 */
#include "default_pager_internal.h"
#include <mach/memory_object_types.h>
#include <mach/memory_object_server.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
/*
 * List of all vstructs.  A specific vstruct is
 * found directly via its port, this list is
 * only used for monitoring purposes by the
 * default_pager_object* calls and by ps_delete
 * when abstract memory objects must be scanned
 * to remove any live storage on a segment which
 * is to be removed.
 */
struct vstruct_list_head	vstruct_list;
__private_extern__
void
vstruct_list_insert(
	vstruct_t vs)
{
	VSL_LOCK();
	queue_enter(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
	vstruct_list.vsl_count++;
	VSL_UNLOCK();
}
__private_extern__
void
vstruct_list_delete(
	vstruct_t vs)
{
	/* caller is expected to hold the vstruct list lock */
	queue_remove(&vstruct_list.vsl_queue, vs, vstruct_t, vs_links);
	vstruct_list.vsl_count--;
}
/*
 * We use the sequence numbers on requests to regulate
 * our parallelism.  In general, we allow multiple reads and writes
 * to proceed in parallel, with the exception that reads must
 * wait for previous writes to finish.  (Because the kernel might
 * generate a data-request for a page on the heels of a data-write
 * for the same page, and we must avoid returning stale data.)
 * Terminate requests wait for preceding reads and writes to finish.
 */
static unsigned int default_pager_total = 0;		/* debugging */
static unsigned int default_pager_wait_seqno = 0;	/* debugging */
static unsigned int default_pager_wait_read = 0;	/* debugging */
static unsigned int default_pager_wait_write = 0;	/* debugging */
static unsigned int default_pager_wait_refs = 0;	/* debugging */
__private_extern__
void
vs_async_wait(
	vstruct_t vs)
{
	ASSERT(vs->vs_async_pending >= 0);
	while (vs->vs_async_pending > 0) {
		vs->vs_waiting_async = TRUE;
		assert_wait(&vs->vs_async_pending, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
	ASSERT(vs->vs_async_pending == 0);
}
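
/*
 * Illustrative sketch (not part of the pager, kept under #if 0): how a
 * caller holds off vstruct destruction across asynchronous I/O with the
 * vs_async_pending counter, mirroring the pattern used in
 * dp_memory_object_data_return later in this file.  The routine name
 * example_async_write is hypothetical; the fields and VS_LOCK/VS_UNLOCK
 * macros are the ones used throughout this file.
 */
#if 0
static void
example_async_write(vstruct_t vs)
{
	VS_LOCK(vs);
	vs->vs_async_pending += 1;	/* hold off vs destruction */
	VS_UNLOCK(vs);

	/* ... issue the asynchronous write here ... */

	VS_LOCK(vs);
	vs->vs_async_pending -= 1;	/* release vs_async_wait */
	if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
		vs->vs_waiting_async = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_async_pending);
	} else {
		VS_UNLOCK(vs);
	}
}
#endif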
/*
 * Waits for correct sequence number.  Leaves pager locked.
 *
 * JMM - Sequence numbers guarantee ordering of requests generated
 *	by a single thread if the receiver is multithreaded and
 *	the interfaces are asynchronous (i.e. sender can generate
 *	more than one request before the first is received in the
 *	pager).  Normally, IPC would generate these numbers in that
 *	case.  But we are trying to avoid using IPC for the in-kernel
 *	scenario.  Since these are actually invoked synchronously
 *	anyway (in-kernel), we can just fake the sequence number
 *	generation here (thus avoiding the dependence on IPC).
 */
__private_extern__
void
vs_lock(
	vstruct_t vs)
{
	mach_port_seqno_t seqno;

	default_pager_total++;
	VS_LOCK(vs);

	seqno = vs->vs_next_seqno++;

	while (vs->vs_seqno != seqno) {
		default_pager_wait_seqno++;
		vs->vs_waiting_seqno = TRUE;
		assert_wait(&vs->vs_seqno, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * Increments sequence number and unlocks pager.
 */
__private_extern__
void
vs_unlock(vstruct_t vs)
{
	vs->vs_seqno++;
	if (vs->vs_waiting_seqno) {
		vs->vs_waiting_seqno = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_seqno);
		return;
	}
	VS_UNLOCK(vs);
}
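
/*
 * Illustrative sketch (not part of the pager, kept under #if 0): a request
 * handler entering and leaving the fake-seqno serialization described
 * above.  vs_lock() waits for this request's turn and returns with the
 * pager locked; vs_unlock() bumps vs_seqno and wakes the next waiter.
 * The name example_handle_request is hypothetical; vs_lookup is the macro
 * used by the dp_memory_object_* routines below.
 */
#if 0
static void
example_handle_request(memory_object_t mem_obj)
{
	vstruct_t	vs;

	vs_lookup(mem_obj, vs);	/* find the vstruct for this object */
	vs_lock(vs);		/* take our turn; returns with pager locked */
	/* ... handle the request in sequence-number order ... */
	vs_unlock(vs);		/* advance vs_seqno, wake the next waiter */
}
#endif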
/*
 * Start a read - one more reader.  Pager must be locked.
 */
__private_extern__
void
vs_start_read(
	vstruct_t vs)
{
	vs->vs_readers++;
}
/*
 * Wait for readers.  Unlocks and relocks pager if wait needed.
 */
__private_extern__
void
vs_wait_for_readers(
	vstruct_t vs)
{
	while (vs->vs_readers != 0) {
		default_pager_wait_read++;
		vs->vs_waiting_read = TRUE;
		assert_wait(&vs->vs_readers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * Finish a read.  Pager is unlocked and returns unlocked.
 */
__private_extern__
void
vs_finish_read(
	vstruct_t vs)
{
	VS_LOCK(vs);
	if (--vs->vs_readers == 0 && vs->vs_waiting_read) {
		vs->vs_waiting_read = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_readers);
		return;
	}
	VS_UNLOCK(vs);
}
/*
 * Start a write - one more writer.  Pager must be locked.
 */
__private_extern__
void
vs_start_write(
	vstruct_t vs)
{
	vs->vs_writers++;
}
/*
 * Wait for writers.  Unlocks and relocks pager if wait needed.
 */
__private_extern__
void
vs_wait_for_writers(
	vstruct_t vs)
{
	while (vs->vs_writers != 0) {
		default_pager_wait_write++;
		vs->vs_waiting_write = TRUE;
		assert_wait(&vs->vs_writers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/* This is to be used for the transfer from segment code ONLY */
/* The transfer code holds off vs destruction by keeping the */
/* vs_async_wait count non-zero.  It will not conflict with */
/* other writers on an async basis because it only writes on */
/* a cluster basis into fresh (as of sync time) cluster locations */

__private_extern__
void
vs_wait_for_sync_writers(
	vstruct_t vs)
{
	while (vs->vs_writers != 0) {
		default_pager_wait_write++;
		vs->vs_waiting_write = TRUE;
		assert_wait(&vs->vs_writers, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}
}
/*
 * Finish a write.  Pager is unlocked and returns unlocked.
 */
__private_extern__
void
vs_finish_write(
	vstruct_t vs)
{
	VS_LOCK(vs);
	if (--vs->vs_writers == 0 && vs->vs_waiting_write) {
		vs->vs_waiting_write = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_writers);
		return;
	}
	VS_UNLOCK(vs);
}
#endif	/* PARALLEL */
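
/*
 * Illustrative sketch (not part of the pager, kept under #if 0): how the
 * read-side primitives above are meant to bracket a page-in, following the
 * pattern of dp_memory_object_data_request below.  The name example_page_in
 * is hypothetical; vs_lock/vs_unlock, vs_start_read/vs_finish_read and
 * pvs_cluster_read are the routines used elsewhere in this file.
 */
#if 0
static void
example_page_in(vstruct_t vs, vm_offset_t offset, vm_size_t length)
{
	vs_lock(vs);			/* serialize with other requests */
	vs_start_read(vs);		/* one more reader; pager is locked */
	vs_unlock(vs);			/* let other requests proceed */

	pvs_cluster_read(vs, offset, length);	/* do the actual read */

	vs_finish_read(vs);		/* drop reader count, wake waiters */
}
#endif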
vstruct_t
vs_object_create(
	vm_size_t size)
{
	vstruct_t	vs;

	/*
	 * Allocate a vstruct. If there are any problems, then report them
	 * to the console.
	 */
	vs = ps_vstruct_create(size);
	if (vs == VSTRUCT_NULL) {
		dprintf(("vs_object_create: unable to allocate %s\n",
			 "-- either run swapon command or reboot"));
		return VSTRUCT_NULL;
	}
	return vs;
}
void	default_pager_add(vstruct_t, boolean_t);	/* forward */

void
default_pager_add(
	vstruct_t vs,
	boolean_t internal)
{
	memory_object_t		mem_obj = vs->vs_mem_obj;
	mach_port_t		pset;
	mach_port_mscount_t	sync;
	mach_port_t		previous;
	static char		here[] = "default_pager_add";

	/*
	 * The port currently has a make-send count of zero,
	 * because either we just created the port or we just
	 * received the port in a memory_object_create request.
	 */

	if (internal) {
		/* possibly generate an immediate no-senders notification */
		sync = 0;
		pset = default_pager_internal_set;
	} else {
		/* delay notification till send right is created */
		sync = 1;
		pset = default_pager_external_set;
	}

	ipc_port_make_sonce(mem_obj);
	ip_lock(mem_obj);	/* unlocked in nsrequest below */
	ipc_port_nsrequest(mem_obj, sync, mem_obj, &previous);
}
kern_return_t
dp_memory_object_init(
	memory_object_t		mem_obj,
	memory_object_control_t	control,
	vm_size_t		pager_page_size)
{
	vstruct_t		vs;

	assert(pager_page_size == vm_page_size);

	memory_object_control_reference(control);

	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
		Panic("bad request");

	vs->vs_control = control;
	vs_unlock(vs);

	return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_synchronize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_sync_t		flags)
{
	vstruct_t		vs;

	vs_lookup(mem_obj, vs);
	vs_lock(vs);
	vs_unlock(vs);

	memory_object_synchronize_completed(vs->vs_control, offset, length);

	return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_unmap(
	memory_object_t		mem_obj)
{
	panic("dp_memory_object_unmap");

	return KERN_FAILURE;
}
kern_return_t
dp_memory_object_terminate(
	memory_object_t		mem_obj)
{
	memory_object_control_t	control;
	vstruct_t		vs;

	/*
	 * control port is a receive right, not a send right.
	 */

	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	/*
	 * Wait for read and write requests to terminate.
	 */

	vs_wait_for_readers(vs);
	vs_wait_for_writers(vs);

	/*
	 * After memory_object_terminate both memory_object_init
	 * and a no-senders notification are possible, so we need
	 * to clean up our reference to the memory_object_control
	 * to prepare for a new init.
	 */

	control = vs->vs_control;
	vs->vs_control = MEMORY_OBJECT_CONTROL_NULL;

	/* a bit of special case ugliness here.  Wakeup any waiting reads */
	/* these data requests had to be removed from the seqno traffic */
	/* based on a performance bottleneck with large memory objects */
	/* the problem will right itself with the new component based */
	/* synchronous interface.  The new async will be able to return */
	/* failure during its sync phase.  In the meantime ... */

	thread_wakeup(&vs->vs_writers);
	thread_wakeup(&vs->vs_async_pending);

	vs_unlock(vs);

	/*
	 * Now we deallocate our reference on the control.
	 */
	memory_object_control_deallocate(control);

	return KERN_SUCCESS;
}
void
dp_memory_object_reference(
	memory_object_t		mem_obj)
{
	vstruct_t		vs;

	vs_lookup_safe(mem_obj, vs);
	if (vs == VSTRUCT_NULL)
		return;

	VS_LOCK(vs);
	assert(vs->vs_references > 0);
	vs->vs_references++;
	VS_UNLOCK(vs);
}
extern ipc_port_t	max_pages_trigger_port;
extern int		dp_pages_free;
extern int		maximum_pages_free;
void
dp_memory_object_deallocate(
	memory_object_t		mem_obj)
{
	vstruct_t		vs;
	mach_port_seqno_t	seqno;
	ipc_port_t		trigger = IP_NULL;

	/*
	 * Because we don't give out multiple first references
	 * for a memory object, there can't be a race
	 * between getting a deallocate call and creating
	 * a new reference for the object.
	 */

	vs_lookup_safe(mem_obj, vs);
	if (vs == VSTRUCT_NULL)
		return;

	VS_LOCK(vs);
	if (--vs->vs_references > 0) {
		VS_UNLOCK(vs);
		return;
	}

	seqno = vs->vs_next_seqno++;
	while (vs->vs_seqno != seqno) {
		default_pager_wait_seqno++;
		vs->vs_waiting_seqno = TRUE;
		assert_wait(&vs->vs_seqno, THREAD_UNINT);
		VS_UNLOCK(vs);
		thread_block(THREAD_CONTINUE_NULL);
		VS_LOCK(vs);
	}

	vs_async_wait(vs);	/* wait for pending async IO */

	/* do not delete the vs structure until the referencing pointers */
	/* in the vstruct list have been expunged */

	/* get VSL_LOCK out of order by using TRY mechanism */
	while (!VSL_LOCK_TRY()) {
		VS_UNLOCK(vs);
		VSL_LOCK();
		VSL_UNLOCK();
		VS_LOCK(vs);
		vs_async_wait(vs);	/* wait for pending async IO */
	}

	/*
	 * We shouldn't get a deallocation call
	 * when the kernel has the object cached.
	 */
	if (vs->vs_control != MEMORY_OBJECT_CONTROL_NULL)
		Panic("bad request");

	/*
	 * Unlock the pager (though there should be no one
	 * waiting for it).
	 */
	VS_UNLOCK(vs);

	/* Lock out paging segment removal for the duration of this */
	/* call.  We are vulnerable to losing a paging segment we rely */
	/* on as soon as we remove ourselves from the VSL and unlock */

	/* Keep our thread from blocking on attempt to trigger backing */
	/* store release */
	backing_store_release_trigger_disable += 1;

	/*
	 * Remove the memory object port association, and then
	 * destroy the port itself.  We must remove the object
	 * from the port list before deallocating the pager,
	 * because of default_pager_objects.
	 */
	vstruct_list_delete(vs);
	VSL_UNLOCK();

	ps_vstruct_dealloc(vs);

	VSL_LOCK();
	backing_store_release_trigger_disable -= 1;
	if (backing_store_release_trigger_disable == 0) {
		thread_wakeup((event_t)&backing_store_release_trigger_disable);
	}

	if (max_pages_trigger_port
	    && (backing_store_release_trigger_disable == 0)
	    && (dp_pages_free > maximum_pages_free)) {
		trigger = max_pages_trigger_port;
		max_pages_trigger_port = NULL;
	}
	VSL_UNLOCK();

	if (trigger != IP_NULL) {
		default_pager_space_alert(trigger, LO_WAT_ALERT);
		ipc_port_release_send(trigger);
	}
}
kern_return_t
dp_memory_object_data_request(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		length,
	vm_prot_t		protection_required)
{
	vstruct_t		vs;

	GSTAT(global_stats.gs_pagein_calls++);

	/* CDY at this moment vs_lookup panics when presented with the wrong */
	/* port.  As we are expanding this pager to support user interfaces */
	/* this should be changed to return kern_failure */
	vs_lookup(mem_obj, vs);
	vs_lock(vs);

	/* We are going to relax the strict sequencing here for performance */
	/* reasons.  We can do this because we know that the read and */
	/* write threads are different and we rely on synchronization */
	/* of read and write requests at the cache memory_object level */
	/* break out wait_for_writers, all of this goes away when */
	/* we get real control of seqno with the new component interface */

	if (vs->vs_writers != 0) {
		/* you can't hold on to the seqno and go */
		/* to sleep like that */
		vs_unlock(vs);	/* bump internal count of seqno */
		VS_LOCK(vs);
		while (vs->vs_writers != 0) {
			default_pager_wait_write++;
			vs->vs_waiting_write = TRUE;
			assert_wait(&vs->vs_writers, THREAD_UNINT);
			VS_UNLOCK(vs);
			thread_block(THREAD_CONTINUE_NULL);
			VS_LOCK(vs);
		}
		if (vs->vs_control == MEMORY_OBJECT_CONTROL_NULL) {
			VS_UNLOCK(vs);
			return KERN_FAILURE;
		}
		vs_start_read(vs);
		VS_UNLOCK(vs);
	} else {
		vs_start_read(vs);
		vs_unlock(vs);
	}

	/*
	 * Request must be on a page boundary and a multiple of pages.
	 */
	if ((offset & vm_page_mask) != 0 || (length & vm_page_mask) != 0)
		Panic("bad alignment");

	pvs_cluster_read(vs, (vm_offset_t)offset, length);

	vs_finish_read(vs);

	return KERN_SUCCESS;
}
/*
 * memory_object_data_initialize: check whether we already have each page, and
 * write it if we do not.  The implementation is far from optimized, and
 * also assumes that the default_pager is single-threaded.
 */
/* It is questionable whether or not a pager should decide what is relevant */
/* and what is not in data sent from the kernel.  Data initialize has been */
/* changed to copy back all data sent to it in preparation for its eventual */
/* merge with data return.  It is the kernel that should decide what pages */
/* to write back.  As of the writing of this note, this is indeed the case; */
/* the kernel writes back one page at a time through this interface. */

kern_return_t
dp_memory_object_data_initialize(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size)
{
	vstruct_t		vs;

	DEBUG(DEBUG_MO_EXTERNAL,
	      ("mem_obj=0x%x,offset=0x%x,cnt=0x%x\n",
	       (int)mem_obj, (int)offset, (int)size));
	GSTAT(global_stats.gs_pages_init += atop(size));

	vs_lookup(mem_obj, vs);
	vs_lock(vs);
	vs_start_write(vs);
	vs_unlock(vs);

	/*
	 * Write the data via clustered writes. vs_cluster_write will
	 * loop if the address range specified crosses cluster
	 * boundaries.
	 */
	vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);

	vs_finish_write(vs);

	return KERN_SUCCESS;
}
kern_return_t
dp_memory_object_data_unlock(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_prot_t		desired_access)
{
	Panic("dp_memory_object_data_unlock: illegal");
	return KERN_FAILURE;
}
kern_return_t
dp_memory_object_data_return(
	memory_object_t		mem_obj,
	memory_object_offset_t	offset,
	vm_size_t		size,
	boolean_t		dirty,
	boolean_t		kernel_copy)
{
	vstruct_t		vs;

	DEBUG(DEBUG_MO_EXTERNAL,
	      ("mem_obj=0x%x,offset=0x%x,size=0x%x\n",
	       (int)mem_obj, (int)offset, (int)size));
	GSTAT(global_stats.gs_pageout_calls++);

	/* This routine is called by the pageout thread.  The pageout thread */
	/* must not be blocked by read activities, so the grant of the vs */
	/* lock must be done on a try versus a blocking basis.  The code */
	/* below relies on the fact that the interface is synchronous. */
	/* Should this interface be again async for some type of pager */
	/* in the future the pages will have to be returned through a */
	/* separate, asynchronous path. */

	vs_lookup(mem_obj, vs);

	default_pager_total++;
	if (!VS_TRY_LOCK(vs)) {
		/* the call below will not be done by caller when we have */
		/* a synchronous interface */
		/* return KERN_LOCK_OWNED; */
		upl_t	upl;
		int	page_list_count = 0;

		memory_object_super_upl_request(vs->vs_control,
				(memory_object_offset_t)offset,
				size, size,
				&upl, NULL, &page_list_count,
				UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
				| UPL_NO_SYNC | UPL_COPYOUT_FROM);
		upl_abort(upl, 0);
		upl_deallocate(upl);
		return KERN_SUCCESS;
	}

	if ((vs->vs_seqno != vs->vs_next_seqno++)
	    || (vs->vs_xfer_pending)) {
		upl_t	upl;
		int	page_list_count = 0;

		vs->vs_next_seqno--;
		VS_UNLOCK(vs);

		/* the call below will not be done by caller when we have */
		/* a synchronous interface */
		/* return KERN_LOCK_OWNED; */
		memory_object_super_upl_request(vs->vs_control,
				(memory_object_offset_t)offset,
				size, size,
				&upl, NULL, &page_list_count,
				UPL_NOBLOCK | UPL_CLEAN_IN_PLACE
				| UPL_NO_SYNC | UPL_COPYOUT_FROM);
		upl_abort(upl, 0);
		upl_deallocate(upl);
		return KERN_SUCCESS;
	}

	if ((size % vm_page_size) != 0)
		Panic("bad alignment");

	vs_start_write(vs);

	vs->vs_async_pending += 1;	/* protect from backing store contraction */
	vs_unlock(vs);

	/*
	 * Write the data via clustered writes. vs_cluster_write will
	 * loop if the address range specified crosses cluster
	 * boundaries.
	 */
	vs_cluster_write(vs, 0, (vm_offset_t)offset, size, FALSE, 0);

	vs_finish_write(vs);

	/* temporary, need a finer lock based on cluster */

	VS_LOCK(vs);
	vs->vs_async_pending -= 1;	/* release vs_async_wait */
	if (vs->vs_async_pending == 0 && vs->vs_waiting_async) {
		vs->vs_waiting_async = FALSE;
		VS_UNLOCK(vs);
		thread_wakeup(&vs->vs_async_pending);
	} else {
		VS_UNLOCK(vs);
	}

	return KERN_SUCCESS;
}
/*
 * Routine:	default_pager_memory_object_create
 * Purpose:
 * 	Handle requests for memory objects from the
 * 	kernel.
 * Notes:
 * 	Because we only give out the default memory
 * 	manager port to the kernel, we don't have to
 * 	be so paranoid about the contents.
 */
kern_return_t
default_pager_memory_object_create(
	memory_object_default_t	dmm,
	vm_size_t		new_size,
	memory_object_t		*new_mem_obj)
{
	vstruct_t		vs;

	assert(dmm == default_pager_object);

	vs = vs_object_create(new_size);
	if (vs == VSTRUCT_NULL)
		return KERN_RESOURCE_SHORTAGE;

	vs->vs_next_seqno = 0;

	/*
	 * Set up associations between this memory object
	 * and this default_pager structure
	 */

	vs->vs_mem_obj = ISVS;
	vs->vs_mem_obj_ikot = IKOT_MEMORY_OBJECT;

	/*
	 * After this, other threads might receive requests
	 * for this memory object or find it in the port list.
	 */

	vstruct_list_insert(vs);
	*new_mem_obj = vs_to_mem_obj(vs);
	return KERN_SUCCESS;
}
/*
 * Create an external object.
 */
kern_return_t
default_pager_object_create(
	default_pager_t		pager,
	vm_size_t		size,
	memory_object_t		*mem_objp)
{
	vstruct_t		vs;
	kern_return_t		result;
	struct vstruct_alias	*alias_struct;

	if (pager != default_pager_object)
		return KERN_INVALID_ARGUMENT;

	vs = vs_object_create(size);
	if (vs == VSTRUCT_NULL)
		return KERN_RESOURCE_SHORTAGE;

	/*
	 * Set up associations between the default pager
	 * and this vstruct structure
	 */
	vs->vs_mem_obj = ISVS;
	vstruct_list_insert(vs);
	*mem_objp = vs_to_mem_obj(vs);
	return KERN_SUCCESS;
}
kern_return_t
default_pager_objects(
	default_pager_t			pager,
	default_pager_object_array_t	*objectsp,
	mach_msg_type_number_t		*ocountp,
	memory_object_array_t		*pagersp,
	mach_msg_type_number_t		*pcountp)
{
	vm_offset_t		oaddr = 0;	/* memory for objects */
	vm_size_t		osize = 0;	/* current size */
	default_pager_object_t	*objects;
	unsigned int		opotential;

	vm_offset_t		paddr = 0;	/* memory for pagers */
	vm_size_t		psize = 0;	/* current size */
	memory_object_t		*pagers;
	unsigned int		ppotential;

	unsigned int		actual;
	unsigned int		num_objects;
	kern_return_t		kr;
	vstruct_t		entry;
/*
	if (pager != default_pager_default_port)
		return KERN_INVALID_ARGUMENT;
*/
	/* start with the inline memory */

	kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&objects,
			    (vm_map_copy_t) *objectsp);

	if (kr != KERN_SUCCESS)
		return kr;

	osize = round_page(*ocountp * sizeof * objects);
	kr = vm_map_wire(ipc_kernel_map,
			 trunc_page((vm_offset_t)objects),
			 round_page(((vm_offset_t)objects) + osize),
			 VM_PROT_READ|VM_PROT_WRITE, FALSE);

	/* we start with the inline space */

	num_objects = 0;
	opotential = *ocountp;

	pagers = (memory_object_t *) *pagersp;
	ppotential = *pcountp;
	/*
	 * We will send no more than this many
	 */
	actual = vstruct_list.vsl_count;

	if (opotential < actual) {
		vm_offset_t	newaddr;
		vm_size_t	newsize;

		newsize = 2 * round_page(actual * sizeof * objects);

		kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE);
		if (kr != KERN_SUCCESS)
			goto nomemory;

		oaddr = newaddr;
		osize = newsize;
		opotential = osize / sizeof * objects;
		objects = (default_pager_object_t *)oaddr;
	}

	if (ppotential < actual) {
		vm_offset_t	newaddr;
		vm_size_t	newsize;

		newsize = 2 * round_page(actual * sizeof * pagers);

		kr = vm_allocate(kernel_map, &newaddr, newsize, TRUE);
		if (kr != KERN_SUCCESS)
			goto nomemory;

		paddr = newaddr;
		psize = newsize;
		ppotential = psize / sizeof * pagers;
		pagers = (memory_object_t *)paddr;
	}
	VSL_LOCK();

	queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t, vs_links) {

		memory_object_t		pager;
		vm_size_t		size;

		if ((num_objects >= opotential) ||
		    (num_objects >= ppotential)) {

			/*
			 * This should be rare.  In any case,
			 * we will only miss recent objects,
			 * because they are added at the end.
			 */
			break;
		}

		/*
		 * Avoid interfering with normal operations
		 */
		if (!VS_MAP_TRY_LOCK(entry))
			goto not_this_one;
		size = ps_vstruct_allocated_size(entry);
		VS_MAP_UNLOCK(entry);

		VS_LOCK(entry);

		/*
		 * We need a reference for our caller.  Adding this
		 * reference through the linked list could race with
		 * destruction of the object.  If we find the object
		 * has no references, just give up on it.
		 */
		if (entry->vs_references == 0) {
			VS_UNLOCK(entry);
			goto not_this_one;
		}
		pager = vs_to_mem_obj(entry);
		dp_memory_object_reference(vs_to_mem_obj(entry));
		VS_UNLOCK(entry);

		/* the arrays are wired, so no deadlock worries */

		objects[num_objects].dpo_object = (vm_offset_t) entry;
		objects[num_objects].dpo_size = size;
		pagers[num_objects++] = pager;
		continue;

	    not_this_one:
		/*
		 * Do not return garbage
		 */
		objects[num_objects].dpo_object = (vm_offset_t) 0;
		objects[num_objects].dpo_size = 0;
		pagers[num_objects++] = MEMORY_OBJECT_NULL;
	}

	VSL_UNLOCK();
	/*
	 * Deallocate and clear unused memory.
	 * (Returned memory will automagically become pageable.)
	 */

	if (objects == *objectsp) {

		/*
		 * Our returned information fit inline.
		 * Nothing to deallocate.
		 */
		*ocountp = num_objects;
	} else if (actual == 0) {
		(void) vm_deallocate(kernel_map, oaddr, osize);

		/* return zero items inline */
		*ocountp = 0;
	} else {
		vm_offset_t used;

		used = round_page(actual * sizeof * objects);

		if (used != osize)
			(void) vm_deallocate(kernel_map,
					     oaddr + used, osize - used);

		*objectsp = objects;
		*ocountp = num_objects;
	}

	if (pagers == (memory_object_t *)*pagersp) {

		/*
		 * Our returned information fit inline.
		 * Nothing to deallocate.
		 */
		*pcountp = num_objects;
	} else if (actual == 0) {
		(void) vm_deallocate(kernel_map, paddr, psize);

		/* return zero items inline */
		*pcountp = 0;
	} else {
		vm_offset_t used;

		used = round_page(actual * sizeof * pagers);

		if (used != psize)
			(void) vm_deallocate(kernel_map,
					     paddr + used, psize - used);

		*pagersp = (memory_object_array_t)pagers;
		*pcountp = num_objects;
	}

	(void) vm_map_unwire(kernel_map, (vm_offset_t)objects,
			     *ocountp + (vm_offset_t)objects, FALSE);
	(void) vm_map_copyin(kernel_map, (vm_offset_t)objects,
			     *ocountp, TRUE, (vm_map_copy_t *)objectsp);

	return KERN_SUCCESS;
    nomemory:
	{
		unsigned int	i;

		for (i = 0; i < num_objects; i++)
			if (pagers[i] != MEMORY_OBJECT_NULL)
				memory_object_deallocate(pagers[i]);
	}

	if (objects != *objectsp)
		(void) vm_deallocate(kernel_map, oaddr, osize);

	if (pagers != (memory_object_t *)*pagersp)
		(void) vm_deallocate(kernel_map, paddr, psize);

	return KERN_RESOURCE_SHORTAGE;
}
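
/*
 * Illustrative sketch (not part of the pager, kept under #if 0): the
 * out-of-line array convention used by default_pager_objects above and by
 * default_pager_object_pages below.  The caller's buffer arrives as a
 * vm_map_copy_t; it is copied out into a kernel map, wired so it can be
 * filled without faulting, and finally copied back in for the reply.  The
 * helper name example_roundtrip is hypothetical, and the sketch is
 * simplified: it keeps every operation on ipc_kernel_map and sizes the
 * unwire/copyin in bytes, where the real routines differ slightly.
 */
#if 0
static kern_return_t
example_roundtrip(default_pager_object_array_t *arrayp,
		  mach_msg_type_number_t count)
{
	default_pager_object_t	*elts;
	vm_size_t		sz;
	kern_return_t		kr;

	/* map the caller's out-of-line buffer into the kernel's ipc map */
	kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&elts,
			    (vm_map_copy_t) *arrayp);
	if (kr != KERN_SUCCESS)
		return kr;

	/* wire it so it can be filled without taking page faults */
	sz = round_page(count * sizeof *elts);
	kr = vm_map_wire(ipc_kernel_map,
			 trunc_page((vm_offset_t)elts),
			 round_page(((vm_offset_t)elts) + sz),
			 VM_PROT_READ|VM_PROT_WRITE, FALSE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... fill elts[0..count-1] here ... */

	/* unwire and hand the region back as a vm_map_copy_t for the reply */
	(void) vm_map_unwire(ipc_kernel_map,
			     trunc_page((vm_offset_t)elts),
			     round_page(((vm_offset_t)elts) + sz), FALSE);
	(void) vm_map_copyin(ipc_kernel_map, (vm_offset_t)elts,
			     count * sizeof *elts, TRUE,
			     (vm_map_copy_t *)arrayp);
	return KERN_SUCCESS;
}
#endif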
kern_return_t
default_pager_object_pages(
	default_pager_t			pager,
	memory_object_t			object,
	default_pager_page_array_t	*pagesp,
	mach_msg_type_number_t		*countp)
{
	vm_offset_t		addr;		/* memory for page offsets */
	vm_size_t		size = 0;	/* current memory size */
	default_pager_page_t	*pages;
	unsigned int		potential, actual;
	kern_return_t		kr;

	if (pager != default_pager_object)
		return KERN_INVALID_ARGUMENT;
	kr = vm_map_copyout(ipc_kernel_map, (vm_offset_t *)&pages,
			    (vm_map_copy_t) *pagesp);

	if (kr != KERN_SUCCESS)
		return kr;

	size = round_page(*countp * sizeof * pages);
	kr = vm_map_wire(ipc_kernel_map,
			 trunc_page((vm_offset_t)pages),
			 round_page(((vm_offset_t)pages) + size),
			 VM_PROT_READ|VM_PROT_WRITE, FALSE);

	/* we start with the inline space */

	addr = (vm_offset_t)pages;
	potential = *countp;
	for (;;) {
		vstruct_t	entry;

		VSL_LOCK();
		queue_iterate(&vstruct_list.vsl_queue, entry, vstruct_t,
			      vs_links) {
			if (vs_to_mem_obj(entry) == object) {
				VSL_UNLOCK();
				goto found_object;
			}
		}
		VSL_UNLOCK();

		/* did not find the object */

		if (pages != *pagesp)
			(void) vm_deallocate(kernel_map, addr, size);
		return KERN_INVALID_ARGUMENT;
	    found_object:

		if (!VS_MAP_TRY_LOCK(entry)) {
			/* oh well bad luck */
			int	wresult;

			assert_wait_timeout( 1, THREAD_UNINT );
			wresult = thread_block(THREAD_CONTINUE_NULL);
			assert(wresult == THREAD_TIMED_OUT);
			continue;
		}

		actual = ps_vstruct_allocated_pages(entry, pages, potential);
		VS_MAP_UNLOCK(entry);

		if (actual <= potential)
			break;

		/* allocate more memory */

		if (pages != *pagesp)
			(void) vm_deallocate(kernel_map, addr, size);
		size = round_page(actual * sizeof * pages);
		kr = vm_allocate(kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return kr;
		pages = (default_pager_page_t *)addr;
		potential = size / sizeof * pages;
	}
	/*
	 * Deallocate and clear unused memory.
	 * (Returned memory will automagically become pageable.)
	 */

	if (pages == *pagesp) {

		/*
		 * Our returned information fit inline.
		 * Nothing to deallocate.
		 */
		*countp = actual;
	} else if (actual == 0) {
		(void) vm_deallocate(kernel_map, addr, size);

		/* return zero items inline */
		*countp = 0;
	} else {
		vm_offset_t used;

		used = round_page(actual * sizeof * pages);

		if (used != size)
			(void) vm_deallocate(kernel_map,
					     addr + used, size - used);

		*pagesp = pages;
		*countp = actual;
	}

	(void) vm_map_unwire(kernel_map, (vm_offset_t)pages,
			     *countp + (vm_offset_t)pages, FALSE);
	(void) vm_map_copyin(kernel_map, (vm_offset_t)pages,
			     *countp, TRUE, (vm_map_copy_t *)pagesp);

	return KERN_SUCCESS;
}