/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
/* forward declarations */
static kern_return_t
shared_file_init(
	ipc_port_t			*shared_text_region_handle,
	vm_size_t			text_region_size,
	ipc_port_t			*shared_data_region_handle,
	vm_size_t			data_region_size,
	vm_offset_t			*shared_file_mapping_array);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);
#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
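/*
 * Worked example (illustrative values, not from the original source):
 * load_file_hash() keeps only the low 24 bits of the file object pointer
 * and reduces them modulo the table size, so for file_object == 0x12abc344
 * and size == 64 the bucket index is (0xabc344 % 64) == 4.
 */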
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;
shared_region_mapping_t	system_shared_region = NULL;

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;
kern_return_t
shared_file_create_system_region(
		shared_region_mapping_t	*shared_region)
{
	ipc_port_t	text_handle;
	ipc_port_t	data_handle;
	long		text_size;
	long		data_size;
	vm_offset_t	mapping_array;
	kern_return_t	kret;

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if(kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			0x9000000, 0x9000000);
	if(kret)
		return kret;
	(*shared_region)->flags = 0;
	return KERN_SUCCESS;
}
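/*
 * Minimal usage sketch (illustrative only; the caller and the decision to
 * install the region are hypothetical, not part of the original source):
 * a client wanting a fresh region pair could create it and then install it
 * on the current task, mirroring what shared_file_boot_time_init() does
 * below for the system shared region.
 */
#if 0	/* illustrative sketch, not compiled */
	shared_region_mapping_t	new_region;

	if (shared_file_create_system_region(&new_region) == KERN_SUCCESS) {
		/* make the new region pair the task's shared region set */
		vm_set_shared_region(current_task(), new_region);
	}
#endif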
void
shared_file_boot_time_init(void)
{
	long			shared_text_region_size;
	long			shared_data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_system_region;

	shared_text_region_size = 0x10000000;
	shared_data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
		0x9000000, 0x9000000);
	old_system_region = system_shared_region;
	system_shared_region = new_system_region;
	system_shared_region->flags = SHARED_REGION_SYSTEM;
	/* consume the reference held because this is the */
	/* system shared region */
	if(old_system_region) {
		shared_region_mapping_dealloc(old_system_region);
	}
	/* hold an extra reference because these are the system */
	/* shared regions. */
	shared_region_mapping_ref(system_shared_region);
	vm_set_shared_region(current_task(), system_shared_region);
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.  */
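/* (Arithmetic note added for clarity: 0x10000000 bytes is 256 MB, matching */
/* the text_size/data_size constants used in this file; the mapping table   */
/* carved out of the data region below is data_region_size >> 9, i.e.       */
/* 512 KB for a 256 MB region.)                                              */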
static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	vm_offset_t		aligned_address;
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if(kret = vm_region_object_create(kernel_map,
				text_region_size,
				shared_text_region_handle)) {
		return kret;
	}
	if(kret = vm_region_object_create(kernel_map,
				data_region_size,
				shared_data_region_handle)) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
				data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *mapping_array, alloced = 0;
			   alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
			   alloced += PAGE_SIZE,  b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_addr,
				VM_PROT_READ | VM_PROT_WRITE,
				VM_WIMG_USE_DEFAULT, TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
		       *mapping_array +
		       (hash_size + round_page(sizeof(struct sf_mapping))),
		       VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			   (hash_size + round_page(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	vm_map(((vm_named_entry_t)
			(*shared_data_region_handle)->ip_kobject)->backing.map,
			&table_mapping_address,
			data_table_size, 0, SHARED_LIB_ALIAS,
			sfma_handle, 0, FALSE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}
/*  A call made from user space, copyin_shared_file requires the user to  */
/*  provide the address and size of a mapped file, the full path name of  */
/*  that file and a list of offsets to be mapped into shared memory.      */
/*  By requiring that the file be pre-mapped, copyin_shared_file can      */
/*  guarantee that the file is neither deleted nor changed after the user */
/*  begins the call.  */
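/*
 * Illustrative call sequence (hypothetical caller and values, not part of
 * the original source): a user-level loader that has already mapped the
 * library file describes each extent with an sf_mapping_t and then asks
 * for the file to be placed in the shared region.
 */
#if 0	/* illustrative sketch, not compiled */
	sf_mapping_t	map[2];
	vm_offset_t	base = 0;	/* preferred base, updated on return */
	int		flags = 0;
	kern_return_t	kr;

	map[0].mapping_offset = 0x0;		/* text extent */
	map[0].size           = 0x2000;
	map[0].file_offset    = 0x0;
	map[0].protection     = VM_PROT_READ | VM_PROT_EXECUTE;

	map[1].mapping_offset = 0x10000000;	/* data extent, copy-on-write */
	map[1].size           = 0x1000;
	map[1].file_offset    = 0x2000;
	map[1].protection     = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW;

	kr = copyin_shared_file(mapped_file, mapped_file_size, &base,
				2, map, file_control, sm_info, &flags);
#endif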
kern_return_t
copyin_shared_file(
	vm_offset_t		mapped_file,
	vm_size_t		mapped_file_size,
	vm_offset_t		*base_address,
	int			map_cnt,
	sf_mapping_t		*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int			*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		vm_size_t	hash_table_size;
		vm_size_t	hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
					* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
					round_page(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_size;

		cram_size = allocable_hash_pages > 3 ?
					3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address+cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
				+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;
		if(entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if (file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *) file_object, shared_file_header->hash_size,
			alternate, sm_info)) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		/* If the file is being loaded in the alternate        */
		/* area, one load to alternate is allowed per mapped   */
		/* object the base address is passed back to the       */
		/* caller and the mappings field is filled in.  If the */
		/* caller does not pass the precise mappings_cnt       */
		/* and the Alternate is already loaded, an error       */
		/* is returned.  */
		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
						& SHARED_DATA_REGION_MASK) !=
						file_mapping->mapping_offset ||
					mappings[i].size !=
						file_mapping->size ||
					mappings[i].file_offset !=
						file_mapping->file_offset ||
					mappings[i].protection !=
						file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
						+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
						mappings, map_cnt,
						(void *)file_object,
						*flags, sm_info);
		*flags = 0;
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			if(regions == system_shared_region) {
				shared_region_mapping_t	new_system_shared_regions;
				shared_file_boot_time_init();
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						&new_system_shared_regions);
				shared_region_mapping_dealloc(
						new_system_shared_regions);
				vm_set_shared_region(current_task(), regions);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.  */
static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if (entry->file_object == (int)file_object) {
		   target_region = (shared_region_mapping_t)sm_info->self;
		   depth = target_region->depth;
		   while(target_region) {
		      if((!(sm_info->self)) ||
			   ((target_region == entry->regions_instance) &&
			   (target_region->depth >= entry->depth))) {
			 if(alternate) {
			    if (entry->base_address >=
					sm_info->alternate_base)
				return entry;
			 } else {
			    if (entry->base_address <
					sm_info->alternate_base)
				return entry;
			 }
		      }
		      if(target_region->object_chain) {
			 target_region = (shared_region_mapping_t)
			    target_region->object_chain->object_chain_region;
			 depth = target_region->object_chain->depth;
		      } else {
			 target_region = NULL;
		      }
		   }
		}
	}

	return (load_struct_t *)0;
}
void
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		mutex_unlock(&shared_file_header->lock);
		return;
	}
	for(i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
						entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	mutex_unlock(&shared_file_header->lock);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.  */
static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
	     [load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
				(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
						load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table. */
static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
					shared_file_header->hash_size)],
			entry, load_struct_ptr_t, links);
}
/* Looks up the file type requested.  If already loaded and the  */
/* file extents are an exact match, returns Success.  If not     */
/* loaded attempts to load the file extents at the given offsets */
/* if any extent fails to load or if the file was already loaded */
/* in a different configuration, lsf_load fails.                 */
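/* (Placement summary added for clarity, based on the code below: each      */
/* extent lands at (mapping_offset & region_mask) + entry->base_address in  */
/* the backing map of the text or data named entry, where                   */
/* entry->base_address is the caller's *base_address masked down to a       */
/* region-relative offset with SHARED_TEXT_REGION_MASK.)                    */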
static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);

	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int	max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
						sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i < map_cnt; i++) {
			if(((mappings[i].mapping_offset
				& SHARED_TEXT_REGION_MASK) + mappings[i].size) >
				max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
						+ mappings[i].size;
			}
		}
		if((alternate_load_next + round_page(max_loadfile_offset)) >=
			(sm_info->data_size - (sm_info->data_size>>9))) {
			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page(max_loadfile_offset);
	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
					sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	/* copyin mapped file data */
	for(i = 0; i < map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if(!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
				mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
				mapped_file + mappings[i].file_offset,
				round_page(mappings[i].size), FALSE,
				&copy_object)) {
				vm_deallocate(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map,
					target_address, mappings[i].size);
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map,
				target_address,
				copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map,
					target_address, mappings[i].size);
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}
		/* set the maximum protection, then the current protection */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if(file_mapping == 0)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
							& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;
}
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */
static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if(map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
					map_ele->mapping_offset,
					map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}
/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge()
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}