/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <mach/shared_memory_server.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;
shared_region_mapping_t	system_shared_region;

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;
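
/* The handles and offsets above describe the single system-wide shared    */
/* region pair (one text region, one data region) plus the mapping array   */
/* that records which files have been loaded into them.  lsf_zone supplies */
/* the load_struct/loaded_mapping bookkeeping elements; it is populated    */
/* from the tail of the mapping array rather than from general kernel      */
/* memory (see shared_file_init and copyin_shared_file below).             */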
kern_return_t
shared_file_create_system_region(
		shared_region_mapping_t	*shared_region)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	vm_size_t		text_size;
	vm_size_t		data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if(kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			0x9000000, 0x9000000);
	if(kret)
		return kret;
	(*shared_region)->flags = 0;
	return KERN_SUCCESS;
}
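
/* Called once at boot: builds the default 256MB text and data regions,    */
/* wraps them in a shared_region_mapping, and installs that mapping as the */
/* system shared region for the calling (boot) task.                       */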
void
shared_file_boot_time_init(void)
{
	long	shared_text_region_size;
	long	shared_data_region_size;

	shared_text_region_size = 0x10000000;
	shared_data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);
	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &system_shared_region,
		0x9000000, 0x9000000);
	system_shared_region->flags = SHARED_REGION_SYSTEM;
	vm_set_shared_region(current_task(), system_shared_region);
}
/* Called at boot time; allocates two regions, each 256 megabytes in size. */
/* These regions are later mapped into task address spaces, allowing tasks */
/* to share the contents of the regions.  shared_file_init is part of a    */
/* shared_memory_server which not only allocates the backing maps but also */
/* coordinates requests for space.                                         */
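/* Rough layout of the mapping array set up below (figures in parentheses  */
/* assume the 256MB default regions):                                      */
/*	data_table_size = data_region_size >> 9   (512KB)                  */
/*	hash_size       = data_region_size >> 14  (16KB of bucket heads)   */
/* The table starts with a shared_file_info_t header, followed by the hash */
/* buckets, followed by the pool of pages that are later zcram'd into      */
/* lsf_zone as load_struct/loaded_mapping elements are needed.             */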
kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	vm_offset_t		aligned_address;
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	vm_size_t		data_table_size;
	vm_size_t		hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if(kret = vm_region_object_create(kernel_map,
				text_region_size,
				shared_text_region_handle)) {
		return kret;
	}
	if(kret = vm_region_object_create(kernel_map,
				data_region_size,
				shared_data_region_handle)) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
				data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;

		for (b = *mapping_array, alloced = 0;
			   alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
			   alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_addr,
				VM_PROT_READ | VM_PROT_WRITE, TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
			*mapping_array +
			   (hash_size + round_page(sizeof(struct sf_mapping))),
			VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			   (hash_size + round_page(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	vm_map(((vm_named_entry_t)
			(*shared_data_region_handle)->ip_kobject)->backing.map,
		&table_mapping_address,
		data_table_size, 0, SHARED_LIB_ALIAS,
		sfma_handle, 0, FALSE,
		VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}
/* copyin_shared_file is called from user space.  The caller provides the  */
/* address and size of a mapped file, the full path name of that file, and */
/* a list of offsets to be mapped into shared memory.  By requiring that   */
/* the file be pre-mapped, copyin_shared_file can guarantee that the file  */
/* is neither deleted nor changed after the user begins the call.          */
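/* Outline of the routine below: on first use the hash table in the region */
/* mapping array is initialized; the zone of hash elements is then topped  */
/* up if it is running low.  The caller's file object is validated against */
/* the object actually mapped at the supplied address, the hash table is   */
/* consulted, and either the existing load is matched (and its base        */
/* address returned) or lsf_load is called to bring the file in.           */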
kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		vm_size_t	hash_table_size;
		vm_size_t	hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
				* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
				round_page(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int	cram_size;

		cram_size = allocable_hash_pages > 3 ?
					3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address+cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
				+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;

		if(entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if (file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *) file_object, shared_file_header->hash_size,
			alternate, sm_info)) {
		/* File is loaded, check the load manifest for exact match. */
		/* We simplify by requiring that the elements be the same   */
		/* size and in the same order rather than checking for      */
		/* semantic equivalence. */

		/* If the file is being loaded in the alternate area, one   */
		/* load to alternate is allowed per mapped object; the base */
		/* address is passed back to the caller and the mappings    */
		/* field is filled in.  If the caller does not pass the     */
		/* precise mappings_cnt and the alternate is already        */
		/* loaded, an error is returned. */
		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
						& SHARED_DATA_REGION_MASK) !=
						file_mapping->mapping_offset ||
					mappings[i].size !=
						file_mapping->size ||
					mappings[i].file_offset !=
						file_mapping->file_offset ||
					mappings[i].protection !=
						file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
						+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
				mappings, map_cnt,
				(void *)file_object,
				*flags, sm_info);
		*flags = 0;
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;

			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			if(regions == system_shared_region) {
				shared_file_boot_time_init();
				/* current task must stay with its current */
				/* regions */
				vm_set_shared_region(current_task(), regions);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.  */
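/* Note: the lookup below walks the shared region's object_chain so that a */
/* task-private region can also match entries loaded in the regions it     */
/* chains to, provided the entry was created at a depth no greater than    */
/* that of the region being examined.                                      */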
load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if (entry->file_object == (int)file_object) {
		   target_region = (shared_region_mapping_t)sm_info->self;
		   depth = target_region->depth;
		   while(target_region) {
		      if((!(sm_info->self)) ||
				((target_region == entry->regions_instance) &&
				(target_region->depth >= entry->depth))) {
			if(alternate) {
			   if (entry->base_address >=
					sm_info->alternate_base)
				return entry;
			} else {
			   if (entry->base_address <
					sm_info->alternate_base)
				return entry;
			}
		      }
		      if(target_region->object_chain) {
		         target_region = (shared_region_mapping_t)
			   target_region->object_chain->object_chain_region;
		         depth = target_region->object_chain->depth;
		      } else {
			 target_region = NULL;
		      }
		   }
		}
	}

	return (load_struct_t *)0;
}
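
/* Walks every hash bucket and unloads any entry that belongs to the given */
/* region instance; used when a shared region's mappings are torn down.    */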
void
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	int				i;
	register queue_t		bucket;
	shared_file_info_t		*shared_file_header;
	load_struct_t			*entry;
	load_struct_t			*next_entry;
	load_struct_t			*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		mutex_unlock(&shared_file_header->lock);
		return;
	}
	for(i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);) {
			next_entry =
			    (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
						entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	mutex_unlock(&shared_file_header->lock);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.  */
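/* The matching entry is unlinked from its bucket and handed back to the   */
/* caller (lsf_unload), which frees the entry and its extent list; a null  */
/* pointer is returned if no entry matches the file object and offset.     */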
load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
	    [load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
					(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
						load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table. */
void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
					shared_file_header->hash_size)],
		entry, load_struct_ptr_t, links);
}
/* Looks up the file type requested.  If already loaded and the file      */
/* extents are an exact match, returns success.  If not loaded, attempts  */
/* to load the file extents at the given offsets; if any extent fails to  */
/* load or if the file was already loaded in a different configuration,   */
/* lsf_load fails.                                                         */
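/* Outline of lsf_load: a load_struct_t is allocated from lsf_zone and     */
/* entered in the hash table up front; the target base address is either   */
/* carved out of the alternate load area or validated against the caller's */
/* request, and then each extent is vm_allocate'd in the appropriate       */
/* region sub-map, filled by vm_map_copyin/copy_overwrite from the user's  */
/* mapping (unless zero-fill), protected, and recorded on the entry's      */
/* mapping list.  Any failure unloads the partial entry via lsf_unload.    */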
kern_return_t
lsf_load(
	vm_offset_t		mapped_file,
	vm_size_t		mapped_file_size,
	vm_offset_t		*base_address,
	sf_mapping_t		*mappings,
	int			map_cnt,
	void			*file_object,
	int			flags,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);

	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int	max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
						sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i < map_cnt; i++) {
			if(((mappings[i].mapping_offset
				& SHARED_TEXT_REGION_MASK) + mappings[i].size) >
				max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
						+ mappings[i].size;
			}
		}
		if((alternate_load_next + round_page(max_loadfile_offset)) >=
			(sm_info->data_size - (sm_info->data_size>>9))) {
			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page(max_loadfile_offset);
	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
					sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	/* copyin mapped file data */
	for(i = 0; i < map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if(!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
				mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
				mapped_file + mappings[i].file_offset,
				round_page(mappings[i].size), FALSE,
				&copy_object)) {
				vm_deallocate(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map,
					target_address, mappings[i].size);
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map,
				target_address,
				copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map,
					target_address, mappings[i].size);
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}
		/* set the maximum protection, then the current protection */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if(file_mapping == 0)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
							& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;
}
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */
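/* Note: lsf_unload is also the error-recovery path for lsf_load, so it    */
/* must cope with an entry whose mapping list is still empty.              */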
void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if(map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
					map_ele->mapping_offset,
					map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}