/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <mach/shared_memory_server.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

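/* Global state for the shared memory server.  The two region handles  */
/* name the system-wide text and data regions; shared_file_mapping_array */
/* is the kernel address of the loaded-file header/hash table, and     */
/* lsf_zone supplies the load_struct and mapping elements carved from  */
/* the tail of that table.                                             */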
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;
shared_region_mapping_t	system_shared_region;

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

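/* Creates a new shared region: a 256 meg text region and a 256 meg */
/* data region, wrapped in a shared_region_mapping whose alternate  */
/* load area begins at offset 0x9000000.                            */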
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t	text_handle;
	ipc_port_t	data_handle;
	long		text_size;
	long		data_size;
	vm_offset_t	mapping_array;
	kern_return_t	kret;

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
		text_size, &data_handle, data_size, &mapping_array);
	if (kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
		text_size, data_handle, data_size, mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
		0x9000000, 0x9000000);
	if (kret)
		return kret;
	(*shared_region)->flags = 0;
	return KERN_SUCCESS;
}

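/* Called at boot (and again from copyin_shared_file if the system */
/* region fills up) to build the default system shared region and  */
/* attach it to the current task.                                  */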
void
shared_file_boot_time_init(void)
{
	long	shared_text_region_size;
	long	shared_data_region_size;

	shared_text_region_size = 0x10000000;
	shared_data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);
	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &system_shared_region,
		0x9000000, 0x9000000);
	system_shared_region->flags = SHARED_REGION_SYSTEM;
	vm_set_shared_region(current_task(), system_shared_region);
}


/* Called at boot time, allocates two regions, each 256 megs in size. */
/* These regions are later mapped into task spaces, allowing tasks to */
/* share the contents of the regions.  shared_file_init is part of a  */
/* shared_memory_server which not only allocates the backing maps     */
/* but also coordinates requests for space.                           */

kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	vm_offset_t		aligned_address;
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	vm_size_t		data_table_size;
	int			hash_size;
	int			i;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if ((kret = vm_region_object_create(kernel_map,
			text_region_size,
			shared_text_region_handle))) {
		return kret;
	}
	if ((kret = vm_region_object_create(kernel_map,
			data_region_size,
			shared_data_region_handle))) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

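	/* Reserve 1/512 of the data region for the mapping table and */
	/* 1/16384 for the hash area; the table itself is mapped at   */
	/* the very top of the data region.                           */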
	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if (shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if (vm_map_find_space(kernel_map, &shared_file_mapping_array,
				data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

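		/* Hand-populate the header and hash area: allocate each */
		/* page directly into buf_object and enter it in the     */
		/* kernel pmap so the table is immediately addressable.  */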
		for (b = *mapping_array, alloced = 0;
				alloced < (hash_size +
					round_page(sizeof(struct sf_mapping)));
				alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_addr,
				VM_PROT_READ | VM_PROT_WRITE, TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*mapping_array) +
				sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size / sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
				*mapping_array +
				(hash_size + round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

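		/* The element zone lives in the remainder of the table. */
		/* It is fed only by explicit zcram calls on foreign     */
		/* memory (Z_FOREIGN) and is never allowed to expand on  */
		/* its own.                                              */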
		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
				(hash_size + round_page(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	vm_map(((vm_named_entry_t)
			(*shared_data_region_handle)->ip_kobject)->backing.map,
		&table_mapping_address,
		data_table_size, 0, SHARED_LIB_ALIAS,
		sfma_handle, 0, FALSE,
		VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}

/* Called from user space, copyin_shared_file requires the caller to */
/* provide the address and size of a mapped file, the full path name */
/* of that file, and a list of offsets to be mapped into shared      */
/* memory.  By requiring that the file be pre-mapped,                */
/* copyin_shared_file can guarantee that the file is neither deleted */
/* nor changed after the user begins the call.                       */

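/* An illustrative call, with hypothetical values (in this kernel the */
/* load_shared_file path is the expected caller, filling these in     */
/* from the user's request):                                          */
/*                                                                    */
/*	sf_mapping_t	map_list[2];	(two-extent manifest)         */
/*	vm_offset_t	base = 0;                                     */
/*	int		flags = 0;                                    */
/*	kern_return_t	kr;                                           */
/*                                                                    */
/*	kr = copyin_shared_file(file_addr, file_size, &base, 2,      */
/*		map_list, file_control, &task_mapping_info, &flags); */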
kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* Wire the hash entry pool only as needed; since we are the  */
	/* only users, we take a few liberties with the population of */
	/* our zone.                                                  */
	static int allocable_hash_pages;
	static vm_offset_t hash_cram_address;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look up      */
	/* mappings based on the file object.                              */

	if (shared_file_header->hash_init == FALSE) {
		vm_size_t hash_table_size;
		vm_size_t hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
			* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
			round_page(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size << 5) - hash_table_offset) / PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

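	/* Top off the element zone whenever fewer than twenty elements */
	/* remain, wiring and cramming at most three pages at a time.   */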
	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_size;

		cram_size = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("copyin_shared_file: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size / sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if (vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t mapped_object;
		if (entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while (mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if (file_object != mapped_object) {
			if (sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if ((file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *)file_object, shared_file_header->hash_size,
			alternate, sm_info))) {
		/* File is loaded; check the load manifest for an exact  */
		/* match.  We simplify by requiring that the elements be  */
		/* the same size and in the same order rather than        */
		/* checking for semantic equivalence.                     */

		/* If the file is being loaded in the alternate area, one */
		/* load to alternate is allowed per mapped object; the    */
		/* base address is passed back to the caller and the      */
		/* mappings field is filled in.  If the caller does not   */
		/* pass the precise mappings_cnt and the alternate is     */
		/* already loaded, an error is returned.                  */
		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].mapping_offset)
					& SHARED_DATA_REGION_MASK) !=
						file_mapping->mapping_offset ||
					mappings[i].size !=
						file_mapping->size ||
					mappings[i].file_offset !=
						file_mapping->file_offset ||
					mappings[i].protection !=
						file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if (i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
			+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded; let's attempt to load it. */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
			mappings, map_cnt,
			(void *)file_object,
			*flags, sm_info);
		*flags = 0;
		if (ret == KERN_NO_SPACE) {
			shared_region_mapping_t regions;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			if (regions == system_shared_region) {
				shared_file_boot_time_init();
				/* current task must stay with its */
				/* current regions */
				vm_set_shared_region(current_task(), regions);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}

/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space. */
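/* The bucket scan is qualified by the caller's region chain: an entry */
/* matches only if it belongs to a region instance (and depth) visible */
/* from sm_info->self, and if it sits on the requested side of the     */
/* alternate_base boundary.                                            */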

load_struct_t *
lsf_hash_lookup(
	queue_head_t	*hash_table,
	void		*file_object,
	int		size,
	boolean_t	alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);
			entry = (load_struct_t *)queue_next(&entry->links)) {
		if (entry->file_object == (int)file_object) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while (target_region) {
				if ((!(sm_info->self)) ||
						((target_region == entry->regions_instance) &&
						(target_region->depth >= entry->depth))) {
					if (alternate) {
						if (entry->base_address >=
								sm_info->alternate_base)
							return entry;
					} else {
						if (entry->base_address <
								sm_info->alternate_base)
							return entry;
					}
				}
				if (target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	return (load_struct_t *)0;
}

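/* Walks every hash bucket and unloads each file entry that was */
/* loaded into the given region instance.                       */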
load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t	region,
	shared_region_task_mappings_t	sm_info)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);
	if (shared_file_header->hash_init == FALSE) {
		mutex_unlock(&shared_file_header->lock);
		return NULL;
	}
	for (i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
				!queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if (region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
					entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	mutex_unlock(&shared_file_header->lock);

	return NULL;
}

/* Removes a map_list (list of loaded extents) for a file from */
/* the loaded file hash table. */

load_struct_t *
lsf_hash_delete(
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);
			entry = (load_struct_t *)queue_next(&entry->links)) {
		if ((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int)file_object) &&
					(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}

/* Inserts a new map_list (list of loaded file extents) into the */
/* server loaded file hash table. */

void
lsf_hash_insert(
	load_struct_t	*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
				shared_file_header->hash_size)],
		entry, load_struct_ptr_t, links);
}

/* Looks up the file type requested.  If already loaded and the file */
/* extents are an exact match, returns success.  If not loaded,      */
/* attempts to load the file extents at the given offsets.  If any   */
/* extent fails to load, or if the file was already loaded in a      */
/* different configuration, lsf_load fails.                          */

kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);

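	/* For an alternate-site load, place the file at alternate_next */
	/* and advance past the highest extent end, failing with        */
	/* KERN_NO_SPACE if that would run into the mapping table at    */
	/* the top of the region.                                       */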
	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
			sm_info->alternate_next;
		max_loadfile_offset = 0;
		for (i = 0; i < map_cnt; i++) {
			if (((mappings[i].mapping_offset
					& SHARED_TEXT_REGION_MASK) + mappings[i].size) >
					max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
					+ mappings[i].size;
			}
		}
		if ((alternate_load_next + round_page(max_loadfile_offset)) >=
				(sm_info->data_size - (sm_info->data_size >> 9))) {
			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page(max_loadfile_offset);
	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
				sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	/* copyin mapped file data */
	for (i = 0; i < map_cnt; i++) {
		vm_offset_t target_address;
		vm_offset_t region_mask;

		if (mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if (mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
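		/* Extents backed by file data (not zero-fill) must lie    */
		/* entirely within the caller's mapped window of the file. */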
		if (!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
					mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
			+ entry->base_address;
		if (vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
			+ entry->base_address;
		if (!(mappings[i].protection & VM_PROT_ZF)) {
			if (vm_map_copyin(current_map(),
					mapped_file + mappings[i].file_offset,
					round_page(mappings[i].size), FALSE, &copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if (vm_map_copy_overwrite(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map, target_address,
					copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}
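		/* Two vm_map_protect passes: the first (set_maximum TRUE) */
		/* clamps the maximum protection, the second sets the      */
		/* current protection to the same read/execute subset.     */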
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
			round_page(target_address + mappings[i].size),
			(mappings[i].protection &
				(VM_PROT_READ | VM_PROT_EXECUTE)),
			TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
			round_page(target_address + mappings[i].size),
			(mappings[i].protection &
				(VM_PROT_READ | VM_PROT_EXECUTE)),
			FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == 0)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
			& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;
}

/* Finds the file_object extent list in the shared memory hash table. */
/* If one is found, the associated extents in shared memory are       */
/* deallocated and the extent list is freed.                          */

void
lsf_unload(
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while (map_ele != NULL) {
			if (map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
				map_ele->mapping_offset,
				map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}