/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <mach/shared_memory_server.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

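/* Global state for the system-wide shared text/data regions and for the */
/* in-kernel load-file (shared library) server set up at boot time. */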
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;
shared_region_mapping_t	system_shared_region;

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

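/* Allocates a fresh 256 meg text region and 256 meg data region via  */
/* shared_file_init() and wraps them in a new shared_region_mapping   */
/* returned through *shared_region. */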
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if(kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			0x9000000, 0x9000000);
	if(kret)
		return kret;
	(*shared_region)->flags = 0;
	return KERN_SUCCESS;
}

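/* Called once at system boot: creates the default text and data regions, */
/* records them in system_shared_region, and attaches the boot task.      */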
void
shared_file_boot_time_init(void)
{
	long	shared_text_region_size;
	long	shared_data_region_size;

	shared_text_region_size = 0x10000000;
	shared_data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);
	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &system_shared_region,
		0x9000000, 0x9000000);
	system_shared_region->flags = SHARED_REGION_SYSTEM;
	vm_set_shared_region(current_task(), system_shared_region);
}

/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space. */

kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	vm_offset_t		aligned_address;
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			data_table_size;
	int			hash_size;
	int			i;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if(kret = vm_region_object_create(kernel_map,
			text_region_size,
			shared_text_region_handle)) {
		return kret;
	}
	if(kret = vm_region_object_create(kernel_map,
			data_region_size,
			shared_data_region_handle)) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

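	/* the mapping table occupies the top 1/512th of the data region;  */
	/* the hash table and its header live at its start, and the        */
	/* remainder backs the load_file_ele zone */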
	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
				data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

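		/* populate the front of the table (enough to cover the    */
		/* hash table and the sf_mapping header) with resident     */
		/* pages and enter them into the kernel pmap */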
		for (b = *mapping_array, alloced = 0;
			alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
			alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_addr,
				VM_PROT_READ | VM_PROT_WRITE, TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
			*mapping_array +
			(hash_size + round_page(sizeof(struct sf_mapping))),
			VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			(hash_size + round_page(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	vm_map(((vm_named_entry_t)
		(*shared_data_region_handle)->ip_kobject)->backing.map,
		&table_mapping_address,
		data_table_size, 0, SHARED_LIB_ALIAS,
		sfma_handle, 0, FALSE,
		VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}

/* A call made from user space, copyin_shared_file requires the user to  */
/* provide the address and size of a mapped file, the full path name of  */
/* that file and a list of offsets to be mapped into shared memory.      */
/* By requiring that the file be pre-mapped, copyin_shared_file can      */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call. */

kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	vm_object_t	file_object,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		vm_size_t	hash_table_size;
		vm_size_t	hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
					* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
					round_page(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

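	/* top up the wired pool of load_file_ele entries whenever it runs */
	/* low, a few pages at a time, until the reserved pages are gone   */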
	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_size;

		cram_size = allocable_hash_pages > 3 ?
				3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("copyin_shared_file: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
				+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */

	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;
		if(entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if (file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *) file_object, shared_file_header->hash_size,
			alternate, sm_info)) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		/* If the file is being loaded in the alternate area, one  */
		/* load to alternate is allowed per mapped object; the     */
		/* base address is passed back to the caller and the       */
		/* mappings field is filled in.  If the caller does not    */
		/* pass the precise mappings_cnt and the alternate is      */
		/* already loaded, an error is returned. */
		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
					& SHARED_DATA_REGION_MASK) !=
					file_mapping->mapping_offset ||
				mappings[i].size !=
					file_mapping->size ||
				mappings[i].file_offset !=
					file_mapping->file_offset ||
				mappings[i].protection !=
					file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
					+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, let's attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
					mappings, map_cnt,
					(void *)file_object,
					*flags, sm_info);
		*flags = 0;
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			if(regions == system_shared_region) {
				shared_file_boot_time_init();
				/* current task must stay with its current */
				/* regions */
				vm_set_shared_region(current_task(), regions);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}

/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space. */

load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t		bucket;
	load_struct_t			*entry;
	shared_region_mapping_t		target_region;
	int				depth;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if (entry->file_object == (int)file_object) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
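			/* check the requested region first, then follow its  */
			/* object chain; an entry matches only if it belongs  */
			/* to that region instance at a compatible depth (or  */
			/* if no particular region was requested), and only   */
			/* entries on the requested side of alternate_base    */
			/* are returned */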
			while(target_region) {
				if((!(sm_info->self)) ||
					((target_region == entry->regions_instance) &&
					(target_region->depth >= entry->depth))) {
					if(alternate) {
						if (entry->base_address >=
							sm_info->alternate_base)
							return entry;
					} else {
						if (entry->base_address <
							sm_info->alternate_base)
							return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	return (load_struct_t *)0;
}

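/* Unloads every file that was loaded into the given region instance, */
/* deallocating its extents and freeing its hash entries. */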
load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		mutex_unlock(&shared_file_header->lock);
		return NULL;
	}
	for(i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
						entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	mutex_unlock(&shared_file_header->lock);
	return NULL;
}

/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table. */

load_struct_t *
lsf_hash_delete(
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
				(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
						load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}

/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table. */

void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
					shared_file_header->hash_size)],
			entry, load_struct_ptr_t, links);
}

/* Looks up the file type requested.  If already loaded and the     */
/* file extents are an exact match, returns Success.  If not        */
/* loaded, attempts to load the file extents at the given offsets;  */
/* if any extent fails to load, or if the file was already loaded   */
/* in a different configuration, lsf_load fails. */

kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);

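	/* if loading at the alternate site, rebase the file at             */
	/* alternate_next and make sure its extents stay below the mapping  */
	/* table reserved at the top of the region; otherwise the supplied  */
	/* base address must not lie above alternate_base */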
	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int	max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
						sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i < map_cnt; i++) {
			if(((mappings[i].mapping_offset
				& SHARED_TEXT_REGION_MASK) + mappings[i].size) >
				max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
						+ mappings[i].size;
			}
		}
		if((alternate_load_next + round_page(max_loadfile_offset)) >=
			(sm_info->data_size - (sm_info->data_size>>9))) {
			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page(max_loadfile_offset);
	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
					sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	/* copyin mapped file data */
	for(i = 0; i < map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if(!(mappings[i].protection & VM_PROT_ZF)
			&& ((mapped_file + mappings[i].file_offset +
			mappings[i].size) >
			(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
				mapped_file + mappings[i].file_offset,
				round_page(mappings[i].size), FALSE,
				&copy_object)) {
				vm_deallocate(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map,
					target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address,
					sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map,
				target_address, copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map,
					target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address,
					sm_info);
				return KERN_FAILURE;
			}
		}
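		/* restrict both the maximum and the current protection of  */
		/* the target range to the read/execute bits requested for  */
		/* this mapping (set_max TRUE, then FALSE) */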
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if(file_mapping == 0)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
						& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;
}

/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */

void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
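		/* walk the extent list: deallocate each extent from the   */
		/* text or data region it was loaded into and return the   */
		/* list element to the zone */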
		while(map_ele != NULL) {
			if(map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
					map_ele->mapping_offset,
					map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}