/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

/* forward declarations */
static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*shared_file_mapping_array);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);


#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
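
/*
 * Illustrative example of the hash: the macro folds the low 24 bits of
 * the file object's address onto the table.  A file_object at
 * 0x12345678 hashed into a 1024-entry table lands in bucket
 * (0x12345678 & 0xffffff) % 1024 == 0x345678 % 1024 == 632.
 * Collisions chain through the queue heads in the table.
 */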

/* Implementation */
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;
shared_region_mapping_t	system_shared_region = NULL;

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

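/*
 * Bookkeeping overview (summary of the structures used below):
 * region_mappings points at a shared_file_info_t whose hash table maps
 * a file's memory object to a load_struct_t; each load_struct_t records
 * the base address the file was loaded at plus a linked list of
 * loaded_mapping_t extents.  Both element types are carved out of
 * lsf_zone, which is deliberately backed by the reserved tail of the
 * shared data region rather than by the general zone map.
 */
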
/* Create a fresh shared region backed by new text and data regions. */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t	text_handle;
	ipc_port_t	data_handle;
	long		text_size;
	long		data_size;
	vm_offset_t	mapping_array;
	kern_return_t	kret;

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if(kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			0x9000000, 0x9000000);
	if(kret)
		return kret;
	(*shared_region)->flags = 0;
	return KERN_SUCCESS;
}

/* Called at boot (and again when a region fills) to establish the */
/* default system shared region for tasks. */
void
shared_file_boot_time_init(
	void)
{
	long			shared_text_region_size;
	long			shared_data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_system_region;

	shared_text_region_size = 0x10000000;
	shared_data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
		0x9000000, 0x9000000);
	old_system_region = system_shared_region;
	system_shared_region = new_system_region;
	system_shared_region->flags = SHARED_REGION_SYSTEM;
	/* consume the reference held because this is the */
	/* system shared region */
	if(old_system_region) {
		shared_region_mapping_dealloc(old_system_region);
	}
	/* hold an extra reference because these are the system */
	/* shared regions. */
	shared_region_mapping_ref(system_shared_region);
	vm_set_shared_region(current_task(), system_shared_region);

}


/* Called at boot time.  Allocates two regions, each 256 megs in size; */
/* these regions are later mapped into task spaces, allowing tasks to  */
/* share the contents of the regions.  shared_file_init is part of a   */
/* shared_memory_server which not only allocates the backing maps but  */
/* also coordinates requests for space. */
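
/*
 * A rough sketch of the resulting layout (sizes taken from the calls
 * above; the segment base constants come from mach/shared_memory_server.h):
 *
 *	text region:	0x10000000 bytes at GLOBAL_SHARED_TEXT_SEGMENT
 *	data region:	0x10000000 bytes, directly above the text region
 *
 * The last data_region_size >> 9 bytes of the data region are reserved
 * for the mapping table built below, and the alternate load area begins
 * 0x9000000 bytes into the regions (the last two arguments passed to
 * shared_region_mapping_create above).
 */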

static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if((kret = vm_region_object_create(kernel_map,
			text_region_size,
			shared_text_region_handle)) != KERN_SUCCESS) {

		return kret;
	}
	if((kret = vm_region_object_create(kernel_map,
			data_region_size,
			shared_data_region_handle)) != KERN_SUCCESS) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;	/* 1/512 of region: 512 KB for 256 MB */
	hash_size = data_region_size >> 14;		/* 1/16384 of region: 16 KB of queue heads */
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
				data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *mapping_array, alloced = 0;
			alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
			alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_addr,
				VM_PROT_READ | VM_PROT_WRITE,
				VM_WIMG_USE_DEFAULT, TRUE);
		}


		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;


		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
			*mapping_array +
			(hash_size + round_page(sizeof(struct sf_mapping))),
			VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			(hash_size + round_page(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	/* map the loaded-file table read-only at the top of the data region */
	vm_map(((vm_named_entry_t)
		(*shared_data_region_handle)->ip_kobject)->backing.map,
		&table_mapping_address,
		data_table_size, 0, SHARED_LIB_ALIAS,
		sfma_handle, 0, FALSE,
		VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}

/* Called from user space, copyin_shared_file requires the caller to    */
/* provide the address and size of a mapped file, the full path name of */
/* that file, and a list of offsets to be mapped into shared memory.    */
/* By requiring that the file be pre-mapped, copyin_shared_file can     */
/* guarantee that the file is neither deleted nor changed after the     */
/* user begins the call. */
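
/*
 * A minimal sketch of the caller's side (hypothetical values; user
 * programs reach this through BSD glue rather than calling it
 * directly).  Each sf_mapping_t describes one extent of the file:
 *
 *	sf_mapping_t map[2];
 *	map[0].mapping_offset = 0x0;		(a text extent)
 *	map[0].size           = 0x8000;
 *	map[0].file_offset    = 0x0;
 *	map[0].protection     = VM_PROT_READ | VM_PROT_EXECUTE;
 *	map[1].mapping_offset = 0x10000000;	(a data extent)
 *	map[1].size           = 0x1000;
 *	map[1].file_offset    = 0x8000;
 *	map[1].protection     = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW;
 *
 * The 0x10000000 bit in a mapping_offset steers an extent into the
 * data region: see the GLOBAL_SHARED_SEGMENT_MASK checks in lsf_load.
 */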

kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* Wire the hash entry pool only as needed.  Since we are the */
	/* only users, we take a few liberties with the population of */
	/* our zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;


	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		vm_size_t	hash_table_size;
		vm_size_t	hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
						* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
					round_page(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

	/* Refill the element zone in small (up to 3 page) chunks */
	/* whenever the pool of free hash elements runs low. */
	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_size;

		cram_size = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address+cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
				+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;
		if(entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if ((file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *) file_object, shared_file_header->hash_size,
			alternate, sm_info)) != NULL) {
		/* File is loaded; check the load manifest for an exact   */
		/* match.  We simplify by requiring that the elements be  */
		/* the same size and in the same order rather than        */
		/* checking for semantic equivalence. */

		/* If the file is being loaded in the alternate area, one */
		/* load to alternate is allowed per mapped object; the    */
		/* base address is passed back to the caller and the      */
		/* mappings field is filled in.  If the caller does not   */
		/* pass the precise mappings_cnt and the Alternate is     */
		/* already loaded, an error is returned. */
		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i>=map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
						& SHARED_DATA_REGION_MASK) !=
						file_mapping->mapping_offset ||
					mappings[i].size !=
						file_mapping->size ||
					mappings[i].file_offset !=
						file_mapping->file_offset ||
					mappings[i].protection !=
						file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i!=map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
						+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded; attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
						mappings, map_cnt,
						(void *)file_object,
						*flags, sm_info);
		*flags = 0;
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			if(regions == system_shared_region) {
				shared_region_mapping_t	new_system_shared_regions;
				shared_file_boot_time_init();
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						&new_system_shared_regions);
				shared_region_mapping_dealloc(
						new_system_shared_regions);
				vm_set_shared_region(current_task(), regions);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}

/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space. */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if (entry->file_object == (int)file_object) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while(target_region) {
				if((!(sm_info->self)) ||
					((target_region == entry->regions_instance) &&
					(target_region->depth >= entry->depth))) {
					if(alternate) {
						if (entry->base_address >=
							sm_info->alternate_base)
							return entry;
					} else {
						if (entry->base_address <
							sm_info->alternate_base)
							return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	return (load_struct_t *)0;
}

/* Unloads every file whose extents were instantiated against the */
/* given region instance; used when a region is torn down. */
load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		mutex_unlock(&shared_file_header->lock);
		return NULL;
	}
	for(i = 0; i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
					entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	mutex_unlock(&shared_file_header->lock);
	return NULL;
}

/* Removes a map_list (a list of loaded extents) for a file from */
/* the loaded file hash table. */

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object)  &&
					(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
						load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}

/* Inserts a new map_list (a list of loaded file extents) into the */
/* server's loaded file hash table. */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
					shared_file_header->hash_size)],
		entry, load_struct_ptr_t, links);
}

/* Looks up the requested file.  If it is already loaded and the file */
/* extents are an exact match, returns success.  If it is not loaded, */
/* attempts to load the file extents at the given offsets.  If any    */
/* extent fails to load, or if the file was already loaded in a       */
/* different configuration, lsf_load fails. */
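
/*
 * Worked example of the address computation below (illustrative, and
 * assuming the 0x0FFFFFFF region masks from shared_memory_server.h):
 * with an entry base_address of 0x05000000, a text extent at
 * mapping_offset 0x2000 is placed at
 * (0x2000 & SHARED_TEXT_REGION_MASK) + 0x05000000 == 0x05002000 in the
 * text region's backing map, while a COW extent at mapping_offset
 * 0x10003000 masks down to 0x3000 and lands at 0x05003000 in the data
 * region's backing map.
 */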

static kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t	sm_info)
{

	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	if (entry == NULL)	/* lsf_zone is Z_EXHAUST; zalloc can fail */
		return KERN_NO_SPACE;
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);


	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
						sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i<map_cnt; i++) {
			if(((mappings[i].mapping_offset
				& SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
				max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
						+ mappings[i].size;
			}
		}
		if((alternate_load_next + round_page(max_loadfile_offset)) >=
			(sm_info->data_size - (sm_info->data_size>>9))) {

			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page(max_loadfile_offset);

	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
					sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	/* copyin mapped file data */
	for(i = 0; i<map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if(!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
				mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
				mapped_file + mappings[i].file_offset,
				round_page(mappings[i].size), FALSE, &copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map, target_address,
				copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}
		/* clamp the maximum protection (set_max == TRUE), then the */
		/* current protection, to the read/execute bits requested */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if(file_mapping == 0)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
								& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;

}


/* Finds the file_object extent list in the shared memory hash table.       */
/* If one is found, the associated extents in shared memory are deallocated */
/* and the extent list is freed. */

static void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if(map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
					map_ele->mapping_offset,
					map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}

/* Returns an integer from 0 to 100 representing how full, in percent, */
/* the lsf_zone mapping pool is. */
unsigned int
lsf_mapping_pool_gauge()
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
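
/*
 * Example with illustrative numbers: 1000 outstanding elements at an
 * elem_size of 64 bytes against a max_size of 512 KB reads
 * (1000 * 64 * 100) / 524288 == 12, i.e. the pool is about 12% full.
 */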