/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */

#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

/* forward declarations */
static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*shared_file_mapping_array);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);


/* hash on the low 24 bits of the file object's kernel address, */
/* taken modulo the number of buckets in the table */
#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)

/* Implementation */
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;
shared_region_mapping_t	system_shared_region = NULL;

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	text_size = 0x10000000;		/* 256 MB shared text region */
	data_size = 0x10000000;		/* 256 MB shared data region */

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if(kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			0x9000000, 0x9000000);	/* alternate area base and next */
	if(kret)
		return kret;
	(*shared_region)->flags = 0;
	return KERN_SUCCESS;
}
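
/*
 * Usage sketch (hypothetical caller, not taken from this file): a task
 * that wants its own region rather than the system default might do
 * something like the following, consuming the returned reference via
 * vm_set_shared_region():
 *
 *	shared_region_mapping_t region;
 *
 *	if (shared_file_create_system_region(&region) == KERN_SUCCESS)
 *		vm_set_shared_region(current_task(), region);
 */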

void
shared_file_boot_time_init(
		void)
{
	long			shared_text_region_size;
	long			shared_data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_system_region;

	shared_text_region_size = 0x10000000;	/* 256 MB */
	shared_data_region_size = 0x10000000;	/* 256 MB */
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);
	
	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
		0x9000000, 0x9000000);
	old_system_region = system_shared_region;
	system_shared_region = new_system_region;
	system_shared_region->flags = SHARED_REGION_SYSTEM;
	/* consume the reference the old region held by virtue of */
	/* being the system shared region; it has been replaced */
	if(old_system_region) {
		shared_region_mapping_dealloc(old_system_region);
	}
	/* hold an extra reference because this is the system */
	/* shared region */
	shared_region_mapping_ref(system_shared_region);
	vm_set_shared_region(current_task(), system_shared_region);

}


/* shared_file_init is called at boot time; it allocates two regions, */
/* each 256 megabytes in size. These regions are later mapped into */
/* task address spaces, allowing tasks to share their contents. */
/* shared_file_init is part of a shared_memory_server which not only */
/* allocates the backing maps but also coordinates requests for space. */


static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if((kret = vm_region_object_create(kernel_map,
				text_region_size,
				shared_text_region_handle))) {
		return kret;
	}
	if((kret = vm_region_object_create(kernel_map,
				data_region_size,
				shared_data_region_handle))) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

	/* reserve the top 1/512th of the data region for the mapping */
	/* table; 1/16384th of the region is used for hash buckets */
	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
				data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *mapping_array, alloced = 0;
			   alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
			   alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_addr,
				VM_PROT_READ | VM_PROT_WRITE,
				VM_WIMG_USE_DEFAULT, TRUE);
		}


		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;


		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
			*mapping_array +
			(hash_size + round_page(sizeof(struct sf_mapping))),
			VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			   (hash_size + round_page(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	/* map the table read-only at the top of the shared data */
	/* region so that clients can consult it */
	vm_map(((vm_named_entry_t)
			(*shared_data_region_handle)->ip_kobject)->backing.map,
			&table_mapping_address,
			data_table_size, 0, SHARED_LIB_ALIAS,
			sfma_handle, 0, FALSE,
			VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}
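
/*
 * A sketch of the mapping-table layout implied by the arithmetic above
 * (for the 256 MB data region, data_table_size is 512 KB and hash_size
 * is 16 KB):
 *
 *	+-- *mapping_array (also mapped at the top of the data region)
 *	v
 *	[ shared_file_info_t | hash buckets (hash_size | zone space ... ]
 *	[ header             | bytes of queue_head_t)  |               ]
 *	|<---------------------- data_table_size --------------------->|
 *
 * The zone space backs lsf_zone, from which load_struct_t and
 * loaded_mapping_t elements are crammed as needed.
 */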

/* copyin_shared_file is called from user space; the caller provides */
/* the address and size of a mapped file, a memory object control */
/* handle for that file, and a list of offsets to be mapped into */
/* shared memory. By requiring that the file be pre-mapped, */
/* copyin_shared_file can guarantee that the file is neither deleted */
/* nor changed after the user begins the call. */

kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_object_t	file_object;
	vm_map_entry_t	entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* wire the hash entry pool only as needed; since we are the */
	/* only users, we take a few liberties with the population of */
	/* our zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;


	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look up */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		vm_size_t	hash_table_size;
		vm_size_t	hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
				* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
				round_page(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_size;

		/* wire and cram up to three more pages of hash */
		/* elements into the zone */
		cram_size = allocable_hash_pages > 3 ?
					3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address+cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("copyin_shared_file: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
				+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;
		if(entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if ((file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *) file_object, shared_file_header->hash_size,
			alternate, sm_info))) {
		/* File is loaded; check the load manifest for an exact */
		/* match. We simplify by requiring that the elements be */
		/* the same size and in the same order rather than */
		/* checking for semantic equivalence. */

		/* If the file is being loaded in the alternate area, */
		/* one load to alternate is allowed per mapped object. */
		/* The base address is passed back to the caller and the */
		/* mappings field is filled in. If the caller does not */
		/* pass the precise mapping count and the alternate is */
		/* already loaded, an error is returned. */
		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i>=map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
						& SHARED_DATA_REGION_MASK) !=
						file_mapping->mapping_offset ||
					mappings[i].size !=
						file_mapping->size ||
					mappings[i].file_offset !=
						file_mapping->file_offset ||
					mappings[i].protection !=
						file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i!=map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
						+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, let's attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
						mappings, map_cnt,
						(void *)file_object,
						*flags, sm_info);
		*flags = 0;
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			if(regions == system_shared_region) {
				shared_region_mapping_t	new_system_shared_regions;
				shared_file_boot_time_init();
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(), &new_system_shared_regions);
				shared_region_mapping_dealloc(new_system_shared_regions);
				vm_set_shared_region(current_task(), regions);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
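
/*
 * Flow sketch (hypothetical caller, illustrative values only): a loader
 * that has already mapped a library at file_addr for file_size bytes
 * might call:
 *
 *	int		flags = 0;
 *	vm_offset_t	base = GLOBAL_SHARED_TEXT_SEGMENT;
 *
 *	kr = copyin_shared_file(file_addr, file_size, &base,
 *			map_cnt, mappings, file_control,
 *			sm_info, &flags);
 *
 * On return, either flags == SF_PREV_LOADED and base points at an
 * existing load with an identical manifest, or the file was freshly
 * loaded by lsf_load at the address left in base.
 */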

/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space. */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if (entry->file_object == (int)file_object) {
		   target_region = (shared_region_mapping_t)sm_info->self;
		   while(target_region) {
			/* a match counts if the caller has no region */
			/* instance of its own, or if the entry was loaded */
			/* into this region at a depth visible from here */
			if((!(sm_info->self)) ||
				((target_region == entry->regions_instance) &&
				(target_region->depth >= entry->depth))) {
				if(alternate) {
					if (entry->base_address >=
						sm_info->alternate_base)
						return entry;
				} else {
					if (entry->base_address <
						sm_info->alternate_base)
						return entry;
				}
			}
			if(target_region->object_chain) {
				target_region = (shared_region_mapping_t)
				   target_region->object_chain->object_chain_region;
			} else {
				target_region = NULL;
			}
		   }
		}
	}

	return (load_struct_t *)0;
}
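
/*
 * Sketch of the chain walk above (shapes only, not literal field
 * values): a task-private region stacked on the system region reaches
 * it through its object_chain link, so a lookup starting at
 * sm_info->self can still find entries loaded into regions beneath it:
 *
 *	sm_info->self (private instance)
 *	    object_chain ---> system_shared_region
 *	                          object_chain ---> NULL
 */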

load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t	region,
	shared_region_task_mappings_t	sm_info)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		mutex_unlock(&shared_file_header->lock);
		return NULL;
	}
	for(i = 0;  i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);) {
			/* capture the next entry before lsf_unload */
			/* frees the current one */
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
					entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	mutex_unlock(&shared_file_header->lock);
	return NULL;
}

/* Removes a map_list (a list of loaded extents) for a file from */
/* the loaded file hash table. */

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
	     [load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object)  &&
					(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
						load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}

/* Inserts a new map_list (a list of loaded file extents) into the */
/* server's loaded file hash table. */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
					shared_file_header->hash_size)],
			entry, load_struct_ptr_t, links);
}

/* Looks up the file type requested. If it is already loaded and the */
/* file extents are an exact match, returns success. If it is not */
/* loaded, attempts to load the file extents at the given offsets. */
/* If any extent fails to load, or if the file was already loaded */
/* in a different configuration, lsf_load fails. */

static kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t	sm_info)
{

	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	if(entry == NULL)
		panic("lsf_load: OUT OF MAPPINGS!");
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);


	alternate_load_next = sm_info->alternate_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int	max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
						sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i<map_cnt; i++) {
			if(((mappings[i].mapping_offset
				& SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
				max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
						+ mappings[i].size;
			}
		}
		/* refuse the load if it would run into the mapping */
		/* table reserved at the top of the region */
		if((alternate_load_next + round_page(max_loadfile_offset)) >=
			(sm_info->data_size - (sm_info->data_size>>9))) {

			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page(max_loadfile_offset);

	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
					sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	/* copyin mapped file data */
	for(i = 0; i<map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			/* copy-on-write mappings must fall in the data */
			/* half of the shared area */
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			/* all other mappings must fall in the text half */
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK)  {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		/* unless the mapping is zero-fill, its extent must lie */
		/* within the mapped file */
		if(!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
				mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
				mapped_file + mappings[i].file_offset,
				round_page(mappings[i].size), FALSE, &copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map, target_address,
				copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}
		/* two passes over vm_map_protect: the first (set_max == */
		/* TRUE) clamps the maximum protection, the second sets */
		/* the current protection */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if(file_mapping == NULL)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
							& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;
}
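
/*
 * Illustrative manifest (hypothetical values, not taken from a real
 * binary): a library with one text extent and one copy-on-write data
 * extent might be described to lsf_load by a two-element sf_mapping_t
 * array along these lines; mapping_offset 0x10000000 selects the data
 * half of the shared area, as checked above:
 *
 *	sf_mapping_t mappings[2];
 *
 *	mappings[0].mapping_offset = 0x00000000;	(text, read/execute)
 *	mappings[0].size = 0x8000;
 *	mappings[0].file_offset = 0x0;
 *	mappings[0].protection = VM_PROT_READ | VM_PROT_EXECUTE;
 *
 *	mappings[1].mapping_offset = 0x10000000;	(data, copy-on-write)
 *	mappings[1].size = 0x1000;
 *	mappings[1].file_offset = 0x8000;
 *	mappings[1].protection = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW;
 */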


/* Finds the file_object's extent list in the shared memory hash table. */
/* If one is found, the associated extents in shared memory are */
/* deallocated and the extent list is freed. */

static void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if(map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
					map_ele->mapping_offset,
					map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}

/* Returns how full the mapping pool is, as an integer percentage */
/* from 0 to 100. */
unsigned int
lsf_mapping_pool_gauge(
	void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
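
/*
 * Worked example (hypothetical zone numbers): with 1024 elements in
 * use, a 64-byte element size, and a 1 MB pool,
 *
 *	(1024 * 64 * 100) / 1048576 = 6
 *
 * so the pool is reported as 6 percent full.
 */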