git.saurik.com Git - apple/xnu.git/blob - osfmk/vm/vm_shared_memory_server.c
734fccf2c3322b6affc42fca805a62a607d3e882
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 *
27 * File: vm/vm_shared_memory_server.c
28 * Author: Chris Youngworth
29 *
30 * Support routines for an in-kernel shared memory allocator
31 */
32
33 #include <ipc/ipc_port.h>
34 #include <kern/thread.h>
35 #include <kern/zalloc.h>
36 #include <mach/kern_return.h>
37 #include <mach/vm_inherit.h>
38 #include <vm/vm_kern.h>
39 #include <vm/vm_map.h>
40 #include <vm/vm_page.h>
41
42 #include <mach/shared_memory_server.h>
43 #include <vm/vm_shared_memory_server.h>
44
45 /* forward declarations */
46 static kern_return_t
47 shared_file_init(
48 ipc_port_t *shared_text_region_handle,
49 vm_size_t text_region_size,
50 ipc_port_t *shared_data_region_handle,
51 vm_size_t data_region_size,
52 vm_offset_t *shared_file_mapping_array);
53
54 static load_struct_t *
55 lsf_hash_lookup(
56 queue_head_t *hash_table,
57 void *file_object,
58 vm_offset_t recognizableOffset,
59 int size,
60 boolean_t alternate,
61 shared_region_task_mappings_t sm_info);
62
63 static load_struct_t *
64 lsf_hash_delete(
65 void *file_object,
66 vm_offset_t base_offset,
67 shared_region_task_mappings_t sm_info);
68
69 static void
70 lsf_hash_insert(
71 load_struct_t *entry,
72 shared_region_task_mappings_t sm_info);
73
74 static kern_return_t
75 lsf_load(
76 vm_offset_t mapped_file,
77 vm_size_t mapped_file_size,
78 vm_offset_t *base_address,
79 sf_mapping_t *mappings,
80 int map_cnt,
81 void *file_object,
82 int flags,
83 shared_region_task_mappings_t sm_info);
84
85 static void
86 lsf_unload(
87 void *file_object,
88 vm_offset_t base_offset,
89 shared_region_task_mappings_t sm_info);
90
91
92 #define load_file_hash(file_object, size) \
93 ((((natural_t)file_object) & 0xffffff) % size)
94
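/*
 * Illustrative sketch (not part of the kernel build): the load_file_hash()
 * macro above simply masks the low 24 bits of the file object's address and
 * reduces the result modulo the hash table size.  The stand-alone fragment
 * below (hypothetical demo_* names, user-space C) mirrors that arithmetic so
 * it can be compiled and exercised on its own if lifted out.
 */
#if 0
#include <stdio.h>

static unsigned int
demo_load_file_hash(void *file_object, unsigned int table_size)
{
	/* same arithmetic as load_file_hash(): low 24 bits, modulo table size */
	return (unsigned int)(((unsigned long)file_object & 0xffffff) % table_size);
}

int
main(void)
{
	int dummy;

	printf("bucket = %u of 64\n", demo_load_file_hash(&dummy, 64));
	return 0;
}
#endif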
95 /* Implementation */
96 vm_offset_t shared_file_text_region;
97 vm_offset_t shared_file_data_region;
98
99 ipc_port_t shared_text_region_handle;
100 ipc_port_t shared_data_region_handle;
101 vm_offset_t shared_file_mapping_array = 0;
102
103 shared_region_mapping_t default_environment_shared_regions = NULL;
104 static decl_mutex_data(,default_regions_list_lock_data)
105
106 #define default_regions_list_lock() \
107 mutex_lock(&default_regions_list_lock_data)
108 #define default_regions_list_lock_try() \
109 mutex_try(&default_regions_list_lock_data)
110 #define default_regions_list_unlock() \
111 mutex_unlock(&default_regions_list_lock_data)
112
113
114 ipc_port_t sfma_handle = NULL;
115 zone_t lsf_zone;
116
117 int shared_file_available_hash_ele;
118
119 /* com region support */
120 ipc_port_t com_region_handle = NULL;
121 vm_map_t com_region_map = NULL;
122 vm_size_t com_region_size = 0x7000;
123 shared_region_mapping_t com_mapping_resource = NULL;
124
125 #define GLOBAL_COM_REGION_BASE 0xFFFF8000
126
127 /* called for the non-default, private branch shared region support */
128 /* the system default fields fs_base and system supported are not */
129 /* relevant, as the system default flag is not set */
130 kern_return_t
131 shared_file_create_system_region(
132 shared_region_mapping_t *shared_region)
133 {
134 ipc_port_t text_handle;
135 ipc_port_t data_handle;
136 long text_size;
137 long data_size;
138 vm_offset_t mapping_array;
139 kern_return_t kret;
140
141 text_size = 0x10000000;
142 data_size = 0x10000000;
143
144 kret = shared_file_init(&text_handle,
145 text_size, &data_handle, data_size, &mapping_array);
146 if(kret)
147 return kret;
148 kret = shared_region_mapping_create(text_handle,
149 text_size, data_handle, data_size, mapping_array,
150 GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
151 SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
152 if(kret)
153 return kret;
154 (*shared_region)->flags = 0;
155 if(com_mapping_resource) {
156 shared_region_mapping_ref(com_mapping_resource);
157 (*shared_region)->next = com_mapping_resource;
158 }
159
160 return KERN_SUCCESS;
161 }
162
163 /*
164 * load a new default for a specified environment into the default shared
165 * regions list. If a previous default exists for the environment specification,
166 * it is returned along with its reference. It is expected that the new
167 * system region structure passes a reference.
168 */
169
170 shared_region_mapping_t
171 update_default_shared_region(
172 shared_region_mapping_t new_system_region)
173 {
174 shared_region_mapping_t old_system_region;
175 unsigned int fs_base;
176 unsigned int system;
177
178 fs_base = new_system_region->fs_base;
179 system = new_system_region->system;
180 new_system_region->flags |= SHARED_REGION_SYSTEM;
181 default_regions_list_lock();
182 old_system_region = default_environment_shared_regions;
183
184 if((old_system_region != NULL) &&
185 (old_system_region->fs_base == fs_base) &&
186 (old_system_region->system == system)) {
187 new_system_region->default_env_list =
188 old_system_region->default_env_list;
189 default_environment_shared_regions = new_system_region;
190 default_regions_list_unlock();
191 old_system_region->flags |= SHARED_REGION_STALE;
192 return old_system_region;
193 }
194 if (old_system_region) {
195 while(old_system_region->default_env_list != NULL) {
196 if((old_system_region->default_env_list->fs_base == fs_base) &&
197 (old_system_region->default_env_list->system == system)) {
198 new_system_region->default_env_list =
199 old_system_region->default_env_list
200 ->default_env_list;
201 old_system_region->default_env_list =
202 new_system_region;
203 default_regions_list_unlock();
204 old_system_region->flags |= SHARED_REGION_STALE;
205 return old_system_region;
206 }
207 old_system_region = old_system_region->default_env_list;
208 }
209 }
210 /* If we get here, we are at the end of the system list and we */
211 /* did not find a pre-existing entry */
212 if(old_system_region) {
213 old_system_region->default_env_list = new_system_region;
214 } else {
215 default_environment_shared_regions = new_system_region;
216 }
217 default_regions_list_unlock();
218 return NULL;
219 }
220
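/*
 * Illustrative sketch (not part of the kernel build): update_default_shared_region()
 * above implements a "replace in place, else append at the tail" discipline on a
 * singly linked list keyed by (fs_base, system).  The simplified user-space
 * version below (hypothetical demo_* names) captures the same control flow;
 * locking, the SHARED_REGION_STALE flag and the reference bookkeeping are omitted.
 */
#if 0
#include <stddef.h>

struct demo_region {
	unsigned int fs_base;
	unsigned int system;
	struct demo_region *next;
};

static struct demo_region *demo_default_head;

/* Returns the region that was displaced, or NULL if the new entry was appended. */
static struct demo_region *
demo_update_default(struct demo_region *new_region)
{
	struct demo_region **link = &demo_default_head;

	while (*link != NULL) {
		if ((*link)->fs_base == new_region->fs_base &&
		    (*link)->system == new_region->system) {
			struct demo_region *old = *link;

			new_region->next = old->next;	/* splice into the old slot */
			*link = new_region;
			return old;
		}
		link = &(*link)->next;
	}
	new_region->next = NULL;			/* no match: append at the tail */
	*link = new_region;
	return NULL;
}
#endif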
221 /*
222 * lookup a system_shared_region for the environment specified. If one is
223 * found, it is returned along with a reference against the structure
224 */
225
226 shared_region_mapping_t
227 lookup_default_shared_region(
228 unsigned int fs_base,
229 unsigned int system)
230 {
231 shared_region_mapping_t system_region;
232 default_regions_list_lock();
233 system_region = default_environment_shared_regions;
234
235 while(system_region != NULL) {
236 if((system_region->fs_base == fs_base) &&
237 (system_region->system == system)) {
238 break;
239 }
240 system_region = system_region->default_env_list;
241 }
242 if(system_region)
243 shared_region_mapping_ref(system_region);
244 default_regions_list_unlock();
245 return system_region;
246 }
247
248 /*
249 * remove a system_region default if it appears in the default regions list.
250 * Drop a reference on removal.
251 */
252
253 void
254 remove_default_shared_region(
255 shared_region_mapping_t system_region)
256 {
257 shared_region_mapping_t old_system_region;
258 unsigned int fs_base;
259 unsigned int system;
260
261 default_regions_list_lock();
262 old_system_region = default_environment_shared_regions;
263
264 if(old_system_region == NULL) {
265 default_regions_list_unlock();
266 return;
267 }
268
269 if (old_system_region == system_region) {
270 default_environment_shared_regions
271 = old_system_region->default_env_list;
272 old_system_region->flags |= SHARED_REGION_STALE;
273 shared_region_mapping_dealloc(old_system_region);
274 default_regions_list_unlock();
275 return;
276 }
277
278 while(old_system_region->default_env_list != NULL) {
279 if(old_system_region->default_env_list == system_region) {
280 shared_region_mapping_t dead_region;
281 dead_region = old_system_region->default_env_list;
282 old_system_region->default_env_list =
283 old_system_region->default_env_list->default_env_list;
284 dead_region->flags |= SHARED_REGION_STALE;
285 shared_region_mapping_dealloc(dead_region);
286 default_regions_list_unlock();
287 return;
288 }
289 old_system_region = old_system_region->default_env_list;
290 }
291 default_regions_list_unlock();
292 }
293
294 void
295 remove_all_shared_regions()
296 {
297 shared_region_mapping_t system_region;
298 shared_region_mapping_t next_system_region;
299
300 default_regions_list_lock();
301 system_region = default_environment_shared_regions;
302
303 if(system_region == NULL) {
304 default_regions_list_unlock();
305 return;
306 }
307
308 while(system_region != NULL) {
309 next_system_region = system_region->default_env_list;
310 system_region->flags |= SHARED_REGION_STALE;
311 shared_region_mapping_dealloc(system_region);
312 system_region = next_system_region;
313 }
314 default_environment_shared_regions = NULL;
315 default_regions_list_unlock();
316 }
317
318 /* shared_com_boot_time_init initializes the common page shared data and */
319 /* text region. This region is semi-independent of the split libs */
320 /* and so its policies have to be handled differently by the code that */
321 /* manipulates the mapping of shared region environments. However, */
322 /* the shared region delivery system supports both */
323 void shared_com_boot_time_init(void)
324 {
325 kern_return_t kret;
326 vm_named_entry_t named_entry;
327
328 if(com_region_handle) {
329 panic("shared_com_boot_time_init: "
330 "com_region_handle already set\n");
331 }
332
333 /* create com page region */
334 if(kret = vm_region_object_create(kernel_map,
335 com_region_size,
336 &com_region_handle)) {
337 panic("shared_com_boot_time_init: "
338 "unable to create comm page\n");
339 return;
340 }
341 /* now export the underlying region/map */
342 named_entry = (vm_named_entry_t)com_region_handle->ip_kobject;
343 com_region_map = named_entry->backing.map;
344 /* wrap the com region in its own shared file mapping structure */
345 shared_region_mapping_create(com_region_handle,
346 com_region_size, NULL, 0, 0,
347 GLOBAL_COM_REGION_BASE, &com_mapping_resource,
348 0, 0);
349
350 }
351
352 void shared_file_boot_time_init(
353 unsigned int fs_base,
354 unsigned int system)
355 {
356 long shared_text_region_size;
357 long shared_data_region_size;
358 shared_region_mapping_t new_system_region;
359 shared_region_mapping_t old_default_env;
360
361 shared_text_region_size = 0x10000000;
362 shared_data_region_size = 0x10000000;
363 shared_file_init(&shared_text_region_handle,
364 shared_text_region_size, &shared_data_region_handle,
365 shared_data_region_size, &shared_file_mapping_array);
366
367 shared_region_mapping_create(shared_text_region_handle,
368 shared_text_region_size, shared_data_region_handle,
369 shared_data_region_size, shared_file_mapping_array,
370 GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
371 SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
372
373 new_system_region->fs_base = fs_base;
374 new_system_region->system = system;
375 new_system_region->flags = SHARED_REGION_SYSTEM;
376
377 /* grab an extra reference for the caller */
378 /* remember to grab before call to update */
379 shared_region_mapping_ref(new_system_region);
380 old_default_env = update_default_shared_region(new_system_region);
381 /* if a previous default was displaced, release the reference that */
382 /* update_default_shared_region handed back along with it */
383 if(old_default_env)
384 shared_region_mapping_dealloc(old_default_env);
385 if(com_mapping_resource == NULL) {
386 shared_com_boot_time_init();
387 }
388 shared_region_mapping_ref(com_mapping_resource);
389 new_system_region->next = com_mapping_resource;
390 vm_set_shared_region(current_task(), new_system_region);
391 }
392
393
394 /* called at boot time; allocates two regions, each 256 megabytes in size. */
395 /* These regions are later mapped into task address spaces, allowing tasks to */
396 /* share the contents of the regions. shared_file_init is part of */
397 /* a shared_memory_server which not only allocates the backing maps */
398 /* but also coordinates requests for space. */
399
400
401 static kern_return_t
402 shared_file_init(
403 ipc_port_t *shared_text_region_handle,
404 vm_size_t text_region_size,
405 ipc_port_t *shared_data_region_handle,
406 vm_size_t data_region_size,
407 vm_offset_t *mapping_array)
408 {
409 vm_offset_t aligned_address;
410 shared_file_info_t *sf_head;
411 vm_offset_t table_mapping_address;
412 int data_table_size;
413 int hash_size;
414 int i;
415 kern_return_t kret;
416
417 vm_object_t buf_object;
418 vm_map_entry_t entry;
419 vm_size_t alloced;
420 vm_offset_t b;
421 vm_page_t p;
422
423 /* create text and data maps/regions */
424 if(kret = vm_region_object_create(kernel_map,
425 text_region_size,
426 shared_text_region_handle)) {
427
428 return kret;
429 }
430 if(kret = vm_region_object_create(kernel_map,
431 data_region_size,
432 shared_data_region_handle)) {
433 ipc_port_release_send(*shared_text_region_handle);
434 return kret;
435 }
436
437 data_table_size = data_region_size >> 9;
438 hash_size = data_region_size >> 14;
439 table_mapping_address = data_region_size - data_table_size;
440
441 if(shared_file_mapping_array == 0) {
442 buf_object = vm_object_allocate(data_table_size);
443
444 if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
445 data_table_size, 0, &entry) != KERN_SUCCESS) {
446 panic("shared_file_init: no space");
447 }
448 *mapping_array = shared_file_mapping_array;
449 vm_map_unlock(kernel_map);
450 entry->object.vm_object = buf_object;
451 entry->offset = 0;
452
453 for (b = *mapping_array, alloced = 0;
454 alloced < (hash_size +
455 round_page_32(sizeof(struct sf_mapping)));
456 alloced += PAGE_SIZE, b += PAGE_SIZE) {
457 vm_object_lock(buf_object);
458 p = vm_page_alloc(buf_object, alloced);
459 if (p == VM_PAGE_NULL) {
460 panic("shared_file_init: no space");
461 }
462 p->busy = FALSE;
463 vm_object_unlock(buf_object);
464 pmap_enter(kernel_pmap, b, p->phys_page,
465 VM_PROT_READ | VM_PROT_WRITE,
466 ((unsigned int)(p->object->wimg_bits))
467 & VM_WIMG_MASK,
468 TRUE);
469 }
470
471
472 /* initialize loaded file array */
473 sf_head = (shared_file_info_t *)*mapping_array;
474 sf_head->hash = (queue_head_t *)
475 (((int)*mapping_array) +
476 sizeof(struct shared_file_info));
477 sf_head->hash_size = hash_size/sizeof(queue_head_t);
478 mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
479 sf_head->hash_init = FALSE;
480
481
482 mach_make_memory_entry(kernel_map, &data_table_size,
483 *mapping_array, VM_PROT_READ, &sfma_handle,
484 NULL);
485
486 if (vm_map_wire(kernel_map, *mapping_array,
487 *mapping_array +
488 (hash_size + round_page_32(sizeof(struct sf_mapping))),
489 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
490 panic("shared_file_init: No memory for data table");
491 }
492
493 lsf_zone = zinit(sizeof(struct load_file_ele),
494 data_table_size -
495 (hash_size + round_page_32(sizeof(struct sf_mapping))),
496 0, "load_file_server");
497
498 zone_change(lsf_zone, Z_EXHAUST, TRUE);
499 zone_change(lsf_zone, Z_COLLECT, FALSE);
500 zone_change(lsf_zone, Z_EXPAND, FALSE);
501 zone_change(lsf_zone, Z_FOREIGN, TRUE);
502
503 /* initialize the global default environment lock */
504 mutex_init(&default_regions_list_lock_data, ETAP_NO_TRACE);
505
506 } else {
507 *mapping_array = shared_file_mapping_array;
508 }
509
510 vm_map(((vm_named_entry_t)
511 (*shared_data_region_handle)->ip_kobject)->backing.map,
512 &table_mapping_address,
513 data_table_size, 0, SHARED_LIB_ALIAS,
514 sfma_handle, 0, FALSE,
515 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
516
516 return KERN_SUCCESS;
517 }
518
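/*
 * Illustrative sketch (not part of the kernel build): for the default
 * 0x10000000 (256 MB) data region, the size derivations in shared_file_init()
 * above give a mapping/data table of data_region_size >> 9 bytes (512 KB), a
 * hash area of data_region_size >> 14 bytes (16 KB), and a table placed at the
 * top of the data region.  The fragment below just reproduces that arithmetic.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long data_region_size = 0x10000000UL;		/* 256 MB */
	unsigned long data_table_size  = data_region_size >> 9;	/* 0x80000 */
	unsigned long hash_size        = data_region_size >> 14;	/* 0x4000  */
	unsigned long table_offset     = data_region_size - data_table_size;

	printf("data table 0x%lx bytes, hash area 0x%lx bytes, "
	    "table at region offset 0x%lx\n",
	    data_table_size, hash_size, table_offset);
	return 0;
}
#endif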
519 /* A call made from user space, copyin_shared_file requires the user to */
520 /* provide the address and size of a mapped file and a list of offsets */
521 /* to be mapped into shared memory. */
522 /* By requiring that the file be pre-mapped, copyin_shared_file can */
523 /* guarantee that the file is neither deleted nor changed after the user */
524 /* begins the call. */
525
526 kern_return_t
527 copyin_shared_file(
528 vm_offset_t mapped_file,
529 vm_size_t mapped_file_size,
530 vm_offset_t *base_address,
531 int map_cnt,
532 sf_mapping_t *mappings,
533 memory_object_control_t file_control,
534 shared_region_task_mappings_t sm_info,
535 int *flags)
536 {
537 vm_object_t file_object;
538 vm_map_entry_t entry;
539 shared_file_info_t *shared_file_header;
540 load_struct_t *file_entry;
541 loaded_mapping_t *file_mapping;
542 boolean_t alternate;
543 int i;
544 kern_return_t ret;
545
546 /* wire the hash entry pool only as needed; since we are the only */
547 /* users, we take a few liberties with the population of our */
548 /* zone. */
549 static int allocable_hash_pages;
550 static vm_offset_t hash_cram_address;
551
552
553 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
554
555 mutex_lock(&shared_file_header->lock);
556
557 /* If this is the first call to this routine, take the opportunity */
558 /* to initialize the hash table which will be used to look-up */
559 /* mappings based on the file object */
560
561 if(shared_file_header->hash_init == FALSE) {
562 vm_size_t hash_table_size;
563 vm_size_t hash_table_offset;
564
565 hash_table_size = (shared_file_header->hash_size)
566 * sizeof(struct queue_entry);
567 hash_table_offset = hash_table_size +
568 round_page_32(sizeof(struct sf_mapping));
569 for (i = 0; i < shared_file_header->hash_size; i++)
570 queue_init(&shared_file_header->hash[i]);
571
572 allocable_hash_pages =
573 ((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
574 hash_cram_address =
575 sm_info->region_mappings + hash_table_offset;
576 shared_file_available_hash_ele = 0;
577
578 shared_file_header->hash_init = TRUE;
579 }
580
581 if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
582 int cram_size;
583
584 cram_size = allocable_hash_pages > 3 ?
585 3 : allocable_hash_pages;
586 allocable_hash_pages -= cram_size;
587 cram_size = cram_size * PAGE_SIZE;
588 if (vm_map_wire(kernel_map, hash_cram_address,
589 hash_cram_address+cram_size,
590 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
591 panic("shared_file_init: No memory for data table");
592 }
593 zcram(lsf_zone, hash_cram_address, cram_size);
594 shared_file_available_hash_ele
595 += cram_size/sizeof(struct load_file_ele);
596 hash_cram_address += cram_size;
597 }
598
599
600 /* Find the entry in the map associated with the current mapping */
601 /* of the file object */
602 file_object = memory_object_control_to_vm_object(file_control);
603 if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
604 vm_object_t mapped_object;
605 if(entry->is_sub_map) {
606 mutex_unlock(&shared_file_header->lock);
607 return KERN_INVALID_ADDRESS;
608 }
609 mapped_object = entry->object.vm_object;
610 while(mapped_object->shadow != NULL) {
611 mapped_object = mapped_object->shadow;
612 }
613 /* check to see that the file object passed is indeed the */
614 /* same as the mapped object passed */
615 if(file_object != mapped_object) {
616 if(sm_info->flags & SHARED_REGION_SYSTEM) {
617 mutex_unlock(&shared_file_header->lock);
618 return KERN_PROTECTION_FAILURE;
619 } else {
620 file_object = mapped_object;
621 }
622 }
623 } else {
624 mutex_unlock(&shared_file_header->lock);
625 return KERN_INVALID_ADDRESS;
626 }
627
628 alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;
629
630 if (file_entry = lsf_hash_lookup(shared_file_header->hash,
631 (void *) file_object, mappings[0].file_offset, shared_file_header->hash_size,
632 alternate, sm_info)) {
633 /* File is loaded; check the load manifest for an exact match. */
634 /* We simplify by requiring that the elements be the same */
635 /* size and in the same order rather than checking for */
636 /* semantic equivalence. */
637
638 /* If the file is being loaded in the alternate */
639 /* area, one load to alternate is allowed per mapped */
640 /* object; the base address is passed back to the */
641 /* caller and the mappings field is filled in. If the */
642 /* caller does not pass the precise mappings_cnt */
643 /* and the Alternate is already loaded, an error */
644 /* is returned. */
645 i = 0;
646 file_mapping = file_entry->mappings;
647 while(file_mapping != NULL) {
648 if(i>=map_cnt) {
649 mutex_unlock(&shared_file_header->lock);
650 return KERN_INVALID_ARGUMENT;
651 }
652 if(((mappings[i].mapping_offset)
653 & SHARED_DATA_REGION_MASK) !=
654 file_mapping->mapping_offset ||
655 mappings[i].size !=
656 file_mapping->size ||
657 mappings[i].file_offset !=
658 file_mapping->file_offset ||
659 mappings[i].protection !=
660 file_mapping->protection) {
661 break;
662 }
663 file_mapping = file_mapping->next;
664 i++;
665 }
666 if(i!=map_cnt) {
667 mutex_unlock(&shared_file_header->lock);
668 return KERN_INVALID_ARGUMENT;
669 }
670 *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
671 + file_entry->base_address;
672 *flags = SF_PREV_LOADED;
673 mutex_unlock(&shared_file_header->lock);
674 return KERN_SUCCESS;
675 } else {
676 /* File is not loaded; let's attempt to load it */
677 ret = lsf_load(mapped_file, mapped_file_size, base_address,
678 mappings, map_cnt,
679 (void *)file_object,
680 *flags, sm_info);
681 *flags = 0;
682 if(ret == KERN_NO_SPACE) {
683 shared_region_mapping_t regions;
684 shared_region_mapping_t system_region;
685 regions = (shared_region_mapping_t)sm_info->self;
686 regions->flags |= SHARED_REGION_FULL;
687 system_region = lookup_default_shared_region(
688 regions->fs_base, regions->system);
689 if(system_region == regions) {
690 shared_region_mapping_t new_system_shared_regions;
691 shared_file_boot_time_init(
692 regions->fs_base, regions->system);
693 /* current task must stay with its current */
694 /* regions, drop count on system_shared_region */
695 /* and put back our original set */
696 vm_get_shared_region(current_task(),
697 &new_system_shared_regions);
698 shared_region_mapping_dealloc(
699 new_system_shared_regions);
700 vm_set_shared_region(current_task(), regions);
701 }
702 if(system_region != NULL) {
703 shared_region_mapping_dealloc(system_region);
704 }
705 }
706 mutex_unlock(&shared_file_header->lock);
707 return ret;
708 }
709 }
710
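/*
 * Illustrative sketch (not part of the kernel build): the manifest check in
 * copyin_shared_file() above walks the caller's mappings array and the cached
 * loaded_mapping_t list in lock step, requiring the same count, offsets, sizes
 * and protections in the same order.  A stand-alone version of that comparison,
 * with hypothetical demo_* structures and the region-mask arithmetic omitted,
 * could look like this.
 */
#if 0
#include <stddef.h>

struct demo_mapping {
	unsigned long mapping_offset;
	unsigned long size;
	unsigned long file_offset;
	int protection;
	struct demo_mapping *next;	/* used only by the cached list */
};

/* Returns 1 when the requested mappings exactly match the cached list. */
static int
demo_manifest_matches(const struct demo_mapping *requested, int map_cnt,
    const struct demo_mapping *cached)
{
	int i = 0;

	while (cached != NULL) {
		if (i >= map_cnt)
			return 0;		/* cached list is longer than the request */
		if (requested[i].mapping_offset != cached->mapping_offset ||
		    requested[i].size != cached->size ||
		    requested[i].file_offset != cached->file_offset ||
		    requested[i].protection != cached->protection)
			return 0;		/* extents differ */
		cached = cached->next;
		i++;
	}
	return i == map_cnt;			/* both lists exhausted together */
}
#endif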
711 /* A hash lookup function for the list of loaded files in */
712 /* shared_memory_server space. */
713
714 static load_struct_t *
715 lsf_hash_lookup(
716 queue_head_t *hash_table,
717 void *file_object,
718 vm_offset_t recognizableOffset,
719 int size,
720 boolean_t alternate,
721 shared_region_task_mappings_t sm_info)
722 {
723 register queue_t bucket;
724 load_struct_t *entry;
725 shared_region_mapping_t target_region;
726 int depth;
727
728 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
729 for (entry = (load_struct_t *)queue_first(bucket);
730 !queue_end(bucket, &entry->links);
731 entry = (load_struct_t *)queue_next(&entry->links)) {
732
736 if ((entry->file_object == (int)file_object) &&
737 (entry->file_offset == recognizableOffset)) {
738 target_region = (shared_region_mapping_t)sm_info->self;
739 depth = target_region->depth;
740 while(target_region) {
741 if((!(sm_info->self)) ||
742 ((target_region == entry->regions_instance) &&
743 (target_region->depth >= entry->depth))) {
744 if(alternate) {
745 if (entry->base_address >=
746 sm_info->alternate_base)
747 return entry;
748 } else {
749 if (entry->base_address <
750 sm_info->alternate_base)
751 return entry;
752 }
753 }
754 if(target_region->object_chain) {
755 target_region = (shared_region_mapping_t)
756 target_region->object_chain->object_chain_region;
757 depth = target_region->object_chain->depth;
758 } else {
759 target_region = NULL;
760 }
761 }
762 }
763 }
764
765 return (load_struct_t *)0;
766 }
767
768 load_struct_t *
769 lsf_remove_regions_mappings(
770 shared_region_mapping_t region,
771 shared_region_task_mappings_t sm_info)
772 {
773 int i;
774 register queue_t bucket;
775 shared_file_info_t *shared_file_header;
776 load_struct_t *entry;
777 load_struct_t *next_entry;
778 load_struct_t *prev_entry;
779
780 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
781
782 mutex_lock(&shared_file_header->lock);
783 if(shared_file_header->hash_init == FALSE) {
784 mutex_unlock(&shared_file_header->lock);
785 return NULL;
786 }
787 for(i = 0; i<shared_file_header->hash_size; i++) {
788 bucket = &shared_file_header->hash[i];
789 for (entry = (load_struct_t *)queue_first(bucket);
790 !queue_end(bucket, &entry->links);) {
791 next_entry = (load_struct_t *)queue_next(&entry->links);
792 if(region == entry->regions_instance) {
793 lsf_unload((void *)entry->file_object,
794 entry->base_address, sm_info);
795 }
796 entry = next_entry;
797 }
798 }
799 mutex_unlock(&shared_file_header->lock);
	return NULL;
800 }
801
802 /* Removes a map_list, (list of loaded extents) for a file from */
803 /* the loaded file hash table. */
804
805 static load_struct_t *
806 lsf_hash_delete(
807 void *file_object,
808 vm_offset_t base_offset,
809 shared_region_task_mappings_t sm_info)
810 {
811 register queue_t bucket;
812 shared_file_info_t *shared_file_header;
813 load_struct_t *entry;
814 load_struct_t *prev_entry;
815
816 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
817
818 bucket = &shared_file_header->hash
819 [load_file_hash((int)file_object, shared_file_header->hash_size)];
820
821 for (entry = (load_struct_t *)queue_first(bucket);
822 !queue_end(bucket, &entry->links);
823 entry = (load_struct_t *)queue_next(&entry->links)) {
824 if((!(sm_info->self)) || ((shared_region_mapping_t)
825 sm_info->self == entry->regions_instance)) {
826 if ((entry->file_object == (int) file_object) &&
827 (entry->base_address == base_offset)) {
828 queue_remove(bucket, entry,
829 load_struct_ptr_t, links);
830 return entry;
831 }
832 }
833 }
834
835 return (load_struct_t *)0;
836 }
837
838 /* Inserts a new map_list, (list of loaded file extents), into the */
839 /* server loaded file hash table. */
840
841 static void
842 lsf_hash_insert(
843 load_struct_t *entry,
844 shared_region_task_mappings_t sm_info)
845 {
846 shared_file_info_t *shared_file_header;
847
848 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
849 queue_enter(&shared_file_header->hash
850 [load_file_hash(entry->file_object,
851 shared_file_header->hash_size)],
852 entry, load_struct_ptr_t, links);
853 }
854
855 /* Looks up the file type requested. If already loaded and the */
856 /* file extents are an exact match, returns Success. If not */
857 /* loaded, attempts to load the file extents at the given offsets. */
858 /* If any extent fails to load, or if the file was already loaded */
859 /* in a different configuration, lsf_load fails. */
860
861 static kern_return_t
862 lsf_load(
863 vm_offset_t mapped_file,
864 vm_size_t mapped_file_size,
865 vm_offset_t *base_address,
866 sf_mapping_t *mappings,
867 int map_cnt,
868 void *file_object,
869 int flags,
870 shared_region_task_mappings_t sm_info)
871 {
872
873 load_struct_t *entry;
874 vm_map_copy_t copy_object;
875 loaded_mapping_t *file_mapping;
876 loaded_mapping_t **tptr;
877 int i;
878 ipc_port_t local_map;
879 vm_offset_t original_alt_load_next;
880 vm_offset_t alternate_load_next;
881
882 entry = (load_struct_t *)zalloc(lsf_zone);
	if (entry == NULL)
		panic("lsf_load: OUT OF MAPPINGS!");
883 shared_file_available_hash_ele--;
884 entry->file_object = (int)file_object;
885 entry->mapping_cnt = map_cnt;
886 entry->mappings = NULL;
887 entry->links.prev = (queue_entry_t) 0;
888 entry->links.next = (queue_entry_t) 0;
889 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
890 entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
891 entry->file_offset = mappings[0].file_offset;
892
893 lsf_hash_insert(entry, sm_info);
894 tptr = &(entry->mappings);
895
896
897 alternate_load_next = sm_info->alternate_next;
898 original_alt_load_next = alternate_load_next;
899 if (flags & ALTERNATE_LOAD_SITE) {
900 int max_loadfile_offset;
901
902 *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
903 sm_info->alternate_next;
904 max_loadfile_offset = 0;
905 for(i = 0; i<map_cnt; i++) {
906 if(((mappings[i].mapping_offset
907 & SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
908 max_loadfile_offset) {
909 max_loadfile_offset =
910 (mappings[i].mapping_offset
911 & SHARED_TEXT_REGION_MASK)
912 + mappings[i].size;
913 }
914 }
915 if((alternate_load_next + round_page_32(max_loadfile_offset)) >=
916 (sm_info->data_size - (sm_info->data_size>>9))) {
917
918 return KERN_NO_SPACE;
919 }
920 alternate_load_next += round_page_32(max_loadfile_offset);
921
922 } else {
923 if (((*base_address) & SHARED_TEXT_REGION_MASK) >
924 sm_info->alternate_base) {
925 entry->base_address =
926 (*base_address) & SHARED_TEXT_REGION_MASK;
927 lsf_unload(file_object, entry->base_address, sm_info);
928 return KERN_INVALID_ARGUMENT;
929 }
930 }
931
932 entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;
933
934 // Sanity check the mappings -- make sure we don't stray across the
935 // alternate boundary. If any bit of a library that we're not trying
936 // to load in the alternate load space strays across that boundary,
937 // return KERN_INVALID_ARGUMENT immediately so that the caller can
938 // try to load it in the alternate shared area. We do this to avoid
939 // a nasty case: if a library tries to load so that it crosses the
940 // boundary, it'll occupy a bit of the alternate load area without
941 // the kernel being aware. When loads into the alternate load area
942 // at the first free address are tried, the load will fail.
943 // Thus, a single library straddling the boundary causes all sliding
944 // libraries to fail to load. This check will avoid such a case.
945
946 if (!(flags & ALTERNATE_LOAD_SITE)) {
947 for (i = 0; i<map_cnt;i++) {
948 vm_offset_t region_mask;
949 vm_address_t region_start;
950 vm_address_t region_end;
951
952 if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
953 // mapping offsets are relative to start of shared segments.
954 region_mask = SHARED_TEXT_REGION_MASK;
955 region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
956 region_end = (mappings[i].size + region_start);
957 if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
958 // No library is permitted to load such that any bit of it is in the
959 // shared alternate space. If they want it loaded, they can put
960 // it in the alternate space explicitly.
961 printf("Library trying to load across alternate shared region boundary -- denied!\n");
962 return KERN_INVALID_ARGUMENT;
963 }
964 } else {
965 // rw section?
966 region_mask = SHARED_DATA_REGION_MASK;
967 region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
968 region_end = (mappings[i].size + region_start);
969 if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
970 printf("Library trying to load across alternate shared region boundary-- denied!\n");
971 return KERN_INVALID_ARGUMENT;
972 }
973 } // write?
974 } // for
975 } // if not alternate load site.
976
977 /* copyin mapped file data */
978 for(i = 0; i<map_cnt; i++) {
979 vm_offset_t target_address;
980 vm_offset_t region_mask;
981
982 if(mappings[i].protection & VM_PROT_COW) {
983 local_map = (ipc_port_t)sm_info->data_region;
984 region_mask = SHARED_DATA_REGION_MASK;
985 if((mappings[i].mapping_offset
986 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
987 lsf_unload(file_object,
988 entry->base_address, sm_info);
989 return KERN_INVALID_ARGUMENT;
990 }
991 } else {
992 region_mask = SHARED_TEXT_REGION_MASK;
993 local_map = (ipc_port_t)sm_info->text_region;
994 if(mappings[i].mapping_offset
995 & GLOBAL_SHARED_SEGMENT_MASK) {
996 lsf_unload(file_object,
997 entry->base_address, sm_info);
998 return KERN_INVALID_ARGUMENT;
999 }
1000 }
1001 if(!(mappings[i].protection & VM_PROT_ZF)
1002 && ((mapped_file + mappings[i].file_offset +
1003 mappings[i].size) >
1004 (mapped_file + mapped_file_size))) {
1005 lsf_unload(file_object, entry->base_address, sm_info);
1006 return KERN_INVALID_ARGUMENT;
1007 }
1008 target_address = ((mappings[i].mapping_offset) & region_mask)
1009 + entry->base_address;
1010 if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
1011 ->backing.map, &target_address,
1012 mappings[i].size, FALSE)) {
1013 lsf_unload(file_object, entry->base_address, sm_info);
1014 return KERN_FAILURE;
1015 }
1016 target_address = ((mappings[i].mapping_offset) & region_mask)
1017 + entry->base_address;
1018 if(!(mappings[i].protection & VM_PROT_ZF)) {
1019 if(vm_map_copyin(current_map(),
1020 mapped_file + mappings[i].file_offset,
1021 round_page_32(mappings[i].size), FALSE, &copy_object)) {
1022 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
1023 ->backing.map, target_address, mappings[i].size);
1024 lsf_unload(file_object, entry->base_address, sm_info);
1025 return KERN_FAILURE;
1026 }
1027 if(vm_map_copy_overwrite(((vm_named_entry_t)
1028 local_map->ip_kobject)->backing.map, target_address,
1029 copy_object, FALSE)) {
1030 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
1031 ->backing.map, target_address, mappings[i].size);
1032 lsf_unload(file_object, entry->base_address, sm_info);
1033 return KERN_FAILURE;
1034 }
1035 }
1036 vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
1037 ->backing.map, target_address,
1038 round_page_32(target_address + mappings[i].size),
1039 (mappings[i].protection &
1040 (VM_PROT_READ | VM_PROT_EXECUTE)),
1041 TRUE);
1042 vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
1043 ->backing.map, target_address,
1044 round_page_32(target_address + mappings[i].size),
1045 (mappings[i].protection &
1046 (VM_PROT_READ | VM_PROT_EXECUTE)),
1047 FALSE);
1048 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
1049 if(file_mapping == 0)
1050 panic("lsf_load: OUT OF MAPPINGS!");
1051 shared_file_available_hash_ele--;
1052 file_mapping->mapping_offset = (mappings[i].mapping_offset)
1053 & region_mask;
1054 file_mapping->size = mappings[i].size;
1055 file_mapping->file_offset = mappings[i].file_offset;
1056 file_mapping->protection = mappings[i].protection;
1057 file_mapping->next = NULL;
1058 *tptr = file_mapping;
1059 tptr = &(file_mapping->next);
1060 }
1061 shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
1062 return KERN_SUCCESS;
1063
1064 }
1065
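/*
 * Illustrative sketch (not part of the kernel build): the boundary check in
 * lsf_load() above rejects any extent whose end would reach
 * SHARED_ALTERNATE_LOAD_BASE when the library is not being placed in the
 * alternate area.  Reduced to plain arithmetic with hypothetical names:
 */
#if 0
static int
demo_crosses_alternate_boundary(unsigned long mapping_offset, unsigned long size,
    unsigned long base_address, unsigned long region_mask,
    unsigned long alternate_base)
{
	unsigned long start = (mapping_offset & region_mask) + base_address;
	unsigned long end = start + size;

	return end >= alternate_base;	/* nonzero: the mapping must be denied */
}
#endif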
1066
1067 /* finds the file_object extent list in the shared memory hash table */
1068 /* If one is found the associated extents in shared memory are deallocated */
1069 /* and the extent list is freed */
1070
1071 static void
1072 lsf_unload(
1073 void *file_object,
1074 vm_offset_t base_offset,
1075 shared_region_task_mappings_t sm_info)
1076 {
1077 load_struct_t *entry;
1078 ipc_port_t local_map;
1079 loaded_mapping_t *map_ele;
1080 loaded_mapping_t *back_ptr;
1081
1082 entry = lsf_hash_delete(file_object, base_offset, sm_info);
1083 if(entry) {
1084 map_ele = entry->mappings;
1085 while(map_ele != NULL) {
1086 if(map_ele->protection & VM_PROT_COW) {
1087 local_map = (ipc_port_t)sm_info->data_region;
1088 } else {
1089 local_map = (ipc_port_t)sm_info->text_region;
1090 }
1091 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
1092 ->backing.map, entry->base_address +
1093 map_ele->mapping_offset,
1094 map_ele->size);
1095 back_ptr = map_ele;
1096 map_ele = map_ele->next;
1097 zfree(lsf_zone, (vm_offset_t)back_ptr);
1098 shared_file_available_hash_ele++;
1099 }
1100 zfree(lsf_zone, (vm_offset_t)entry);
1101 shared_file_available_hash_ele++;
1102 }
1103 }
1104
1105 /* returns an integer from 0 to 100 representing the mapping pool percent full */
1106 unsigned int
1107 lsf_mapping_pool_gauge(void)
1108 {
1109 return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
1110 }
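/*
 * Illustrative sketch (not part of the kernel build): the gauge above is the
 * zone's allocated bytes (element count times element size) expressed as a
 * percentage of the zone's maximum size.  A hypothetical stand-alone helper:
 */
#if 0
static unsigned int
demo_pool_gauge(unsigned long elem_count, unsigned long elem_size,
    unsigned long max_size)
{
	if (max_size == 0)
		return 0;			/* avoid dividing by zero */
	return (unsigned int)((elem_count * elem_size * 100UL) / max_size);
}
#endif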