1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 *
24 * File: vm/vm_shared_memory_server.c
25 * Author: Chris Youngworth
26 *
27 * Support routines for an in-kernel shared memory allocator
28 */
29
30 #include <ipc/ipc_port.h>
31 #include <kern/thread.h>
32 #include <kern/zalloc.h>
33 #include <mach/kern_return.h>
34 #include <mach/vm_inherit.h>
35 #include <machine/cpu_capabilities.h>
36 #include <vm/vm_kern.h>
37 #include <vm/vm_map.h>
38 #include <vm/vm_page.h>
39
40 #include <mach/shared_memory_server.h>
41 #include <vm/vm_shared_memory_server.h>
42
43 /* forward declarations */
44 static kern_return_t
45 shared_file_init(
46 ipc_port_t *shared_text_region_handle,
47 vm_size_t text_region_size,
48 ipc_port_t *shared_data_region_handle,
49 vm_size_t data_region_size,
50 vm_offset_t *shared_file_mapping_array);
51
52 static load_struct_t *
53 lsf_hash_lookup(
54 queue_head_t *hash_table,
55 void *file_object,
56 vm_offset_t recognizableOffset,
57 int size,
58 boolean_t alternate,
59 shared_region_task_mappings_t sm_info);
60
61 static load_struct_t *
62 lsf_hash_delete(
63 void *file_object,
64 vm_offset_t base_offset,
65 shared_region_task_mappings_t sm_info);
66
67 static void
68 lsf_hash_insert(
69 load_struct_t *entry,
70 shared_region_task_mappings_t sm_info);
71
72 static kern_return_t
73 lsf_load(
74 vm_offset_t mapped_file,
75 vm_size_t mapped_file_size,
76 vm_offset_t *base_address,
77 sf_mapping_t *mappings,
78 int map_cnt,
79 void *file_object,
80 int flags,
81 shared_region_task_mappings_t sm_info);
82
83 static void
84 lsf_unload(
85 void *file_object,
86 vm_offset_t base_offset,
87 shared_region_task_mappings_t sm_info);
88
89
90 #define load_file_hash(file_object, size) \
91 ((((natural_t)file_object) & 0xffffff) % size)
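/*
 * Illustrative example (added note, values assumed): the hash keeps the low
 * 24 bits of the file object pointer and reduces them modulo the bucket
 * count.  With a 2048-bucket table, a file object at 0x01234560 masks to
 * 0x234560 and lands in bucket 0x234560 % 2048 == 0x560 (1376).
 */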
92
93 /* Implementation */
94 vm_offset_t shared_file_text_region;
95 vm_offset_t shared_file_data_region;
96
97 ipc_port_t shared_text_region_handle;
98 ipc_port_t shared_data_region_handle;
99 vm_offset_t shared_file_mapping_array = 0;
100
101 shared_region_mapping_t default_environment_shared_regions = NULL;
102 static decl_mutex_data(,default_regions_list_lock_data)
103
104 #define default_regions_list_lock() \
105 mutex_lock(&default_regions_list_lock_data)
106 #define default_regions_list_lock_try() \
107 mutex_try(&default_regions_list_lock_data)
108 #define default_regions_list_unlock() \
109 mutex_unlock(&default_regions_list_lock_data)
110
111
112 ipc_port_t sfma_handle = NULL;
113 zone_t lsf_zone;
114
115 int shared_file_available_hash_ele;
116
117 /* com region support */
118 ipc_port_t com_region_handle = NULL;
119 vm_map_t com_region_map = NULL;
120 vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH;
121 shared_region_mapping_t com_mapping_resource = NULL;
122
123 #define GLOBAL_COM_REGION_BASE _COMM_PAGE_BASE_ADDRESS
124
125 /* Called for the non-default, private-branch shared region support. */
126 /* The system default fields fs_base and system are not relevant */
127 /* here, as the system default flag is not set. */
128 kern_return_t
129 shared_file_create_system_region(
130 shared_region_mapping_t *shared_region)
131 {
132 ipc_port_t text_handle;
133 ipc_port_t data_handle;
134 long text_size;
135 long data_size;
136 vm_offset_t mapping_array;
137 kern_return_t kret;
138
139 text_size = 0x10000000;
140 data_size = 0x10000000;
141
142 kret = shared_file_init(&text_handle,
143 text_size, &data_handle, data_size, &mapping_array);
144 if(kret)
145 return kret;
146 kret = shared_region_mapping_create(text_handle,
147 text_size, data_handle, data_size, mapping_array,
148 GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
149 SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
150 if(kret)
151 return kret;
152 (*shared_region)->flags = 0;
153 if(com_mapping_resource) {
154 shared_region_mapping_ref(com_mapping_resource);
155 (*shared_region)->next = com_mapping_resource;
156 }
157
158 return KERN_SUCCESS;
159 }
160
161 /*
162  * Load a new default for a specified environment into the default shared
163  * regions list. If a previous default exists for the environment specification,
164  * it is returned along with its reference. It is expected that the new
165  * system region structure passes a reference.
166 */
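/*
 * Added sketch (not in the original source): the default regions list is a
 * singly linked list headed by default_environment_shared_regions and
 * chained through default_env_list, keyed by the (fs_base, system) pair:
 *
 *   default_environment_shared_regions
 *       -> [fs_base A, system 1] -> [fs_base A, system 2] -> NULL
 *
 * update_default_shared_region() either replaces the matching node
 * (returning the stale one to the caller) or appends new_system_region at
 * the tail and returns NULL.
 */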
167
168 shared_region_mapping_t
169 update_default_shared_region(
170 shared_region_mapping_t new_system_region)
171 {
172 shared_region_mapping_t old_system_region;
173 unsigned int fs_base;
174 unsigned int system;
175
176 fs_base = new_system_region->fs_base;
177 system = new_system_region->system;
178 new_system_region->flags |= SHARED_REGION_SYSTEM;
179 default_regions_list_lock();
180 old_system_region = default_environment_shared_regions;
181
182 if((old_system_region != NULL) &&
183 (old_system_region->fs_base == fs_base) &&
184 (old_system_region->system == system)) {
185 new_system_region->default_env_list =
186 old_system_region->default_env_list;
187 default_environment_shared_regions = new_system_region;
188 default_regions_list_unlock();
189 old_system_region->flags |= SHARED_REGION_STALE;
190 return old_system_region;
191 }
192 if (old_system_region) {
193 while(old_system_region->default_env_list != NULL) {
194 if((old_system_region->default_env_list->fs_base == fs_base) &&
195 (old_system_region->default_env_list->system == system)) {
196 new_system_region->default_env_list =
197 old_system_region->default_env_list
198 ->default_env_list;
199 old_system_region->default_env_list =
200 new_system_region;
201 default_regions_list_unlock();
202 old_system_region->flags |= SHARED_REGION_STALE;
203 return old_system_region;
204 }
205 old_system_region = old_system_region->default_env_list;
206 }
207 }
208 /* If we get here, we are at the end of the system list and we */
209 /* did not find a pre-existing entry */
210 if(old_system_region) {
211 old_system_region->default_env_list = new_system_region;
212 } else {
213 default_environment_shared_regions = new_system_region;
214 }
215 default_regions_list_unlock();
216 return NULL;
217 }
218
219 /*
220 * lookup a system_shared_region for the environment specified. If one is
221 * found, it is returned along with a reference against the structure
222 */
223
224 shared_region_mapping_t
225 lookup_default_shared_region(
226 unsigned int fs_base,
227 unsigned int system)
228 {
229 shared_region_mapping_t system_region;
230 default_regions_list_lock();
231 system_region = default_environment_shared_regions;
232
233 while(system_region != NULL) {
234 if((system_region->fs_base == fs_base) &&
235 (system_region->system == system)) {
236 break;
237 }
238 system_region = system_region->default_env_list;
239 }
240 if(system_region)
241 shared_region_mapping_ref(system_region);
242 default_regions_list_unlock();
243 return system_region;
244 }
245
246 /*
247 * remove a system_region default if it appears in the default regions list.
248 * Drop a reference on removal.
249 */
250
251 __private_extern__ void
252 remove_default_shared_region_lock(
253 shared_region_mapping_t system_region,
254 int need_lock)
255 {
256 shared_region_mapping_t old_system_region;
257 unsigned int fs_base;
258 unsigned int system;
259
260 default_regions_list_lock();
261 old_system_region = default_environment_shared_regions;
262
263 if(old_system_region == NULL) {
264 default_regions_list_unlock();
265 return;
266 }
267
268 if (old_system_region == system_region) {
269 default_environment_shared_regions
270 = old_system_region->default_env_list;
271 old_system_region->flags |= SHARED_REGION_STALE;
272 shared_region_mapping_dealloc_lock(old_system_region,
273 need_lock);
274 default_regions_list_unlock();
275 return;
276 }
277
278 while(old_system_region->default_env_list != NULL) {
279 if(old_system_region->default_env_list == system_region) {
280 shared_region_mapping_t dead_region;
281 dead_region = old_system_region->default_env_list;
282 old_system_region->default_env_list =
283 old_system_region->default_env_list->default_env_list;
284 dead_region->flags |= SHARED_REGION_STALE;
285 shared_region_mapping_dealloc_lock(dead_region,
286 need_lock);
287 default_regions_list_unlock();
288 return;
289 }
290 old_system_region = old_system_region->default_env_list;
291 }
292 default_regions_list_unlock();
293 }
294
295 /*
296  * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
297 * the only caller. Remove this stub function and the corresponding symbol
298 * export for Merlot.
299 */
300 void
301 remove_default_shared_region(
302 shared_region_mapping_t system_region)
303 {
304 remove_default_shared_region_lock(system_region, 1);
305 }
306
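/*
 * Tears down the default environment list: marks every region on it stale,
 * drops the list's reference on each one, and leaves the list empty.
 */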
307 void
308 remove_all_shared_regions()
309 {
310 shared_region_mapping_t system_region;
311 shared_region_mapping_t next_system_region;
312
313 default_regions_list_lock();
314 system_region = default_environment_shared_regions;
315
316 if(system_region == NULL) {
317 default_regions_list_unlock();
318 return;
319 }
320
321 while(system_region != NULL) {
322 next_system_region = system_region->default_env_list;
323 system_region->flags |= SHARED_REGION_STALE;
324 shared_region_mapping_dealloc(system_region);
325 system_region = next_system_region;
326 }
327 default_environment_shared_regions = NULL;
328 default_regions_list_unlock();
329 }
330
331 /* shared_com_boot_time_init initializes the common page shared data and */
332 /* text region. This region is semi-independent of the split libraries, */
333 /* so its policies have to be handled differently by the code that */
334 /* manipulates the mapping of shared region environments. However, */
335 /* the shared region delivery system supports both. */
336 void shared_com_boot_time_init(void)
337 {
338 kern_return_t kret;
339 vm_named_entry_t named_entry;
340
341 if(com_region_handle) {
342 panic("shared_com_boot_time_init: "
343 "com_region_handle already set\n");
344 }
345
346 /* create com page region */
347 if(kret = vm_region_object_create(kernel_map,
348 com_region_size,
349 &com_region_handle)) {
350 panic("shared_com_boot_time_init: "
351 "unable to create comm page\n");
352 return;
353 }
354 /* now export the underlying region/map */
355 named_entry = (vm_named_entry_t)com_region_handle->ip_kobject;
356 com_region_map = named_entry->backing.map;
357 /* wrap the com region in its own shared file mapping structure */
358 shared_region_mapping_create(com_region_handle,
359 com_region_size, NULL, 0, 0,
360 GLOBAL_COM_REGION_BASE, &com_mapping_resource,
361 0, 0);
362
363 }
364
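/*
 * Boot-time initialization for the (fs_base, system) environment: creates
 * the shared text and data regions, wraps them in a shared_region_mapping,
 * installs that mapping as the default for the environment, chains the
 * comm page region behind it, and makes it the current task's shared region.
 */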
365 void shared_file_boot_time_init(
366 unsigned int fs_base,
367 unsigned int system)
368 {
369 long shared_text_region_size;
370 long shared_data_region_size;
371 shared_region_mapping_t new_system_region;
372 shared_region_mapping_t old_default_env;
373
374 shared_text_region_size = 0x10000000;
375 shared_data_region_size = 0x10000000;
376 shared_file_init(&shared_text_region_handle,
377 shared_text_region_size, &shared_data_region_handle,
378 shared_data_region_size, &shared_file_mapping_array);
379
380 shared_region_mapping_create(shared_text_region_handle,
381 shared_text_region_size, shared_data_region_handle,
382 shared_data_region_size, shared_file_mapping_array,
383 GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
384 SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
385
386 new_system_region->fs_base = fs_base;
387 new_system_region->system = system;
388 new_system_region->flags = SHARED_REGION_SYSTEM;
389
390 /* grab an extra reference for the caller */
391 /* remember to grab before call to update */
392 shared_region_mapping_ref(new_system_region);
393 old_default_env = update_default_shared_region(new_system_region);
394 /* hold an extra reference because these are the system */
395 /* shared regions. */
396 if(old_default_env)
397 shared_region_mapping_dealloc(old_default_env);
398 if(com_mapping_resource == NULL) {
399 shared_com_boot_time_init();
400 }
401 shared_region_mapping_ref(com_mapping_resource);
402 new_system_region->next = com_mapping_resource;
403 vm_set_shared_region(current_task(), new_system_region);
404 }
405
406
407 /* called at boot time, allocates two regions, each 256 megs in size */
408 /* these regions are later mapped into task address spaces, allowing */
409 /* tasks to share the contents of the regions. shared_file_init is part of */
410 /* a shared_memory_server which not only allocates the backing maps */
411 /* but also coordinates requests for space. */
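/* Layout note (derived from the code below): the top data_region_size>>9 */
/* bytes of the data region (512K of a 256M region) are reserved for the */
/* shared file mapping table. The table is built in kernel_map (and wired */
/* as needed), then mapped read-only into the shared data region at */
/* table_mapping_address so that clients can consult it. */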
412
413
414 static kern_return_t
415 shared_file_init(
416 ipc_port_t *shared_text_region_handle,
417 vm_size_t text_region_size,
418 ipc_port_t *shared_data_region_handle,
419 vm_size_t data_region_size,
420 vm_offset_t *mapping_array)
421 {
422 vm_offset_t aligned_address;
423 shared_file_info_t *sf_head;
424 vm_offset_t table_mapping_address;
425 int data_table_size;
426 int hash_size;
427 int i;
428 kern_return_t kret;
429
430 vm_object_t buf_object;
431 vm_map_entry_t entry;
432 vm_size_t alloced;
433 vm_offset_t b;
434 vm_page_t p;
435
436 /* create text and data maps/regions */
437 if(kret = vm_region_object_create(kernel_map,
438 text_region_size,
439 shared_text_region_handle)) {
440
441 return kret;
442 }
443 if(kret = vm_region_object_create(kernel_map,
444 data_region_size,
445 shared_data_region_handle)) {
446 ipc_port_release_send(*shared_text_region_handle);
447 return kret;
448 }
449
450 data_table_size = data_region_size >> 9;
451 hash_size = data_region_size >> 14;
452 table_mapping_address = data_region_size - data_table_size;
453
454 if(shared_file_mapping_array == 0) {
455 buf_object = vm_object_allocate(data_table_size);
456
457 if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
458 data_table_size, 0, &entry) != KERN_SUCCESS) {
459 panic("shared_file_init: no space");
460 }
461 *mapping_array = shared_file_mapping_array;
462 vm_map_unlock(kernel_map);
463 entry->object.vm_object = buf_object;
464 entry->offset = 0;
465
466 for (b = *mapping_array, alloced = 0;
467 alloced < (hash_size +
468 round_page_32(sizeof(struct sf_mapping)));
469 alloced += PAGE_SIZE, b += PAGE_SIZE) {
470 vm_object_lock(buf_object);
471 p = vm_page_alloc(buf_object, alloced);
472 if (p == VM_PAGE_NULL) {
473 panic("shared_file_init: no space");
474 }
475 p->busy = FALSE;
476 vm_object_unlock(buf_object);
477 pmap_enter(kernel_pmap, b, p->phys_page,
478 VM_PROT_READ | VM_PROT_WRITE,
479 ((unsigned int)(p->object->wimg_bits))
480 & VM_WIMG_MASK,
481 TRUE);
482 }
483
484
485 /* initialize loaded file array */
486 sf_head = (shared_file_info_t *)*mapping_array;
487 sf_head->hash = (queue_head_t *)
488 (((int)*mapping_array) +
489 sizeof(struct shared_file_info));
490 sf_head->hash_size = hash_size/sizeof(queue_head_t);
491 mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
492 sf_head->hash_init = FALSE;
493
494
495 mach_make_memory_entry(kernel_map, &data_table_size,
496 *mapping_array, VM_PROT_READ, &sfma_handle,
497 NULL);
498
499 if (vm_map_wire(kernel_map, *mapping_array,
500 *mapping_array +
501 (hash_size + round_page_32(sizeof(struct sf_mapping))),
502 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
503 panic("shared_file_init: No memory for data table");
504 }
505
506 lsf_zone = zinit(sizeof(struct load_file_ele),
507 data_table_size -
508 (hash_size + round_page_32(sizeof(struct sf_mapping))),
509 0, "load_file_server");
510
511 zone_change(lsf_zone, Z_EXHAUST, TRUE);
512 zone_change(lsf_zone, Z_COLLECT, FALSE);
513 zone_change(lsf_zone, Z_EXPAND, FALSE);
514 zone_change(lsf_zone, Z_FOREIGN, TRUE);
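		/*
		 * Note on the zone flags above: lsf_zone never grows on its
		 * own (Z_EXPAND is off and Z_EXHAUST is on, so zalloc()
		 * simply fails when the pool is empty); it is fed only by
		 * zcram()ing wired pages from the mapping area (Z_FOREIGN),
		 * as done in copyin_shared_file().
		 */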
515
516 /* initialize the global default environment lock */
517 mutex_init(&default_regions_list_lock_data, ETAP_NO_TRACE);
518
519 } else {
520 *mapping_array = shared_file_mapping_array;
521 }
522
523 vm_map(((vm_named_entry_t)
524 (*shared_data_region_handle)->ip_kobject)->backing.map,
525 &table_mapping_address,
526 data_table_size, 0, SHARED_LIB_ALIAS,
527 sfma_handle, 0, FALSE,
528 VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
529
	return KERN_SUCCESS;
530 }
531
532 /* A call made from user space, copyin_shared_file requires the user to */
533 /* provide the address and size of a mapped file, the full path name of */
534 /* that file and a list of offsets to be mapped into shared memory. */
535 /* By requiring that the file be pre-mapped, copyin_shared_file can */
536 /* guarantee that the file is neither deleted nor changed after the user */
537 /* begins the call. */
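/*
 * Illustrative calling sequence (a sketch; the user-facing entry point is
 * not defined in this file): a user process mmap()s the library file, then
 * issues the load request -- in this era of xnu, via the BSD
 * load_shared_file() system call -- passing the mapped address and size, an
 * array of sf_mapping_t extents, and a flags word such as
 * ALTERNATE_LOAD_SITE.  That request is what funnels into
 * copyin_shared_file() below.
 */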
538
539 kern_return_t
540 copyin_shared_file(
541 vm_offset_t mapped_file,
542 vm_size_t mapped_file_size,
543 vm_offset_t *base_address,
544 int map_cnt,
545 sf_mapping_t *mappings,
546 memory_object_control_t file_control,
547 shared_region_task_mappings_t sm_info,
548 int *flags)
549 {
550 vm_object_t file_object;
551 vm_map_entry_t entry;
552 shared_file_info_t *shared_file_header;
553 load_struct_t *file_entry;
554 loaded_mapping_t *file_mapping;
555 boolean_t alternate;
556 int i;
557 kern_return_t ret;
558
559 /* Wire the hash entry pool only as needed; since we are the only */
560 /* users, we take a few liberties with how we populate our */
561 /* zone. */
562 static int allocable_hash_pages;
563 static vm_offset_t hash_cram_address;
564
565
566 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
567
568 mutex_lock(&shared_file_header->lock);
569
570 /* If this is the first call to this routine, take the opportunity */
571 /* to initialize the hash table which will be used to look-up */
572 /* mappings based on the file object */
573
574 if(shared_file_header->hash_init == FALSE) {
575 vm_size_t hash_table_size;
576 vm_size_t hash_table_offset;
577
578 hash_table_size = (shared_file_header->hash_size)
579 * sizeof(struct queue_entry);
580 hash_table_offset = hash_table_size +
581 round_page_32(sizeof(struct sf_mapping));
582 for (i = 0; i < shared_file_header->hash_size; i++)
583 queue_init(&shared_file_header->hash[i]);
584
585 allocable_hash_pages =
586 ((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
587 hash_cram_address =
588 sm_info->region_mappings + hash_table_offset;
589 shared_file_available_hash_ele = 0;
590
591 shared_file_header->hash_init = TRUE;
592 }
593
594 if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
595 int cram_size;
596
597 cram_size = allocable_hash_pages > 3 ?
598 3 : allocable_hash_pages;
599 allocable_hash_pages -= cram_size;
600 cram_size = cram_size * PAGE_SIZE;
601 if (vm_map_wire(kernel_map, hash_cram_address,
602 hash_cram_address+cram_size,
603 VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
604 panic("shared_file_init: No memory for data table");
605 }
606 zcram(lsf_zone, hash_cram_address, cram_size);
607 shared_file_available_hash_ele
608 += cram_size/sizeof(struct load_file_ele);
609 hash_cram_address += cram_size;
610 }
611
612
613 /* Find the entry in the map associated with the current mapping */
614 /* of the file object */
615 file_object = memory_object_control_to_vm_object(file_control);
616 if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
617 vm_object_t mapped_object;
618 if(entry->is_sub_map) {
619 mutex_unlock(&shared_file_header->lock);
620 return KERN_INVALID_ADDRESS;
621 }
622 mapped_object = entry->object.vm_object;
623 while(mapped_object->shadow != NULL) {
624 mapped_object = mapped_object->shadow;
625 }
626 /* check to see that the file object passed is indeed the */
627 /* same as the mapped object passed */
628 if(file_object != mapped_object) {
629 if(sm_info->flags & SHARED_REGION_SYSTEM) {
630 mutex_unlock(&shared_file_header->lock);
631 return KERN_PROTECTION_FAILURE;
632 } else {
633 file_object = mapped_object;
634 }
635 }
636 } else {
637 mutex_unlock(&shared_file_header->lock);
638 return KERN_INVALID_ADDRESS;
639 }
640
641 alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;
642
643 if (file_entry = lsf_hash_lookup(shared_file_header->hash,
644 (void *) file_object, mappings[0].file_offset, shared_file_header->hash_size,
645 alternate, sm_info)) {
646 /* The file is loaded; check the load manifest for an exact match. */
647 /* We simplify by requiring that the elements be the same */
648 /* size and in the same order rather than checking for */
649 /* semantic equivalence. */
650
651 /* If the file is being loaded in the alternate */
652 /* area, one load to the alternate is allowed per mapped */
653 /* object; the base address is passed back to the */
654 /* caller and the mappings field is filled in. If the */
655 /* caller does not pass the precise map_cnt */
656 /* and the alternate is already loaded, an error */
657 /* is returned. */
658 i = 0;
659 file_mapping = file_entry->mappings;
660 while(file_mapping != NULL) {
661 if(i>=map_cnt) {
662 mutex_unlock(&shared_file_header->lock);
663 return KERN_INVALID_ARGUMENT;
664 }
665 if(((mappings[i].mapping_offset)
666 & SHARED_DATA_REGION_MASK) !=
667 file_mapping->mapping_offset ||
668 mappings[i].size !=
669 file_mapping->size ||
670 mappings[i].file_offset !=
671 file_mapping->file_offset ||
672 mappings[i].protection !=
673 file_mapping->protection) {
674 break;
675 }
676 file_mapping = file_mapping->next;
677 i++;
678 }
679 if(i!=map_cnt) {
680 mutex_unlock(&shared_file_header->lock);
681 return KERN_INVALID_ARGUMENT;
682 }
683 *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
684 + file_entry->base_address;
685 *flags = SF_PREV_LOADED;
686 mutex_unlock(&shared_file_header->lock);
687 return KERN_SUCCESS;
688 } else {
689 /* File is not loaded, lets attempt to load it */
690 ret = lsf_load(mapped_file, mapped_file_size, base_address,
691 mappings, map_cnt,
692 (void *)file_object,
693 *flags, sm_info);
694 *flags = 0;
695 if(ret == KERN_NO_SPACE) {
696 shared_region_mapping_t regions;
697 shared_region_mapping_t system_region;
698 regions = (shared_region_mapping_t)sm_info->self;
699 regions->flags |= SHARED_REGION_FULL;
700 system_region = lookup_default_shared_region(
701 regions->fs_base, regions->system);
702 if(system_region == regions) {
703 shared_region_mapping_t new_system_shared_regions;
704 shared_file_boot_time_init(
705 regions->fs_base, regions->system);
706 /* current task must stay with its current */
707 /* regions, drop count on system_shared_region */
708 /* and put back our original set */
709 vm_get_shared_region(current_task(),
710 &new_system_shared_regions);
711 shared_region_mapping_dealloc_lock(
712 new_system_shared_regions, 0);
713 vm_set_shared_region(current_task(), regions);
714 }
715 if(system_region != NULL) {
716 shared_region_mapping_dealloc_lock(
717 system_region, 0);
718 }
719 }
720 mutex_unlock(&shared_file_header->lock);
721 return ret;
722 }
723 }
724
725 /* A hash lookup function for the list of loaded files in */
726 /* shared_memory_server space. */
727
728 static load_struct_t *
729 lsf_hash_lookup(
730 queue_head_t *hash_table,
731 void *file_object,
732 vm_offset_t recognizableOffset,
733 int size,
734 boolean_t alternate,
735 shared_region_task_mappings_t sm_info)
736 {
737 register queue_t bucket;
738 load_struct_t *entry;
739 shared_region_mapping_t target_region;
740 int depth;
741
742 bucket = &(hash_table[load_file_hash((int)file_object, size)]);
743 for (entry = (load_struct_t *)queue_first(bucket);
744 !queue_end(bucket, &entry->links);
745 entry = (load_struct_t *)queue_next(&entry->links)) {
746
747 if ((entry->file_object == (int) file_object) &&
748 (entry->file_offset != recognizableOffset)) {
749 }
750 if ((entry->file_object == (int)file_object) &&
751 (entry->file_offset == recognizableOffset)) {
752 target_region = (shared_region_mapping_t)sm_info->self;
753 depth = target_region->depth;
754 while(target_region) {
755 if((!(sm_info->self)) ||
756 ((target_region == entry->regions_instance) &&
757 (target_region->depth >= entry->depth))) {
758 if(alternate) {
759 if (entry->base_address >=
760 sm_info->alternate_base)
761 return entry;
762 } else {
763 if (entry->base_address <
764 sm_info->alternate_base)
765 return entry;
766 }
767 }
768 if(target_region->object_chain) {
769 target_region = (shared_region_mapping_t)
770 target_region->object_chain->object_chain_region;
771 depth = target_region->object_chain->depth;
772 } else {
773 target_region = NULL;
774 }
775 }
776 }
777 }
778
779 return (load_struct_t *)0;
780 }
781
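/*
 * Walks every bucket of the loaded-file hash table and lsf_unload()s each
 * entry whose regions_instance matches the given region; used when a shared
 * region instance is being torn down.  Takes the header lock only when
 * need_lock is set.
 */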
782 __private_extern__ load_struct_t *
783 lsf_remove_regions_mappings_lock(
784 shared_region_mapping_t region,
785 shared_region_task_mappings_t sm_info,
786 int need_lock)
787 {
788 int i;
789 register queue_t bucket;
790 shared_file_info_t *shared_file_header;
791 load_struct_t *entry;
792 load_struct_t *next_entry;
793 load_struct_t *prev_entry;
794
795 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
796
797 if (need_lock)
798 mutex_lock(&shared_file_header->lock);
799 if(shared_file_header->hash_init == FALSE) {
800 if (need_lock)
801 mutex_unlock(&shared_file_header->lock);
802 return NULL;
803 }
804 for(i = 0; i<shared_file_header->hash_size; i++) {
805 bucket = &shared_file_header->hash[i];
806 for (entry = (load_struct_t *)queue_first(bucket);
807 !queue_end(bucket, &entry->links);) {
808 next_entry = (load_struct_t *)queue_next(&entry->links);
809 if(region == entry->regions_instance) {
810 lsf_unload((void *)entry->file_object,
811 entry->base_address, sm_info);
812 }
813 entry = next_entry;
814 }
815 }
816 if (need_lock)
817 mutex_unlock(&shared_file_header->lock);
	return NULL;
818 }
819
820 /*
821  * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
822 * only caller. Remove this stub function and the corresponding symbol
823 * export for Merlot.
824 */
825 load_struct_t *
826 lsf_remove_regions_mappings(
827 shared_region_mapping_t region,
828 shared_region_task_mappings_t sm_info)
829 {
830 return lsf_remove_regions_mappings_lock(region, sm_info, 1);
831 }
832
833 /* Removes a map_list (a list of loaded extents) for a file from */
834 /* the loaded file hash table. */
835
836 static load_struct_t *
837 lsf_hash_delete(
838 void *file_object,
839 vm_offset_t base_offset,
840 shared_region_task_mappings_t sm_info)
841 {
842 register queue_t bucket;
843 shared_file_info_t *shared_file_header;
844 load_struct_t *entry;
845 load_struct_t *prev_entry;
846
847 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
848
849 bucket = &shared_file_header->hash
850 [load_file_hash((int)file_object, shared_file_header->hash_size)];
851
852 for (entry = (load_struct_t *)queue_first(bucket);
853 !queue_end(bucket, &entry->links);
854 entry = (load_struct_t *)queue_next(&entry->links)) {
855 if((!(sm_info->self)) || ((shared_region_mapping_t)
856 sm_info->self == entry->regions_instance)) {
857 if ((entry->file_object == (int) file_object) &&
858 (entry->base_address == base_offset)) {
859 queue_remove(bucket, entry,
860 load_struct_ptr_t, links);
861 return entry;
862 }
863 }
864 }
865
866 return (load_struct_t *)0;
867 }
868
869 /* Inserts a new map_list (a list of loaded file extents) into the */
870 /* server loaded file hash table. */
871
872 static void
873 lsf_hash_insert(
874 load_struct_t *entry,
875 shared_region_task_mappings_t sm_info)
876 {
877 shared_file_info_t *shared_file_header;
878
879 shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
880 queue_enter(&shared_file_header->hash
881 [load_file_hash(entry->file_object,
882 shared_file_header->hash_size)],
883 entry, load_struct_ptr_t, links);
884 }
885
886 /* Looks up the file type requested. If it is already loaded and the */
887 /* file extents are an exact match, returns success. If it is not */
888 /* loaded, attempts to load the file extents at the given offsets; */
889 /* if any extent fails to load, or if the file was already loaded */
890 /* in a different configuration, lsf_load fails. */
891
892 static kern_return_t
893 lsf_load(
894 vm_offset_t mapped_file,
895 vm_size_t mapped_file_size,
896 vm_offset_t *base_address,
897 sf_mapping_t *mappings,
898 int map_cnt,
899 void *file_object,
900 int flags,
901 shared_region_task_mappings_t sm_info)
902 {
903
904 load_struct_t *entry;
905 vm_map_copy_t copy_object;
906 loaded_mapping_t *file_mapping;
907 loaded_mapping_t **tptr;
908 int i;
909 ipc_port_t local_map;
910 vm_offset_t original_alt_load_next;
911 vm_offset_t alternate_load_next;
912
913 entry = (load_struct_t *)zalloc(lsf_zone);
914 shared_file_available_hash_ele--;
915 entry->file_object = (int)file_object;
916 entry->mapping_cnt = map_cnt;
917 entry->mappings = NULL;
918 entry->links.prev = (queue_entry_t) 0;
919 entry->links.next = (queue_entry_t) 0;
920 entry->regions_instance = (shared_region_mapping_t)sm_info->self;
921 entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
922 entry->file_offset = mappings[0].file_offset;
923
924 lsf_hash_insert(entry, sm_info);
925 tptr = &(entry->mappings);
926
927
928 alternate_load_next = sm_info->alternate_next;
929 original_alt_load_next = alternate_load_next;
930 if (flags & ALTERNATE_LOAD_SITE) {
931 int max_loadfile_offset;
932
933 *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
934 sm_info->alternate_next;
935 max_loadfile_offset = 0;
936 for(i = 0; i<map_cnt; i++) {
937 if(((mappings[i].mapping_offset
938 & SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
939 max_loadfile_offset) {
940 max_loadfile_offset =
941 (mappings[i].mapping_offset
942 & SHARED_TEXT_REGION_MASK)
943 + mappings[i].size;
944 }
945 }
946 if((alternate_load_next + round_page_32(max_loadfile_offset)) >=
947 (sm_info->data_size - (sm_info->data_size>>9))) {
948 entry->base_address =
949 (*base_address) & SHARED_TEXT_REGION_MASK;
950 lsf_unload(file_object, entry->base_address, sm_info);
951
952 return KERN_NO_SPACE;
953 }
954 alternate_load_next += round_page_32(max_loadfile_offset);
955
956 } else {
957 if (((*base_address) & SHARED_TEXT_REGION_MASK) >
958 sm_info->alternate_base) {
959 entry->base_address =
960 (*base_address) & SHARED_TEXT_REGION_MASK;
961 lsf_unload(file_object, entry->base_address, sm_info);
962 return KERN_INVALID_ARGUMENT;
963 }
964 }
965
966 entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;
967
968 // Sanity check the mappings -- make sure we don't stray across the
969 // alternate boundary. If any bit of a library that we're not trying
970 // to load in the alternate load space strays across that boundary,
971 // return KERN_INVALID_ARGUMENT immediately so that the caller can
972 // try to load it in the alternate shared area. We do this to avoid
973 // a nasty case: if a library tries to load so that it crosses the
974 // boundary, it'll occupy a bit of the alternate load area without
975 // the kernel being aware. When loads into the alternate load area
976 // at the first free address are tried, the load will fail.
977 // Thus, a single library straddling the boundary causes all sliding
978 // libraries to fail to load. This check will avoid such a case.
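	// Illustrative numbers (assumed, not taken from this file): if
	// SHARED_ALTERNATE_LOAD_BASE were 0x09000000, a read-only mapping at
	// text-region offset 0x08ff0000 with size 0x00020000 would end at
	// 0x09010000 and be rejected here, so the caller could retry the
	// load in the alternate area.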
979
980 if (!(flags & ALTERNATE_LOAD_SITE)) {
981 for (i = 0; i<map_cnt;i++) {
982 vm_offset_t region_mask;
983 vm_address_t region_start;
984 vm_address_t region_end;
985
986 if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
987 // mapping offsets are relative to start of shared segments.
988 region_mask = SHARED_TEXT_REGION_MASK;
989 region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
990 region_end = (mappings[i].size + region_start);
991 if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
992 // No library is permitted to load so any bit of it is in the
993 // shared alternate space. If they want it loaded, they can put
994 // it in the alternate space explicitly.
995 printf("Library trying to load across alternate shared region boundary -- denied!\n");
996 lsf_unload(file_object, entry->base_address, sm_info);
997 return KERN_INVALID_ARGUMENT;
998 }
999 } else {
1000 // rw section?
1001 region_mask = SHARED_DATA_REGION_MASK;
1002 region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
1003 region_end = (mappings[i].size + region_start);
1004 if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
1005 printf("Library trying to load across alternate shared region boundary-- denied!\n");
1006 lsf_unload(file_object, entry->base_address, sm_info);
1007 return KERN_INVALID_ARGUMENT;
1008 }
1009 } // write?
1010 } // for
1011 } // if not alternate load site.
1012
1013 /* copyin mapped file data */
1014 for(i = 0; i<map_cnt; i++) {
1015 vm_offset_t target_address;
1016 vm_offset_t region_mask;
1017
1018 if(mappings[i].protection & VM_PROT_COW) {
1019 local_map = (ipc_port_t)sm_info->data_region;
1020 region_mask = SHARED_DATA_REGION_MASK;
1021 if((mappings[i].mapping_offset
1022 & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
1023 lsf_unload(file_object,
1024 entry->base_address, sm_info);
1025 return KERN_INVALID_ARGUMENT;
1026 }
1027 } else {
1028 region_mask = SHARED_TEXT_REGION_MASK;
1029 local_map = (ipc_port_t)sm_info->text_region;
1030 if(mappings[i].mapping_offset
1031 & GLOBAL_SHARED_SEGMENT_MASK) {
1032 lsf_unload(file_object,
1033 entry->base_address, sm_info);
1034 return KERN_INVALID_ARGUMENT;
1035 }
1036 }
1037 if(!(mappings[i].protection & VM_PROT_ZF)
1038 && ((mapped_file + mappings[i].file_offset +
1039 mappings[i].size) >
1040 (mapped_file + mapped_file_size))) {
1041 lsf_unload(file_object, entry->base_address, sm_info);
1042 return KERN_INVALID_ARGUMENT;
1043 }
1044 target_address = ((mappings[i].mapping_offset) & region_mask)
1045 + entry->base_address;
1046 if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
1047 ->backing.map, &target_address,
1048 mappings[i].size, FALSE)) {
1049 lsf_unload(file_object, entry->base_address, sm_info);
1050 return KERN_FAILURE;
1051 }
1052 target_address = ((mappings[i].mapping_offset) & region_mask)
1053 + entry->base_address;
1054 if(!(mappings[i].protection & VM_PROT_ZF)) {
1055 if(vm_map_copyin(current_map(),
1056 mapped_file + mappings[i].file_offset,
1057 round_page_32(mappings[i].size), FALSE, &copy_object)) {
1058 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
1059 ->backing.map, target_address, mappings[i].size);
1060 lsf_unload(file_object, entry->base_address, sm_info);
1061 return KERN_FAILURE;
1062 }
1063 if(vm_map_copy_overwrite(((vm_named_entry_t)
1064 local_map->ip_kobject)->backing.map, target_address,
1065 copy_object, FALSE)) {
1066 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
1067 ->backing.map, target_address, mappings[i].size);
1068 lsf_unload(file_object, entry->base_address, sm_info);
1069 return KERN_FAILURE;
1070 }
1071 }
1072 vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
1073 ->backing.map, target_address,
1074 round_page_32(target_address + mappings[i].size),
1075 (mappings[i].protection &
1076 (VM_PROT_READ | VM_PROT_EXECUTE)),
1077 TRUE);
1078 vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
1079 ->backing.map, target_address,
1080 round_page_32(target_address + mappings[i].size),
1081 (mappings[i].protection &
1082 (VM_PROT_READ | VM_PROT_EXECUTE)),
1083 FALSE);
1084 file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
1085 if(file_mapping == 0)
1086 panic("lsf_load: OUT OF MAPPINGS!");
1087 shared_file_available_hash_ele--;
1088 file_mapping->mapping_offset = (mappings[i].mapping_offset)
1089 & region_mask;
1090 file_mapping->size = mappings[i].size;
1091 file_mapping->file_offset = mappings[i].file_offset;
1092 file_mapping->protection = mappings[i].protection;
1093 file_mapping->next = NULL;
1094 *tptr = file_mapping;
1095 tptr = &(file_mapping->next);
1096 }
1097 shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
1098 return KERN_SUCCESS;
1099
1100 }
1101
1102
1103 /* finds the file_object extent list in the shared memory hash table */
1104 /* If one is found the associated extents in shared memory are deallocated */
1105 /* and the extent list is freed */
1106
1107 static void
1108 lsf_unload(
1109 void *file_object,
1110 vm_offset_t base_offset,
1111 shared_region_task_mappings_t sm_info)
1112 {
1113 load_struct_t *entry;
1114 ipc_port_t local_map;
1115 loaded_mapping_t *map_ele;
1116 loaded_mapping_t *back_ptr;
1117
1118 entry = lsf_hash_delete(file_object, base_offset, sm_info);
1119 if(entry) {
1120 map_ele = entry->mappings;
1121 while(map_ele != NULL) {
1122 if(map_ele->protection & VM_PROT_COW) {
1123 local_map = (ipc_port_t)sm_info->data_region;
1124 } else {
1125 local_map = (ipc_port_t)sm_info->text_region;
1126 }
1127 vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
1128 ->backing.map, entry->base_address +
1129 map_ele->mapping_offset,
1130 map_ele->size);
1131 back_ptr = map_ele;
1132 map_ele = map_ele->next;
1133 zfree(lsf_zone, (vm_offset_t)back_ptr);
1134 shared_file_available_hash_ele++;
1135 }
1136 zfree(lsf_zone, (vm_offset_t)entry);
1137 shared_file_available_hash_ele++;
1138 }
1139 }
1140
1141 /* returns an integer from 0 to 100 representing how full the mapping pool is */
1142 unsigned int
1143 lsf_mapping_pool_gauge()
1144 {
1145 return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
1146 }
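/*
 * Illustrative arithmetic (numbers assumed): with 1000 elements in use, a
 * 64-byte element size and a 1MB zone maximum, the gauge reports
 * (1000 * 64 * 100) / 1048576 = 6 (percent).
 */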