/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 *      File:   vm/vm_shared_memory_server.c
 *      Author: Chris Youngworth
 *
 *      Support routines for an in-kernel shared memory allocator
 */

#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <machine/cpu_capabilities.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

/* forward declarations */
static kern_return_t
shared_file_init(
        ipc_port_t      *shared_text_region_handle,
        vm_size_t       text_region_size,
        ipc_port_t      *shared_data_region_handle,
        vm_size_t       data_region_size,
        vm_offset_t     *shared_file_mapping_array);

static load_struct_t *
lsf_hash_lookup(
        queue_head_t    *hash_table,
        void            *file_object,
        vm_offset_t     recognizableOffset,
        int             size,
        boolean_t       alternate,
        shared_region_task_mappings_t   sm_info);

static load_struct_t *
lsf_hash_delete(
        void            *file_object,
        vm_offset_t     base_offset,
        shared_region_task_mappings_t   sm_info);

static void
lsf_hash_insert(
        load_struct_t   *entry,
        shared_region_task_mappings_t   sm_info);

static kern_return_t
lsf_load(
        vm_offset_t     mapped_file,
        vm_size_t       mapped_file_size,
        vm_offset_t     *base_address,
        sf_mapping_t    *mappings,
        int             map_cnt,
        void            *file_object,
        int             flags,
        shared_region_task_mappings_t   sm_info);

static void
lsf_unload(
        void            *file_object,
        vm_offset_t     base_offset,
        shared_region_task_mappings_t   sm_info);

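/*
 * Hash on the low 24 bits of the file object's address, modulo the number
 * of buckets; this same macro is used for every lookup into the
 * loaded-file hash table below.
 */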
#define load_file_hash(file_object, size) \
                ((((natural_t)file_object) & 0xffffff) % size)

/* Implementation */
vm_offset_t     shared_file_text_region;
vm_offset_t     shared_file_data_region;

ipc_port_t      shared_text_region_handle;
ipc_port_t      shared_data_region_handle;
vm_offset_t     shared_file_mapping_array = 0;

shared_region_mapping_t default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
                mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
                mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
                mutex_unlock(&default_regions_list_lock_data)


ipc_port_t      sfma_handle = NULL;
zone_t          lsf_zone;

int             shared_file_available_hash_ele;

/* com region support */
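/*
 * The "com" region is the commpage: a single small region, created once at
 * boot by shared_com_boot_time_init(), that is wrapped in its own
 * shared_region_mapping and chained (via ->next) onto every shared region
 * environment so that all tasks see it at the same fixed address.
 */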
ipc_port_t              com_region_handle = NULL;
vm_map_t                com_region_map = NULL;
vm_size_t               com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t com_mapping_resource = NULL;

#define GLOBAL_COM_REGION_BASE _COMM_PAGE_BASE_ADDRESS

/* Called for the non-default, private branch shared region support. */
/* The system default fields fs_base and system are not relevant here */
/* because the system default flag is not set. */
kern_return_t
shared_file_create_system_region(
        shared_region_mapping_t *shared_region)
{
        ipc_port_t      text_handle;
        ipc_port_t      data_handle;
        long            text_size;
        long            data_size;
        vm_offset_t     mapping_array;
        kern_return_t   kret;

        text_size = 0x10000000;
        data_size = 0x10000000;

        kret = shared_file_init(&text_handle,
                        text_size, &data_handle, data_size, &mapping_array);
        if(kret)
                return kret;
        kret = shared_region_mapping_create(text_handle,
                        text_size, data_handle, data_size, mapping_array,
                        GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
                        SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
        if(kret)
                return kret;
        (*shared_region)->flags = 0;
        if(com_mapping_resource) {
                shared_region_mapping_ref(com_mapping_resource);
                (*shared_region)->next = com_mapping_resource;
        }

        return KERN_SUCCESS;
}

/*
 * Load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification, it is returned along with its reference.  It is expected
 * that the new system region structure passes in a reference.
 */

shared_region_mapping_t
update_default_shared_region(
        shared_region_mapping_t new_system_region)
{
        shared_region_mapping_t old_system_region;
        unsigned int            fs_base;
        unsigned int            system;

        fs_base = new_system_region->fs_base;
        system = new_system_region->system;
        new_system_region->flags |= SHARED_REGION_SYSTEM;
        default_regions_list_lock();
        old_system_region = default_environment_shared_regions;

        if((old_system_region != NULL) &&
                        (old_system_region->fs_base == fs_base) &&
                        (old_system_region->system == system)) {
                new_system_region->default_env_list =
                        old_system_region->default_env_list;
                default_environment_shared_regions = new_system_region;
                default_regions_list_unlock();
                old_system_region->flags |= SHARED_REGION_STALE;
                return old_system_region;
        }
        if (old_system_region) {
                while(old_system_region->default_env_list != NULL) {
                        if((old_system_region->default_env_list->fs_base == fs_base) &&
                                        (old_system_region->default_env_list->system == system)) {
                                new_system_region->default_env_list =
                                        old_system_region->default_env_list
                                                ->default_env_list;
                                old_system_region->default_env_list =
                                        new_system_region;
                                default_regions_list_unlock();
                                old_system_region->flags |= SHARED_REGION_STALE;
                                return old_system_region;
                        }
                        old_system_region = old_system_region->default_env_list;
                }
        }
        /* If we get here, we are at the end of the system list and we */
        /* did not find a pre-existing entry */
        if(old_system_region) {
                old_system_region->default_env_list = new_system_region;
        } else {
                default_environment_shared_regions = new_system_region;
        }
        default_regions_list_unlock();
        return NULL;
}

/*
 * Look up a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */

shared_region_mapping_t
lookup_default_shared_region(
        unsigned int    fs_base,
        unsigned int    system)
{
        shared_region_mapping_t system_region;
        default_regions_list_lock();
        system_region = default_environment_shared_regions;

        while(system_region != NULL) {
                if((system_region->fs_base == fs_base) &&
                                (system_region->system == system)) {
                        break;
                }
                system_region = system_region->default_env_list;
        }
        if(system_region)
                shared_region_mapping_ref(system_region);
        default_regions_list_unlock();
        return system_region;
}

/*
 * Remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */

__private_extern__ void
remove_default_shared_region_lock(
        shared_region_mapping_t system_region,
        int                     need_lock)
{
        shared_region_mapping_t old_system_region;

        default_regions_list_lock();
        old_system_region = default_environment_shared_regions;

        if(old_system_region == NULL) {
                default_regions_list_unlock();
                return;
        }

        if (old_system_region == system_region) {
                default_environment_shared_regions
                        = old_system_region->default_env_list;
                old_system_region->flags |= SHARED_REGION_STALE;
                shared_region_mapping_dealloc_lock(old_system_region,
                                need_lock);
                default_regions_list_unlock();
                return;
        }

        while(old_system_region->default_env_list != NULL) {
                if(old_system_region->default_env_list == system_region) {
                        shared_region_mapping_t dead_region;
                        dead_region = old_system_region->default_env_list;
                        old_system_region->default_env_list =
                                old_system_region->default_env_list->default_env_list;
                        dead_region->flags |= SHARED_REGION_STALE;
                        shared_region_mapping_dealloc_lock(dead_region,
                                need_lock);
                        default_regions_list_unlock();
                        return;
                }
                old_system_region = old_system_region->default_env_list;
        }
        default_regions_list_unlock();
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
        shared_region_mapping_t system_region)
{
        remove_default_shared_region_lock(system_region, 1);
}

void
remove_all_shared_regions()
{
        shared_region_mapping_t system_region;
        shared_region_mapping_t next_system_region;

        default_regions_list_lock();
        system_region = default_environment_shared_regions;

        if(system_region == NULL) {
                default_regions_list_unlock();
                return;
        }

        while(system_region != NULL) {
                next_system_region = system_region->default_env_list;
                system_region->flags |= SHARED_REGION_STALE;
                shared_region_mapping_dealloc(system_region);
                system_region = next_system_region;
        }
        default_environment_shared_regions = NULL;
        default_regions_list_unlock();
}

/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi-independent of the split libs */
/* and so its policies have to be handled differently by the code that */
/* manipulates the mapping of shared region environments.  However, */
/* the shared region delivery system supports both. */
void
shared_com_boot_time_init()
{
        kern_return_t           kret;
        vm_named_entry_t        named_entry;

        if(com_region_handle) {
                panic("shared_com_boot_time_init: "
                        "com_region_handle already set\n");
        }

        /* create com page region */
        if((kret = vm_region_object_create(kernel_map,
                        com_region_size,
                        &com_region_handle)) != KERN_SUCCESS) {
                panic("shared_com_boot_time_init: "
                        "unable to create comm page\n");
                return;
        }
        /* now export the underlying region/map */
        named_entry = (vm_named_entry_t)com_region_handle->ip_kobject;
        com_region_map = named_entry->backing.map;
        /* wrap the com region in its own shared file mapping structure */
        shared_region_mapping_create(com_region_handle,
                com_region_size, NULL, 0, 0,
                GLOBAL_COM_REGION_BASE, &com_mapping_resource,
                0, 0);

}

void
shared_file_boot_time_init(
        unsigned int    fs_base,
        unsigned int    system)
{
        long                    shared_text_region_size;
        long                    shared_data_region_size;
        shared_region_mapping_t new_system_region;
        shared_region_mapping_t old_default_env;

        shared_text_region_size = 0x10000000;
        shared_data_region_size = 0x10000000;
        shared_file_init(&shared_text_region_handle,
                shared_text_region_size, &shared_data_region_handle,
                shared_data_region_size, &shared_file_mapping_array);

        shared_region_mapping_create(shared_text_region_handle,
                shared_text_region_size, shared_data_region_handle,
                shared_data_region_size, shared_file_mapping_array,
                GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
                SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);

        new_system_region->fs_base = fs_base;
        new_system_region->system = system;
        new_system_region->flags = SHARED_REGION_SYSTEM;

        /* grab an extra reference for the caller */
        /* remember to grab before call to update */
        shared_region_mapping_ref(new_system_region);
        old_default_env = update_default_shared_region(new_system_region);
        /* hold an extra reference because these are the system */
        /* shared regions. */
        if(old_default_env)
                shared_region_mapping_dealloc(old_default_env);
        if(com_mapping_resource == NULL) {
                shared_com_boot_time_init();
        }
        shared_region_mapping_ref(com_mapping_resource);
        new_system_region->next = com_mapping_resource;
        vm_set_shared_region(current_task(), new_system_region);
}


/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of */
/* a shared_memory_server which not only allocates the backing maps */
/* but also coordinates requests for space. */


static kern_return_t
shared_file_init(
        ipc_port_t      *shared_text_region_handle,
        vm_size_t       text_region_size,
        ipc_port_t      *shared_data_region_handle,
        vm_size_t       data_region_size,
        vm_offset_t     *mapping_array)
{
        shared_file_info_t      *sf_head;
        vm_offset_t             table_mapping_address;
        int                     data_table_size;
        int                     hash_size;
        kern_return_t           kret;

        vm_object_t             buf_object;
        vm_map_entry_t          entry;
        vm_size_t               alloced;
        vm_offset_t             b;
        vm_page_t               p;

        /* create text and data maps/regions */
        if((kret = vm_region_object_create(kernel_map,
                        text_region_size,
                        shared_text_region_handle)) != KERN_SUCCESS) {

                return kret;
        }
        if((kret = vm_region_object_create(kernel_map,
                        data_region_size,
                        shared_data_region_handle)) != KERN_SUCCESS) {
                ipc_port_release_send(*shared_text_region_handle);
                return kret;
        }

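        /*
         * Carve the loaded-file bookkeeping out of the top of the data
         * region: data_table_size is 1/512th of the region (512 KB for the
         * 256 MB region created at boot) and the hash table proper gets
         * 1/16384th of it (16 KB); table_mapping_address is where that table
         * is later mapped, read-only, into the data region itself.
         */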
        data_table_size = data_region_size >> 9;
        hash_size = data_region_size >> 14;
        table_mapping_address = data_region_size - data_table_size;

        if(shared_file_mapping_array == 0) {
                buf_object = vm_object_allocate(data_table_size);

                if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
                                data_table_size, 0, &entry) != KERN_SUCCESS) {
                        panic("shared_file_init: no space");
                }
                *mapping_array = shared_file_mapping_array;
                vm_map_unlock(kernel_map);
                entry->object.vm_object = buf_object;
                entry->offset = 0;

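                /*
                 * Hand-populate the start of the table: allocate pages in
                 * buf_object and enter them in the kernel pmap directly, so
                 * the hash table header and the sf_mapping area are resident
                 * before they are touched below.
                 */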
                for (b = *mapping_array, alloced = 0;
                                alloced < (hash_size +
                                        round_page_32(sizeof(struct sf_mapping)));
                                alloced += PAGE_SIZE, b += PAGE_SIZE) {
                        vm_object_lock(buf_object);
                        p = vm_page_alloc(buf_object, alloced);
                        if (p == VM_PAGE_NULL) {
                                panic("shared_file_init: no space");
                        }
                        p->busy = FALSE;
                        vm_object_unlock(buf_object);
                        pmap_enter(kernel_pmap, b, p->phys_page,
                                VM_PROT_READ | VM_PROT_WRITE,
                                ((unsigned int)(p->object->wimg_bits))
                                        & VM_WIMG_MASK,
                                TRUE);
                }


                /* initialize loaded file array */
                sf_head = (shared_file_info_t *)*mapping_array;
                sf_head->hash = (queue_head_t *)
                                (((int)*mapping_array) +
                                        sizeof(struct shared_file_info));
                sf_head->hash_size = hash_size/sizeof(queue_head_t);
                mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
                sf_head->hash_init = FALSE;


                mach_make_memory_entry(kernel_map, &data_table_size,
                        *mapping_array, VM_PROT_READ, &sfma_handle,
                        NULL);

                if (vm_map_wire(kernel_map, *mapping_array,
                        *mapping_array +
                        (hash_size + round_page_32(sizeof(struct sf_mapping))),
                        VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
                        panic("shared_file_init: No memory for data table");
                }

                lsf_zone = zinit(sizeof(struct load_file_ele),
                        data_table_size -
                        (hash_size + round_page_32(sizeof(struct sf_mapping))),
                        0, "load_file_server");

                zone_change(lsf_zone, Z_EXHAUST, TRUE);
                zone_change(lsf_zone, Z_COLLECT, FALSE);
                zone_change(lsf_zone, Z_EXPAND, FALSE);
                zone_change(lsf_zone, Z_FOREIGN, TRUE);

                /* initialize the global default environment lock */
                mutex_init(&default_regions_list_lock_data, ETAP_NO_TRACE);

        } else {
                *mapping_array = shared_file_mapping_array;
        }

        vm_map(((vm_named_entry_t)
                        (*shared_data_region_handle)->ip_kobject)->backing.map,
                &table_mapping_address,
                data_table_size, 0, SHARED_LIB_ALIAS,
                sfma_handle, 0, FALSE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

        return KERN_SUCCESS;
}

/* Called from user space, copyin_shared_file requires the caller to */
/* provide the address and size of a mapped file, the full path name of */
/* that file, and a list of offsets to be mapped into shared memory. */
/* By requiring that the file be pre-mapped, copyin_shared_file can */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call. */

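/*
 * Illustrative sketch (not part of the original source): a caller that has
 * already mapped the file into its own address space might describe a
 * two-segment library like this before calling copyin_shared_file().  The
 * offsets and sizes below are hypothetical.
 *
 *      sf_mapping_t map[2];
 *      map[0].mapping_offset = 0x00000000;     // text, lands in text region
 *      map[0].size           = 0x8000;
 *      map[0].file_offset    = 0;
 *      map[0].protection     = VM_PROT_READ | VM_PROT_EXECUTE;
 *      map[1].mapping_offset = 0x10000000;     // data, copy-on-write
 *      map[1].size           = 0x4000;
 *      map[1].file_offset    = 0x8000;
 *      map[1].protection     = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COW;
 *
 * lsf_load() below requires copy-on-write (data) mappings to fall in the
 * 0x10000000 segment and all other mappings to fall in the text segment.
 */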
kern_return_t
copyin_shared_file(
        vm_offset_t     mapped_file,
        vm_size_t       mapped_file_size,
        vm_offset_t     *base_address,
        int             map_cnt,
        sf_mapping_t    *mappings,
        memory_object_control_t file_control,
        shared_region_task_mappings_t   sm_info,
        int             *flags)
{
        vm_object_t             file_object;
        vm_map_entry_t          entry;
        shared_file_info_t      *shared_file_header;
        load_struct_t           *file_entry;
        loaded_mapping_t        *file_mapping;
        boolean_t               alternate;
        int                     i;
        kern_return_t           ret;

        /* wire hash entry pool only as needed, since we are the only */
        /* users, we take a few liberties with the population of our */
        /* zone. */
        static int              allocable_hash_pages;
        static vm_offset_t      hash_cram_address;


        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

        mutex_lock(&shared_file_header->lock);

        /* If this is the first call to this routine, take the opportunity */
        /* to initialize the hash table which will be used to look-up */
        /* mappings based on the file object */

        if(shared_file_header->hash_init == FALSE) {
                vm_size_t       hash_table_size;
                vm_size_t       hash_table_offset;

                hash_table_size = (shared_file_header->hash_size)
                                * sizeof(struct queue_entry);
                hash_table_offset = hash_table_size +
                                round_page_32(sizeof(struct sf_mapping));
                for (i = 0; i < shared_file_header->hash_size; i++)
                        queue_init(&shared_file_header->hash[i]);

                allocable_hash_pages =
                        ((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
                hash_cram_address =
                        sm_info->region_mappings + hash_table_offset;
                shared_file_available_hash_ele = 0;

                shared_file_header->hash_init = TRUE;
        }

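        /*
         * lsf_zone is exhaustible and does not grow on its own, so keep it
         * topped up by hand: whenever fewer than 20 hash elements remain,
         * wire up to three more pages of the reserved table space and cram
         * them into the zone.
         */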
        if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
                int cram_size;

                cram_size = allocable_hash_pages > 3 ?
                        3 : allocable_hash_pages;
                allocable_hash_pages -= cram_size;
                cram_size = cram_size * PAGE_SIZE;
                if (vm_map_wire(kernel_map, hash_cram_address,
                                hash_cram_address+cram_size,
                                VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
                        panic("shared_file_init: No memory for data table");
                }
                zcram(lsf_zone, hash_cram_address, cram_size);
                shared_file_available_hash_ele
                        += cram_size/sizeof(struct load_file_ele);
                hash_cram_address += cram_size;
        }


        /* Find the entry in the map associated with the current mapping */
        /* of the file object */
        file_object = memory_object_control_to_vm_object(file_control);
        if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
                vm_object_t     mapped_object;
                if(entry->is_sub_map) {
                        mutex_unlock(&shared_file_header->lock);
                        return KERN_INVALID_ADDRESS;
                }
                mapped_object = entry->object.vm_object;
                while(mapped_object->shadow != NULL) {
                        mapped_object = mapped_object->shadow;
                }
                /* check to see that the file object passed is indeed the */
                /* same as the mapped object passed */
                if(file_object != mapped_object) {
                        if(sm_info->flags & SHARED_REGION_SYSTEM) {
                                mutex_unlock(&shared_file_header->lock);
                                return KERN_PROTECTION_FAILURE;
                        } else {
                                file_object = mapped_object;
                        }
                }
        } else {
                mutex_unlock(&shared_file_header->lock);
                return KERN_INVALID_ADDRESS;
        }

        alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

        if ((file_entry = lsf_hash_lookup(shared_file_header->hash,
                        (void *) file_object, mappings[0].file_offset,
                        shared_file_header->hash_size,
                        alternate, sm_info)) != NULL) {
                /* The file is loaded; check the load manifest for an exact */
                /* match.  We simplify by requiring that the elements be the */
                /* same size and in the same order rather than checking for */
                /* semantic equivalence. */

                /* If the file is being loaded in the alternate area, one */
                /* load to the alternate is allowed per mapped object.  The */
                /* base address is passed back to the caller and the */
                /* mappings field is filled in.  If the caller does not pass */
                /* the precise mappings_cnt and the alternate is already */
                /* loaded, an error is returned. */
                i = 0;
                file_mapping = file_entry->mappings;
                while(file_mapping != NULL) {
                        if(i>=map_cnt) {
                                mutex_unlock(&shared_file_header->lock);
                                return KERN_INVALID_ARGUMENT;
                        }
                        if(((mappings[i].mapping_offset)
                                        & SHARED_DATA_REGION_MASK) !=
                                        file_mapping->mapping_offset ||
                                        mappings[i].size !=
                                        file_mapping->size ||
                                        mappings[i].file_offset !=
                                        file_mapping->file_offset ||
                                        mappings[i].protection !=
                                        file_mapping->protection) {
                                break;
                        }
                        file_mapping = file_mapping->next;
                        i++;
                }
                if(i!=map_cnt) {
                        mutex_unlock(&shared_file_header->lock);
                        return KERN_INVALID_ARGUMENT;
                }
                *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
                        + file_entry->base_address;
                *flags = SF_PREV_LOADED;
                mutex_unlock(&shared_file_header->lock);
                return KERN_SUCCESS;
        } else {
                /* File is not loaded; let's attempt to load it */
                ret = lsf_load(mapped_file, mapped_file_size, base_address,
                        mappings, map_cnt,
                        (void *)file_object,
                        *flags, sm_info);
                *flags = 0;
                if(ret == KERN_NO_SPACE) {
                        shared_region_mapping_t regions;
                        shared_region_mapping_t system_region;
                        regions = (shared_region_mapping_t)sm_info->self;
                        regions->flags |= SHARED_REGION_FULL;
                        system_region = lookup_default_shared_region(
                                regions->fs_base, regions->system);
                        if(system_region == regions) {
                                shared_region_mapping_t new_system_shared_regions;
                                shared_file_boot_time_init(
                                        regions->fs_base, regions->system);
                                /* current task must stay with its current */
                                /* regions, drop count on system_shared_region */
                                /* and put back our original set */
                                vm_get_shared_region(current_task(),
                                        &new_system_shared_regions);
                                shared_region_mapping_dealloc_lock(
                                        new_system_shared_regions, 0);
                                vm_set_shared_region(current_task(), regions);
                        }
                        if(system_region != NULL) {
                                shared_region_mapping_dealloc_lock(
                                        system_region, 0);
                        }
                }
                mutex_unlock(&shared_file_header->lock);
                return ret;
        }
}

/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space. */

static load_struct_t *
lsf_hash_lookup(
        queue_head_t    *hash_table,
        void            *file_object,
        vm_offset_t     recognizableOffset,
        int             size,
        boolean_t       alternate,
        shared_region_task_mappings_t   sm_info)
{
        register queue_t        bucket;
        load_struct_t           *entry;
        shared_region_mapping_t target_region;
        int                     depth;

        bucket = &(hash_table[load_file_hash((int)file_object, size)]);
        for (entry = (load_struct_t *)queue_first(bucket);
                        !queue_end(bucket, &entry->links);
                        entry = (load_struct_t *)queue_next(&entry->links)) {

                if ((entry->file_object == (int)file_object) &&
                                (entry->file_offset == recognizableOffset)) {
                        target_region = (shared_region_mapping_t)sm_info->self;
                        depth = target_region->depth;
                        while(target_region) {
                                if((!(sm_info->self)) ||
                                                ((target_region == entry->regions_instance) &&
                                                (target_region->depth >= entry->depth))) {
                                        if(alternate) {
                                                if (entry->base_address >=
                                                                sm_info->alternate_base)
                                                        return entry;
                                        } else {
                                                if (entry->base_address <
                                                                sm_info->alternate_base)
                                                        return entry;
                                        }
                                }
                                if(target_region->object_chain) {
                                        target_region = (shared_region_mapping_t)
                                                target_region->object_chain->object_chain_region;
                                        depth = target_region->object_chain->depth;
                                } else {
                                        target_region = NULL;
                                }
                        }
                }
        }

        return (load_struct_t *)0;
}

__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
        shared_region_mapping_t region,
        shared_region_task_mappings_t   sm_info,
        int need_lock)
{
        int                     i;
        register queue_t        bucket;
        shared_file_info_t      *shared_file_header;
        load_struct_t           *entry;
        load_struct_t           *next_entry;

        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

        if (need_lock)
                mutex_lock(&shared_file_header->lock);
        if(shared_file_header->hash_init == FALSE) {
                if (need_lock)
                        mutex_unlock(&shared_file_header->lock);
                return NULL;
        }
        for(i = 0; i<shared_file_header->hash_size; i++) {
                bucket = &shared_file_header->hash[i];
                for (entry = (load_struct_t *)queue_first(bucket);
                                !queue_end(bucket, &entry->links);) {
                        next_entry = (load_struct_t *)queue_next(&entry->links);
                        if(region == entry->regions_instance) {
                                lsf_unload((void *)entry->file_object,
                                        entry->base_address, sm_info);
                        }
                        entry = next_entry;
                }
        }
        if (need_lock)
                mutex_unlock(&shared_file_header->lock);
        return NULL;
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
load_struct_t *
lsf_remove_regions_mappings(
        shared_region_mapping_t region,
        shared_region_task_mappings_t   sm_info)
{
        return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}

/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table. */

static load_struct_t *
lsf_hash_delete(
        void            *file_object,
        vm_offset_t     base_offset,
        shared_region_task_mappings_t   sm_info)
{
        register queue_t        bucket;
        shared_file_info_t      *shared_file_header;
        load_struct_t           *entry;

        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

        bucket = &shared_file_header->hash
                [load_file_hash((int)file_object, shared_file_header->hash_size)];

        for (entry = (load_struct_t *)queue_first(bucket);
                        !queue_end(bucket, &entry->links);
                        entry = (load_struct_t *)queue_next(&entry->links)) {
                if((!(sm_info->self)) || ((shared_region_mapping_t)
                                sm_info->self == entry->regions_instance)) {
                        if ((entry->file_object == (int) file_object) &&
                                        (entry->base_address == base_offset)) {
                                queue_remove(bucket, entry,
                                        load_struct_ptr_t, links);
                                return entry;
                        }
                }
        }

        return (load_struct_t *)0;
}

/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table. */

static void
lsf_hash_insert(
        load_struct_t   *entry,
        shared_region_task_mappings_t   sm_info)
{
        shared_file_info_t      *shared_file_header;

        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
        queue_enter(&shared_file_header->hash
                        [load_file_hash(entry->file_object,
                                shared_file_header->hash_size)],
                entry, load_struct_ptr_t, links);
}

/* Looks up the file type requested.  If it is already loaded and the */
/* file extents are an exact match, returns success.  If it is not */
/* loaded, attempts to load the file extents at the given offsets; */
/* if any extent fails to load, or if the file was already loaded */
/* in a different configuration, lsf_load fails. */

static kern_return_t
lsf_load(
        vm_offset_t     mapped_file,
        vm_size_t       mapped_file_size,
        vm_offset_t     *base_address,
        sf_mapping_t    *mappings,
        int             map_cnt,
        void            *file_object,
        int             flags,
        shared_region_task_mappings_t   sm_info)
{

        load_struct_t           *entry;
        vm_map_copy_t           copy_object;
        loaded_mapping_t        *file_mapping;
        loaded_mapping_t        **tptr;
        int                     i;
        ipc_port_t              local_map;
        vm_offset_t             original_alt_load_next;
        vm_offset_t             alternate_load_next;

        entry = (load_struct_t *)zalloc(lsf_zone);
        shared_file_available_hash_ele--;
        entry->file_object = (int)file_object;
        entry->mapping_cnt = map_cnt;
        entry->mappings = NULL;
        entry->links.prev = (queue_entry_t) 0;
        entry->links.next = (queue_entry_t) 0;
        entry->regions_instance = (shared_region_mapping_t)sm_info->self;
        entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
        entry->file_offset = mappings[0].file_offset;

        lsf_hash_insert(entry, sm_info);
        tptr = &(entry->mappings);

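        /*
         * The shared text region is split at alternate_base: ordinary loads
         * use the caller-supplied base address and must stay below it, while
         * loads flagged ALTERNATE_LOAD_SITE are packed sequentially above
         * it, starting at alternate_next, which is advanced by the rounded
         * size of each file loaded there.
         */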

        alternate_load_next = sm_info->alternate_next;
        original_alt_load_next = alternate_load_next;
        if (flags & ALTERNATE_LOAD_SITE) {
                int max_loadfile_offset;

                *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
                        sm_info->alternate_next;
                max_loadfile_offset = 0;
                for(i = 0; i<map_cnt; i++) {
                        if(((mappings[i].mapping_offset
                                        & SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
                                        max_loadfile_offset) {
                                max_loadfile_offset =
                                        (mappings[i].mapping_offset
                                                & SHARED_TEXT_REGION_MASK)
                                        + mappings[i].size;
                        }
                }
                if((alternate_load_next + round_page_32(max_loadfile_offset)) >=
                                (sm_info->data_size - (sm_info->data_size>>9))) {
                        entry->base_address =
                                (*base_address) & SHARED_TEXT_REGION_MASK;
                        lsf_unload(file_object, entry->base_address, sm_info);

                        return KERN_NO_SPACE;
                }
                alternate_load_next += round_page_32(max_loadfile_offset);

        } else {
                if (((*base_address) & SHARED_TEXT_REGION_MASK) >
                                sm_info->alternate_base) {
                        entry->base_address =
                                (*base_address) & SHARED_TEXT_REGION_MASK;
                        lsf_unload(file_object, entry->base_address, sm_info);
                        return KERN_INVALID_ARGUMENT;
                }
        }

        entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

        // Sanity check the mappings -- make sure we don't stray across the
        // alternate boundary.  If any bit of a library that we're not trying
        // to load in the alternate load space strays across that boundary,
        // return KERN_INVALID_ARGUMENT immediately so that the caller can
        // try to load it in the alternate shared area.  We do this to avoid
        // a nasty case: if a library tries to load so that it crosses the
        // boundary, it'll occupy a bit of the alternate load area without
        // the kernel being aware.  When loads into the alternate load area
        // at the first free address are tried, the load will fail.
        // Thus, a single library straddling the boundary causes all sliding
        // libraries to fail to load.  This check will avoid such a case.

        if (!(flags & ALTERNATE_LOAD_SITE)) {
                for (i = 0; i<map_cnt;i++) {
                        vm_offset_t     region_mask;
                        vm_address_t    region_start;
                        vm_address_t    region_end;

                        if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
                                // mapping offsets are relative to start of shared segments.
                                region_mask = SHARED_TEXT_REGION_MASK;
                                region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
                                region_end = (mappings[i].size + region_start);
                                if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
                                        // No library is permitted to load so that any part of it falls
                                        // in the shared alternate space.  If they want it loaded there,
                                        // they can put it in the alternate space explicitly.
                                        printf("Library trying to load across alternate shared region boundary -- denied!\n");
                                        lsf_unload(file_object, entry->base_address, sm_info);
                                        return KERN_INVALID_ARGUMENT;
                                }
                        } else {
                                // rw section?
                                region_mask = SHARED_DATA_REGION_MASK;
                                region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
                                region_end = (mappings[i].size + region_start);
                                if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
                                        printf("Library trying to load across alternate shared region boundary -- denied!\n");
                                        lsf_unload(file_object, entry->base_address, sm_info);
                                        return KERN_INVALID_ARGUMENT;
                                }
                        } // write?
                } // for
        } // if not alternate load site.

        /* copyin mapped file data */
        for(i = 0; i<map_cnt; i++) {
                vm_offset_t     target_address;
                vm_offset_t     region_mask;

                if(mappings[i].protection & VM_PROT_COW) {
                        local_map = (ipc_port_t)sm_info->data_region;
                        region_mask = SHARED_DATA_REGION_MASK;
                        if((mappings[i].mapping_offset
                                        & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
                                lsf_unload(file_object,
                                        entry->base_address, sm_info);
                                return KERN_INVALID_ARGUMENT;
                        }
                } else {
                        region_mask = SHARED_TEXT_REGION_MASK;
                        local_map = (ipc_port_t)sm_info->text_region;
                        if(mappings[i].mapping_offset
                                        & GLOBAL_SHARED_SEGMENT_MASK) {
                                lsf_unload(file_object,
                                        entry->base_address, sm_info);
                                return KERN_INVALID_ARGUMENT;
                        }
                }
                if(!(mappings[i].protection & VM_PROT_ZF)
                                && ((mapped_file + mappings[i].file_offset +
                                mappings[i].size) >
                                (mapped_file + mapped_file_size))) {
                        lsf_unload(file_object, entry->base_address, sm_info);
                        return KERN_INVALID_ARGUMENT;
                }
                target_address = ((mappings[i].mapping_offset) & region_mask)
                        + entry->base_address;
                if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
                                ->backing.map, &target_address,
                                mappings[i].size, FALSE)) {
                        lsf_unload(file_object, entry->base_address, sm_info);
                        return KERN_FAILURE;
                }
                target_address = ((mappings[i].mapping_offset) & region_mask)
                        + entry->base_address;
                if(!(mappings[i].protection & VM_PROT_ZF)) {
                        if(vm_map_copyin(current_map(),
                                        mapped_file + mappings[i].file_offset,
                                        round_page_32(mappings[i].size), FALSE, &copy_object)) {
                                vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                                        ->backing.map, target_address, mappings[i].size);
                                lsf_unload(file_object, entry->base_address, sm_info);
                                return KERN_FAILURE;
                        }
                        if(vm_map_copy_overwrite(((vm_named_entry_t)
                                        local_map->ip_kobject)->backing.map, target_address,
                                        copy_object, FALSE)) {
                                vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                                        ->backing.map, target_address, mappings[i].size);
                                lsf_unload(file_object, entry->base_address, sm_info);
                                return KERN_FAILURE;
                        }
                }
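                /*
                 * Two passes over vm_map_protect(): the first (set_maximum ==
                 * TRUE) clamps the maximum protection of the new mapping to
                 * the requested bits masked to read/execute, the second
                 * (set_maximum == FALSE) sets the current protection to the
                 * same value.
                 */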
                vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
                                ->backing.map, target_address,
                                round_page_32(target_address + mappings[i].size),
                                (mappings[i].protection &
                                        (VM_PROT_READ | VM_PROT_EXECUTE)),
                                TRUE);
                vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
                                ->backing.map, target_address,
                                round_page_32(target_address + mappings[i].size),
                                (mappings[i].protection &
                                        (VM_PROT_READ | VM_PROT_EXECUTE)),
                                FALSE);
                file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
                if(file_mapping == 0)
                        panic("lsf_load: OUT OF MAPPINGS!");
                shared_file_available_hash_ele--;
                file_mapping->mapping_offset = (mappings[i].mapping_offset)
                        & region_mask;
                file_mapping->size = mappings[i].size;
                file_mapping->file_offset = mappings[i].file_offset;
                file_mapping->protection = mappings[i].protection;
                file_mapping->next = NULL;
                *tptr = file_mapping;
                tptr = &(file_mapping->next);
        }
        shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
        return KERN_SUCCESS;

}


/* finds the file_object extent list in the shared memory hash table */
/* If one is found, the associated extents in shared memory are deallocated */
/* and the extent list is freed. */

static void
lsf_unload(
        void            *file_object,
        vm_offset_t     base_offset,
        shared_region_task_mappings_t   sm_info)
{
        load_struct_t           *entry;
        ipc_port_t              local_map;
        loaded_mapping_t        *map_ele;
        loaded_mapping_t        *back_ptr;

        entry = lsf_hash_delete(file_object, base_offset, sm_info);
        if(entry) {
                map_ele = entry->mappings;
                while(map_ele != NULL) {
                        if(map_ele->protection & VM_PROT_COW) {
                                local_map = (ipc_port_t)sm_info->data_region;
                        } else {
                                local_map = (ipc_port_t)sm_info->text_region;
                        }
                        vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                                        ->backing.map, entry->base_address +
                                        map_ele->mapping_offset,
                                map_ele->size);
                        back_ptr = map_ele;
                        map_ele = map_ele->next;
                        zfree(lsf_zone, (vm_offset_t)back_ptr);
                        shared_file_available_hash_ele++;
                }
                zfree(lsf_zone, (vm_offset_t)entry);
                shared_file_available_hash_ele++;
        }
}

/* integer is from 1 to 100 and represents percent full */
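/*
 * For example, with a 512 KB zone maximum, 4096 allocated elements of
 * 128 bytes each would report (4096 * 128 * 100) / 524288 = 100.
 * (Element and zone sizes here are illustrative, not the real values.)
 */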
unsigned int
lsf_mapping_pool_gauge()
{
        return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}