/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
/* forward declarations */
static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*shared_file_mapping_array);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);
#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
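/*
 * Illustrative arithmetic (not from the original source): only the low
 * 24 bits of the file object's address feed the hash, so for a
 * hypothetical file_object of 0x01234567 and a table of `size` buckets
 * the bucket index is (0x01234567 & 0xffffff) % size == 0x234567 % size.
 */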
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle = NULL;
vm_map_t		com_region_map = NULL;
vm_size_t		com_region_size = 0x7000;
shared_region_mapping_t	com_mapping_resource = NULL;

#define GLOBAL_COM_REGION_BASE 0xFFFF8000
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
		shared_region_mapping_t	*shared_region)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if(kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
	if(kret)
		return kret;
	(*shared_region)->flags = 0;
	if(com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	return KERN_SUCCESS;
}
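/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * a region created here would typically be installed on a task with
 * vm_set_shared_region(), as the boot-time path below does:
 *
 *	shared_region_mapping_t private_region;
 *
 *	if (shared_file_create_system_region(&private_region)
 *						== KERN_SUCCESS)
 *		vm_set_shared_region(some_task, private_region);
 *
 * ("some_task" is a hypothetical placeholder.)
 */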
/*
 * load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
		shared_region_mapping_t new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if((old_system_region != NULL) &&
		(old_system_region->fs_base == fs_base) &&
		(old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		default_environment_shared_regions = new_system_region;
		default_regions_list_unlock();
		old_system_region->flags |= SHARED_REGION_STALE;
		return old_system_region;
	}
	if (old_system_region) {
		while(old_system_region->default_env_list != NULL) {
			if((old_system_region->default_env_list->fs_base == fs_base) &&
				(old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				/* unlink the matching entry, then mark it  */
				/* stale and hand it (with its reference)   */
				/* back to the caller                       */
				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				default_regions_list_unlock();
				old_system_region->flags |= SHARED_REGION_STALE;
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if(old_system_region) {
		old_system_region->default_env_list = new_system_region;
	} else {
		default_environment_shared_regions = new_system_region;
	}
	default_regions_list_unlock();
	return NULL;
}
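/*
 * Sketch of the expected calling pattern (cf. shared_file_boot_time_init
 * below): the caller donates a reference along with new_system_region and
 * must drop the reference that comes back with any displaced default:
 *
 *	shared_region_mapping_ref(new_system_region);
 *	old_default = update_default_shared_region(new_system_region);
 *	if (old_default)
 *		shared_region_mapping_dealloc(old_default);
 */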
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */
shared_region_mapping_t
lookup_default_shared_region(
		unsigned int fs_base,
		unsigned int system)
{
	shared_region_mapping_t	system_region;

	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	while(system_region != NULL) {
		if((system_region->fs_base == fs_base) &&
			(system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if(system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	return system_region;
}
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
void
remove_default_shared_region(
		shared_region_mapping_t system_region)
{
	shared_region_mapping_t old_system_region;
	unsigned int		fs_base;

	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if(old_system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->flags |= SHARED_REGION_STALE;
		shared_region_mapping_dealloc(old_system_region);
		default_regions_list_unlock();
		return;
	}

	while(old_system_region->default_env_list != NULL) {
		if(old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				old_system_region->default_env_list->default_env_list;
			dead_region->flags |= SHARED_REGION_STALE;
			shared_region_mapping_dealloc(dead_region);
			default_regions_list_unlock();
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	default_regions_list_unlock();
}
void
remove_all_shared_regions()
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if(system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while(system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->flags |= SHARED_REGION_STALE;
		shared_region_mapping_dealloc(system_region);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi-independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both.                      */
void
shared_com_boot_time_init()
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	if(com_region_handle) {
		panic("shared_com_boot_time_init: "
			"com_region_handle already set\n");
	}

	/* create com page region */
	if(kret = vm_region_object_create(kernel_map,
				com_region_size,
				&com_region_handle)) {
		panic("shared_com_boot_time_init: "
			"unable to create comm page\n");
	}
	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle->ip_kobject;
	com_region_map = named_entry->backing.map;
	/* wrap the com region in its own shared file mapping structure */
	shared_region_mapping_create(com_region_handle,
		com_region_size, NULL, 0, 0,
		GLOBAL_COM_REGION_BASE, &com_mapping_resource,
		0, 0);
}
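/*
 * Address arithmetic for the values above: the comm page region spans
 * GLOBAL_COM_REGION_BASE (0xFFFF8000) through
 * 0xFFFF8000 + 0x7000 - 1 = 0xFFFFEFFF, i.e. 28 KB sitting just below
 * the last page of the 32-bit address space.
 */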
void
shared_file_boot_time_init(
		unsigned int fs_base,
		unsigned int system)
{
	long			shared_text_region_size;
	long			shared_data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	shared_text_region_size = 0x10000000;
	shared_data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
		SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);

	new_system_region->fs_base = fs_base;
	new_system_region->system = system;
	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if(old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if(com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                          */
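/*
 * Worked example of the sizing arithmetic in shared_file_init below,
 * using the 0x10000000 (256 MB) boot-time region sizes from this file:
 *
 *	data_table_size       = 0x10000000 >> 9  = 0x80000 (512 KB)
 *	hash_size             = 0x10000000 >> 14 = 0x4000  (16 KB)
 *	table_mapping_address = 0x10000000 - 0x80000 = 0xFF80000
 */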
static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	vm_offset_t		aligned_address;
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	vm_size_t		data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if(kret = vm_region_object_create(kernel_map,
				text_region_size,
				shared_text_region_handle)) {
		return kret;
	}
	if(kret = vm_region_object_create(kernel_map,
				data_region_size,
				shared_data_region_handle)) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
			data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *mapping_array, alloced = 0;
			alloced < (hash_size +
				round_page_32(sizeof(struct sf_mapping)));
			alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				VM_PROT_READ | VM_PROT_WRITE,
				((unsigned int)(p->object->wimg_bits))
							& VM_WIMG_MASK,
				TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
			*mapping_array +
			(hash_size + round_page_32(sizeof(struct sf_mapping))),
			VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			(hash_size + round_page_32(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, ETAP_NO_TRACE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	vm_map(((vm_named_entry_t)
			(*shared_data_region_handle)->ip_kobject)->backing.map,
		&table_mapping_address,
		data_table_size, 0, SHARED_LIB_ALIAS,
		sfma_handle, 0, FALSE,
		VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}
/* A call made from user space, copyin_shared_file requires the user to  */
/* provide the address and size of a mapped file, the full path name of  */
/* that file and a list of offsets to be mapped into shared memory.      */
/* By requiring that the file be pre-mapped, copyin_shared_file can      */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call.                                                      */
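/*
 * Illustrative call sequence (a sketch; every name below is a
 * hypothetical placeholder, not kernel API defined in this file):
 *
 *	vm_offset_t	base = 0;
 *	int		flags = 0;	// or ALTERNATE_LOAD_SITE
 *	kern_return_t	kr;
 *
 *	// file_addr/file_size describe the caller's pre-existing mapping
 *	// of the file; file_control is its memory object control.
 *	kr = copyin_shared_file(file_addr, file_size, &base,
 *			map_cnt, mappings, file_control, sm_info, &flags);
 */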
kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */
	if(shared_file_header->hash_init == FALSE) {
		vm_size_t hash_table_size;
		vm_size_t hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
				* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
				round_page_32(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_size;

		cram_size = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address+cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
				+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;
		if(entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if (file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *) file_object, mappings[0].file_offset,
			shared_file_header->hash_size,
			alternate, sm_info)) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		/* If the file is being loaded in the alternate        */
		/* area, one load to alternate is allowed per mapped   */
		/* object the base address is passed back to the       */
		/* caller and the mappings field is filled in.  If the */
		/* caller does not pass the precise mappings_cnt       */
		/* and the Alternate is already loaded, an error       */
		/* is returned. */
		int	i = 0;

		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i>=map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
						& SHARED_DATA_REGION_MASK) !=
						file_mapping->mapping_offset ||
					mappings[i].size !=
						file_mapping->size ||
					mappings[i].file_offset !=
						file_mapping->file_offset ||
					mappings[i].protection !=
						file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(file_mapping != NULL) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
						+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
				mappings, map_cnt,
				(void *)file_object,
				*flags, sm_info);
		*flags = 0;
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if(system_region == regions) {
				shared_region_mapping_t	new_system_shared_regions;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						&new_system_shared_regions);
				shared_region_mapping_dealloc(
						new_system_shared_regions);
				vm_set_shared_region(current_task(), regions);
			}
			if(system_region != NULL) {
				shared_region_mapping_dealloc(system_region);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
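/*
 * Note for callers: on KERN_SUCCESS with *flags == SF_PREV_LOADED the
 * file was already resident with identical extents; *base_address has
 * been rebased to the prior load address and no new mappings were made.
 */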
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.  */
static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int) file_object) &&
			(entry->file_offset != recognizableOffset)) {
			/* same object, different offset: not a match */
		}
		if ((entry->file_object == (int)file_object) &&
			(entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while(target_region) {
				if((!(sm_info->self)) ||
					((target_region == entry->regions_instance) &&
					(target_region->depth >= entry->depth))) {
					if(alternate) {
						if (entry->base_address >=
							sm_info->alternate_base)
							return entry;
					} else {
						if (entry->base_address <
							sm_info->alternate_base)
							return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	return (load_struct_t *)0;
}
void
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		mutex_unlock(&shared_file_header->lock);
		return;
	}
	for(i = 0; i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
						entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	mutex_unlock(&shared_file_header->lock);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.  */
static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
				(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
						load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table.  */
static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
					shared_file_header->hash_size)],
			entry, load_struct_ptr_t, links);
}
/* Looks up the file type requested.  If already loaded and the    */
/* file extents are an exact match, returns Success.  If not       */
/* loaded attempts to load the file extents at the given offsets   */
/* if any extent fails to load or if the file was already loaded   */
/* in a different configuration, lsf_load fails.                   */
static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].file_offset;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);

	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int	max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
						sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i<map_cnt; i++) {
			if(((mappings[i].mapping_offset
				& SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
				max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
						+ mappings[i].size;
			}
		}
		if((alternate_load_next + round_page_32(max_loadfile_offset)) >=
			(sm_info->data_size - (sm_info->data_size>>9))) {

			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page_32(max_loadfile_offset);
	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
					sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	// Sanity check the mappings -- make sure we don't stray across the
	// alternate boundary.  If any bit of a library that we're not trying
	// to load in the alternate load space strays across that boundary,
	// return KERN_INVALID_ARGUMENT immediately so that the caller can
	// try to load it in the alternate shared area.  We do this to avoid
	// a nasty case: if a library tries to load so that it crosses the
	// boundary, it'll occupy a bit of the alternate load area without
	// the kernel being aware.  When loads into the alternate load area
	// at the first free address are tried, the load will fail.
	// Thus, a single library straddling the boundary causes all sliding
	// libraries to fail to load.  This check will avoid such a case.
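	// Worked example (illustrative numbers only; assuming, for the sake
	// of the example, that SHARED_ALTERNATE_LOAD_BASE were 0x09000000):
	// a read-only mapping whose region_start works out to 0x08FFF000
	// with a size of 0x2000 would end at 0x09001000, straddling the
	// boundary, so the checks below reject it with KERN_INVALID_ARGUMENT
	// rather than let it silently occupy the start of the alternate area.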
	if (!(flags & ALTERNATE_LOAD_SITE)) {
		for (i = 0; i<map_cnt; i++) {
			vm_offset_t	region_mask;
			vm_address_t	region_start;
			vm_address_t	region_end;

			if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
				// mapping offsets are relative to start of shared segments.
				region_mask = SHARED_TEXT_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					// No library is permitted to load so any bit of it is in the
					// shared alternate space.  If they want it loaded, they can put
					// it in the alternate space explicitly.
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					return KERN_INVALID_ARGUMENT;
				}
			} else {
				// writable mappings are checked against the data region mask
				region_mask = SHARED_DATA_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					return KERN_INVALID_ARGUMENT;
				}
			}
		}
	} // if not alternate load site.

	/* copyin mapped file data */
	for(i = 0; i<map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if(!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
				mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
				mapped_file + mappings[i].file_offset,
				round_page_32(mappings[i].size), FALSE,
				&copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map, target_address,
				copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}
		/* first set the maximum, then the current protection */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page_32(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page_32(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if(file_mapping == 0)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
							& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;
}
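/*
 * Cleanup on failure: once lsf_hash_insert() has made the entry visible,
 * the error exits in the copyin loop above call lsf_unload() (below) so
 * that the hash entry is removed and any extents already placed are
 * deallocated before an error is returned.
 */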
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */
static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if(map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
					map_ele->mapping_offset,
					map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}
/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge()
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
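/*
 * Example (illustrative numbers): a zone holding 1500 elements of
 * 32 bytes each (48000 bytes) against a max_size of 96000 bytes reads
 * (48000 * 100) / 96000 == 50, i.e. half full.
 */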