/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <machine/cpu_capabilities.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
/* forward declarations */
static kern_return_t
shared_file_init(
	ipc_port_t			*shared_text_region_handle,
	vm_size_t			text_region_size,
	ipc_port_t			*shared_data_region_handle,
	vm_size_t			data_region_size,
	vm_offset_t			*shared_file_mapping_array);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
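
/*
 * Illustrative note (not part of the original source): the hash simply
 * folds the low 24 bits of the file object's kernel address into a bucket
 * index.  With the roughly 2048 buckets a 256 MB data region yields in
 * shared_file_init() below (assuming two-pointer queue heads), the object
 * at 0x01234500 would land in bucket
 * (0x01234500 & 0xffffff) % 2048 == 0x234500 % 2048 == 1280.
 */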
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;	/* zone backing the loaded-file hash elements */

int			shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle = NULL;
vm_map_t		com_region_map = NULL;
vm_size_t		com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;

#define GLOBAL_COM_REGION_BASE _COMM_PAGE_BASE_ADDRESS
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not */
/* relevant as the system default flag is not set */
kern_return_t
shared_file_create_system_region(
		shared_region_mapping_t	*shared_region)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if(kret)
		return kret;
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
	if(kret)
		return kret;
	(*shared_region)->flags = 0;
	if(com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	return KERN_SUCCESS;
}
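
/*
 * Usage sketch (illustrative, not from the original source): a caller
 * standing up a private branch region would typically pair this routine
 * with vm_set_shared_region(), the same way the boot-time path below does:
 *
 *	shared_region_mapping_t new_region;
 *
 *	if (shared_file_create_system_region(&new_region) == KERN_SUCCESS)
 *		vm_set_shared_region(current_task(), new_region);
 */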
/*
 * load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
		shared_region_mapping_t new_system_region)
{
	shared_region_mapping_t old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if((old_system_region != NULL) &&
		(old_system_region->fs_base == fs_base) &&
		(old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		default_environment_shared_regions = new_system_region;
		default_regions_list_unlock();
		old_system_region->flags |= SHARED_REGION_STALE;
		return old_system_region;
	}
	if (old_system_region) {
		while(old_system_region->default_env_list != NULL) {
			if((old_system_region->default_env_list->fs_base == fs_base) &&
				(old_system_region->default_env_list->system == system)) {
				new_system_region->default_env_list =
					old_system_region->default_env_list
						->default_env_list;
				old_system_region->default_env_list =
					new_system_region;
				default_regions_list_unlock();
				old_system_region->flags |= SHARED_REGION_STALE;
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if(old_system_region) {
		old_system_region->default_env_list = new_system_region;
	} else {
		default_environment_shared_regions = new_system_region;
	}
	default_regions_list_unlock();
	return NULL;
}
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure
 */
shared_region_mapping_t
lookup_default_shared_region(
		unsigned int fs_base,
		unsigned int system)
{
	shared_region_mapping_t system_region;

	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	while(system_region != NULL) {
		if((system_region->fs_base == fs_base) &&
			(system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if(system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	return system_region;
}
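
/*
 * Usage sketch (illustrative): the KERN_NO_SPACE recovery path in
 * copyin_shared_file() below calls this to ask whether the region it is
 * holding is still the system default:
 *
 *	system_region = lookup_default_shared_region(
 *			regions->fs_base, regions->system);
 *	if (system_region == regions) ...
 *
 * The returned region carries a reference which that path later drops with
 * shared_region_mapping_dealloc_lock().
 */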
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
		shared_region_mapping_t system_region,
		int need_sfh_lock)
{
	shared_region_mapping_t old_system_region;
	unsigned int		fs_base;

	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if(old_system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->flags |= SHARED_REGION_STALE;
		shared_region_mapping_dealloc_lock(old_system_region,
						need_sfh_lock);
		default_regions_list_unlock();
		return;
	}

	while(old_system_region->default_env_list != NULL) {
		if(old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				old_system_region->default_env_list->default_env_list;
			dead_region->flags |= SHARED_REGION_STALE;
			shared_region_mapping_dealloc_lock(dead_region,
						need_sfh_lock);
			default_regions_list_unlock();
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	default_regions_list_unlock();
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export when it is no longer needed.
 */
void
remove_default_shared_region(
		shared_region_mapping_t system_region)
{
	remove_default_shared_region_lock(system_region, 1);
}

void
remove_all_shared_regions()
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if(system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while(system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->flags |= SHARED_REGION_STALE;
		shared_region_mapping_dealloc(system_region);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi-independent of the split libs */
/* and so its policies have to be handled differently by the code that */
/* manipulates the mapping of shared region environments.  However, */
/* the shared region delivery system supports both. */
void
shared_com_boot_time_init()
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	if(com_region_handle) {
		panic("shared_com_boot_time_init: "
			"com_region_handle already set\n");
	}

	/* create com page region */
	if(kret = vm_region_object_create(kernel_map,
				com_region_size,
				&com_region_handle)) {
		panic("shared_com_boot_time_init: "
			"unable to create comm page\n");
	}
	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle->ip_kobject;
	com_region_map = named_entry->backing.map;
	/* wrap the com region in its own shared file mapping structure */
	shared_region_mapping_create(com_region_handle,
		com_region_size, NULL, 0, 0,
		GLOBAL_COM_REGION_BASE, &com_mapping_resource,
		0, 0);
}
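
/*
 * Sketch (illustrative): after this routine runs once at boot, every
 * environment's split-library region is chained to the single commpage
 * resource, e.g.
 *
 *	new_system_region->next == com_mapping_resource
 *
 * so each task sees { split-lib region } -> { commpage } with the commpage
 * wrapped at GLOBAL_COM_REGION_BASE (_COMM_PAGE_BASE_ADDRESS from
 * <machine/cpu_capabilities.h>).
 */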
void
shared_file_boot_time_init(
		unsigned int fs_base,
		unsigned int system)
{
	long			shared_text_region_size;
	long			shared_data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	shared_text_region_size = 0x10000000;
	shared_data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
		shared_text_region_size, &shared_data_region_handle,
		shared_data_region_size, &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
		shared_text_region_size, shared_data_region_handle,
		shared_data_region_size, shared_file_mapping_array,
		GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
		SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);

	new_system_region->fs_base = fs_base;
	new_system_region->system = system;
	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if(old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if(com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of */
/* a shared_memory_server which not only allocates the backing maps */
/* but also coordinates requests for space. */

static kern_return_t
shared_file_init(
	ipc_port_t	*shared_text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*shared_data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*mapping_array)
{
	vm_offset_t		aligned_address;
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	/* create text and data maps/regions */
	if(kret = vm_region_object_create(kernel_map,
				text_region_size,
				shared_text_region_handle)) {
		return kret;
	}
	if(kret = vm_region_object_create(kernel_map,
				data_region_size,
				shared_data_region_handle)) {
		ipc_port_release_send(*shared_text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
			data_table_size, 0, &entry) != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		*mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *mapping_array, alloced = 0;
			alloced < (hash_size +
				round_page_32(sizeof(struct sf_mapping)));
			alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				VM_PROT_READ | VM_PROT_WRITE,
				((unsigned int)(p->object->wimg_bits))
					& VM_WIMG_MASK,
				TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
			*mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map, *mapping_array,
			*mapping_array +
			   (hash_size + round_page_32(sizeof(struct sf_mapping))),
			VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			   (hash_size + round_page_32(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, ETAP_NO_TRACE);
	} else {
		*mapping_array = shared_file_mapping_array;
	}

	vm_map(((vm_named_entry_t)
			(*shared_data_region_handle)->ip_kobject)->backing.map,
		&table_mapping_address,
		data_table_size, 0, SHARED_LIB_ALIAS,
		sfma_handle, 0, FALSE,
		VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	return KERN_SUCCESS;
}
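
/*
 * Layout sketch (illustrative, derived from the arithmetic above): for the
 * standard 256 MB (0x10000000) data region,
 *
 *	data_table_size       = 0x10000000 >> 9  = 0x80000  (512 KB)
 *	hash_size             = 0x10000000 >> 14 = 0x4000   (16 KB)
 *	table_mapping_address = 0x10000000 - 0x80000 = 0x0FF80000
 *
 * so the shared_file_info_t header, the hash buckets, and the load_file_ele
 * zone all live in the last 512 KB of the data region, mapped read-only
 * back into that region through sfma_handle.
 */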
/* A call made from user space, copyin_shared_file requires the user to */
/* provide the address and size of a mapped file, the full path name of */
/* that file and a list of offsets to be mapped into shared memory. */
/* By requiring that the file be pre-mapped, copyin_shared_file can */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call. */

kern_return_t
copyin_shared_file(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	int		map_cnt,
	sf_mapping_t	*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int		*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		vm_size_t hash_table_size;
		vm_size_t hash_table_offset;

		hash_table_size = (shared_file_header->hash_size)
				* sizeof(struct queue_entry);
		hash_table_offset = hash_table_size +
				round_page_32(sizeof(struct sf_mapping));
		for (i = 0; i < shared_file_header->hash_size; i++)
			queue_init(&shared_file_header->hash[i]);

		allocable_hash_pages =
			((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
		hash_cram_address =
			sm_info->region_mappings + hash_table_offset;
		shared_file_available_hash_ele = 0;

		shared_file_header->hash_init = TRUE;
	}

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_size;

		cram_size = allocable_hash_pages > 3 ?
				3 : allocable_hash_pages;
		allocable_hash_pages -= cram_size;
		cram_size = cram_size * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address+cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}
		zcram(lsf_zone, hash_cram_address, cram_size);
		shared_file_available_hash_ele
				+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t	mapped_object;
		if(entry->is_sub_map) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while(mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if(file_object != mapped_object) {
			if(sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	if (file_entry = lsf_hash_lookup(shared_file_header->hash,
			(void *) file_object, mappings[0].file_offset,
			shared_file_header->hash_size,
			alternate, sm_info)) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same */
		/* size and in the same order rather than checking for */
		/* semantic equivalence. */

		/* If the file is being loaded in the alternate */
		/* area, one load to alternate is allowed per mapped */
		/* object the base address is passed back to the */
		/* caller and the mappings field is filled in.  If the */
		/* caller does not pass the precise mappings_cnt */
		/* and the Alternate is already loaded, an error */
		/* is returned. */
		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i>=map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].mapping_offset)
					& SHARED_DATA_REGION_MASK) !=
					file_mapping->mapping_offset ||
				mappings[i].size !=
					file_mapping->size ||
				mappings[i].file_offset !=
					file_mapping->file_offset ||
				mappings[i].protection !=
					file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(file_mapping != NULL) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
						+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
				mappings, map_cnt,
				(void *)file_object,
				*flags, sm_info);
		*flags = 0;
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if(system_region == regions) {
				shared_region_mapping_t	new_system_shared_regions;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						&new_system_shared_regions);
				shared_region_mapping_dealloc_lock(
						new_system_shared_regions, 0);
				vm_set_shared_region(current_task(), regions);
			}
			if(system_region != NULL) {
				shared_region_mapping_dealloc_lock(
						system_region, 0);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
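
/*
 * Worked example of the manifest check above (illustrative): if a library
 * was first loaded with two elements {offset 0x0, size 0x5000, RX} and
 * {offset 0x5000, size 0x1000, COW}, a second load must present exactly
 * those elements with the same sizes, file offsets, and protections, in
 * the same order.  A request that splits the text into two 0x2800-byte
 * pieces is semantically equivalent but is still rejected with
 * KERN_INVALID_ARGUMENT, since the check deliberately avoids testing for
 * semantic equivalence.
 */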
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space. */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int) file_object) &&
			(entry->file_offset != recognizableOffset)) {
			/* same object, different base file offset: not a match */
		}
		if ((entry->file_object == (int)file_object) &&
			(entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while(target_region) {
				if((!(sm_info->self)) ||
					((target_region == entry->regions_instance) &&
					(target_region->depth >= entry->depth))) {
					if(alternate) {
						if (entry->base_address >=
							sm_info->alternate_base)
							return entry;
					} else {
						if (entry->base_address <
							sm_info->alternate_base)
							return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
					   target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
					depth = 0;
				}
			}
		}
	}

	return (load_struct_t *)0;
}
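
/*
 * Illustrative restatement (not from the original source): for a private
 * region chained beneath the system region, the while loop above starts at
 * the task's own region instance and follows object_chain toward shared
 * parents, so an entry only matches if it was loaded into a region
 * instance the caller can see at or below that entry's recorded depth, and
 * on the requested side (alternate or not) of alternate_base.
 */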
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t	region,
	shared_region_task_mappings_t	sm_info,
	int need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		return NULL;
	}
	for(i = 0; i<shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
			!queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				lsf_unload((void *)entry->file_object,
					entry->base_address, sm_info);
			}
			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	return NULL;
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export when it is no longer needed.
 */
load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t	region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table. */

static load_struct_t *
lsf_hash_delete(
	void		*file_object,
	vm_offset_t	base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*prev_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
	     [load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
		!queue_end(bucket, &entry->links);
		entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
				sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
				(entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
						load_struct_ptr_t, links);
				return entry;
			}
		}
	}

	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table. */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
			[load_file_hash(entry->file_object,
					shared_file_header->hash_size)],
		entry, load_struct_ptr_t, links);
}
/* Looks up the file type requested.  If already loaded and the */
/* file extents are an exact match, returns Success.  If not */
/* loaded attempts to load the file extents at the given offsets */
/* if any extent fails to load or if the file was already loaded */
/* in a different configuration, lsf_load fails. */

static kern_return_t
lsf_load(
	vm_offset_t	mapped_file,
	vm_size_t	mapped_file_size,
	vm_offset_t	*base_address,
	sf_mapping_t	*mappings,
	int		map_cnt,
	void		*file_object,
	int		flags,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	vm_map_copy_t		copy_object;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	int			i;
	ipc_port_t		local_map;
	vm_offset_t		original_alt_load_next;
	vm_offset_t		alternate_load_next;

	entry = (load_struct_t *)zalloc(lsf_zone);
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].file_offset;

	lsf_hash_insert(entry, sm_info);
	tptr = &(entry->mappings);

	alternate_load_next = sm_info->alternate_next;
	original_alt_load_next = alternate_load_next;
	if (flags & ALTERNATE_LOAD_SITE) {
		int	max_loadfile_offset;

		*base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
						sm_info->alternate_next;
		max_loadfile_offset = 0;
		for(i = 0; i<map_cnt; i++) {
			if(((mappings[i].mapping_offset
				& SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
				max_loadfile_offset) {
				max_loadfile_offset =
					(mappings[i].mapping_offset
						& SHARED_TEXT_REGION_MASK)
						+ mappings[i].size;
			}
		}
		if((alternate_load_next + round_page_32(max_loadfile_offset)) >=
			(sm_info->data_size - (sm_info->data_size>>9))) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);

			return KERN_NO_SPACE;
		}
		alternate_load_next += round_page_32(max_loadfile_offset);
	} else {
		if (((*base_address) & SHARED_TEXT_REGION_MASK) >
					sm_info->alternate_base) {
			entry->base_address =
				(*base_address) & SHARED_TEXT_REGION_MASK;
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
	}

	entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;

	// Sanity check the mappings -- make sure we don't stray across the
	// alternate boundary.  If any bit of a library that we're not trying
	// to load in the alternate load space strays across that boundary,
	// return KERN_INVALID_ARGUMENT immediately so that the caller can
	// try to load it in the alternate shared area.  We do this to avoid
	// a nasty case: if a library tries to load so that it crosses the
	// boundary, it'll occupy a bit of the alternate load area without
	// the kernel being aware.  When loads into the alternate load area
	// at the first free address are tried, the load will fail.
	// Thus, a single library straddling the boundary causes all sliding
	// libraries to fail to load.  This check will avoid such a case.

	if (!(flags & ALTERNATE_LOAD_SITE)) {
		for (i = 0; i<map_cnt;i++) {
			vm_offset_t	region_mask;
			vm_address_t	region_start;
			vm_address_t	region_end;

			if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
				// mapping offsets are relative to start of shared segments.
				region_mask = SHARED_TEXT_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					// No library is permitted to load so any bit of it is in the
					// shared alternate space.  If they want it loaded, they can put
					// it in the alternate space explicitly.
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			} else {
				region_mask = SHARED_DATA_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask)+entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			}
		}
	} // if not alternate load site.

	/* copyin mapped file data */
	for(i = 0; i<map_cnt; i++) {
		vm_offset_t	target_address;
		vm_offset_t	region_mask;

		if(mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if(mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if(!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
				mappings[i].size) >
				(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, FALSE)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if(!(mappings[i].protection & VM_PROT_ZF)) {
			if(vm_map_copyin(current_map(),
				mapped_file + mappings[i].file_offset,
				round_page_32(mappings[i].size), FALSE, &copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if(vm_map_copy_overwrite(((vm_named_entry_t)
				local_map->ip_kobject)->backing.map, target_address,
				copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}
		/* set the maximum protection, then the current protection */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page_32(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page_32(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if(file_mapping == 0)
			panic("lsf_load: OUT OF MAPPINGS!");
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
							& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
	return KERN_SUCCESS;
}
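
/*
 * Worked example for the boundary sanity check above (illustrative, and
 * assuming for the sake of the numbers that SHARED_ALTERNATE_LOAD_BASE is
 * 0x09000000): a read-only mapping with mapping_offset 0x08F00000 and size
 * 0x00200000 in a library based at 0 gives
 *
 *	region_start = 0x08F00000, region_end = 0x09100000
 *
 * region_end reaches past the alternate base, so the load is refused with
 * KERN_INVALID_ARGUMENT rather than letting the tail silently occupy the
 * alternate area.
 */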
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */

static void
lsf_unload(
	void			*file_object,
	vm_offset_t		base_offset,
	shared_region_task_mappings_t	sm_info)
{
	load_struct_t		*entry;
	ipc_port_t		local_map;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if(map_ele->protection & VM_PROT_COW) {
				local_map = (ipc_port_t)sm_info->data_region;
			} else {
				local_map = (ipc_port_t)sm_info->text_region;
			}
			vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, entry->base_address +
					map_ele->mapping_offset,
					map_ele->size);
			back_ptr = map_ele;
			map_ele = map_ele->next;
			zfree(lsf_zone, (vm_offset_t)back_ptr);
			shared_file_available_hash_ele++;
		}
		zfree(lsf_zone, (vm_offset_t)entry);
		shared_file_available_hash_ele++;
	}
}
/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge()
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
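
/*
 * Example (illustrative numbers): with max_size = 4 MB, elem_size = 64
 * bytes and count = 32768 elements in use, the gauge reports
 * (32768 * 64 * 100) / 4194304 == 50, i.e. the pool is half full.
 */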