/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <ipc/ipc_port.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <machine/cpu_capabilities.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
/* forward declarations */
static kern_return_t
shared_file_init(
    ipc_port_t                     *shared_text_region_handle,
    vm_size_t                      text_region_size,
    ipc_port_t                     *shared_data_region_handle,
    vm_size_t                      data_region_size,
    vm_offset_t                    *shared_file_mapping_array);

static load_struct_t *
lsf_hash_lookup(
    queue_head_t                   *hash_table,
    void                           *file_object,
    vm_offset_t                    recognizableOffset,
    int                            size,
    boolean_t                      alternate,
    shared_region_task_mappings_t  sm_info);

static load_struct_t *
lsf_hash_delete(
    void                           *file_object,
    vm_offset_t                    base_offset,
    shared_region_task_mappings_t  sm_info);

static void
lsf_hash_insert(
    load_struct_t                  *entry,
    shared_region_task_mappings_t  sm_info);

static kern_return_t
lsf_load(
    vm_offset_t                    mapped_file,
    vm_size_t                      mapped_file_size,
    vm_offset_t                    *base_address,
    sf_mapping_t                   *mappings,
    int                            map_cnt,
    void                           *file_object,
    int                            flags,
    shared_region_task_mappings_t  sm_info);

static void
lsf_unload(
    void                           *file_object,
    vm_offset_t                    base_offset,
    shared_region_task_mappings_t  sm_info);
#define load_file_hash(file_object, size) \
        ((((natural_t)file_object) & 0xffffff) % size)
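/*
 * Illustrative note (not from the original source): load_file_hash() keys
 * the table on the low 24 bits of the file object's address.  For example,
 * a file_object pointer of 0x0ABC1234 masks to 0xBC1234; with a table of
 * 1024 buckets that hashes to bucket 0xBC1234 % 1024 = 564.
 */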
vm_offset_t             shared_file_text_region;
vm_offset_t             shared_file_data_region;

ipc_port_t              shared_text_region_handle;
ipc_port_t              shared_data_region_handle;
vm_offset_t             shared_file_mapping_array = 0;

shared_region_mapping_t default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
        mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
        mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
        mutex_unlock(&default_regions_list_lock_data)

ipc_port_t              sfma_handle = NULL;
zone_t                  lsf_zone;

int                     shared_file_available_hash_ele;

/* com region support */
ipc_port_t              com_region_handle = NULL;
vm_map_t                com_region_map = NULL;
vm_size_t               com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t com_mapping_resource = NULL;

#define GLOBAL_COM_REGION_BASE _COMM_PAGE_BASE_ADDRESS
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
    shared_region_mapping_t  *shared_region)
{
    ipc_port_t      text_handle;
    ipc_port_t      data_handle;
    long            text_size;
    long            data_size;
    vm_offset_t     mapping_array;
    kern_return_t   kret;

    text_size = 0x10000000;
    data_size = 0x10000000;

    kret = shared_file_init(&text_handle,
            text_size, &data_handle, data_size, &mapping_array);
    if(kret)
        return kret;
    kret = shared_region_mapping_create(text_handle,
            text_size, data_handle, data_size, mapping_array,
            GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
            SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
    if(kret)
        return kret;
    (*shared_region)->flags = 0;
    if(com_mapping_resource) {
        shared_region_mapping_ref(com_mapping_resource);
        (*shared_region)->next = com_mapping_resource;
    }

    return KERN_SUCCESS;
}
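/*
 * Note: the 0x10000000 (256 MB) text and data sizes above match the
 * region sizes used by shared_file_boot_time_init() below, so private
 * branch regions have the same geometry as the system default regions.
 */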
/*
 * load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */
__private_extern__ shared_region_mapping_t
update_default_shared_region(
    shared_region_mapping_t  new_system_region)
{
    shared_region_mapping_t old_system_region;
    unsigned int fs_base;
    unsigned int system;

    fs_base = new_system_region->fs_base;
    system = new_system_region->system;
    new_system_region->flags |= SHARED_REGION_SYSTEM;
    default_regions_list_lock();
    old_system_region = default_environment_shared_regions;

    if((old_system_region != NULL) &&
            (old_system_region->fs_base == fs_base) &&
            (old_system_region->system == system)) {
        new_system_region->default_env_list =
                old_system_region->default_env_list;
        default_environment_shared_regions = new_system_region;
        default_regions_list_unlock();
        old_system_region->flags |= SHARED_REGION_STALE;
        return old_system_region;
    }
    if (old_system_region) {
        while(old_system_region->default_env_list != NULL) {
            if((old_system_region->default_env_list->fs_base == fs_base) &&
                (old_system_region->default_env_list->system == system)) {
                shared_region_mapping_t stale_region;

                stale_region = old_system_region->default_env_list;
                new_system_region->default_env_list =
                    old_system_region->default_env_list
                        ->default_env_list;
                old_system_region->default_env_list =
                    new_system_region;
                old_system_region = stale_region;
                default_regions_list_unlock();
                old_system_region->flags |= SHARED_REGION_STALE;
                return old_system_region;
            }
            old_system_region = old_system_region->default_env_list;
        }
    }
    /* If we get here, we are at the end of the system list and we */
    /* did not find a pre-existing entry */
    if(old_system_region) {
        old_system_region->default_env_list = new_system_region;
    } else {
        default_environment_shared_regions = new_system_region;
    }
    default_regions_list_unlock();
    return NULL;
}
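/*
 * Summary of the three cases handled above (informational): a match at
 * the list head is replaced in place; a match further down the list is
 * spliced out and replaced by new_system_region at the same position;
 * no match appends new_system_region at the tail (or makes it the head
 * of an empty list).  In the first two cases the displaced region is
 * marked SHARED_REGION_STALE and handed back, still carrying its list
 * reference, for the caller to dispose of.
 */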
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure
 */
__private_extern__ shared_region_mapping_t
lookup_default_shared_region(
    unsigned int  fs_base,
    unsigned int  system)
{
    shared_region_mapping_t system_region;
    default_regions_list_lock();
    system_region = default_environment_shared_regions;

    while(system_region != NULL) {
        if((system_region->fs_base == fs_base) &&
                (system_region->system == system)) {
            break;
        }
        system_region = system_region->default_env_list;
    }
    if(system_region)
        shared_region_mapping_ref(system_region);
    default_regions_list_unlock();
    return system_region;
}
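/*
 * Note for callers: the reference taken here must eventually be dropped
 * with shared_region_mapping_dealloc() (or the _lock variant), as
 * copyin_shared_file() does on its KERN_NO_SPACE recovery path.
 */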
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
    shared_region_mapping_t  system_region,
    int                      need_lock)
{
    shared_region_mapping_t old_system_region;
    unsigned int fs_base;
    unsigned int system;

    default_regions_list_lock();
    old_system_region = default_environment_shared_regions;

    if(old_system_region == NULL) {
        default_regions_list_unlock();
        return;
    }

    if (old_system_region == system_region) {
        default_environment_shared_regions
            = old_system_region->default_env_list;
        old_system_region->flags |= SHARED_REGION_STALE;
        shared_region_mapping_dealloc_lock(old_system_region,
                need_lock);
        default_regions_list_unlock();
        return;
    }

    while(old_system_region->default_env_list != NULL) {
        if(old_system_region->default_env_list == system_region) {
            shared_region_mapping_t dead_region;
            dead_region = old_system_region->default_env_list;
            old_system_region->default_env_list =
                old_system_region->default_env_list->default_env_list;
            dead_region->flags |= SHARED_REGION_STALE;
            shared_region_mapping_dealloc_lock(dead_region,
                    need_lock);
            default_regions_list_unlock();
            return;
        }
        old_system_region = old_system_region->default_env_list;
    }
    default_regions_list_unlock();
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export once that is confirmed.
 */
void
remove_default_shared_region(
    shared_region_mapping_t  system_region)
{
    remove_default_shared_region_lock(system_region, 1);
}
void
remove_all_shared_regions()
{
    shared_region_mapping_t system_region;
    shared_region_mapping_t next_system_region;

    default_regions_list_lock();
    system_region = default_environment_shared_regions;

    if(system_region == NULL) {
        default_regions_list_unlock();
        return;
    }

    while(system_region != NULL) {
        next_system_region = system_region->default_env_list;
        system_region->flags |= SHARED_REGION_STALE;
        shared_region_mapping_dealloc(system_region);
        system_region = next_system_region;
    }
    default_environment_shared_regions = NULL;
    default_regions_list_unlock();
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi-independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both.                      */
void
shared_com_boot_time_init()
{
    kern_return_t       kret;
    vm_named_entry_t    named_entry;

    if(com_region_handle) {
        panic("shared_com_boot_time_init: "
            "com_region_handle already set\n");
    }

    /* create com page region */
    if(kret = vm_region_object_create(kernel_map,
                com_region_size,
                &com_region_handle)) {
        panic("shared_com_boot_time_init: "
            "unable to create comm page\n");
    }
    /* now export the underlying region/map */
    named_entry = (vm_named_entry_t)com_region_handle->ip_kobject;
    com_region_map = named_entry->backing.map;
    /* wrap the com region in its own shared file mapping structure */
    shared_region_mapping_create(com_region_handle,
        com_region_size, NULL, 0, 0,
        GLOBAL_COM_REGION_BASE, &com_mapping_resource,
        0, 0);
}
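/*
 * Informational: com_region_size and GLOBAL_COM_REGION_BASE come from
 * <machine/cpu_capabilities.h> (_COMM_PAGE_AREA_LENGTH and
 * _COMM_PAGE_BASE_ADDRESS), so the comm page occupies the same fixed
 * window in every task.  Chaining com_mapping_resource onto a region's
 * ->next pointer, as done here and in shared_file_boot_time_init(), is
 * what delivers it alongside the split libraries.
 */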
void
shared_file_boot_time_init(
    unsigned int  fs_base,
    unsigned int  system)
{
    long                    shared_text_region_size;
    long                    shared_data_region_size;
    shared_region_mapping_t new_system_region;
    shared_region_mapping_t old_default_env;

    shared_text_region_size = 0x10000000;
    shared_data_region_size = 0x10000000;
    shared_file_init(&shared_text_region_handle,
        shared_text_region_size, &shared_data_region_handle,
        shared_data_region_size, &shared_file_mapping_array);

    shared_region_mapping_create(shared_text_region_handle,
        shared_text_region_size, shared_data_region_handle,
        shared_data_region_size, shared_file_mapping_array,
        GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
        SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);

    new_system_region->fs_base = fs_base;
    new_system_region->system = system;
    new_system_region->flags = SHARED_REGION_SYSTEM;

    /* grab an extra reference for the caller */
    /* remember to grab before call to update */
    shared_region_mapping_ref(new_system_region);
    old_default_env = update_default_shared_region(new_system_region);
    /* hold an extra reference because these are the system */
    /* shared regions. */
    if(old_default_env)
        shared_region_mapping_dealloc(old_default_env);
    if(com_mapping_resource == NULL) {
        shared_com_boot_time_init();
    }
    shared_region_mapping_ref(com_mapping_resource);
    new_system_region->next = com_mapping_resource;
    vm_set_shared_region(current_task(), new_system_region);
}
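/*
 * Reference-count sketch for the sequence above (informational):
 * update_default_shared_region() expects the new region to pass in a
 * reference (see its header comment), so an extra one is taken first to
 * cover this function's continued use of new_system_region; a displaced
 * previous default comes back holding its own reference, which is
 * dropped here via shared_region_mapping_dealloc().
 */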
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                          */
static kern_return_t
shared_file_init(
    ipc_port_t   *shared_text_region_handle,
    vm_size_t    text_region_size,
    ipc_port_t   *shared_data_region_handle,
    vm_size_t    data_region_size,
    vm_offset_t  *mapping_array)
{
    vm_offset_t         aligned_address;
    shared_file_info_t  *sf_head;
    vm_offset_t         table_mapping_address;
    vm_size_t           data_table_size;
    vm_size_t           hash_size;
    int                 i;
    kern_return_t       kret;

    vm_object_t         buf_object;
    vm_map_entry_t      entry;
    vm_size_t           alloced;
    vm_offset_t         b;
    vm_page_t           p;

    /* create text and data maps/regions */
    if(kret = vm_region_object_create(kernel_map,
                text_region_size,
                shared_text_region_handle)) {
        return kret;
    }
    if(kret = vm_region_object_create(kernel_map,
                data_region_size,
                shared_data_region_handle)) {
        ipc_port_release_send(*shared_text_region_handle);
        return kret;
    }

    data_table_size = data_region_size >> 9;
    hash_size = data_region_size >> 14;
    table_mapping_address = data_region_size - data_table_size;
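    /*
     * Worked sizing example (illustrative): with the 256 MB (0x10000000)
     * data region used at boot, data_table_size is 0x10000000 >> 9 =
     * 0x80000 (512 KB) and hash_size is 0x10000000 >> 14 = 0x4000 (16 KB),
     * so the mapping table is mapped read-only at data_region_size -
     * data_table_size = 0x0FF80000, the top 512 KB of the data region.
     */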
    if(shared_file_mapping_array == 0) {
        buf_object = vm_object_allocate(data_table_size);

        if(vm_map_find_space(kernel_map, &shared_file_mapping_array,
                data_table_size, 0, &entry) != KERN_SUCCESS) {
            panic("shared_file_init: no space");
        }
        *mapping_array = shared_file_mapping_array;
        vm_map_unlock(kernel_map);
        entry->object.vm_object = buf_object;
        entry->offset = 0;

        for (b = *mapping_array, alloced = 0;
            alloced < (hash_size +
                round_page_32(sizeof(struct sf_mapping)));
            alloced += PAGE_SIZE, b += PAGE_SIZE) {
            vm_object_lock(buf_object);
            p = vm_page_alloc(buf_object, alloced);
            if (p == VM_PAGE_NULL) {
                panic("shared_file_init: no space");
            }
            p->busy = FALSE;
            vm_object_unlock(buf_object);
            pmap_enter(kernel_pmap, b, p->phys_page,
                VM_PROT_READ | VM_PROT_WRITE,
                ((unsigned int)(p->object->wimg_bits))
                    & VM_WIMG_MASK,
                TRUE);
        }

        /* initialize loaded file array */
        sf_head = (shared_file_info_t *)*mapping_array;
        sf_head->hash = (queue_head_t *)
                (((int)*mapping_array) +
                    sizeof(struct shared_file_info));
        sf_head->hash_size = hash_size/sizeof(queue_head_t);
        mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
        sf_head->hash_init = FALSE;

        mach_make_memory_entry(kernel_map, &data_table_size,
            *mapping_array, VM_PROT_READ, &sfma_handle,
            NULL);

        if (vm_map_wire(kernel_map, *mapping_array,
            *mapping_array +
                (hash_size + round_page_32(sizeof(struct sf_mapping))),
            VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
            panic("shared_file_init: No memory for data table");
        }

        lsf_zone = zinit(sizeof(struct load_file_ele),
            data_table_size -
                (hash_size + round_page_32(sizeof(struct sf_mapping))),
            0, "load_file_server");

        zone_change(lsf_zone, Z_EXHAUST, TRUE);
        zone_change(lsf_zone, Z_COLLECT, FALSE);
        zone_change(lsf_zone, Z_EXPAND, FALSE);
        zone_change(lsf_zone, Z_FOREIGN, TRUE);

        /* initialize the global default environment lock */
        mutex_init(&default_regions_list_lock_data, ETAP_NO_TRACE);
    } else {
        *mapping_array = shared_file_mapping_array;
    }

    vm_map(((vm_named_entry_t)
            (*shared_data_region_handle)->ip_kobject)->backing.map,
        &table_mapping_address,
        data_table_size, 0, SHARED_LIB_ALIAS,
        sfma_handle, 0, FALSE,
        VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

    return KERN_SUCCESS;
}
/* A call made from user space, copyin_shared_file requires the user to  */
/* provide the address and size of a mapped file, the full path name of  */
/* that file and a list of offsets to be mapped into shared memory.      */
/* By requiring that the file be pre-mapped, copyin_shared_file can      */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call.                                                      */
kern_return_t
copyin_shared_file(
    vm_offset_t                    mapped_file,
    vm_size_t                      mapped_file_size,
    vm_offset_t                    *base_address,
    int                            map_cnt,
    sf_mapping_t                   *mappings,
    memory_object_control_t        file_control,
    shared_region_task_mappings_t  sm_info,
    int                            *flags)
{
    vm_object_t         file_object;
    vm_map_entry_t      entry;
    shared_file_info_t  *shared_file_header;
    load_struct_t       *file_entry;
    loaded_mapping_t    *file_mapping;
    boolean_t           alternate;
    int                 i;
    kern_return_t       ret;

    /* wire hash entry pool only as needed, since we are the only */
    /* users, we take a few liberties with the population of our  */
    /* zone.                                                      */
    static int          allocable_hash_pages;
    static vm_offset_t  hash_cram_address;

    shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

    mutex_lock(&shared_file_header->lock);
    /* If this is the first call to this routine, take the opportunity */
    /* to initialize the hash table which will be used to look-up      */
    /* mappings based on the file object                               */

    if(shared_file_header->hash_init == FALSE) {
        vm_size_t hash_table_size;
        vm_size_t hash_table_offset;

        hash_table_size = (shared_file_header->hash_size)
                * sizeof(struct queue_entry);
        hash_table_offset = hash_table_size +
                round_page_32(sizeof(struct sf_mapping));
        for (i = 0; i < shared_file_header->hash_size; i++)
            queue_init(&shared_file_header->hash[i]);

        allocable_hash_pages =
            ((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
        hash_cram_address =
            sm_info->region_mappings + hash_table_offset;
        shared_file_available_hash_ele = 0;

        shared_file_header->hash_init = TRUE;
    }

    if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
        int cram_size;

        cram_size = allocable_hash_pages > 3 ?
            3 : allocable_hash_pages;
        allocable_hash_pages -= cram_size;
        cram_size = cram_size * PAGE_SIZE;
        if (vm_map_wire(kernel_map, hash_cram_address,
                hash_cram_address + cram_size,
                VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
            panic("copyin_shared_file: No memory for data table");
        }
        zcram(lsf_zone, hash_cram_address, cram_size);
        shared_file_available_hash_ele
                += cram_size/sizeof(struct load_file_ele);
        hash_cram_address += cram_size;
    }
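    /*
     * Illustrative accounting (not from the original source): each cram
     * moves at most 3 pages from the reserved table space into lsf_zone.
     * With 4 KB pages one cram adds 12 KB of elements; at, say, a 56-byte
     * struct load_file_ele that is roughly 219 new entries, so the
     * "fewer than 20 free" trigger refills the pool long before a single
     * load could exhaust it.
     */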
    /* Find the entry in the map associated with the current mapping */
    /* of the file object */
    file_object = memory_object_control_to_vm_object(file_control);
    if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
        vm_object_t mapped_object;
        if(entry->is_sub_map) {
            mutex_unlock(&shared_file_header->lock);
            return KERN_INVALID_ADDRESS;
        }
        mapped_object = entry->object.vm_object;
        while(mapped_object->shadow != NULL) {
            mapped_object = mapped_object->shadow;
        }
        /* check to see that the file object passed is indeed the */
        /* same as the mapped object passed */
        if(file_object != mapped_object) {
            if(sm_info->flags & SHARED_REGION_SYSTEM) {
                mutex_unlock(&shared_file_header->lock);
                return KERN_PROTECTION_FAILURE;
            } else {
                file_object = mapped_object;
            }
        }
    } else {
        mutex_unlock(&shared_file_header->lock);
        return KERN_INVALID_ADDRESS;
    }

    alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

    if (file_entry = lsf_hash_lookup(shared_file_header->hash,
            (void *) file_object, mappings[0].file_offset,
            shared_file_header->hash_size,
            alternate, sm_info)) {
        /* File is loaded, check the load manifest for exact match */
        /* we simplify by requiring that the elements be the same  */
        /* size and in the same order rather than checking for     */
        /* semantic equivalence.                                   */

        /* If the file is being loaded in the alternate        */
        /* area, one load to alternate is allowed per mapped   */
        /* object the base address is passed back to the       */
        /* caller and the mappings field is filled in.  If the */
        /* caller does not pass the precise mappings_cnt       */
        /* and the Alternate is already loaded, an error       */
        /* is returned.                                        */
        i = 0;
        file_mapping = file_entry->mappings;
        while(file_mapping != NULL) {
            if(i >= map_cnt) {
                mutex_unlock(&shared_file_header->lock);
                return KERN_INVALID_ARGUMENT;
            }
            if(((mappings[i].mapping_offset)
                    & SHARED_DATA_REGION_MASK) !=
                    file_mapping->mapping_offset ||
                    mappings[i].size !=
                    file_mapping->size ||
                    mappings[i].file_offset !=
                    file_mapping->file_offset ||
                    mappings[i].protection !=
                    file_mapping->protection) {
                mutex_unlock(&shared_file_header->lock);
                return KERN_INVALID_ARGUMENT;
            }
            file_mapping = file_mapping->next;
            i++;
        }
        if(i != map_cnt) {
            mutex_unlock(&shared_file_header->lock);
            return KERN_INVALID_ARGUMENT;
        }
        *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
                        + file_entry->base_address;
        *flags = SF_PREV_LOADED;
        mutex_unlock(&shared_file_header->lock);
        return KERN_SUCCESS;
    } else {
        /* File is not loaded, let's attempt to load it */
        ret = lsf_load(mapped_file, mapped_file_size, base_address,
                mappings, map_cnt,
                (void *)file_object, *flags, sm_info);
        if(ret == KERN_NO_SPACE) {
            shared_region_mapping_t regions;
            shared_region_mapping_t system_region;
            regions = (shared_region_mapping_t)sm_info->self;
            regions->flags |= SHARED_REGION_FULL;
            system_region = lookup_default_shared_region(
                regions->fs_base, regions->system);
            if(system_region == regions) {
                shared_region_mapping_t new_system_shared_regions;
                shared_file_boot_time_init(
                    regions->fs_base, regions->system);
                /* current task must stay with its current      */
                /* regions, drop count on system_shared_region  */
                /* and put back our original set                */
                vm_get_shared_region(current_task(),
                        &new_system_shared_regions);
                shared_region_mapping_dealloc_lock(
                        new_system_shared_regions, 0);
                vm_set_shared_region(current_task(), regions);
            }
            if(system_region != NULL) {
                shared_region_mapping_dealloc_lock(
                        system_region, 0);
            }
        }
        mutex_unlock(&shared_file_header->lock);
        return ret;
    }
}
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.                            */
static load_struct_t *
lsf_hash_lookup(
    queue_head_t                   *hash_table,
    void                           *file_object,
    vm_offset_t                    recognizableOffset,
    int                            size,
    boolean_t                      alternate,
    shared_region_task_mappings_t  sm_info)
{
    register queue_t        bucket;
    load_struct_t           *entry;
    shared_region_mapping_t target_region;
    int                     depth;

    bucket = &(hash_table[load_file_hash((int)file_object, size)]);
    for (entry = (load_struct_t *)queue_first(bucket);
        !queue_end(bucket, &entry->links);
        entry = (load_struct_t *)queue_next(&entry->links)) {

        if ((entry->file_object == (int)file_object) &&
            (entry->file_offset == recognizableOffset)) {
            target_region = (shared_region_mapping_t)sm_info->self;
            depth = target_region->depth;
            while(target_region) {
                if((!(sm_info->self)) ||
                    ((target_region == entry->regions_instance) &&
                    (target_region->depth >= entry->depth))) {
                    if(alternate) {
                        if (entry->base_address >=
                            sm_info->alternate_base)
                            return entry;
                    } else {
                        if (entry->base_address <
                            sm_info->alternate_base)
                            return entry;
                    }
                }
                if(target_region->object_chain) {
                    target_region = (shared_region_mapping_t)
                        target_region->object_chain->object_chain_region;
                    depth = target_region->object_chain->depth;
                } else {
                    target_region = NULL;
                }
            }
        }
    }

    return (load_struct_t *)0;
}
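/*
 * Lookup semantics in brief (informational): a hit requires the same
 * file_object and file_offset, an entry that belongs to the region
 * instance being searched (or any region when sm_info->self is 0), and
 * a base address on the requested side of alternate_base -- at or above
 * it when the alternate flag is set, below it otherwise.
 */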
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
    shared_region_mapping_t        region,
    shared_region_task_mappings_t  sm_info,
    int                            need_lock)
{
    int                 i;
    register queue_t    bucket;
    shared_file_info_t  *shared_file_header;
    load_struct_t       *entry;
    load_struct_t       *next_entry;
    load_struct_t       *prev_entry;

    shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

    if (need_lock)
        mutex_lock(&shared_file_header->lock);
    if(shared_file_header->hash_init == FALSE) {
        if (need_lock)
            mutex_unlock(&shared_file_header->lock);
        return NULL;
    }
    for(i = 0; i < shared_file_header->hash_size; i++) {
        bucket = &shared_file_header->hash[i];
        for (entry = (load_struct_t *)queue_first(bucket);
            !queue_end(bucket, &entry->links);) {
            next_entry = (load_struct_t *)queue_next(&entry->links);
            if(region == entry->regions_instance) {
                lsf_unload((void *)entry->file_object,
                        entry->base_address, sm_info);
            }
            entry = next_entry;
        }
    }
    if (need_lock)
        mutex_unlock(&shared_file_header->lock);
    return NULL;
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export once that is confirmed.
 */
load_struct_t *
lsf_remove_regions_mappings(
    shared_region_mapping_t        region,
    shared_region_task_mappings_t  sm_info)
{
    return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.                                  */
static load_struct_t *
lsf_hash_delete(
    void                           *file_object,
    vm_offset_t                    base_offset,
    shared_region_task_mappings_t  sm_info)
{
    register queue_t    bucket;
    shared_file_info_t  *shared_file_header;
    load_struct_t       *entry;
    load_struct_t       *prev_entry;

    shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

    bucket = &shared_file_header->hash
        [load_file_hash((int)file_object, shared_file_header->hash_size)];

    for (entry = (load_struct_t *)queue_first(bucket);
        !queue_end(bucket, &entry->links);
        entry = (load_struct_t *)queue_next(&entry->links)) {
        if((!(sm_info->self)) || ((shared_region_mapping_t)
                sm_info->self == entry->regions_instance)) {
            if ((entry->file_object == (int) file_object) &&
                    (entry->base_address == base_offset)) {
                queue_remove(bucket, entry,
                        load_struct_ptr_t, links);
                return entry;
            }
        }
    }

    return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table.                                  */
static void
lsf_hash_insert(
    load_struct_t                  *entry,
    shared_region_task_mappings_t  sm_info)
{
    shared_file_info_t *shared_file_header;

    shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
    queue_enter(&shared_file_header->hash
            [load_file_hash(entry->file_object,
                shared_file_header->hash_size)],
        entry, load_struct_ptr_t, links);
}
/* Looks up the file type requested.  If already loaded and the   */
/* file extents are an exact match, returns Success.  If not      */
/* loaded attempts to load the file extents at the given offsets; */
/* if any extent fails to load or if the file was already loaded  */
/* in a different configuration, lsf_load fails.                  */
static kern_return_t
lsf_load(
    vm_offset_t                    mapped_file,
    vm_size_t                      mapped_file_size,
    vm_offset_t                    *base_address,
    sf_mapping_t                   *mappings,
    int                            map_cnt,
    void                           *file_object,
    int                            flags,
    shared_region_task_mappings_t  sm_info)
{
    load_struct_t       *entry;
    vm_map_copy_t       copy_object;
    loaded_mapping_t    *file_mapping;
    loaded_mapping_t    **tptr;
    int                 i;
    ipc_port_t          local_map;
    vm_offset_t         original_alt_load_next;
    vm_offset_t         alternate_load_next;

    entry = (load_struct_t *)zalloc(lsf_zone);
    shared_file_available_hash_ele--;
    entry->file_object = (int)file_object;
    entry->mapping_cnt = map_cnt;
    entry->mappings = NULL;
    entry->links.prev = (queue_entry_t) 0;
    entry->links.next = (queue_entry_t) 0;
    entry->regions_instance = (shared_region_mapping_t)sm_info->self;
    entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
    entry->file_offset = mappings[0].file_offset;

    lsf_hash_insert(entry, sm_info);
    tptr = &(entry->mappings);

    alternate_load_next = sm_info->alternate_next;
    original_alt_load_next = alternate_load_next;
    if (flags & ALTERNATE_LOAD_SITE) {
        int max_loadfile_offset;

        *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
                        sm_info->alternate_next;
        max_loadfile_offset = 0;
        for(i = 0; i < map_cnt; i++) {
            if(((mappings[i].mapping_offset
                & SHARED_TEXT_REGION_MASK) + mappings[i].size) >
                max_loadfile_offset) {
                max_loadfile_offset =
                    (mappings[i].mapping_offset
                        & SHARED_TEXT_REGION_MASK)
                        + mappings[i].size;
            }
        }
        if((alternate_load_next + round_page_32(max_loadfile_offset)) >=
            (sm_info->data_size - (sm_info->data_size>>9))) {
            entry->base_address =
                (*base_address) & SHARED_TEXT_REGION_MASK;
            lsf_unload(file_object, entry->base_address, sm_info);

            return KERN_NO_SPACE;
        }
        alternate_load_next += round_page_32(max_loadfile_offset);
    } else {
        if (((*base_address) & SHARED_TEXT_REGION_MASK) >
                    sm_info->alternate_base) {
            entry->base_address =
                (*base_address) & SHARED_TEXT_REGION_MASK;
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_INVALID_ARGUMENT;
        }
    }

    entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;
    // Sanity check the mappings -- make sure we don't stray across the
    // alternate boundary.  If any bit of a library that we're not trying
    // to load in the alternate load space strays across that boundary,
    // return KERN_INVALID_ARGUMENT immediately so that the caller can
    // try to load it in the alternate shared area.  We do this to avoid
    // a nasty case: if a library tries to load so that it crosses the
    // boundary, it'll occupy a bit of the alternate load area without
    // the kernel being aware.  When loads into the alternate load area
    // at the first free address are tried, the load will fail.
    // Thus, a single library straddling the boundary causes all sliding
    // libraries to fail to load.  This check will avoid such a case.

    if (!(flags & ALTERNATE_LOAD_SITE)) {
        for (i = 0; i < map_cnt; i++) {
            vm_offset_t  region_mask;
            vm_address_t region_start;
            vm_address_t region_end;

            if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
                // mapping offsets are relative to start of shared segments.
                region_mask = SHARED_TEXT_REGION_MASK;
                region_start = (mappings[i].mapping_offset & region_mask)
                        + entry->base_address;
                region_end = (mappings[i].size + region_start);
                if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
                    // No library is permitted to load so any bit of it is in the
                    // shared alternate space.  If they want it loaded, they can put
                    // it in the alternate space explicitly.
                    printf("Library trying to load across alternate shared region boundary -- denied!\n");
                    lsf_unload(file_object, entry->base_address, sm_info);
                    return KERN_INVALID_ARGUMENT;
                }
            } else {
                region_mask = SHARED_DATA_REGION_MASK;
                region_start = (mappings[i].mapping_offset & region_mask)
                        + entry->base_address;
                region_end = (mappings[i].size + region_start);
                if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
                    printf("Library trying to load across alternate shared region boundary -- denied!\n");
                    lsf_unload(file_object, entry->base_address, sm_info);
                    return KERN_INVALID_ARGUMENT;
                }
            }
        }
    } // if not alternate load site.
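    // Worked example of the check above (illustrative numbers only):
    // with entry->base_address == 0, a read-only mapping at
    // mapping_offset 0x08FFF000 of size 0x2000 gives region_end
    // 0x09001000; if SHARED_ALTERNATE_LOAD_BASE were 0x09000000, the
    // mapping would poke 0x1000 bytes past the boundary and the load
    // would be denied rather than silently consuming alternate space.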
    /* copyin mapped file data */
    for(i = 0; i < map_cnt; i++) {
        vm_offset_t target_address;
        vm_offset_t region_mask;

        if(mappings[i].protection & VM_PROT_COW) {
            local_map = (ipc_port_t)sm_info->data_region;
            region_mask = SHARED_DATA_REGION_MASK;
            if((mappings[i].mapping_offset
                & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
                lsf_unload(file_object,
                    entry->base_address, sm_info);
                return KERN_INVALID_ARGUMENT;
            }
        } else {
            region_mask = SHARED_TEXT_REGION_MASK;
            local_map = (ipc_port_t)sm_info->text_region;
            if(mappings[i].mapping_offset
                    & GLOBAL_SHARED_SEGMENT_MASK) {
                lsf_unload(file_object,
                    entry->base_address, sm_info);
                return KERN_INVALID_ARGUMENT;
            }
        }
        if(!(mappings[i].protection & VM_PROT_ZF)
                && ((mapped_file + mappings[i].file_offset +
                mappings[i].size) >
                (mapped_file + mapped_file_size))) {
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_INVALID_ARGUMENT;
        }
        target_address = ((mappings[i].mapping_offset) & region_mask)
                    + entry->base_address;
        if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
                ->backing.map, &target_address,
                mappings[i].size, FALSE)) {
            lsf_unload(file_object, entry->base_address, sm_info);
            return KERN_FAILURE;
        }
        target_address = ((mappings[i].mapping_offset) & region_mask)
                    + entry->base_address;
        if(!(mappings[i].protection & VM_PROT_ZF)) {
            if(vm_map_copyin(current_map(),
                mapped_file + mappings[i].file_offset,
                round_page_32(mappings[i].size), FALSE,
                &copy_object)) {
                vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                    ->backing.map, target_address, mappings[i].size);
                lsf_unload(file_object, entry->base_address, sm_info);
                return KERN_FAILURE;
            }
            if(vm_map_copy_overwrite(((vm_named_entry_t)
                local_map->ip_kobject)->backing.map, target_address,
                copy_object, FALSE)) {
                vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                    ->backing.map, target_address, mappings[i].size);
                lsf_unload(file_object, entry->base_address, sm_info);
                return KERN_FAILURE;
            }
        }
        vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
                ->backing.map, target_address,
                round_page_32(target_address + mappings[i].size),
                (mappings[i].protection &
                    (VM_PROT_READ | VM_PROT_EXECUTE)),
                TRUE);
        vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
                ->backing.map, target_address,
                round_page_32(target_address + mappings[i].size),
                (mappings[i].protection &
                    (VM_PROT_READ | VM_PROT_EXECUTE)),
                FALSE);

        file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
        if(file_mapping == 0)
            panic("lsf_load: OUT OF MAPPINGS!");
        shared_file_available_hash_ele--;
        file_mapping->mapping_offset = (mappings[i].mapping_offset)
                        & region_mask;
        file_mapping->size = mappings[i].size;
        file_mapping->file_offset = mappings[i].file_offset;
        file_mapping->protection = mappings[i].protection;
        file_mapping->next = NULL;
        *tptr = file_mapping;
        tptr = &(file_mapping->next);
    }
    shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
    return KERN_SUCCESS;
}
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed                                            */
static void
lsf_unload(
    void                           *file_object,
    vm_offset_t                    base_offset,
    shared_region_task_mappings_t  sm_info)
{
    load_struct_t     *entry;
    ipc_port_t        local_map;
    loaded_mapping_t  *map_ele;
    loaded_mapping_t  *back_ptr;

    entry = lsf_hash_delete(file_object, base_offset, sm_info);
    if(entry) {
        map_ele = entry->mappings;
        while(map_ele != NULL) {
            if(map_ele->protection & VM_PROT_COW) {
                local_map = (ipc_port_t)sm_info->data_region;
            } else {
                local_map = (ipc_port_t)sm_info->text_region;
            }
            vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
                    ->backing.map, entry->base_address +
                    map_ele->mapping_offset,
                    map_ele->size);
            back_ptr = map_ele;
            map_ele = map_ele->next;
            zfree(lsf_zone, (vm_offset_t)back_ptr);
            shared_file_available_hash_ele++;
        }
        zfree(lsf_zone, (vm_offset_t)entry);
        shared_file_available_hash_ele++;
    }
}
/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge()
{
    return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
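/*
 * Worked example for the gauge above (illustrative numbers): if lsf_zone
 * holds 2048 elements of 64 bytes (128 KB in use) against a max_size of
 * 512 KB, the function returns (131072 * 100) / 524288 = 25, i.e. the
 * mapping pool is 25% full.
 */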