/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	MACRO_BEGIN			\
	if (lsf_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)		\
	MACRO_BEGIN			\
	if (lsf_alloc_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#else /* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif /* DEBUG */
/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock);

static kern_return_t
shared_file_init(
	ipc_port_t		*text_region_handle,
	vm_size_t		text_region_size,
	ipc_port_t		*data_region_handle,
	vm_size_t		data_region_size,
	vm_offset_t		*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);

#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
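/*
 * Illustrative sketch (not part of the build): how load_file_hash() picks a
 * bucket.  The low 24 bits of the file object's address are taken modulo the
 * table size, so inserts and lookups must pass the same "size" to agree on a
 * bucket.  The helper name below is hypothetical.
 */
#if 0
static unsigned int
example_bucket_index(void *file_object, int hash_size)
{
	/* same computation as load_file_hash(file_object, hash_size) */
	return (((natural_t)file_object) & 0xffffff) % hash_size;
}
#endif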
vm_offset_t shared_file_text_region;
vm_offset_t shared_file_data_region;

ipc_port_t shared_text_region_handle;
ipc_port_t shared_data_region_handle;
vm_offset_t shared_file_mapping_array = 0;

shared_region_mapping_t default_environment_shared_regions = NULL;
static decl_mutex_data(, default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)
ipc_port_t sfma_handle = NULL;
zone_t lsf_zone;

int shared_file_available_hash_ele;

/* com region support */
ipc_port_t com_region_handle32 = NULL;
ipc_port_t com_region_handle64 = NULL;
vm_map_t com_region_map32 = NULL;
vm_map_t com_region_map64 = NULL;
vm_size_t com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t com_mapping_resource = NULL;

#if DEBUG
int shared_region_debug = 0;
#define SHARED_REGION_DEBUG(args)	\
	MACRO_BEGIN			\
	if (shared_region_debug) {	\
		kprintf args;		\
	}				\
	MACRO_END
#else /* DEBUG */
#define SHARED_REGION_DEBUG(args)
#endif /* DEBUG */
__private_extern__ kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p)\n",
			     task, shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}
/*
 * shared_region_object_chain_detached:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
__private_extern__ void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}
/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just look up the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
__private_extern__ kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t	target_region,
	shared_region_mapping_t	object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p)\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
__private_extern__ kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = ENV_DEFAULT_ROOT;
	(*shared_region)->system = cpu_type();
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
__private_extern__ kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if (shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings	sm_info;
	shared_region_mapping_t			next = NULL;
	int					ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if (shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if (((vm_named_entry_t)
			     (shared_region->text_region->ip_kobject))
			    ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if (shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if ((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM) &&
			    !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}

/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
__private_extern__ kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region != NULL) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
static kern_return_t
shared_region_object_create(
	vm_size_t	size,
	ipc_port_t	*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if (user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */
	new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
__private_extern__ kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t	text_handle;
	ipc_port_t	data_handle;
	long		text_size;
	long		data_size;
	vm_offset_t	mapping_array;
	kern_return_t	kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if (com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/*
 * load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification, it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */
__private_extern__ shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if ((old_system_region != NULL) &&
	    (old_system_region->fs_base == fs_base) &&
	    (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while (old_system_region->default_env_list != NULL) {
			if ((old_system_region->default_env_list->fs_base == fs_base) &&
			    (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t	tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry                           */
	if (old_system_region) {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
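/*
 * Illustrative sketch (not part of the build): the default regions list is a
 * singly linked list threaded through "default_env_list" and keyed by the
 * (fs_base, system) pair.  This is the shape of the scan that both
 * update_default_shared_region() and lookup_default_shared_region() perform;
 * the helper name is hypothetical and no locking is shown.
 */
#if 0
static shared_region_mapping_t
example_find_env(
	shared_region_mapping_t	head,
	unsigned int		fs_base,
	unsigned int		system)
{
	while (head != NULL &&
	       !(head->fs_base == fs_base && head->system == system)) {
		head = head->default_env_list;
	}
	return head;	/* NULL if no region matches this environment */
}
#endif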
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */
__private_extern__ shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;

	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while (system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if ((system_region->fs_base == fs_base) &&
		    (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if (system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_system_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t	old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if (old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock, 0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while (old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if (old_system_region->default_env_list == system_region) {
			shared_region_mapping_t	dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock, 0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}
__private_extern__ void
remove_all_shared_regions(void)
{
	shared_region_mapping_t	system_region;
	shared_region_mapping_t	next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if (system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while (system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both                       */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if (com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if (com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code */
	if ((kret = shared_region_object_create(
		     com_region_size,
		     &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
	}
	if ((kret = shared_region_object_create(
		     com_region_size,
		     &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
	}

	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
			com_region_size, NULL, 0, 0,
			_COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
			0, 0);
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
__private_extern__ void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
			 text_region_size,
			 &shared_data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
				     text_region_size,
				     shared_data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE);

	new_system_region->fs_base = fs_base;
	new_system_region->system = system;
	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions.                                      */
	if (old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if (com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                          */

static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	vm_size_t		data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if (shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if (vm_map_find_space(kernel_map, &map_addr,
				      data_table_size, 0, &entry)
		    != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ, &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);
	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	kret = vm_map(((vm_named_entry_t)
		       (*data_region_handle)->ip_kobject)->backing.map,
		      &table_mapping_address,
		      data_table_size, 0,
		      SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
		      sfma_handle, 0, FALSE,
		      VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return kret;
}
static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone.                                                      */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;

	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			printf("shared_file_header_init: "
			       "No memory for data table\n");
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}
/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
__private_extern__ kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object                               */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object                                            */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence.                                   */

		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].sfm_address)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].sfm_size != file_mapping->size ||
			    mappings[i].sfm_file_offset != file_mapping->file_offset ||
			    mappings[i].sfm_init_prot != file_mapping->protection) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if (i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We fail.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
			} else {
				/*
				 * The file is already mapped at the correct
				 * address.
				 * We're done !
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}

	/* File is not loaded, let's attempt to load it */
	ret = lsf_map(mappings, map_cnt,
		      (void *)file_control,
		      file_size,
		      sm_info,
		      base_offset,
		      slide_p);
	if (ret == KERN_NO_SPACE) {
		shared_region_mapping_t	regions;
		shared_region_mapping_t	system_region;
		regions = (shared_region_mapping_t)sm_info->self;
		regions->flags |= SHARED_REGION_FULL;
		system_region = lookup_default_shared_region(
			regions->fs_base, regions->system);
		if (system_region == regions) {
			shared_region_mapping_t	new_system_shared_region;
			shared_file_boot_time_init(
				regions->fs_base, regions->system);
			/* current task must stay with its current      */
			/* regions, drop count on system_shared_region  */
			/* and put back our original set                */
			vm_get_shared_region(current_task(),
					     &new_system_shared_region);
			shared_region_mapping_dealloc_lock(
				new_system_shared_region, 0, 1);
			vm_set_shared_region(current_task(), regions);
		} else if (system_region != NULL) {
			shared_region_mapping_dealloc_lock(
				system_region, 0, 1);
		}
	}
	mutex_unlock(&shared_file_header->lock);
	return ret;
}
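/*
 * Illustrative sketch (not part of the build): the slide computed above.  If
 * the file is already resident at one base address and the caller asked for
 * another, every address the caller uses must be adjusted by the same delta.
 * The helper name and parameters are hypothetical.
 */
#if 0
static mach_vm_offset_t
example_slide(
	mach_vm_offset_t resident_base,		/* file_entry->base_address */
	mach_vm_offset_t requested_base)	/* caller's base_offset */
{
	/* matches "slide = file_entry->base_address - base_offset" above */
	return resident_base - requested_base;
}
#endif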
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
__private_extern__ kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	kern_return_t		kr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	kr = KERN_SUCCESS;
	return kr;
}
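/*
 * Illustrative sketch (not part of the build): the range sort above is a
 * plain selection sort; for each position, find the smallest remaining
 * srr_address and swap it in.  Shown standalone with a trimmed-down range
 * type; all names here are hypothetical.
 */
#if 0
struct example_range {
	vm_map_offset_t	srr_address;
};

static void
example_selection_sort(struct example_range *r, unsigned int n)
{
	unsigned int		i, j, min;
	struct example_range	tmp;

	for (i = 0; i < n; i++) {
		min = i;
		for (j = i + 1; j < n; j++) {
			if (r[j].srr_address < r[min].srr_address)
				min = j;
		}
		if (min != i) {
			tmp = r[min];
			r[min] = r[i];
			r[i] = tmp;
		}
	}
}
#endif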
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.                             */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while (target_region) {
				if ((!(sm_info->self)) ||
				    ((target_region == entry->regions_instance) &&
				     (target_region->depth >= entry->depth))) {
					if (alternate &&
					    entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if (target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if (shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for (i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if (region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}
			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

	return NULL;	/* XXX */
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.                                  */

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
		   file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if ((!(sm_info->self)) || ((shared_region_mapping_t)
					   sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete; not found\n"));
	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table.                                  */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t	*shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
/*
 * lsf_slide:
 *
 * Look in the shared region, starting from the end, for a place to fit all the
 * mappings while respecting their relative offsets.
 */
static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings_in,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p)
{
	mach_vm_offset_t	max_mapping_offset;
	int			i;
	vm_map_entry_t		map_entry, prev_entry, next_entry;
	mach_vm_offset_t	prev_hole_start, prev_hole_end;
	mach_vm_offset_t	mapping_offset, mapping_end_offset;
	mach_vm_offset_t	base_offset;
	mach_vm_size_t		mapping_size;
	mach_vm_offset_t	wiggle_room, wiggle;
	vm_map_t		text_map, data_map, map;
	vm_named_entry_t	region_entry;
	ipc_port_t		region_handle;
	kern_return_t		kr;

	struct shared_file_mapping_np	*mappings, tmp_mapping;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Sort the mappings array, so that we can try and fit them in
	 * in the right order as we progress along the VM maps.
	 *
	 * We can't modify the original array (the original order is
	 * important when doing lookups of the mappings), so copy it first.
	 */

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &mappings,
			(vm_size_t) (map_cnt * sizeof (mappings[0])));
	if (kr != KERN_SUCCESS) {
		return KERN_NO_SPACE;
	}

	bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));

	max_mapping_offset = 0;
	for (sorted_index = 0;
	     sorted_index < map_cnt;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		mapping_end_offset = ((mappings[sort_min_index].sfm_address &
				       SHARED_TEXT_REGION_MASK) +
				      mappings[sort_min_index].sfm_size);
		sort_min_address = mapping_end_offset;
		/* compute the highest mapping_offset as well... */
		if (mapping_end_offset > max_mapping_offset) {
			max_mapping_offset = mapping_end_offset;
		}
		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < map_cnt;
		     sort_index++) {

			mapping_end_offset =
				((mappings[sort_index].sfm_address &
				  SHARED_TEXT_REGION_MASK) +
				 mappings[sort_index].sfm_size);

			if (mapping_end_offset < sort_min_address) {
				/* lowest mapping_offset so far... */
				sort_min_index = sort_index;
				sort_min_address = mapping_end_offset;
			}
		}
		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_mapping = mappings[sort_min_index];
			mappings[sort_min_index] = mappings[sorted_index];
			mappings[sorted_index] = tmp_mapping;
		}
	}

	max_mapping_offset = vm_map_round_page(max_mapping_offset);

	/* start from the end of the shared area */
	base_offset = sm_info->text_size;

	/* can all the mappings fit ? */
	if (max_mapping_offset > base_offset) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  map_cnt * sizeof (mappings[0]));
		return KERN_FAILURE;
	}

	/*
	 * Align the last mapping to the end of the submaps
	 * and start from there.
	 */
	base_offset -= max_mapping_offset;

	region_handle = (ipc_port_t) sm_info->text_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_map = region_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_map = region_entry->backing.map;

	vm_map_lock_read(text_map);
	vm_map_lock_read(data_map);

start_over:
	/*
	 * At first, we can wiggle all the way from our starting point
	 * (base_offset) towards the start of the map (0), if needed.
	 */
	wiggle_room = base_offset;

	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}

		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		while (TRUE) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * mapped.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need.  Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there: wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}
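/*
 * Illustrative sketch (not part of the build): the "wiggle" arithmetic used
 * above.  When a mapping's desired end (base + offset + size) overshoots the
 * end of the free hole, the whole library must slide down by the overshoot,
 * provided the accumulated wiggle_room allows it.  The helper name is
 * hypothetical.
 */
#if 0
static mach_vm_offset_t
example_overshoot(
	mach_vm_offset_t base_offset,
	mach_vm_offset_t mapping_offset,
	mach_vm_size_t   mapping_size,
	mach_vm_offset_t hole_end)
{
	mach_vm_offset_t desired_end;

	desired_end = base_offset + mapping_offset + mapping_size;
	return (desired_end > hole_end) ? (desired_end - hole_end) : 0;
}
#endif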
/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared
 * region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object,
		   sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object,
		   sm_info, entry));
	if (entry == NULL) {
		printf("lsf_map: unable to allocate memory\n");
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;

	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     (file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		if (mach_vm_map(region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
			lsf_unload(file_object, entry->base_address, sm_info);

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					entry->base_address = base_offset;
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			return KERN_FAILURE;
		}

		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_map: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
			& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if (sm_info->flags & SHARED_REGION_STANDALONE) {
		/*
		 * We have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping.  Just deallocate it all.
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed                                             */
static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(file_object, base_offset, sm_info, TRUE);
}
/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocate the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while (map_ele != NULL) {
			if (unload) {
				ipc_port_t		region_handle;
				vm_named_entry_t	region_entry;

				if (map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				vm_deallocate(region_entry->backing.map,
					      (entry->base_address +
					       map_ele->mapping_offset),
					      map_ele->size);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}
/* integer is from 1 to 100 and represents percent full */
__private_extern__ unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100) / lsf_zone->max_size;
}
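/*
 * Illustrative sketch (not part of the build): the gauge above.  With
 * hypothetical numbers, 500 elements of 64 bytes each in a zone capped at
 * 64000 bytes reads as (500 * 64 * 100) / 64000 = 50 percent full.
 */
#if 0
static unsigned int
example_pool_gauge(
	unsigned int count,
	unsigned int elem_size,
	unsigned int max_size)
{
	return ((count * elem_size) * 100) / max_size;
}
#endif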