/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
#if LSF_DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	if (lsf_debug) {		\
		printf args;		\
	}
#define LSF_ALLOC_DEBUG(args)		\
	if (lsf_alloc_debug) {		\
		printf args;		\
	}
#else	/* LSF_DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif	/* LSF_DEBUG */
/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t			size,
	ipc_port_t			*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t		shared_region,
	int				need_sfh_lock,
	int				need_drl_lock);

static kern_return_t
shared_file_init(
	ipc_port_t			*text_region_handle,
	vm_size_t			text_region_size,
	ipc_port_t			*data_region_handle,
	vm_size_t			data_region_size,
	vm_offset_t			*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t		*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_load(
	vm_offset_t			mapped_file,
	vm_size_t			mapped_file_size,
	vm_offset_t			*base_address,
	sf_mapping_t			*mappings,
	int				map_cnt,
	void				*file_object,
	int				flags,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	vm_size_t			size,
	boolean_t			unload);

#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
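
/*
 * Illustrative sketch (not part of the original code): load_file_hash()
 * reduces a file object pointer to a bucket index by masking it to its low
 * 24 bits and taking the remainder modulo the table size.  The helper name
 * and the table size of 128 below are hypothetical, for the example only.
 */
#if 0	/* example only */
static unsigned int
example_bucket_for(int file_object)
{
	unsigned int table_size = 128;	/* hypothetical bucket count */

	return load_file_hash(file_object, table_size);
}
#endif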
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock()	\
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try()	\
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock()	\
		mutex_unlock(&default_regions_list_lock_data)
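
/*
 * Illustrative sketch (not part of the original code): the default regions
 * list is only ever walked while holding default_regions_list_lock().  The
 * hypothetical helper below shows that discipline by counting the entries
 * chained through default_env_list under the lock.
 */
#if 0	/* example only */
static int
example_count_default_regions(void)
{
	shared_region_mapping_t	region;
	int			count = 0;

	default_regions_list_lock();
	for (region = default_environment_shared_regions;
	     region != NULL;
	     region = region->default_env_list) {
		count++;
	}
	default_regions_list_unlock();
	return count;
}
#endif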
ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle32 = NULL;
ipc_port_t		com_region_handle64 = NULL;
vm_map_t		com_region_map32 = NULL;
vm_map_t		com_region_map64 = NULL;
vm_size_t		com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;

#if SHARED_REGION_DEBUG
int shared_region_debug = 0;
#define SHARED_REGION_DEBUG(args)	\
	if (shared_region_debug) {	\
		printf args;		\
	}
#else	/* SHARED_REGION_DEBUG */
#define SHARED_REGION_DEBUG(args)
#endif	/* SHARED_REGION_DEBUG */
kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}

kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p)\n",
			     task, shared_region));
	assert(shared_region->ref_count > 0);
	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}
/*
 * shared_region_object_chain_detached:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}
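
/*
 * Illustrative sketch (not part of the original code): a caller that has
 * cloned a private shared region and intends to strip mappings from it would
 * mark the clone standalone first, roughly as below.  The helper name is
 * hypothetical.
 */
#if 0	/* example only */
static void
example_prepare_private_clone(shared_region_mapping_t private_clone)
{
	/* the clone no longer tracks per-file mappings or a shadow region */
	shared_region_object_chain_detached(private_clone);
	/* ...the unwanted mappings can now be removed from the clone... */
}
#endif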
/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t	target_region,
	shared_region_mapping_t	object_chain_region)
{
	shared_region_object_chain_t object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "unable to allocate memory\n"));
		return KERN_NO_MEMORY;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = ENV_DEFAULT_ROOT;
	(*shared_region)->system = cpu_type();
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	unsigned int		*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_set_alt_next(
	shared_region_mapping_t	shared_region,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_set_alt_next"
			     "(shared_region=%p, alt_next=0x%x)\n",
			     shared_region, alt_next));
	assert(shared_region->ref_count > 0);
	shared_region->alternate_next = alt_next;
	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if (shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
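
/*
 * Illustrative sketch (not part of the original code): each additional user
 * of a shared_region_mapping_t takes a reference before publishing the
 * region, as shared_file_boot_time_init() does before handing the region to
 * the current task.  The helper below is hypothetical.
 */
#if 0	/* example only */
static void
example_share_with_task(task_t task, shared_region_mapping_t region)
{
	shared_region_mapping_ref(region);	/* reference held by the task */
	vm_set_shared_region(task, region);
}
#endif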
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings sm_info;
	shared_region_mapping_t next = NULL;
	int ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if (shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if (((vm_named_entry_t)
			     (shared_region->text_region->ip_kobject))
			    ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if (shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list. */
			if ((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM) &&
			    !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}

/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if (user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not */
/* relevant as the system default flag is not set */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t	text_handle;
	ipc_port_t	data_handle;
	vm_size_t	text_size;
	vm_size_t	data_size;
	vm_offset_t	mapping_array;
	kern_return_t	kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle,
					    text_size, data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if (com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/*
 * load a new default for a specified environment into the default share
 * regions list.  If a previous default exists for the environment
 * specification it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if ((old_system_region != NULL) &&
	    (old_system_region->fs_base == fs_base) &&
	    (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while (old_system_region->default_env_list != NULL) {
			if ((old_system_region->default_env_list->fs_base == fs_base) &&
			    (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if (old_system_region) {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure
 */
shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;

	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while (system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if ((system_region->fs_base == fs_base) &&
		    (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if (system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_system_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}
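
/*
 * Illustrative sketch (not part of the original code): a typical caller
 * looks up the default region for an environment, uses it, and then drops
 * the reference that lookup_default_shared_region() took on its behalf.
 * The helper name and the fs_base/system values are hypothetical.
 */
#if 0	/* example only */
static void
example_use_default_region(unsigned int fs_base, unsigned int system)
{
	shared_region_mapping_t region;

	region = lookup_default_shared_region(fs_base, system);
	if (region == NULL)
		return;		/* no default registered for this environment */
	/* ... use "region" ... */
	shared_region_mapping_dealloc(region);	/* drop the lookup reference */
}
#endif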
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if (old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock, 0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while (old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if (old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;

			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock, 0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}

/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}
void
remove_all_shared_regions(void)
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if (system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while (system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi independent of the split libs */
/* and so its policies have to be handled differently by the code that */
/* manipulates the mapping of shared region environments.  However, */
/* the shared region delivery system supports both */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if (com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if (com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code */
	if ((kret = shared_region_object_create(
		     com_region_size,
		     &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
	}
	if ((kret = shared_region_object_create(
		     com_region_size,
		     &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
	}

	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size, NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS,
					    &com_mapping_resource,
					    0, 0);
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
			 text_region_size,
			 &shared_data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
				     text_region_size,
				     shared_data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE);

	new_system_region->fs_base = fs_base;
	new_system_region->system = system;
	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if (old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if (com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of */
/* a shared_memory_server which not only allocates the backing maps */
/* but also coordinates requests for space.  */
static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			hash_size;
	vm_size_t		data_table_size;
	kern_return_t		kret;
	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if (shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;

		buf_object = vm_object_allocate(data_table_size);

		if (vm_map_find_space(kernel_map, &map_addr,
				      data_table_size, 0, &entry)
		    != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ,
				       &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);
	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	kret = vm_map(((vm_named_entry_t)
		       (*data_region_handle)->ip_kobject)->backing.map,
		      &table_mapping_address,
		      data_table_size, 0,
		      SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
		      sfma_handle, 0, FALSE,
		      VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return kret;
}
static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;
	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int		allocable_hash_pages;
	static vm_offset_t	hash_cram_address;

	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			printf("shared_file_header_init: "
			       "No memory for data table\n");
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}
/* A call made from user space, copyin_shared_file requires the user to */
/* provide the address and size of a mapped file, the full path name of */
/* that file and a list of offsets to be mapped into shared memory.  */
/* By requiring that the file be pre-mapped, copyin_shared_file can */
/* guarantee that the file is neither deleted nor changed after the user */
/* begins the call.  */
kern_return_t
copyin_shared_file(
	vm_offset_t		mapped_file,
	vm_size_t		mapped_file_size,
	vm_offset_t		*base_address,
	int			map_cnt,
	sf_mapping_t		*mappings,
	memory_object_control_t	file_control,
	shared_region_task_mappings_t	sm_info,
	int			*flags)
{
	vm_object_t		file_object;
	vm_map_entry_t		entry;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	boolean_t		alternate;
	int			i;
	kern_return_t		ret;

	SHARED_REGION_DEBUG(("copyin_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up */
	/* mappings based on the file object */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return ret;
		}
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);
	if (vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
		vm_object_t mapped_object;
		if (entry->is_sub_map ||
		    entry->object.vm_object == VM_OBJECT_NULL) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ADDRESS;
		}
		mapped_object = entry->object.vm_object;
		while (mapped_object->shadow != NULL) {
			mapped_object = mapped_object->shadow;
		}
		/* check to see that the file object passed is indeed the */
		/* same as the mapped object passed */
		if (file_object != mapped_object) {
			if (sm_info->flags & SHARED_REGION_SYSTEM) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_PROTECTION_FAILURE;
			} else {
				file_object = mapped_object;
			}
		}
	} else {
		mutex_unlock(&shared_file_header->lock);
		return KERN_INVALID_ADDRESS;
	}

	alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].file_offset,
				     shared_file_header->hash_size,
				     !alternate, alternate, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same */
		/* size and in the same order rather than checking for */
		/* semantic equivalence. */

		/* If the file is being loaded in the alternate */
		/* area, one load to alternate is allowed per mapped */
		/* object the base address is passed back to the */
		/* caller and the mappings field is filled in.  If the */
		/* caller does not pass the precise mappings_cnt */
		/* and the Alternate is already loaded, an error */
		/* is returned.  */
		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].mapping_offset)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].size !=
			    file_mapping->size ||
			    mappings[i].file_offset !=
			    file_mapping->file_offset ||
			    mappings[i].protection !=
			    file_mapping->protection) {
				break;
			}
			i++;
			file_mapping = file_mapping->next;
		}
		if (i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}
		*base_address = (*base_address & ~SHARED_TEXT_REGION_MASK)
			+ file_entry->base_address;
		*flags = SF_PREV_LOADED;
		mutex_unlock(&shared_file_header->lock);
		return KERN_SUCCESS;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_load(mapped_file, mapped_file_size, base_address,
			       mappings, map_cnt,
			       (void *)file_object,
			       *flags, sm_info);
		if (ret == KERN_NO_SPACE) {
			shared_region_mapping_t regions;
			shared_region_mapping_t system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up */
	/* mappings based on the file object */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same */
		/* size and in the same order rather than checking for */
		/* semantic equivalence. */

		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].sfm_address)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].sfm_size != file_mapping->size ||
			    mappings[i].sfm_file_offset != file_mapping->file_offset ||
			    mappings[i].sfm_init_prot != file_mapping->protection) {
				break;
			}
			i++;
			file_mapping = file_mapping->next;
		}
		if (i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
			} else {
				/*
				 * The file is already mapped at the correct
				 * address.
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}

	/* File is not loaded, lets attempt to load it */
	ret = lsf_map(mappings, map_cnt,
		      (void *)file_control,
		      file_size, sm_info, base_offset, slide_p);
	if (ret == KERN_NO_SPACE) {
		shared_region_mapping_t regions;
		shared_region_mapping_t system_region;
		regions = (shared_region_mapping_t)sm_info->self;
		regions->flags |= SHARED_REGION_FULL;
		system_region = lookup_default_shared_region(
			regions->fs_base, regions->system);
		if (system_region == regions) {
			shared_region_mapping_t new_system_shared_region;
			shared_file_boot_time_init(
				regions->fs_base, regions->system);
			/* current task must stay with its current */
			/* regions, drop count on system_shared_region */
			/* and put back our original set */
			vm_get_shared_region(current_task(),
					     &new_system_shared_region);
			shared_region_mapping_dealloc_lock(
				new_system_shared_region, 0, 1);
			vm_set_shared_region(current_task(), regions);
		} else if (system_region != NULL) {
			shared_region_mapping_dealloc_lock(
				system_region, 0, 1);
		}
	}
	mutex_unlock(&shared_file_header->lock);
	return ret;
}
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	return KERN_SUCCESS;
}
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.  */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while (target_region) {
				if ((!(sm_info->self)) ||
				    ((target_region == entry->regions_instance) &&
				     (target_region->depth >= entry->depth))) {
					if (alternate &&
					    entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if (target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if (shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for (i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if (region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance,
					   region));
			}
			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

	return NULL;	/* XXX */
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.  */

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
		   file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if ((!(sm_info->self)) || ((shared_region_mapping_t)
					   sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete; not found\n"));
	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table.  */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
1854 /* Looks up the file type requested. If already loaded and the */
1855 /* file extents are an exact match, returns Success. If not */
1856 /* loaded attempts to load the file extents at the given offsets */
1857 /* if any extent fails to load or if the file was already loaded */
1858 /* in a different configuration, lsf_load fails. */
1860 static kern_return_t
1862 vm_offset_t mapped_file
,
1863 vm_size_t mapped_file_size
,
1864 vm_offset_t
*base_address
,
1865 sf_mapping_t
*mappings
,
1869 shared_region_task_mappings_t sm_info
)
1872 load_struct_t
*entry
;
1873 vm_map_copy_t copy_object
;
1874 loaded_mapping_t
*file_mapping
;
1875 loaded_mapping_t
**tptr
;
1877 ipc_port_t local_map
;
1878 vm_offset_t original_alt_load_next
;
1879 vm_offset_t alternate_load_next
;
1881 LSF_DEBUG(("lsf_load"
1882 "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p)"
1884 mapped_file_size
, *base_address
, map_cnt
, file_object
,
1886 entry
= (load_struct_t
*)zalloc(lsf_zone
);
1887 LSF_ALLOC_DEBUG(("lsf_load: entry=%p map_cnt=%d\n", entry
, map_cnt
));
1888 LSF_DEBUG(("lsf_load"
1889 "(size=0x%x,base=0x%x,cnt=%d,file=%p,flags=%d,sm_info=%p) "
1891 mapped_file_size
, *base_address
, map_cnt
, file_object
,
1892 flags
, sm_info
, entry
));
1893 if (entry
== NULL
) {
1894 printf("lsf_load: unable to allocate memory\n");
1895 return KERN_NO_SPACE
;
1898 shared_file_available_hash_ele
--;
1899 entry
->file_object
= (int)file_object
;
1900 entry
->mapping_cnt
= map_cnt
;
1901 entry
->mappings
= NULL
;
1902 entry
->links
.prev
= (queue_entry_t
) 0;
1903 entry
->links
.next
= (queue_entry_t
) 0;
1904 entry
->regions_instance
= (shared_region_mapping_t
)sm_info
->self
;
1905 entry
->depth
=((shared_region_mapping_t
)sm_info
->self
)->depth
;
1906 entry
->file_offset
= mappings
[0].file_offset
;
1908 lsf_hash_insert(entry
, sm_info
);
1909 tptr
= &(entry
->mappings
);
1912 alternate_load_next
= sm_info
->alternate_next
;
1913 original_alt_load_next
= alternate_load_next
;
1914 if (flags
& ALTERNATE_LOAD_SITE
) {
1915 vm_offset_t max_loadfile_offset
;
1917 *base_address
= ((*base_address
) & ~SHARED_TEXT_REGION_MASK
) +
1918 sm_info
->alternate_next
;
1919 max_loadfile_offset
= 0;
1920 for(i
= 0; i
<map_cnt
; i
++) {
1921 if(((mappings
[i
].mapping_offset
1922 & SHARED_TEXT_REGION_MASK
)+ mappings
[i
].size
) >
1923 max_loadfile_offset
) {
1924 max_loadfile_offset
=
1925 (mappings
[i
].mapping_offset
1926 & SHARED_TEXT_REGION_MASK
)
1930 if((alternate_load_next
+ round_page(max_loadfile_offset
)) >=
1931 (sm_info
->data_size
- (sm_info
->data_size
>>9))) {
1932 entry
->base_address
=
1933 (*base_address
) & SHARED_TEXT_REGION_MASK
;
1934 lsf_unload(file_object
, entry
->base_address
, sm_info
);
1936 return KERN_NO_SPACE
;
1938 alternate_load_next
+= round_page(max_loadfile_offset
);
1941 if (((*base_address
) & SHARED_TEXT_REGION_MASK
) >
1942 sm_info
->alternate_base
) {
1943 entry
->base_address
=
1944 (*base_address
) & SHARED_TEXT_REGION_MASK
;
1945 lsf_unload(file_object
, entry
->base_address
, sm_info
);
1946 return KERN_INVALID_ARGUMENT
;
1950 entry
->base_address
= (*base_address
) & SHARED_TEXT_REGION_MASK
;
1952 // Sanity check the mappings -- make sure we don't stray across the
1953 // alternate boundary. If any bit of a library that we're not trying
1954 // to load in the alternate load space strays across that boundary,
1955 // return KERN_INVALID_ARGUMENT immediately so that the caller can
1956 // try to load it in the alternate shared area. We do this to avoid
1957 // a nasty case: if a library tries to load so that it crosses the
1958 // boundary, it'll occupy a bit of the alternate load area without
1959 // the kernel being aware. When loads into the alternate load area
1960 // at the first free address are tried, the load will fail.
1961 // Thus, a single library straddling the boundary causes all sliding
1962 // libraries to fail to load. This check will avoid such a case.
	if (!(flags & ALTERNATE_LOAD_SITE)) {
		for (i = 0; i < map_cnt; i++) {
			vm_offset_t region_mask;
			vm_address_t region_start;
			vm_address_t region_end;

			if ((mappings[i].protection & VM_PROT_WRITE) == 0) {
				// mapping offsets are relative to start of shared segments.
				region_mask = SHARED_TEXT_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask) + entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					// No library is permitted to load so any bit of it is in the
					// shared alternate space.  If they want it loaded, they can put
					// it in the alternate space explicitly.
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			} else {
				// writable mappings live in the data region.
				region_mask = SHARED_DATA_REGION_MASK;
				region_start = (mappings[i].mapping_offset & region_mask) + entry->base_address;
				region_end = (mappings[i].size + region_start);
				if (region_end >= SHARED_ALTERNATE_LOAD_BASE) {
					printf("Library trying to load across alternate shared region boundary -- denied!\n");
					lsf_unload(file_object, entry->base_address, sm_info);
					return KERN_INVALID_ARGUMENT;
				}
			}
		}
	} // if not alternate load site.

	/* copyin mapped file data */
	for (i = 0; i < map_cnt; i++) {
		vm_offset_t target_address;
		vm_offset_t region_mask;

		if (mappings[i].protection & VM_PROT_COW) {
			local_map = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((mappings[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			local_map = (ipc_port_t)sm_info->text_region;
			if (mappings[i].mapping_offset
					& GLOBAL_SHARED_SEGMENT_MASK) {
				lsf_unload(file_object,
					entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].protection & VM_PROT_ZF)
				&& ((mapped_file + mappings[i].file_offset +
					mappings[i].size) >
					(mapped_file + mapped_file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if (vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, &target_address,
				mappings[i].size, VM_FLAGS_FIXED)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_FAILURE;
		}
		target_address = ((mappings[i].mapping_offset) & region_mask)
					+ entry->base_address;
		if (!(mappings[i].protection & VM_PROT_ZF)) {
			if (vm_map_copyin(current_map(),
				(vm_map_address_t)(mapped_file + mappings[i].file_offset),
				vm_map_round_page(mappings[i].size), FALSE, &copy_object)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
			if (vm_map_copy_overwrite(((vm_named_entry_t)
					local_map->ip_kobject)->backing.map,
					(vm_map_address_t)target_address,
					copy_object, FALSE)) {
				vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
					->backing.map, target_address, mappings[i].size);
				lsf_unload(file_object, entry->base_address, sm_info);
				return KERN_FAILURE;
			}
		}

		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_load: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].mapping_offset)
							& region_mask;
		file_mapping->size = mappings[i].size;
		file_mapping->file_offset = mappings[i].file_offset;
		file_mapping->protection = mappings[i].protection;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_load: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* set the maximum protections, then the current protections */
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				TRUE);
		vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
				->backing.map, target_address,
				round_page(target_address + mappings[i].size),
				(mappings[i].protection &
					(VM_PROT_READ | VM_PROT_EXECUTE)),
				FALSE);

		*tptr = file_mapping;
		tptr = &(file_mapping->next);
	}
	shared_region_mapping_set_alt_next(
		(shared_region_mapping_t) sm_info->self,
		alternate_load_next);
	LSF_DEBUG(("lsf_load: done\n"));
	return KERN_SUCCESS;
}

/*
 * lsf_slide:
 *
 * Look in the shared region, starting from the end, for a place to fit all the
 * mappings while respecting their relative offsets.
 */
static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings_in,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p)
{
	mach_vm_offset_t	max_mapping_offset;
	int			i;
	vm_map_entry_t		map_entry, prev_entry, next_entry;
	mach_vm_offset_t	prev_hole_start, prev_hole_end;
	mach_vm_offset_t	mapping_offset, mapping_end_offset;
	mach_vm_offset_t	base_offset;
	mach_vm_size_t		mapping_size;
	mach_vm_offset_t	wiggle_room, wiggle;
	vm_map_t		text_map, data_map, map;
	vm_named_entry_t	region_entry;
	ipc_port_t		region_handle;
	kern_return_t		kr;

	struct shared_file_mapping_np	*mappings, tmp_mapping;
	unsigned int		sort_index, sorted_index;
	vm_map_offset_t		sort_min_address;
	unsigned int		sort_min_index;

	/*
	 * Sort the mappings array, so that we can try and fit them in
	 * the right order as we progress along the VM maps.
	 *
	 * We can't modify the original array (the original order is
	 * important when doing lookups of the mappings), so copy it first.
	 */
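	/*
	 * Editor's note (illustrative, not from the original source): the
	 * sort loop below is a simple selection sort keyed on each mapping's
	 * end offset within the text region, i.e.
	 * (sfm_address & SHARED_TEXT_REGION_MASK) + sfm_size.  For example,
	 * three mappings ending at offsets 0x9000, 0x2000 and 0x5000 would
	 * be reordered to 0x2000, 0x5000, 0x9000 before the fitting pass.
	 */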
	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &mappings,
			(vm_size_t) (map_cnt * sizeof (mappings[0])));
	if (kr != KERN_SUCCESS) {
		return KERN_NO_SPACE;
	}

	bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));

	max_mapping_offset = 0;
	for (sorted_index = 0;
	     sorted_index < map_cnt;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		mapping_end_offset = ((mappings[sort_min_index].sfm_address &
				       SHARED_TEXT_REGION_MASK) +
				      mappings[sort_min_index].sfm_size);
		sort_min_address = mapping_end_offset;
		/* compute the highest mapping_offset as well... */
		if (mapping_end_offset > max_mapping_offset) {
			max_mapping_offset = mapping_end_offset;
		}
		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < map_cnt;
		     sort_index++) {

			mapping_end_offset =
				((mappings[sort_index].sfm_address &
				  SHARED_TEXT_REGION_MASK) +
				 mappings[sort_index].sfm_size);

			if (mapping_end_offset < sort_min_address) {
				/* lowest mapping_offset so far... */
				sort_min_index = sort_index;
				sort_min_address = mapping_end_offset;
			}
		}
		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_mapping = mappings[sort_min_index];
			mappings[sort_min_index] = mappings[sorted_index];
			mappings[sorted_index] = tmp_mapping;
		}
	}

	max_mapping_offset = vm_map_round_page(max_mapping_offset);

	/* start from the end of the shared area */
	base_offset = sm_info->text_size;

	/* can all the mappings fit ? */
	if (max_mapping_offset > base_offset) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  map_cnt * sizeof (mappings[0]));
		return KERN_FAILURE;
	}

	/*
	 * Align the last mapping to the end of the submaps
	 * and start from there.
	 */
	base_offset -= max_mapping_offset;

	region_handle = (ipc_port_t) sm_info->text_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_map = region_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_map = region_entry->backing.map;

	vm_map_lock_read(text_map);
	vm_map_lock_read(data_map);

start_over:
	/*
	 * At first, we can wiggle all the way from our starting point
	 * (base_offset) towards the start of the map (0), if needed.
	 */
	wiggle_room = base_offset;
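	/*
	 * Editor's sketch of the wiggle arithmetic (illustrative numbers
	 * only): if base_offset + mapping_offset is 0x700000, the current
	 * hole ends at 0x6f0000 and mapping_size is 0x4000, then the hole is
	 * entirely to our left and
	 * wiggle = 0x700000 - 0x6f0000 + 0x4000 = 0x14000, so base_offset
	 * slides down by 0x14000; if that exceeds the remaining wiggle_room,
	 * the search restarts at start_over with the new, lower base.
	 */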
	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}

		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		while (TRUE) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * mapped.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset
					- prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need.  Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there:  wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}

/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object, sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object, sm_info, entry));
	if (entry == NULL) {
		printf("lsf_map: unable to allocate memory\n");
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;

	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     file_size)) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		if (mach_vm_map(region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
			lsf_unload(file_object, entry->base_address, sm_info);

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					entry->base_address = base_offset;
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			return KERN_FAILURE;
		}

		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_map: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
							& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if (sm_info->flags & SHARED_REGION_STANDALONE) {
		/*
		 * We have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping.  Just deallocate it all.
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}
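
/*
 * Editor's note on the caller contract (illustrative, based only on the code
 * above): passing a non-NULL "slide_p" tells lsf_map() that the caller
 * tolerates the library being slid to a different base when the requested
 * mappings collide with something already in the region; on success,
 * *slide_p receives the distance actually slid (0 if no slide was needed).
 * A sketch of such a call, with hypothetical arguments:
 *
 *	mach_vm_offset_t slide = 0;
 *	kr = lsf_map(mappings, map_cnt, file_control, file_size,
 *		     sm_info, requested_base, &slide);
 *	if (kr == KERN_SUCCESS && slide != 0)
 *		printf("library slid by 0x%llx\n", (unsigned long long) slide);
 */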

/* finds the file_object extent list in the shared memory hash table */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */
static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(file_object, base_offset, sm_info, TRUE);
}

/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocates the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while (map_ele != NULL) {
			if (unload) {
				ipc_port_t		region_handle;
				vm_named_entry_t	region_entry;

				if (map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				vm_deallocate(region_entry->backing.map,
					      (entry->base_address +
					       map_ele->mapping_offset),
					      map_ele->size);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p\n", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}
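
/*
 * Editor's note (illustrative, derived only from the callers above):
 * lsf_unload() passes unload=TRUE so the file's pages are actually
 * vm_deallocate()'d from the text/data submaps along with the bookkeeping,
 * whereas the SHARED_REGION_STANDALONE path in lsf_map() passes
 * unload=FALSE to discard only the load_struct_t / loaded_mapping_t records
 * while leaving the established mappings in place.
 */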

/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100) / lsf_zone->max_size;
}
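
/*
 * Usage sketch (editor's illustration, not from the original source):
 * a caller could consult the gauge before attempting another shared
 * library load and back off when the pool is nearly exhausted.
 * "try_load_split_library" is a hypothetical helper, not part of this file.
 *
 *	if (lsf_mapping_pool_gauge() < 75) {
 *		kr = try_load_split_library(...);
 *	} else {
 *		kr = KERN_NO_SPACE;
 *	}
 */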