/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)			\
	MACRO_BEGIN			\
	if (lsf_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)		\
	MACRO_BEGIN			\
	if (lsf_alloc_debug) {		\
		kprintf args;		\
	}				\
	MACRO_END
#else /* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif /* DEBUG */
/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int need_sfh_lock,
	int need_drl_lock);

static kern_return_t
shared_file_init(
	ipc_port_t		*text_region_handle,
	vm_size_t		text_region_size,
	ipc_port_t		*data_region_handle,
	vm_size_t		data_region_size,
	vm_offset_t		*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);


#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
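
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * how load_file_hash() selects a bucket.  With a hypothetical file_object
 * pointer of 0x12345678 and a 256-entry table, only the low 24 bits
 * participate:
 *
 *	(0x12345678 & 0xffffff) % 256  ==  0x345678 % 256  ==  0x78
 *
 * so the entry lands in bucket 0x78.  Masking to 24 bits keeps the modulo
 * cheap while still spreading kernel pointers, which share their high
 * bits, across the table.
 */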
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle32 = NULL;
ipc_port_t		com_region_handle64 = NULL;
vm_map_t		com_region_map32 = NULL;
vm_map_t		com_region_map64 = NULL;
vm_size_t		com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;


#if DEBUG
int shared_region_debug = 0;
#endif /* DEBUG */
__private_extern__ kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p)\n",
			     task, shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}
/*
 * shared_region_object_chain_detach:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
__private_extern__ void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}
/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
__private_extern__ kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t	object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if (target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
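
/*
 * Usage sketch (editor's note, not part of the original source): a typical
 * shadow-chain setup when cloning a region.  "new_region" and "old_region"
 * are hypothetical; error handling is elided.
 */
#if 0	/* example only */
static void
example_chain_regions(shared_region_mapping_t new_region,
		      shared_region_mapping_t old_region)
{
	/* lookups that miss in new_region now fall through to old_region */
	shared_region_object_chain_attach(new_region, old_region);

	/* ... later, if the clone becomes fully private: */
	shared_region_object_chain_detached(new_region);
}
#endif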
/* LP64todo - need 64-bit safe version */
__private_extern__ kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if (*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = ENV_DEFAULT_ROOT;
	(*shared_region)->system = cpu_type();
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
__private_extern__ kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if (shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int need_sfh_lock,
	int need_drl_lock)
{
	struct shared_region_task_mappings sm_info;
	shared_region_mapping_t next = NULL;
	unsigned int ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if (shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if (((vm_named_entry_t)
			     (shared_region->text_region->ip_kobject))
			    ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if (shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if ((ref_count == 1) &&
			    (shared_region->flags & SHARED_REGION_SYSTEM)
			    && !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}
/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
__private_extern__ kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region != NULL) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
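
/*
 * Reference-counting sketch (editor's note, not part of the original
 * source): the expected pairing of ref and dealloc around a temporary use
 * of a region.  The region is only torn down when the last reference goes
 * away inside shared_region_mapping_dealloc_lock().
 */
#if 0	/* example only */
static void
example_region_ref_cycle(shared_region_mapping_t region)
{
	shared_region_mapping_ref(region);	/* ref_count++ */
	/* ... use the region's submaps/mappings ... */
	shared_region_mapping_dealloc(region);	/* ref_count--, free at 0 */
}
#endif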
static kern_return_t
shared_region_object_create(
	vm_size_t	size,
	ipc_port_t	*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;

	ipc_port_t	previous;
	vm_map_t	new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if (user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();


	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
			text_size, &data_handle, data_size, &mapping_array);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle,
			text_size, data_handle, data_size, mapping_array,
			GLOBAL_SHARED_TEXT_SEGMENT, shared_region,
			SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
	if (kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if (com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/*
 * load a new default for a specified environment into the default shared
 * regions list.  If a previous default exists for the environment
 * specification it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t new_system_region)
{
	shared_region_mapping_t old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if ((old_system_region != NULL) &&
	    (old_system_region->fs_base == fs_base) &&
	    (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while (old_system_region->default_env_list != NULL) {
			if ((old_system_region->default_env_list->fs_base == fs_base) &&
			    (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if (old_system_region) {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure.
 */
shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while (system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if ((system_region->fs_base == fs_base) &&
		    (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if (system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_system_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}
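
/*
 * Usage sketch (editor's note, not part of the original source): resolving
 * the default region for an environment and releasing the reference that
 * lookup_default_shared_region() takes on success.
 */
#if 0	/* example only */
static void
example_lookup_default(unsigned int fs_base, unsigned int system)
{
	shared_region_mapping_t region;

	region = lookup_default_shared_region(fs_base, system);
	if (region != NULL) {
		/* ... hand the region to a task, etc. ... */
		shared_region_mapping_dealloc(region);	/* drop lookup ref */
	}
}
#endif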
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t system_region,
	int need_sfh_lock,
	int need_drl_lock)
{
	shared_region_mapping_t old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if (old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock,
						   0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while (old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if (old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock,
							   0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}
void
remove_all_shared_regions(void)
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if (system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while (system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi-independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both.                      */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if (com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if (com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code  */
	if ((kret = shared_region_object_create(
			com_region_size,
			&com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
		return;
	}
	if ((kret = shared_region_object_create(
			com_region_size,
			&com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
		return;
	}

	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
			com_region_size, NULL, 0, 0,
			_COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
			0, 0);
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
void
shared_file_boot_time_init(
	unsigned int fs_base,
	unsigned int system)
{
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
			 text_region_size,
			 &shared_data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
				     text_region_size,
				     shared_data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE);

	new_system_region->fs_base = fs_base;
	new_system_region->system = system;
	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if (old_default_env)
		shared_region_mapping_dealloc(old_default_env);
	if (com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.  */

static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	int			data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
			text_region_size,
			text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
			data_region_size,
			data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if (shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if (vm_map_find_space(kernel_map, &map_addr,
				      data_table_size, 0, &entry)
		    != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE,  b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
					& VM_WIMG_MASK,
				   TRUE);
		}


		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
				(((int)*file_mapping_array) +
					sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;


		mach_make_memory_entry(kernel_map, &data_table_size,
			*file_mapping_array, VM_PROT_READ, &sfma_handle,
			NULL);

		if (vm_map_wire(kernel_map,
			vm_map_trunc_page(*file_mapping_array),
			vm_map_round_page(*file_mapping_array +
				hash_size +
				round_page(sizeof(struct sf_mapping))),
			VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
			data_table_size -
			   (hash_size + round_page_32(sizeof(struct sf_mapping))),
			0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	kret = vm_map(((vm_named_entry_t)
			(*data_region_handle)->ip_kobject)->backing.map,
		      &table_mapping_address,
		      data_table_size, 0,
		      SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
		      sfma_handle, 0, FALSE,
		      VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return kret;
}
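
/*
 * Sizing sketch (editor's note, not part of the original source): with the
 * boot-time data_region_size of 0x10000000 (256 MB), the table sizes above
 * work out to:
 *
 *	data_table_size = 0x10000000 >> 9  = 0x80000  (512 KB)
 *	hash_size       = 0x10000000 >> 14 = 0x4000   (16 KB)
 *
 * and the table is mapped read-only at the top of the data region, at
 * data_region_size - data_table_size.
 */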
static kern_return_t
shared_file_header_init(
	shared_file_info_t		*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int allocable_hash_pages;
	static vm_offset_t hash_cram_address;


	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			printf("shared_file_header_init: "
			       "No memory for data table\n");
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}
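
/*
 * Worked example (editor's note, not part of the original source): the
 * pool is crammed at most 3 pages at a time.  With a 4 KB PAGE_SIZE, one
 * refill wires cram_size = 3 * 4096 = 12288 bytes and adds
 * 12288 / sizeof(struct load_file_ele) elements to lsf_zone, keeping the
 * "available" counter above the low-water mark of 20.
 */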
/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if (shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}


	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		i = 0;
		file_mapping = file_entry->mappings;
		while (file_mapping != NULL) {
			if (i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if (((mappings[i].sfm_address)
			     & SHARED_DATA_REGION_MASK) !=
			    file_mapping->mapping_offset ||
			    mappings[i].sfm_size != file_mapping->size ||
			    mappings[i].sfm_file_offset != file_mapping->file_offset ||
			    mappings[i].sfm_init_prot != file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if (i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We fail.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
			} else {
				/*
				 * The file is already mapped at the correct
				 * address and the caller doesn't want any
				 * sliding.
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, lets attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if (ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
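
/*
 * Caller's-eye sketch (editor's note, not part of the original source):
 * the two ways to call map_shared_file().  Passing a non-NULL slide
 * pointer lets an already-mapped or conflicting library be relocated;
 * passing NULL demands the exact requested base.  All arguments are
 * assumed to have been prepared by the caller.
 */
#if 0	/* example only */
	mach_vm_offset_t slide = 0;
	kern_return_t kr;

	/* strict: map at base_offset or fail */
	kr = map_shared_file(map_cnt, mappings, file_control, file_size,
			     sm_info, base_offset, NULL);

	/* relaxed: accept a slid mapping and learn the slide */
	kr = map_shared_file(map_cnt, mappings, file_control, file_size,
			     sm_info, base_offset, &slide);
#endif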
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     0,
					     submap_end - submap_base);
		}
	}

	return KERN_SUCCESS;
}
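
/*
 * Worked example (editor's note, not part of the original source): suppose
 * the text submap covers [client_base, client_base + text_size) and the
 * sorted keep-ranges are A and B.  The loop deallocates the holes
 * [client_base, A.start) and [A.end, B.start), and the tail code frees
 * [B.end, submap_end); the same sweep is then repeated over the data
 * submap.  Only the pages named in "ranges" survive.
 */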
/* A hash lookup function for the list of loaded files in      */
/* shared_memory_server space.  */

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	load_struct_t		*entry;
	shared_region_mapping_t	target_region;
	int			depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while (target_region) {
				if ((!(sm_info->self)) ||
				    ((target_region == entry->regions_instance) &&
				     (target_region->depth >= entry->depth))) {
					if (alternate &&
					    entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x "
							   "alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x "
							   "alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if (target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if (shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for (i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if (region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}
			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

	return NULL;	/* XXX */
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from     */
/* the loaded file hash table.  */

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
		   file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if ((!(sm_info->self)) || ((shared_region_mapping_t)
					   sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object)  &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete; not found\n"));
	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the    */
/* server loaded file hash table. */

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
/*
 * lsf_slide:
 *
 * Look in the shared region, starting from the end, for a place to fit all the
 * mappings while respecting their relative offsets.
 */
static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings_in,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p)
{
	mach_vm_offset_t	max_mapping_offset;
	int			i;
	vm_map_entry_t		map_entry, prev_entry, next_entry;
	mach_vm_offset_t	prev_hole_start, prev_hole_end;
	mach_vm_offset_t	mapping_offset, mapping_end_offset;
	mach_vm_offset_t	base_offset;
	mach_vm_size_t		mapping_size;
	mach_vm_offset_t	wiggle_room, wiggle;
	vm_map_t		text_map, data_map, map;
	vm_named_entry_t	region_entry;
	ipc_port_t		region_handle;
	kern_return_t		kr;

	struct shared_file_mapping_np	*mappings, tmp_mapping;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Sort the mappings array, so that we can try and fit them in
	 * in the right order as we progress along the VM maps.
	 *
	 * We can't modify the original array (the original order is
	 * important when doing lookups of the mappings), so copy it first.
	 */

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &mappings,
			(vm_size_t) (map_cnt * sizeof (mappings[0])));
	if (kr != KERN_SUCCESS) {
		return KERN_NO_SPACE;
	}

	bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));

	max_mapping_offset = 0;
	for (sorted_index = 0;
	     sorted_index < map_cnt;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		mapping_end_offset = ((mappings[sort_min_index].sfm_address &
				       SHARED_TEXT_REGION_MASK) +
				      mappings[sort_min_index].sfm_size);
		sort_min_address = mapping_end_offset;
		/* compute the highest mapping_offset as well... */
		if (mapping_end_offset > max_mapping_offset) {
			max_mapping_offset = mapping_end_offset;
		}
		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < map_cnt;
		     sort_index++) {

			mapping_end_offset =
				((mappings[sort_index].sfm_address &
				  SHARED_TEXT_REGION_MASK) +
				 mappings[sort_index].sfm_size);

			if (mapping_end_offset < sort_min_address) {
				/* lowest mapping_offset so far... */
				sort_min_index = sort_index;
				sort_min_address = mapping_end_offset;
			}
		}
		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_mapping = mappings[sort_min_index];
			mappings[sort_min_index] = mappings[sorted_index];
			mappings[sorted_index] = tmp_mapping;
		}

	}

	max_mapping_offset = vm_map_round_page(max_mapping_offset);

	/* start from the end of the shared area */
	base_offset = sm_info->text_size;

	/* can all the mappings fit ? */
	if (max_mapping_offset > base_offset) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  map_cnt * sizeof (mappings[0]));
		return KERN_FAILURE;
	}

	/*
	 * Align the last mapping to the end of the submaps
	 * and start from there.
	 */
	base_offset -= max_mapping_offset;

	region_handle = (ipc_port_t) sm_info->text_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_map = region_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_map = region_entry->backing.map;

	vm_map_lock_read(text_map);
	vm_map_lock_read(data_map);

start_over:
	/*
	 * At first, we can wiggle all the way from our starting point
	 * (base_offset) towards the start of the map (0), if needed.
	 */
	wiggle_room = base_offset;

	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}
		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		for (;;) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * mapped.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need.  Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there:  wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}
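
/*
 * Worked example (editor's note, not part of the original source): say the
 * highest mapping ends 0x5000 bytes into the library and text_size is
 * 0x10000000.  The search starts at base_offset = 0x10000000 - 0x5000,
 * with wiggle_room = base_offset.  If a mapping's slot is occupied and the
 * nearest hole ends 0x2000 bytes lower, base_offset is "wiggled" down by
 * 0x2000; since every mapping is offset from the same base, the whole set
 * keeps its relative layout, and the scan restarts from scratch only when
 * a wiggle exceeds the room established by the mappings already placed.
 */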
/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object,
		   sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object,
		   sm_info, entry));
	if (entry == NULL) {
		printf("lsf_map: unable to allocate memory\n");
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;


	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     (file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		if (mach_vm_map(region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
			lsf_unload(file_object, entry->base_address, sm_info);

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					entry->base_address = base_offset;
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			return KERN_FAILURE;
		}

		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_map: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
							& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if (sm_info->flags & SHARED_REGION_STANDALONE) {
		/*
		 * We have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping.  Just deallocate it all.
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */
static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(file_object, base_offset, sm_info, TRUE);
}
/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocate the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if (entry) {
		map_ele = entry->mappings;
		while (map_ele != NULL) {
			if (unload) {
				ipc_port_t		region_handle;
				vm_named_entry_t	region_entry;

				if (map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				vm_deallocate(region_entry->backing.map,
					      (entry->base_address +
					       map_ele->mapping_offset),
					      map_ele->size);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_deallocate: done\n"));
}
/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
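
/*
 * Worked example (editor's note, not part of the original source): if
 * lsf_zone currently holds 1000 elements of 64 bytes each and max_size is
 * 0x100000 (1 MB), the gauge returns (1000 * 64 * 100) / 0x100000 = 6,
 * i.e. the mapping pool is about 6% full.
 */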