/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *	File:	vm/vm_shared_memory_server.c
 *	Author:	Chris Youngworth
 *
 *	Support routines for an in-kernel shared memory allocator
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>
#include <machine/cpu_capabilities.h>

#include <kern/kern_types.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <ipc/ipc_types.h>
#include <ipc/ipc_port.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <mach/mach_vm.h>
#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
#if DEBUG
int lsf_debug = 0;
int lsf_alloc_debug = 0;
#define LSF_DEBUG(args)				\
	MACRO_BEGIN				\
	if (lsf_debug) {			\
		kprintf args;			\
	}					\
	MACRO_END
#define LSF_ALLOC_DEBUG(args)			\
	MACRO_BEGIN				\
	if (lsf_alloc_debug) {			\
		kprintf args;			\
	}					\
	MACRO_END
#else /* DEBUG */
#define LSF_DEBUG(args)
#define LSF_ALLOC_DEBUG(args)
#endif /* DEBUG */
/* forward declarations */
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle);

static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock);

static kern_return_t
shared_file_init(
	ipc_port_t		*text_region_handle,
	vm_size_t		text_region_size,
	ipc_port_t		*data_region_handle,
	vm_size_t		data_region_size,
	vm_offset_t		*file_mapping_array);

static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header);

static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info);

static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info);

static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p);

static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p);

static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info);

static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload);

#define load_file_hash(file_object, size) \
		((((natural_t)file_object) & 0xffffff) % size)
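/*
 * Editor's note (worked example, not part of the original source): with a
 * hash table of 32 buckets, a file_object pointer of 0x12345678 lands in
 * bucket ((0x12345678 & 0xffffff) % 32) == (0x345678 % 32) == 24.  Only the
 * low 24 bits of the pointer participate, so objects allocated from the
 * same zone still spread reasonably well across buckets.
 */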
vm_offset_t		shared_file_text_region;
vm_offset_t		shared_file_data_region;

ipc_port_t		shared_text_region_handle;
ipc_port_t		shared_data_region_handle;
vm_offset_t		shared_file_mapping_array = 0;

shared_region_mapping_t	default_environment_shared_regions = NULL;
static decl_mutex_data(,default_regions_list_lock_data)

#define default_regions_list_lock() \
		mutex_lock(&default_regions_list_lock_data)
#define default_regions_list_lock_try() \
		mutex_try(&default_regions_list_lock_data)
#define default_regions_list_unlock() \
		mutex_unlock(&default_regions_list_lock_data)

ipc_port_t		sfma_handle = NULL;
zone_t			lsf_zone;

int			shared_file_available_hash_ele;

/* com region support */
ipc_port_t		com_region_handle32 = NULL;
ipc_port_t		com_region_handle64 = NULL;
vm_map_t		com_region_map32 = NULL;
vm_map_t		com_region_map64 = NULL;
vm_size_t		com_region_size = _COMM_PAGE_AREA_LENGTH;
shared_region_mapping_t	com_mapping_resource = NULL;

#if DEBUG
int shared_region_debug = 0;
#endif /* DEBUG */
__private_extern__ kern_return_t
vm_get_shared_region(
	task_t			task,
	shared_region_mapping_t	*shared_region)
{
	*shared_region = (shared_region_mapping_t) task->system_shared_region;
	if (*shared_region) {
		assert((*shared_region)->ref_count > 0);
	}
	SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
			     task, *shared_region));
	return KERN_SUCCESS;
}
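/*
 * Editor's usage sketch (hypothetical caller, not in the original source).
 * vm_set_shared_region() only stores the pointer; it neither takes nor
 * drops a reference, so a task-creation path that inherits the parent's
 * region must manage ref_count itself:
 *
 *	shared_region_mapping_t sr;
 *	vm_get_shared_region(parent_task, &sr);
 *	shared_region_mapping_ref(sr);		take a ref for the child
 *	vm_set_shared_region(child_task, sr);
 */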
__private_extern__ kern_return_t
vm_set_shared_region(
	task_t			task,
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
			     "shared_region=%p)\n",
			     task, shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	task->system_shared_region = shared_region;
	return KERN_SUCCESS;
}
/*
 * shared_region_object_chain_detached:
 *
 * Mark the shared region as being detached or standalone.  This means
 * that we won't keep track of which file is mapped and how, for this shared
 * region.  And we don't have a "shadow" shared region.
 * This is used when we clone a private shared region and we intend to remove
 * some mappings from it.  It won't need to maintain mappings info because it's
 * now private.  It can't have a "shadow" shared region because we don't want
 * to see the shadow of the mappings we're about to remove.
 */
__private_extern__ void
shared_region_object_chain_detached(
	shared_region_mapping_t	target_region)
{
	shared_region_mapping_lock(target_region);
	target_region->flags |= SHARED_REGION_STANDALONE;
	shared_region_mapping_unlock(target_region);
}
/*
 * shared_region_object_chain_attach:
 *
 * Link "target_region" to "object_chain_region".  "object_chain_region"
 * is treated as a shadow of "target_region" for the purpose of looking up
 * mappings.  Since the "target_region" preserves all the mappings of the
 * older "object_chain_region", we won't duplicate all the mappings info and
 * we'll just lookup the next region in the "object_chain" if we can't find
 * what we're looking for in the "target_region".  See lsf_hash_lookup().
 */
__private_extern__ kern_return_t
shared_region_object_chain_attach(
	shared_region_mapping_t		target_region,
	shared_region_mapping_t		object_chain_region)
{
	shared_region_object_chain_t object_ele;

	SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
			     "target_region=%p, object_chain_region=%p\n",
			     target_region, object_chain_region));
	assert(target_region->ref_count > 0);
	assert(object_chain_region->ref_count > 0);
	if(target_region->object_chain)
		return KERN_FAILURE;
	object_ele = (shared_region_object_chain_t)
		kalloc(sizeof (struct shared_region_object_chain));
	shared_region_mapping_lock(object_chain_region);
	target_region->object_chain = object_ele;
	object_ele->object_chain_region = object_chain_region;
	object_ele->next = object_chain_region->object_chain;
	object_ele->depth = object_chain_region->depth;
	object_chain_region->depth++;
	target_region->alternate_next = object_chain_region->alternate_next;
	shared_region_mapping_unlock(object_chain_region);
	return KERN_SUCCESS;
}
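/*
 * Editor's sketch of the linkage established above (not in the original
 * source):
 *
 *	target_region->object_chain --> object_ele
 *		object_ele->object_chain_region = object_chain_region (shadow)
 *		object_ele->next  = the shadow's own chain, if any
 *		object_ele->depth = the shadow's depth at attach time
 *
 * lsf_hash_lookup() follows object_chain_region links when a mapping is not
 * found in the target region itself, so the shadow acts as a read-only
 * fallback for lookups.
 */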
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_create(
	ipc_port_t		text_region,
	vm_size_t		text_size,
	ipc_port_t		data_region,
	vm_size_t		data_size,
	vm_offset_t		region_mappings,
	vm_offset_t		client_base,
	shared_region_mapping_t	*shared_region,
	vm_offset_t		alt_base,
	vm_offset_t		alt_next)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
	*shared_region = (shared_region_mapping_t)
		kalloc(sizeof (struct shared_region_mapping));
	if(*shared_region == NULL) {
		SHARED_REGION_DEBUG(("shared_region_mapping_create: "
				     "failure\n"));
		return KERN_FAILURE;
	}
	shared_region_mapping_lock_init((*shared_region));
	(*shared_region)->text_region = text_region;
	(*shared_region)->text_size = text_size;
	(*shared_region)->fs_base = ENV_DEFAULT_ROOT;
	(*shared_region)->system = cpu_type();
	(*shared_region)->data_region = data_region;
	(*shared_region)->data_size = data_size;
	(*shared_region)->region_mappings = region_mappings;
	(*shared_region)->client_base = client_base;
	(*shared_region)->ref_count = 1;
	(*shared_region)->next = NULL;
	(*shared_region)->object_chain = NULL;
	(*shared_region)->self = *shared_region;
	(*shared_region)->flags = 0;
	(*shared_region)->depth = 0;
	(*shared_region)->default_env_list = NULL;
	(*shared_region)->alternate_base = alt_base;
	(*shared_region)->alternate_next = alt_next;
	SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
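/*
 * Editor's note (hypothetical caller, not in the original source): the new
 * region is returned with ref_count == 1 and that single reference belongs
 * to the caller, who eventually balances it:
 *
 *	shared_region_mapping_t sr;
 *	if (shared_region_mapping_create(text, text_sz, data, data_sz,
 *		map_array, base, &sr, alt, alt) == KERN_SUCCESS) {
 *		... use sr ...
 *		shared_region_mapping_dealloc(sr);	drops the creation ref
 *	}
 */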
/* LP64todo - need 64-bit safe version */
kern_return_t
shared_region_mapping_info(
	shared_region_mapping_t	shared_region,
	ipc_port_t		*text_region,
	vm_size_t		*text_size,
	ipc_port_t		*data_region,
	vm_size_t		*data_size,
	vm_offset_t		*region_mappings,
	vm_offset_t		*client_base,
	vm_offset_t		*alt_base,
	vm_offset_t		*alt_next,
	unsigned int		*fs_base,
	unsigned int		*system,
	int			*flags,
	shared_region_mapping_t	*next)
{
	shared_region_mapping_lock(shared_region);

	SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
			     shared_region));
	assert(shared_region->ref_count > 0);
	*text_region = shared_region->text_region;
	*text_size = shared_region->text_size;
	*data_region = shared_region->data_region;
	*data_size = shared_region->data_size;
	*region_mappings = shared_region->region_mappings;
	*client_base = shared_region->client_base;
	*alt_base = shared_region->alternate_base;
	*alt_next = shared_region->alternate_next;
	*flags = shared_region->flags;
	*fs_base = shared_region->fs_base;
	*system = shared_region->system;
	*next = shared_region->next;

	shared_region_mapping_unlock(shared_region);

	return KERN_SUCCESS;
}
kern_return_t
shared_region_mapping_ref(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
			     "ref_count=%d + 1\n",
			     shared_region,
			     shared_region ? shared_region->ref_count : 0));
	if(shared_region == NULL)
		return KERN_SUCCESS;
	assert(shared_region->ref_count > 0);
	hw_atomic_add(&shared_region->ref_count, 1);
	return KERN_SUCCESS;
}
static kern_return_t
shared_region_mapping_dealloc_lock(
	shared_region_mapping_t	shared_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	struct shared_region_task_mappings sm_info;
	shared_region_mapping_t next = NULL;
	int ref_count;

	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
			     "(shared_region=%p,%d,%d) ref_count=%d\n",
			     shared_region, need_sfh_lock, need_drl_lock,
			     shared_region ? shared_region->ref_count : 0));
	while (shared_region) {
		SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
				     "ref_count=%d\n",
				     shared_region, shared_region->ref_count));
		assert(shared_region->ref_count > 0);
		if ((ref_count =
		     hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
			shared_region_mapping_lock(shared_region);

			sm_info.text_region = shared_region->text_region;
			sm_info.text_size = shared_region->text_size;
			sm_info.data_region = shared_region->data_region;
			sm_info.data_size = shared_region->data_size;
			sm_info.region_mappings = shared_region->region_mappings;
			sm_info.client_base = shared_region->client_base;
			sm_info.alternate_base = shared_region->alternate_base;
			sm_info.alternate_next = shared_region->alternate_next;
			sm_info.flags = shared_region->flags;
			sm_info.self = (vm_offset_t)shared_region;

			if(shared_region->region_mappings) {
				lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
			}
			if(((vm_named_entry_t)
			    (shared_region->text_region->ip_kobject))
			   ->backing.map->pmap) {
				pmap_remove(((vm_named_entry_t)
					     (shared_region->text_region->ip_kobject))
					    ->backing.map->pmap,
					    sm_info.client_base,
					    sm_info.client_base + sm_info.text_size);
			}
			ipc_port_release_send(shared_region->text_region);
			if(shared_region->data_region)
				ipc_port_release_send(shared_region->data_region);
			if (shared_region->object_chain) {
				next = shared_region->object_chain->object_chain_region;
				kfree(shared_region->object_chain,
				      sizeof (struct shared_region_object_chain));
			} else {
				next = NULL;
			}
			shared_region_mapping_unlock(shared_region);
			SHARED_REGION_DEBUG(
				("shared_region_mapping_dealloc_lock(%p): "
				 "freeing\n",
				 shared_region));
			bzero((void *)shared_region,
			      sizeof (*shared_region)); /* FBDP debug */
			kfree(shared_region,
			      sizeof (struct shared_region_mapping));
			shared_region = next;
		} else {
			/* Stale indicates that a system region is no */
			/* longer in the default environment list.    */
			if((ref_count == 1) &&
			   (shared_region->flags & SHARED_REGION_SYSTEM)
			   && !(shared_region->flags & SHARED_REGION_STALE)) {
				SHARED_REGION_DEBUG(
					("shared_region_mapping_dealloc_lock"
					 "(%p): removing stale\n",
					 shared_region));
				remove_default_shared_region_lock(shared_region, need_sfh_lock, need_drl_lock);
			}
			break;
		}
	}
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
			     shared_region));
	return KERN_SUCCESS;
}
/*
 * Stub function; always indicates that the lock needs to be taken in the
 * call to lsf_remove_regions_mappings_lock().
 */
kern_return_t
shared_region_mapping_dealloc(
	shared_region_mapping_t	shared_region)
{
	SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
			     "(shared_region=%p)\n",
			     shared_region));
	if (shared_region) {
		assert(shared_region->ref_count > 0);
	}
	return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
}
static kern_return_t
shared_region_object_create(
	vm_size_t		size,
	ipc_port_t		*object_handle)
{
	vm_named_entry_t	user_entry;
	ipc_port_t		user_handle;
	ipc_port_t		previous;
	vm_map_t		new_map;

	user_entry = (vm_named_entry_t)
		kalloc(sizeof (struct vm_named_entry));
	if(user_entry == NULL) {
		return KERN_FAILURE;
	}
	named_entry_lock_init(user_entry);
	user_handle = ipc_port_alloc_kernel();

	ip_lock(user_handle);

	/* make a sonce right */
	user_handle->ip_sorights++;
	ip_reference(user_handle);

	user_handle->ip_destination = IP_NULL;
	user_handle->ip_receiver_name = MACH_PORT_NULL;
	user_handle->ip_receiver = ipc_space_kernel;

	/* make a send right */
	user_handle->ip_mscount++;
	user_handle->ip_srights++;
	ip_reference(user_handle);

	ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
	/* nsrequest unlocks user_handle */

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->is_pager = FALSE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	user_entry->ref_count = 1;

	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
			IKOT_NAMED_ENTRY);
	*object_handle = user_handle;
	return KERN_SUCCESS;
}
/* called for the non-default, private branch shared region support */
/* system default fields for fs_base and system supported are not   */
/* relevant as the system default flag is not set                   */
kern_return_t
shared_file_create_system_region(
	shared_region_mapping_t	*shared_region)
{
	ipc_port_t		text_handle;
	ipc_port_t		data_handle;
	long			text_size;
	long			data_size;
	vm_offset_t		mapping_array;
	kern_return_t		kret;

	SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));

	text_size = 0x10000000;
	data_size = 0x10000000;

	kret = shared_file_init(&text_handle,
				text_size, &data_handle, data_size,
				&mapping_array);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_file_init failed kret=0x%x\n",
				     kret));
		return kret;
	}
	kret = shared_region_mapping_create(text_handle,
					    text_size, data_handle, data_size,
					    mapping_array,
					    GLOBAL_SHARED_TEXT_SEGMENT,
					    shared_region,
					    SHARED_ALTERNATE_LOAD_BASE,
					    SHARED_ALTERNATE_LOAD_BASE);
	if(kret) {
		SHARED_REGION_DEBUG(("shared_file_create_system_region: "
				     "shared_region_mapping_create failed "
				     "kret=0x%x\n",
				     kret));
		return kret;
	}
	(*shared_region)->flags = 0;
	if(com_mapping_resource) {
		shared_region_mapping_ref(com_mapping_resource);
		(*shared_region)->next = com_mapping_resource;
	}

	SHARED_REGION_DEBUG(("shared_file_create_system_region() "
			     "-> shared_region=%p\n",
			     *shared_region));
	return KERN_SUCCESS;
}
/*
 * load a new default for a specified environment into the default share
 * regions list.  If a previous default exists for the environment
 * specification, it is returned along with its reference.  It is expected
 * that the new system region structure passes a reference.
 */
shared_region_mapping_t
update_default_shared_region(
	shared_region_mapping_t	new_system_region)
{
	shared_region_mapping_t	old_system_region;
	unsigned int		fs_base;
	unsigned int		system;

	SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
			     new_system_region));
	assert(new_system_region->ref_count > 0);
	fs_base = new_system_region->fs_base;
	system = new_system_region->system;
	new_system_region->flags |= SHARED_REGION_SYSTEM;
	default_regions_list_lock();
	old_system_region = default_environment_shared_regions;

	if((old_system_region != NULL) &&
	   (old_system_region->fs_base == fs_base) &&
	   (old_system_region->system == system)) {
		new_system_region->default_env_list =
			old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		default_environment_shared_regions = new_system_region;
		old_system_region->flags |= SHARED_REGION_STALE;
		default_regions_list_unlock();
		SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
				     "old=%p stale 1\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		return old_system_region;
	}
	if (old_system_region) {
		while(old_system_region->default_env_list != NULL) {
			if((old_system_region->default_env_list->fs_base == fs_base) &&
			   (old_system_region->default_env_list->system == system)) {
				shared_region_mapping_t tmp_system_region;

				tmp_system_region =
					old_system_region->default_env_list;
				new_system_region->default_env_list =
					tmp_system_region->default_env_list;
				tmp_system_region->default_env_list = NULL;
				old_system_region->default_env_list =
					new_system_region;
				old_system_region = tmp_system_region;
				old_system_region->flags |= SHARED_REGION_STALE;
				default_regions_list_unlock();
				SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
						     ": old=%p stale 2\n",
						     new_system_region,
						     old_system_region));
				assert(old_system_region->ref_count > 0);
				return old_system_region;
			}
			old_system_region = old_system_region->default_env_list;
		}
	}
	/* If we get here, we are at the end of the system list and we */
	/* did not find a pre-existing entry */
	if(old_system_region) {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "adding after old=%p\n",
				     new_system_region, old_system_region));
		assert(old_system_region->ref_count > 0);
		old_system_region->default_env_list = new_system_region;
	} else {
		SHARED_REGION_DEBUG(("update_default_system_region(%p): "
				     "new default\n",
				     new_system_region));
		default_environment_shared_regions = new_system_region;
	}
	assert(new_system_region->ref_count > 0);
	default_regions_list_unlock();
	return NULL;
}
/*
 * lookup a system_shared_region for the environment specified.  If one is
 * found, it is returned along with a reference against the structure
 */
shared_region_mapping_t
lookup_default_shared_region(
	unsigned int	fs_base,
	unsigned int	system)
{
	shared_region_mapping_t	system_region;
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	SHARED_REGION_DEBUG(("lookup_default_shared_region"
			     "(base=0x%x, system=0x%x)\n",
			     fs_base, system));
	while(system_region != NULL) {
		SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
				     ": system_region=%p base=0x%x system=0x%x"
				     " ref_count=%d\n",
				     fs_base, system, system_region,
				     system_region->fs_base,
				     system_region->system,
				     system_region->ref_count));
		assert(system_region->ref_count > 0);
		if((system_region->fs_base == fs_base) &&
		   (system_region->system == system)) {
			break;
		}
		system_region = system_region->default_env_list;
	}
	if(system_region)
		shared_region_mapping_ref(system_region);
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("lookup_default_system_region(0x%x,0x%x) -> %p\n",
			     fs_base, system, system_region));
	return system_region;
}
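/*
 * Editor's note (hypothetical caller, not in the original source): the
 * region returned here carries the extra reference taken just above, so a
 * caller that only inspects it must drop that reference:
 *
 *	sr = lookup_default_shared_region(ENV_DEFAULT_ROOT, cpu_type());
 *	if (sr != NULL) {
 *		... examine sr ...
 *		shared_region_mapping_dealloc(sr);
 *	}
 */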
/*
 * remove a system_region default if it appears in the default regions list.
 * Drop a reference on removal.
 */
__private_extern__ void
remove_default_shared_region_lock(
	shared_region_mapping_t	system_region,
	int			need_sfh_lock,
	int			need_drl_lock)
{
	shared_region_mapping_t old_system_region;

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
			     "(system_region=%p, %d, %d)\n",
			     system_region, need_sfh_lock, need_drl_lock));
	if (need_drl_lock) {
		default_regions_list_lock();
	}
	old_system_region = default_environment_shared_regions;

	if(old_system_region == NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
				     "-> default_env=NULL\n",
				     system_region));
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
			     "default_env=%p\n",
			     system_region, old_system_region));
	assert(old_system_region->ref_count > 0);
	if (old_system_region == system_region) {
		default_environment_shared_regions
			= old_system_region->default_env_list;
		old_system_region->default_env_list = NULL;
		old_system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p ref_count=%d STALE\n",
				     system_region, old_system_region,
				     old_system_region->ref_count));
		shared_region_mapping_dealloc_lock(old_system_region,
						   need_sfh_lock,
						   0);
		if (need_drl_lock) {
			default_regions_list_unlock();
		}
		return;
	}

	while(old_system_region->default_env_list != NULL) {
		SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
				     "old=%p->default_env=%p\n",
				     system_region, old_system_region,
				     old_system_region->default_env_list));
		assert(old_system_region->default_env_list->ref_count > 0);
		if(old_system_region->default_env_list == system_region) {
			shared_region_mapping_t dead_region;
			dead_region = old_system_region->default_env_list;
			old_system_region->default_env_list =
				dead_region->default_env_list;
			dead_region->default_env_list = NULL;
			dead_region->flags |= SHARED_REGION_STALE;
			SHARED_REGION_DEBUG(
				("remove_default_shared_region_lock(%p): "
				 "dead=%p ref_count=%d stale\n",
				 system_region, dead_region,
				 dead_region->ref_count));
			shared_region_mapping_dealloc_lock(dead_region,
							   need_sfh_lock,
							   0);
			if (need_drl_lock) {
				default_regions_list_unlock();
			}
			return;
		}
		old_system_region = old_system_region->default_env_list;
	}
	if (need_drl_lock) {
		default_regions_list_unlock();
	}
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
 * the only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
void
remove_default_shared_region(
	shared_region_mapping_t	system_region)
{
	SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
			     system_region));
	if (system_region) {
		assert(system_region->ref_count > 0);
	}
	remove_default_shared_region_lock(system_region, 1, 1);
}
void
remove_all_shared_regions(void)
{
	shared_region_mapping_t system_region;
	shared_region_mapping_t next_system_region;

	SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
	default_regions_list_lock();
	system_region = default_environment_shared_regions;

	if(system_region == NULL) {
		default_regions_list_unlock();
		return;
	}

	while(system_region != NULL) {
		next_system_region = system_region->default_env_list;
		system_region->default_env_list = NULL;
		system_region->flags |= SHARED_REGION_STALE;
		SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
				     "%p ref_count=%d stale\n",
				     system_region, system_region->ref_count));
		assert(system_region->ref_count > 0);
		shared_region_mapping_dealloc_lock(system_region, 1, 0);
		system_region = next_system_region;
	}
	default_environment_shared_regions = NULL;
	default_regions_list_unlock();
	SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
	LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
}
/* shared_com_boot_time_init initializes the common page shared data and */
/* text region.  This region is semi independent of the split libs       */
/* and so its policies have to be handled differently by the code that   */
/* manipulates the mapping of shared region environments.  However,      */
/* the shared region delivery system supports both                       */
void shared_com_boot_time_init(void);	/* forward */
void
shared_com_boot_time_init(void)
{
	kern_return_t		kret;
	vm_named_entry_t	named_entry;

	SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
	if(com_region_handle32) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle32 already set\n");
	}
	if(com_region_handle64) {
		panic("shared_com_boot_time_init: "
		      "com_region_handle64 already set\n");
	}

	/* create com page regions, 1 each for 32 and 64-bit code */
	if((kret = shared_region_object_create(
		   com_region_size,
		   &com_region_handle32))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 32-bit comm page\n");
		return;
	}
	if((kret = shared_region_object_create(
		   com_region_size,
		   &com_region_handle64))) {
		panic("shared_com_boot_time_init: "
		      "unable to create 64-bit comm page\n");
		return;
	}

	/* now export the underlying region/map */
	named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
	com_region_map32 = named_entry->backing.map;
	named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
	com_region_map64 = named_entry->backing.map;

	/* wrap the com region in its own shared file mapping structure */
	/* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
	kret = shared_region_mapping_create(com_region_handle32,
					    com_region_size, NULL, 0, 0,
					    _COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
					    0, 0);
	if (kret) {
		panic("shared_region_mapping_create failed for commpage");
	}
}
void
shared_file_boot_time_init(
	unsigned int	fs_base,
	unsigned int	system)
{
	long			text_region_size;
	long			data_region_size;
	shared_region_mapping_t	new_system_region;
	shared_region_mapping_t	old_default_env;

	SHARED_REGION_DEBUG(("shared_file_boot_time_init"
			     "(base=0x%x,system=0x%x)\n",
			     fs_base, system));
	text_region_size = 0x10000000;
	data_region_size = 0x10000000;
	shared_file_init(&shared_text_region_handle,
			 text_region_size,
			 &shared_data_region_handle,
			 data_region_size,
			 &shared_file_mapping_array);

	shared_region_mapping_create(shared_text_region_handle,
				     text_region_size,
				     shared_data_region_handle,
				     data_region_size,
				     shared_file_mapping_array,
				     GLOBAL_SHARED_TEXT_SEGMENT,
				     &new_system_region,
				     SHARED_ALTERNATE_LOAD_BASE,
				     SHARED_ALTERNATE_LOAD_BASE);

	new_system_region->fs_base = fs_base;
	new_system_region->system = system;
	new_system_region->flags = SHARED_REGION_SYSTEM;

	/* grab an extra reference for the caller */
	/* remember to grab before call to update */
	shared_region_mapping_ref(new_system_region);
	old_default_env = update_default_shared_region(new_system_region);
	/* hold an extra reference because these are the system */
	/* shared regions. */
	if(old_default_env != NULL)
		shared_region_mapping_dealloc(old_default_env);
	if(com_mapping_resource == NULL) {
		shared_com_boot_time_init();
	}
	shared_region_mapping_ref(com_mapping_resource);
	new_system_region->next = com_mapping_resource;
	vm_set_shared_region(current_task(), new_system_region);
	SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
			     fs_base, system));
}
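/*
 * Editor's note (assumption about the boot path, not in the original
 * source): a single call such as
 *
 *	shared_file_boot_time_init(ENV_DEFAULT_ROOT, cpu_type());
 *
 * leaves the new system region referenced twice: once by the default
 * environment list (via update_default_shared_region) and once by the
 * booting task (the extra reference taken before the update, handed to
 * vm_set_shared_region).
 */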
/* called at boot time, allocates two regions, each 256 megs in size */
/* these regions are later mapped into task spaces, allowing them to */
/* share the contents of the regions.  shared_file_init is part of   */
/* a shared_memory_server which not only allocates the backing maps  */
/* but also coordinates requests for space.                          */
static kern_return_t
shared_file_init(
	ipc_port_t	*text_region_handle,
	vm_size_t	text_region_size,
	ipc_port_t	*data_region_handle,
	vm_size_t	data_region_size,
	vm_offset_t	*file_mapping_array)
{
	shared_file_info_t	*sf_head;
	vm_offset_t		table_mapping_address;
	vm_size_t		data_table_size;
	int			hash_size;
	kern_return_t		kret;

	vm_object_t		buf_object;
	vm_map_entry_t		entry;
	vm_size_t		alloced;
	vm_offset_t		b;
	vm_page_t		p;

	SHARED_REGION_DEBUG(("shared_file_init()\n"));
	/* create text and data maps/regions */
	kret = shared_region_object_create(
		text_region_size,
		text_region_handle);
	if (kret) {
		return kret;
	}
	kret = shared_region_object_create(
		data_region_size,
		data_region_handle);
	if (kret) {
		ipc_port_release_send(*text_region_handle);
		return kret;
	}

	data_table_size = data_region_size >> 9;
	hash_size = data_region_size >> 14;
	table_mapping_address = data_region_size - data_table_size;

	if(shared_file_mapping_array == 0) {
		vm_map_address_t map_addr;
		buf_object = vm_object_allocate(data_table_size);

		if(vm_map_find_space(kernel_map, &map_addr,
				     data_table_size, 0, &entry)
		   != KERN_SUCCESS) {
			panic("shared_file_init: no space");
		}
		shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
		*file_mapping_array = shared_file_mapping_array;
		vm_map_unlock(kernel_map);
		entry->object.vm_object = buf_object;
		entry->offset = 0;

		for (b = *file_mapping_array, alloced = 0;
		     alloced < (hash_size +
				round_page(sizeof(struct sf_mapping)));
		     alloced += PAGE_SIZE, b += PAGE_SIZE) {
			vm_object_lock(buf_object);
			p = vm_page_alloc(buf_object, alloced);
			if (p == VM_PAGE_NULL) {
				panic("shared_file_init: no space");
			}
			p->busy = FALSE;
			vm_object_unlock(buf_object);
			pmap_enter(kernel_pmap, b, p->phys_page,
				   VM_PROT_READ | VM_PROT_WRITE,
				   ((unsigned int)(p->object->wimg_bits))
				   & VM_WIMG_MASK,
				   TRUE);
		}

		/* initialize loaded file array */
		sf_head = (shared_file_info_t *)*file_mapping_array;
		sf_head->hash = (queue_head_t *)
			(((int)*file_mapping_array) +
			 sizeof(struct shared_file_info));
		sf_head->hash_size = hash_size/sizeof(queue_head_t);
		mutex_init(&(sf_head->lock), 0);
		sf_head->hash_init = FALSE;

		mach_make_memory_entry(kernel_map, &data_table_size,
				       *file_mapping_array, VM_PROT_READ, &sfma_handle,
				       NULL);

		if (vm_map_wire(kernel_map,
				vm_map_trunc_page(*file_mapping_array),
				vm_map_round_page(*file_mapping_array +
						  hash_size +
						  round_page(sizeof(struct sf_mapping))),
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			panic("shared_file_init: No memory for data table");
		}

		lsf_zone = zinit(sizeof(struct load_file_ele),
				 data_table_size -
				 (hash_size + round_page_32(sizeof(struct sf_mapping))),
				 0, "load_file_server");

		zone_change(lsf_zone, Z_EXHAUST, TRUE);
		zone_change(lsf_zone, Z_COLLECT, FALSE);
		zone_change(lsf_zone, Z_EXPAND, FALSE);
		zone_change(lsf_zone, Z_FOREIGN, TRUE);

		/* initialize the global default environment lock */
		mutex_init(&default_regions_list_lock_data, 0);

	} else {
		*file_mapping_array = shared_file_mapping_array;
	}

	kret = vm_map(((vm_named_entry_t)
		       (*data_region_handle)->ip_kobject)->backing.map,
		      &table_mapping_address,
		      data_table_size, 0,
		      SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
		      sfma_handle, 0, FALSE,
		      VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);

	SHARED_REGION_DEBUG(("shared_file_init() done\n"));
	return kret;
}
static kern_return_t
shared_file_header_init(
	shared_file_info_t	*shared_file_header)
{
	vm_size_t	hash_table_size;
	vm_size_t	hash_table_offset;
	int		i;

	/* wire hash entry pool only as needed, since we are the only */
	/* users, we take a few liberties with the population of our  */
	/* zone. */
	static int allocable_hash_pages;
	static vm_offset_t hash_cram_address;

	hash_table_size = shared_file_header->hash_size
		* sizeof (struct queue_entry);
	hash_table_offset = hash_table_size +
		round_page(sizeof (struct sf_mapping));
	for (i = 0; i < shared_file_header->hash_size; i++)
		queue_init(&shared_file_header->hash[i]);

	allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
				/ PAGE_SIZE);
	hash_cram_address = ((vm_offset_t) shared_file_header)
		+ hash_table_offset;
	shared_file_available_hash_ele = 0;

	shared_file_header->hash_init = TRUE;

	if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
		int cram_pages, cram_size;

		cram_pages = allocable_hash_pages > 3 ?
			3 : allocable_hash_pages;
		cram_size = cram_pages * PAGE_SIZE;
		if (vm_map_wire(kernel_map, hash_cram_address,
				hash_cram_address + cram_size,
				VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
			printf("shared_file_header_init: "
			       "No memory for data table\n");
			return KERN_NO_SPACE;
		}
		allocable_hash_pages -= cram_pages;
		zcram(lsf_zone, (void *) hash_cram_address, cram_size);
		shared_file_available_hash_ele
			+= cram_size/sizeof(struct load_file_ele);
		hash_cram_address += cram_size;
	}

	return KERN_SUCCESS;
}
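/*
 * Editor's sketch of the cramming arithmetic (hypothetical numbers, not in
 * the original source): with 4K pages, one pass of the block above wires at
 * most cram_pages == 3 pages, so cram_size == 12288 bytes, and
 * shared_file_available_hash_ele grows by
 * 12288 / sizeof(struct load_file_ele).  The pool is only topped up while
 * fewer than 20 elements remain and allocable_hash_pages is still positive.
 */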
/*
 * map_shared_file:
 *
 * Attempt to map a split library into the shared region.  Check if the
 * mappings are already in place.
 */
kern_return_t
map_shared_file(
	int				map_cnt,
	struct shared_file_mapping_np	*mappings,
	memory_object_control_t		file_control,
	memory_object_size_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	vm_object_t		file_object;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*file_entry;
	loaded_mapping_t	*file_mapping;
	int			i;
	kern_return_t		ret;
	mach_vm_offset_t	slide;

	SHARED_REGION_DEBUG(("map_shared_file()\n"));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	mutex_lock(&shared_file_header->lock);

	/* If this is the first call to this routine, take the opportunity */
	/* to initialize the hash table which will be used to look-up      */
	/* mappings based on the file object */

	if(shared_file_header->hash_init == FALSE) {
		ret = shared_file_header_init(shared_file_header);
		if (ret != KERN_SUCCESS) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_NO_SPACE;
		}
	}

	/* Find the entry in the map associated with the current mapping */
	/* of the file object */
	file_object = memory_object_control_to_vm_object(file_control);

	file_entry = lsf_hash_lookup(shared_file_header->hash,
				     (void *) file_object,
				     mappings[0].sfm_file_offset,
				     shared_file_header->hash_size,
				     TRUE, TRUE, sm_info);
	if (file_entry) {
		/* File is loaded, check the load manifest for exact match */
		/* we simplify by requiring that the elements be the same  */
		/* size and in the same order rather than checking for     */
		/* semantic equivalence. */

		i = 0;
		file_mapping = file_entry->mappings;
		while(file_mapping != NULL) {
			if(i >= map_cnt) {
				mutex_unlock(&shared_file_header->lock);
				return KERN_INVALID_ARGUMENT;
			}
			if(((mappings[i].sfm_address)
			    & SHARED_DATA_REGION_MASK) !=
			   file_mapping->mapping_offset ||
			   mappings[i].sfm_size != file_mapping->size ||
			   mappings[i].sfm_file_offset != file_mapping->file_offset ||
			   mappings[i].sfm_init_prot != file_mapping->protection) {
				break;
			}
			file_mapping = file_mapping->next;
			i++;
		}
		if(i != map_cnt) {
			mutex_unlock(&shared_file_header->lock);
			return KERN_INVALID_ARGUMENT;
		}

		slide = file_entry->base_address - base_offset;
		if (slide_p != NULL) {
			/*
			 * File already mapped but at different address,
			 * and the caller is OK with the sliding.
			 */
			*slide_p = slide;
			ret = KERN_SUCCESS;
		} else {
			/*
			 * The caller doesn't want any sliding.  The file needs
			 * to be mapped at the requested address or not mapped.
			 */
			if (slide != 0) {
				/*
				 * The file is already mapped but at a different
				 * address.
				 * We fail.
				 * XXX should we attempt to load at
				 * requested address too ?
				 */
				ret = KERN_FAILURE;
			} else {
				/*
				 * The file is already mapped at the correct
				 * address.
				 * We're done !
				 */
				ret = KERN_SUCCESS;
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	} else {
		/* File is not loaded, let's attempt to load it */
		ret = lsf_map(mappings, map_cnt,
			      (void *)file_control,
			      file_size,
			      sm_info,
			      base_offset,
			      slide_p);
		if(ret == KERN_NO_SPACE) {
			shared_region_mapping_t	regions;
			shared_region_mapping_t	system_region;
			regions = (shared_region_mapping_t)sm_info->self;
			regions->flags |= SHARED_REGION_FULL;
			system_region = lookup_default_shared_region(
				regions->fs_base, regions->system);
			if (system_region == regions) {
				shared_region_mapping_t new_system_shared_region;
				shared_file_boot_time_init(
					regions->fs_base, regions->system);
				/* current task must stay with its current */
				/* regions, drop count on system_shared_region */
				/* and put back our original set */
				vm_get_shared_region(current_task(),
						     &new_system_shared_region);
				shared_region_mapping_dealloc_lock(
					new_system_shared_region, 0, 1);
				vm_set_shared_region(current_task(), regions);
			} else if (system_region != NULL) {
				shared_region_mapping_dealloc_lock(
					system_region, 0, 1);
			}
		}
		mutex_unlock(&shared_file_header->lock);
		return ret;
	}
}
/*
 * shared_region_cleanup:
 *
 * Deallocates all the mappings in the shared region, except those explicitly
 * specified in the "ranges" set of address ranges.
 */
kern_return_t
shared_region_cleanup(
	unsigned int			range_count,
	struct shared_region_range_np	*ranges,
	shared_region_task_mappings_t	sm_info)
{
	kern_return_t		kr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_named_entry;
	vm_map_t		text_submap, data_submap, submap, next_submap;
	unsigned int		i_range;
	vm_map_offset_t		range_start, range_end;
	vm_map_offset_t		submap_base, submap_end, submap_offset;
	vm_map_size_t		delete_size;

	struct shared_region_range_np	tmp_range;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Since we want to deallocate the holes between the "ranges",
	 * sort the array by increasing addresses.
	 */
	for (sorted_index = 0;
	     sorted_index < range_count;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		sort_min_address = ranges[sort_min_index].srr_address;

		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < range_count;
		     sort_index++) {
			if (ranges[sort_index].srr_address < sort_min_address) {
				/* lowest address so far... */
				sort_min_index = sort_index;
				sort_min_address =
					ranges[sort_min_index].srr_address;
			}
		}

		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_range = ranges[sort_min_index];
			ranges[sort_min_index] = ranges[sorted_index];
			ranges[sorted_index] = tmp_range;
		}
	}

	region_handle = (ipc_port_t) sm_info->text_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_submap = region_named_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_submap = region_named_entry->backing.map;

	submap = text_submap;
	next_submap = submap;
	submap_base = sm_info->client_base;
	submap_offset = 0;
	submap_end = submap_base + sm_info->text_size;
	for (i_range = 0;
	     i_range < range_count;
	     i_range++) {

		/* get the next range of addresses to keep */
		range_start = ranges[i_range].srr_address;
		range_end = range_start + ranges[i_range].srr_size;
		/* align them to page boundaries */
		range_start = vm_map_trunc_page(range_start);
		range_end = vm_map_round_page(range_end);

		/* make sure we don't go beyond the submap's boundaries */
		if (range_start < submap_base) {
			range_start = submap_base;
		} else if (range_start >= submap_end) {
			range_start = submap_end;
		}
		if (range_end < submap_base) {
			range_end = submap_base;
		} else if (range_end >= submap_end) {
			range_end = submap_end;
		}

		if (range_start > submap_base + submap_offset) {
			/*
			 * Deallocate everything between the last offset in the
			 * submap and the start of this range.
			 */
			delete_size = range_start -
				(submap_base + submap_offset);
			(void) vm_deallocate(submap,
					     submap_offset,
					     delete_size);
		} else {
			delete_size = 0;
		}

		/* skip to the end of the range */
		submap_offset += delete_size + (range_end - range_start);

		if (submap_base + submap_offset >= submap_end) {
			/* get to next submap */

			if (submap == data_submap) {
				/* no other submap after data: done ! */
				break;
			}

			/* get original range again */
			range_start = ranges[i_range].srr_address;
			range_end = range_start + ranges[i_range].srr_size;
			range_start = vm_map_trunc_page(range_start);
			range_end = vm_map_round_page(range_end);

			if (range_end > submap_end) {
				/*
				 * This last range overlaps with the next
				 * submap.  We need to process it again
				 * after switching submaps.  Otherwise, we'll
				 * just continue with the next range.
				 */
				i_range--;
			}

			if (submap == text_submap) {
				/*
				 * Switch to the data submap.
				 */
				submap = data_submap;
				submap_offset = 0;
				submap_base = sm_info->client_base +
					sm_info->text_size;
				submap_end = submap_base + sm_info->data_size;
			}
		}
	}

	if (submap_base + submap_offset < submap_end) {
		/* delete remainder of this submap, from "offset" to the end */
		(void) vm_deallocate(submap,
				     submap_offset,
				     submap_end - submap_base - submap_offset);
		/* if nothing to keep in data submap, delete it all */
		if (submap == text_submap) {
			submap = data_submap;
			submap_offset = 0;
			submap_base = sm_info->client_base + sm_info->text_size;
			submap_end = submap_base + sm_info->data_size;
			(void) vm_deallocate(data_submap,
					     submap_offset,
					     submap_end - submap_base);
		}
	}

	kr = KERN_SUCCESS;
	return kr;
}
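/*
 * Editor's worked example (hypothetical addresses, not in the original
 * source): with the text submap covering [client_base, client_base +
 * text_size) and two ranges to keep, A = [base+0x1000, base+0x3000) and
 * B = [base+0x8000, base+0x9000), the loop above deallocates [0, 0x1000)
 * and [0x3000, 0x8000) within the submap, and the tail code then deletes
 * [0x9000, text_size) plus the entire data submap if no kept range
 * reaches it.
 */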
/* A hash lookup function for the list of loaded files in */
/* shared_memory_server space.  */
static load_struct_t *
lsf_hash_lookup(
	queue_head_t			*hash_table,
	void				*file_object,
	vm_offset_t			recognizableOffset,
	int				size,
	boolean_t			regular,
	boolean_t			alternate,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t		bucket;
	load_struct_t			*entry;
	shared_region_mapping_t		target_region;
	int				depth;

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));

	bucket = &(hash_table[load_file_hash((int)file_object, size)]);
	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {

		if ((entry->file_object == (int)file_object) &&
		    (entry->file_offset == recognizableOffset)) {
			target_region = (shared_region_mapping_t)sm_info->self;
			depth = target_region->depth;
			while(target_region) {
				if((!(sm_info->self)) ||
				   ((target_region == entry->regions_instance) &&
				    (target_region->depth >= entry->depth))) {
					if(alternate &&
					   entry->base_address >= sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "alt=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   alternate, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
					if (regular &&
					    entry->base_address < sm_info->alternate_base) {
						LSF_DEBUG(("lsf_hash_lookup: "
							   "reg=%d found entry %p "
							   "(base=0x%x alt_base=0x%x)\n",
							   regular, entry,
							   entry->base_address,
							   sm_info->alternate_base));
						return entry;
					}
				}
				if(target_region->object_chain) {
					target_region = (shared_region_mapping_t)
						target_region->object_chain->object_chain_region;
					depth = target_region->object_chain->depth;
				} else {
					target_region = NULL;
				}
			}
		}
	}

	LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
		   "reg=%d alt=%d sm_info=%p NOT FOUND\n",
		   hash_table, file_object, recognizableOffset, size,
		   regular, alternate, sm_info));
	return (load_struct_t *)0;
}
__private_extern__ load_struct_t *
lsf_remove_regions_mappings_lock(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info,
	int				need_sfh_lock)
{
	int			i;
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;
	load_struct_t		*next_entry;

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
		   "sfh=%p\n",
		   region, sm_info, shared_file_header));
	if (need_sfh_lock)
		mutex_lock(&shared_file_header->lock);
	if(shared_file_header->hash_init == FALSE) {
		if (need_sfh_lock)
			mutex_unlock(&shared_file_header->lock);
		LSF_DEBUG(("lsf_remove_regions_mappings_lock"
			   "(region=%p,sm_info=%p): not inited\n",
			   region, sm_info));
		return NULL;
	}
	for(i = 0; i < shared_file_header->hash_size; i++) {
		bucket = &shared_file_header->hash[i];
		for (entry = (load_struct_t *)queue_first(bucket);
		     !queue_end(bucket, &entry->links);) {
			next_entry = (load_struct_t *)queue_next(&entry->links);
			if(region == entry->regions_instance) {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p: "
					   "unloading\n",
					   entry, region));
				lsf_unload((void *)entry->file_object,
					   entry->base_address, sm_info);
			} else {
				LSF_DEBUG(("lsf_remove_regions_mapping_lock: "
					   "entry %p region %p target region %p: "
					   "not unloading\n",
					   entry, entry->regions_instance, region));
			}
			entry = next_entry;
		}
	}
	if (need_sfh_lock)
		mutex_unlock(&shared_file_header->lock);
	LSF_DEBUG(("lsf_removed_regions_mapping_lock done\n"));

	return NULL;	/* XXX */
}
/*
 * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
 * only caller.  Remove this stub function and the corresponding symbol
 * export for Merlot.
 */
__private_extern__ load_struct_t *
lsf_remove_regions_mappings(
	shared_region_mapping_t		region,
	shared_region_task_mappings_t	sm_info)
{
	return lsf_remove_regions_mappings_lock(region, sm_info, 1);
}
/* Removes a map_list, (list of loaded extents) for a file from */
/* the loaded file hash table.  */
static load_struct_t *
lsf_hash_delete(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	register queue_t	bucket;
	shared_file_info_t	*shared_file_header;
	load_struct_t		*entry;

	LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
		   file_object, base_offset, sm_info));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;

	bucket = &shared_file_header->hash
		[load_file_hash((int)file_object, shared_file_header->hash_size)];

	for (entry = (load_struct_t *)queue_first(bucket);
	     !queue_end(bucket, &entry->links);
	     entry = (load_struct_t *)queue_next(&entry->links)) {
		if((!(sm_info->self)) || ((shared_region_mapping_t)
					  sm_info->self == entry->regions_instance)) {
			if ((entry->file_object == (int) file_object) &&
			    (entry->base_address == base_offset)) {
				queue_remove(bucket, entry,
					     load_struct_ptr_t, links);
				LSF_DEBUG(("lsf_hash_delete: found it\n"));
				return entry;
			}
		}
	}

	LSF_DEBUG(("lsf_hash_delete; not found\n"));
	return (load_struct_t *)0;
}
/* Inserts a new map_list, (list of loaded file extents), into the */
/* server loaded file hash table. */
static void
lsf_hash_insert(
	load_struct_t			*entry,
	shared_region_task_mappings_t	sm_info)
{
	shared_file_info_t *shared_file_header;

	LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
		   entry, sm_info, entry->file_object, entry->base_address));

	shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
	queue_enter(&shared_file_header->hash
		    [load_file_hash(entry->file_object,
				    shared_file_header->hash_size)],
		    entry, load_struct_ptr_t, links);
}
/*
 * lsf_slide:
 *
 * Look in the shared region, starting from the end, for a place to fit all the
 * mappings while respecting their relative offsets.
 */
static kern_return_t
lsf_slide(
	unsigned int			map_cnt,
	struct shared_file_mapping_np	*mappings_in,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		*base_offset_p)
{
	mach_vm_offset_t	max_mapping_offset;
	int			i;
	vm_map_entry_t		map_entry, prev_entry, next_entry;
	mach_vm_offset_t	prev_hole_start, prev_hole_end;
	mach_vm_offset_t	mapping_offset, mapping_end_offset;
	mach_vm_offset_t	base_offset;
	mach_vm_size_t		mapping_size;
	mach_vm_offset_t	wiggle_room, wiggle;
	vm_map_t		text_map, data_map, map;
	vm_named_entry_t	region_entry;
	ipc_port_t		region_handle;
	kern_return_t		kr;

	struct shared_file_mapping_np	*mappings, tmp_mapping;
	unsigned int			sort_index, sorted_index;
	vm_map_offset_t			sort_min_address;
	unsigned int			sort_min_index;

	/*
	 * Sort the mappings array, so that we can try and fit them in
	 * in the right order as we progress along the VM maps.
	 *
	 * We can't modify the original array (the original order is
	 * important when doing lookups of the mappings), so copy it first.
	 */

	kr = kmem_alloc(kernel_map,
			(vm_offset_t *) &mappings,
			(vm_size_t) (map_cnt * sizeof (mappings[0])));
	if (kr != KERN_SUCCESS) {
		return KERN_NO_SPACE;
	}

	bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));

	max_mapping_offset = 0;
	for (sorted_index = 0;
	     sorted_index < map_cnt;
	     sorted_index++) {

		/* first remaining entry is our new starting point */
		sort_min_index = sorted_index;
		mapping_end_offset = ((mappings[sort_min_index].sfm_address &
				       SHARED_TEXT_REGION_MASK) +
				      mappings[sort_min_index].sfm_size);
		sort_min_address = mapping_end_offset;
		/* compute the highest mapping_offset as well... */
		if (mapping_end_offset > max_mapping_offset) {
			max_mapping_offset = mapping_end_offset;
		}
		/* find the lowest mapping_offset in the remaining entries */
		for (sort_index = sorted_index + 1;
		     sort_index < map_cnt;
		     sort_index++) {

			mapping_end_offset =
				((mappings[sort_index].sfm_address &
				  SHARED_TEXT_REGION_MASK) +
				 mappings[sort_index].sfm_size);

			if (mapping_end_offset < sort_min_address) {
				/* lowest mapping_offset so far... */
				sort_min_index = sort_index;
				sort_min_address = mapping_end_offset;
			}
		}
		if (sort_min_index != sorted_index) {
			/* swap entries */
			tmp_mapping = mappings[sort_min_index];
			mappings[sort_min_index] = mappings[sorted_index];
			mappings[sorted_index] = tmp_mapping;
		}
	}

	max_mapping_offset = vm_map_round_page(max_mapping_offset);

	/* start from the end of the shared area */
	base_offset = sm_info->text_size;

	/* can all the mappings fit ? */
	if (max_mapping_offset > base_offset) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  map_cnt * sizeof (mappings[0]));
		return KERN_FAILURE;
	}

	/*
	 * Align the last mapping to the end of the submaps
	 * and start from there.
	 */
	base_offset -= max_mapping_offset;

	region_handle = (ipc_port_t) sm_info->text_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	text_map = region_entry->backing.map;

	region_handle = (ipc_port_t) sm_info->data_region;
	region_entry = (vm_named_entry_t) region_handle->ip_kobject;
	data_map = region_entry->backing.map;

	vm_map_lock_read(text_map);
	vm_map_lock_read(data_map);

start_over:
	/*
	 * At first, we can wiggle all the way from our starting point
	 * (base_offset) towards the start of the map (0), if needed.
	 */
	wiggle_room = base_offset;

	for (i = (signed) map_cnt - 1; i >= 0; i--) {
		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			/* copy-on-write mappings are in the data submap */
			map = data_map;
		} else {
			/* other mappings are in the text submap */
			map = text_map;
		}

		/* get the offset within the appropriate submap */
		mapping_offset = (mappings[i].sfm_address &
				  SHARED_TEXT_REGION_MASK);
		mapping_size = mappings[i].sfm_size;
		mapping_end_offset = mapping_offset + mapping_size;
		mapping_offset = vm_map_trunc_page(mapping_offset);
		mapping_end_offset = vm_map_round_page(mapping_end_offset);
		mapping_size = mapping_end_offset - mapping_offset;

		while (TRUE) {
			if (vm_map_lookup_entry(map,
						base_offset + mapping_offset,
						&map_entry)) {
				/*
				 * The start address for that mapping
				 * is already mapped: no fit.
				 * Locate the hole immediately before this map
				 * entry.
				 */
				prev_hole_end = map_entry->vme_start;
				prev_entry = map_entry->vme_prev;
				if (prev_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends here */
					prev_hole_start = prev_entry->vme_end;
				}
			} else {
				/*
				 * The start address for that mapping is not
				 * mapped.
				 * Locate the start and end of the hole
				 * at that location.
				 */
				/* map_entry is the previous entry */
				if (map_entry == vm_map_to_entry(map)) {
					/* no previous entry */
					prev_hole_start = map->min_offset;
				} else {
					/* previous entry ends there */
					prev_hole_start = map_entry->vme_end;
				}
				next_entry = map_entry->vme_next;
				if (next_entry == vm_map_to_entry(map)) {
					/* no next entry */
					prev_hole_end = map->max_offset;
				} else {
					prev_hole_end = next_entry->vme_start;
				}
			}

			if (prev_hole_end <= base_offset + mapping_offset) {
				/* hole is to our left: try and wiggle to fit */
				wiggle = base_offset + mapping_offset - prev_hole_end + mapping_size;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;
			}

			if (prev_hole_end >
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole extends further to the right
				 * than what we need.  Ignore the extra space.
				 */
				prev_hole_end = (base_offset + mapping_offset +
						 mapping_size);
			}

			if (prev_hole_end <
			    base_offset + mapping_offset + mapping_size) {
				/*
				 * The hole is not big enough to establish
				 * the mapping right there:  wiggle towards
				 * the beginning of the hole so that the end
				 * of our mapping fits in the hole...
				 */
				wiggle = base_offset + mapping_offset
					+ mapping_size - prev_hole_end;
				if (wiggle > base_offset) {
					/* we're getting out of the map */
					kr = KERN_FAILURE;
					goto done;
				}
				base_offset -= wiggle;
				if (wiggle > wiggle_room) {
					/* can't wiggle that much: start over */
					goto start_over;
				}
				/* account for the wiggling done */
				wiggle_room -= wiggle;

				/* keep searching from this new base */
				continue;
			}

			if (prev_hole_start > base_offset + mapping_offset) {
				/* no hole found: keep looking */
				continue;
			}

			/* compute wiggling room at this hole */
			wiggle = base_offset + mapping_offset - prev_hole_start;
			if (wiggle < wiggle_room) {
				/* less wiggle room than before... */
				wiggle_room = wiggle;
			}

			/* found a hole that fits: skip to next mapping */
			break;
		} /* while we look for a hole */
	} /* for each mapping */

	*base_offset_p = base_offset;
	kr = KERN_SUCCESS;

done:
	vm_map_unlock_read(text_map);
	vm_map_unlock_read(data_map);

	kmem_free(kernel_map,
		  (vm_offset_t) mappings,
		  map_cnt * sizeof (mappings[0]));

	return kr;
}
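/*
 * Editor's worked example of the wiggle logic (hypothetical numbers, not in
 * the original source): say base_offset == 0x1000000 and a mapping wants
 * [base+0, base+0x2000) but that range is occupied, with the hole before it
 * ending at prev_hole_end == 0xffe000.  Then
 * wiggle == 0x1000000 + 0 - 0xffe000 + 0x2000 == 0x4000, base_offset slides
 * down to 0xffc000, and wiggle_room shrinks by 0x4000.  If a later mapping
 * needs more wiggle than the room left, the scan restarts from the new base
 * via "goto start_over".
 */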
/*
 * lsf_map:
 *
 * Attempt to establish the mappings for a split library into the shared region.
 */
static kern_return_t
lsf_map(
	struct shared_file_mapping_np	*mappings,
	int				map_cnt,
	void				*file_control,
	memory_object_offset_t		file_size,
	shared_region_task_mappings_t	sm_info,
	mach_vm_offset_t		base_offset,
	mach_vm_offset_t		*slide_p)
{
	load_struct_t		*entry;
	loaded_mapping_t	*file_mapping;
	loaded_mapping_t	**tptr;
	ipc_port_t		region_handle;
	vm_named_entry_t	region_entry;
	mach_port_t		map_port;
	vm_object_t		file_object;
	kern_return_t		kr;
	int			i;
	mach_vm_offset_t	original_base_offset;

	/* get the VM object from the file's memory object handle */
	file_object = memory_object_control_to_vm_object(file_control);

	original_base_offset = base_offset;

	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p)"
		   "\n",
		   map_cnt, file_object,
		   sm_info));

restart_after_slide:
	/* get a new "load_struct_t" to describe the mappings for that file */
	entry = (load_struct_t *)zalloc(lsf_zone);
	LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
	LSF_DEBUG(("lsf_map"
		   "(cnt=%d,file=%p,sm_info=%p) "
		   "entry=%p\n",
		   map_cnt, file_object,
		   sm_info, entry));
	if (entry == NULL) {
		printf("lsf_map: unable to allocate memory\n");
		return KERN_NO_SPACE;
	}
	shared_file_available_hash_ele--;
	entry->file_object = (int)file_object;
	entry->mapping_cnt = map_cnt;
	entry->mappings = NULL;
	entry->links.prev = (queue_entry_t) 0;
	entry->links.next = (queue_entry_t) 0;
	entry->regions_instance = (shared_region_mapping_t)sm_info->self;
	entry->depth = ((shared_region_mapping_t)sm_info->self)->depth;
	entry->file_offset = mappings[0].sfm_file_offset;

	/* insert the new file entry in the hash table, for later lookups */
	lsf_hash_insert(entry, sm_info);

	/* where we should add the next mapping description for that file */
	tptr = &(entry->mappings);

	entry->base_address = base_offset;

	/* establish each requested mapping */
	for (i = 0; i < map_cnt; i++) {
		mach_vm_offset_t	target_address;
		mach_vm_offset_t	region_mask;

		if (mappings[i].sfm_init_prot & VM_PROT_COW) {
			region_handle = (ipc_port_t)sm_info->data_region;
			region_mask = SHARED_DATA_REGION_MASK;
			if ((((mappings[i].sfm_address + base_offset)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
			    (((mappings[i].sfm_address + base_offset +
			       mappings[i].sfm_size - 1)
			      & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		} else {
			region_mask = SHARED_TEXT_REGION_MASK;
			region_handle = (ipc_port_t)sm_info->text_region;
			if (((mappings[i].sfm_address + base_offset)
			     & GLOBAL_SHARED_SEGMENT_MASK) ||
			    ((mappings[i].sfm_address + base_offset +
			      mappings[i].sfm_size - 1)
			     & GLOBAL_SHARED_SEGMENT_MASK)) {
				lsf_unload(file_object,
					   entry->base_address, sm_info);
				return KERN_INVALID_ARGUMENT;
			}
		}
		if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
		    ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
		     (file_size))) {
			lsf_unload(file_object, entry->base_address, sm_info);
			return KERN_INVALID_ARGUMENT;
		}
		target_address = entry->base_address +
			((mappings[i].sfm_address) & region_mask);
		if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
			map_port = MACH_PORT_NULL;
		} else {
			map_port = (ipc_port_t) file_object->pager;
		}
		region_entry = (vm_named_entry_t) region_handle->ip_kobject;

		if (mach_vm_map(region_entry->backing.map,
				&target_address,
				vm_map_round_page(mappings[i].sfm_size),
				0,
				VM_FLAGS_FIXED,
				map_port,
				mappings[i].sfm_file_offset,
				TRUE,
				(mappings[i].sfm_init_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				(mappings[i].sfm_max_prot &
				 (VM_PROT_READ|VM_PROT_EXECUTE)),
				VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
			lsf_unload(file_object, entry->base_address, sm_info);

			if (slide_p != NULL) {
				/*
				 * Requested mapping failed but the caller
				 * is OK with sliding the library in the
				 * shared region, so let's try and slide it...
				 */

				/* lookup an appropriate spot */
				kr = lsf_slide(map_cnt, mappings,
					       sm_info, &base_offset);
				if (kr == KERN_SUCCESS) {
					/* try and map it there ... */
					entry->base_address = base_offset;
					goto restart_after_slide;
				}
				/* couldn't slide ... */
			}

			return KERN_FAILURE;
		}

		/* record this mapping */
		file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
		if (file_mapping == NULL) {
			lsf_unload(file_object, entry->base_address, sm_info);
			printf("lsf_map: unable to allocate memory\n");
			return KERN_NO_SPACE;
		}
		shared_file_available_hash_ele--;
		file_mapping->mapping_offset = (mappings[i].sfm_address)
			& region_mask;
		file_mapping->size = mappings[i].sfm_size;
		file_mapping->file_offset = mappings[i].sfm_file_offset;
		file_mapping->protection = mappings[i].sfm_init_prot;
		file_mapping->next = NULL;
		LSF_DEBUG(("lsf_map: file_mapping %p "
			   "for offset=0x%x size=0x%x\n",
			   file_mapping, file_mapping->mapping_offset,
			   file_mapping->size));

		/* and link it to the file entry */
		*tptr = file_mapping;

		/* where to put the next mapping's description */
		tptr = &(file_mapping->next);
	}

	if (slide_p != NULL) {
		*slide_p = base_offset - original_base_offset;
	}

	if (sm_info->flags & SHARED_REGION_STANDALONE) {
		/*
		 * We have a standalone and private shared region, so we
		 * don't really need to keep the information about each file
		 * and each mapping.  Just deallocate it all.
		 * XXX we still have the hash table, though...
		 */
		lsf_deallocate(file_object, entry->base_address, sm_info,
			       FALSE);
	}

	LSF_DEBUG(("lsf_map: done\n"));
	return KERN_SUCCESS;
}
/* finds the file_object extent list in the shared memory hash table       */
/* If one is found the associated extents in shared memory are deallocated */
/* and the extent list is freed */
static void
lsf_unload(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info)
{
	lsf_deallocate(file_object, base_offset, sm_info, TRUE);
}
/*
 * lsf_deallocate:
 *
 * Deallocates all the "shared region" internal data structures describing
 * the file and its mappings.
 * Also deallocate the actual file mappings if requested ("unload" arg).
 */
static void
lsf_deallocate(
	void				*file_object,
	vm_offset_t			base_offset,
	shared_region_task_mappings_t	sm_info,
	boolean_t			unload)
{
	load_struct_t		*entry;
	loaded_mapping_t	*map_ele;
	loaded_mapping_t	*back_ptr;

	LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
		   file_object, base_offset, sm_info, unload));
	entry = lsf_hash_delete(file_object, base_offset, sm_info);
	if(entry) {
		map_ele = entry->mappings;
		while(map_ele != NULL) {
			if (unload) {
				ipc_port_t		region_handle;
				vm_named_entry_t	region_entry;

				if(map_ele->protection & VM_PROT_COW) {
					region_handle = (ipc_port_t)
						sm_info->data_region;
				} else {
					region_handle = (ipc_port_t)
						sm_info->text_region;
				}
				region_entry = (vm_named_entry_t)
					region_handle->ip_kobject;

				vm_deallocate(region_entry->backing.map,
					      (entry->base_address +
					       map_ele->mapping_offset),
					      map_ele->size);
			}
			back_ptr = map_ele;
			map_ele = map_ele->next;
			LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
				   "offset 0x%x size 0x%x\n",
				   back_ptr, back_ptr->mapping_offset,
				   back_ptr->size));
			zfree(lsf_zone, back_ptr);
			shared_file_available_hash_ele++;
		}
		LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
		LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
		zfree(lsf_zone, entry);
		shared_file_available_hash_ele++;
	}
	LSF_DEBUG(("lsf_unload: done\n"));
}
/* integer is from 1 to 100 and represents percent full */
unsigned int
lsf_mapping_pool_gauge(void)
{
	return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
}
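/*
 * Editor's example (hypothetical numbers, not in the original source): if
 * lsf_zone holds count == 512 elements of elem_size == 64 bytes against
 * max_size == 65536 bytes, the gauge reports (512 * 64 * 100) / 65536 == 50,
 * i.e. the mapping pool is 50 percent full.
 */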